msm-3.4 (commit 35cca8ba3ee0e6a2085dbcac48fb2ccbaa72ba98) video/gpu/iommu .. and all the hacks that go with that
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5ee7efd..216e16e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,7 @@
 	select HAVE_AOUT
 	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE if PCI || ISA || PCMCIA
+	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
 	select HAVE_MEMBLOCK
 	select RTC_LIB
@@ -750,6 +751,7 @@
 	select HAVE_CLK_PREPARE
 	select NEED_MACH_MEMORY_H
 	select NEED_MACH_IO_H
+	select SOC_BUS
 	help
 	  Support for Qualcomm MSM/QSD based systems.  This runs on the
 	  apps processor of the MSM/QSD and depends on a shared memory
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 595ecd2..9d7eb53 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,7 +173,8 @@
 	read_lock_irqsave(&device_info->lock, flags);
 
 	list_for_each_entry(b, &device_info->safe_buffers, node)
-		if (b->safe_dma_addr == safe_dma_addr) {
+		if (b->safe_dma_addr <= safe_dma_addr &&
+		    b->safe_dma_addr + b->size > safe_dma_addr) {
 			rb = b;
 			break;
 		}
@@ -254,7 +255,7 @@
 	if (buf == NULL) {
 		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
 		       __func__, ptr);
-		return ~0;
+		return DMA_ERROR_CODE;
 	}
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir)
+static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
 {
 	dma_addr_t dma_addr;
 	int ret;
@@ -320,21 +322,20 @@
 
 	ret = needs_bounce(dev, dma_addr, size);
 	if (ret < 0)
-		return ~0;
+		return DMA_ERROR_CODE;
 
 	if (ret == 0) {
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
 		return dma_addr;
 	}
 
 	if (PageHighMem(page)) {
 		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-		return ~0;
+		return DMA_ERROR_CODE;
 	}
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@
  * the safe buffer.  (basically return things back to the way they
  * should be)
  */
-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
+static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct safe_buffer *buf;
 
@@ -352,19 +353,18 @@
 
 	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
 	if (!buf) {
-		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
-			dma_addr & ~PAGE_MASK, size, dir);
+		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
 		return;
 	}
 
 	unmap_single(dev, buf, size, dir);
 }
-EXPORT_SYMBOL(__dma_unmap_page);
 
-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-		__func__, addr, off, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+		__func__, addr, sz, dir);
@@ -373,6 +373,8 @@
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -388,12 +390,21 @@
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+static void dmabounce_sync_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
+}
+
+static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-		__func__, addr, off, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+		__func__, addr, sz, dir);
@@ -402,6 +413,8 @@
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -417,7 +430,38 @@
 	}
 	return 0;
 }
-EXPORT_SYMBOL(dmabounce_sync_for_device);
+
+static void dmabounce_sync_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
+}
+
+static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (dev->archdata.dmabounce)
+		return 0;
+
+	return arm_dma_ops.set_dma_mask(dev, dma_mask);
+}
+
+static struct dma_map_ops dmabounce_ops = {
+	.alloc			= arm_dma_alloc,
+	.free			= arm_dma_free,
+	.mmap			= arm_dma_mmap,
+	.map_page		= dmabounce_map_page,
+	.unmap_page		= dmabounce_unmap_page,
+	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
+	.sync_single_for_device	= dmabounce_sync_for_device,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= dmabounce_set_mask,
+};
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 		const char *name, unsigned long size)
@@ -479,6 +523,7 @@
 #endif
 
 	dev->archdata.dmabounce = device_info;
+	set_dma_ops(dev, &dmabounce_ops);
 
 	dev_info(dev, "dmabounce: registered device\n");
 
@@ -497,6 +542,7 @@
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 
 	dev->archdata.dmabounce = NULL;
+	set_dma_ops(dev, NULL);
 
 	if (!device_info) {
 		dev_warn(dev,
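
With the hunks above, dmabounce stops exporting __dma_map_page()/__dma_unmap_page() and instead packages everything into dmabounce_ops, installed per device via set_dma_ops(). Platform code keeps the same entry point; a minimal sketch of a registration follows, where my_needs_bounce(), my_board_init() and the 64MB window are invented for illustration:

	/* Illustrative sketch only; the callback name and window size
	 * are assumptions, not part of this patch. */
	static int my_needs_bounce(struct device *dev, dma_addr_t dma, size_t size)
	{
		/* Bounce any buffer the device's DMA window cannot reach. */
		return (dma + size) > SZ_64M;
	}

	static int my_board_init(struct device *dev)
	{
		/* 512-byte small-buffer pool, 4KB large-buffer pool. */
		return dmabounce_register_dev(dev, 512, 4096, my_needs_bounce);
	}

After registration, every dma_map_page()/dma_sync_single_*() call on that device dispatches through dmabounce_ops rather than arm_dma_ops.
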
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 0f8aacd..b69c0d3 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,11 +7,15 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
+	struct dma_map_ops	*dma_ops;
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
 #ifdef CONFIG_IOMMU_API
-	void *iommu; 
+	void *iommu; /* private IOMMU data */
+#endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+	struct dma_iommu_mapping	*mapping;
 #endif
 };
 
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 0000000..799b094
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+
+struct dma_iommu_mapping {
+	/* iommu specific data */
+	struct iommu_domain	*domain;
+
+	void			*bitmap;
+	size_t			bits;
+	unsigned int		order;
+	dma_addr_t		base;
+
+	spinlock_t		lock;
+	struct kref		kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+			 int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+					struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
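
The new header is the whole contract for IOMMU-backed streaming DMA: a dma_iommu_mapping couples an iommu_domain with a bitmap IOVA allocator over the window starting at base. Rough bring-up order, sketched under the assumption of a platform-bus device and a free 128MB window at 0x80000000 (both values illustrative):

	struct dma_iommu_mapping *mapping;
	int ret;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}
	/* dev->archdata.mapping is now set; dma_map_*() on 'dev' hands
	 * out IOVAs from the bitmap instead of relying on phys == dma. */
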
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 2277ef7..abb222f 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,15 +5,44 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
+#define DMA_ERROR_CODE	(~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (dev && dev->archdata.dma_ops)
+		return dev->archdata.dma_ops;
+	return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+	BUG_ON(!dev);
+	dev->archdata.dma_ops = ops;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
+
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
 
+/*
+ * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
+ * functions used internally by the DMA-mapping API to provide DMA
+ * addresses. They must not be used by drivers.
+ */
 #ifndef __arch_pfn_to_dma
 static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
@@ -56,54 +85,18 @@
 }
 #endif
 
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
+/*
+ * DMA errors are defined by all-bits-set in the DMA address.
+ */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return dma_addr == ~0;
+	return dma_addr == DMA_ERROR_CODE;
 }
 
+/*
+ * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
+ * function so drivers using this API are highlighted with build warnings.
+ */
 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
 		dma_addr_t *handle, gfp_t gfp)
 {
@@ -116,6 +109,16 @@
 }
 
 
+/*
+ * dma_coherent_pre_ops - barrier functions for coherent memory before DMA.
+ * A barrier is required to ensure memory operations are complete before the
+ * initiation of a DMA xfer.
+ * If the coherent memory is Strongly Ordered
+ * - pre ARMv7 and 8x50 guarantees ordering wrt other mem accesses
+ * - ARMv7 guarantees ordering only within a 1KB block, so we need a barrier
+ * If coherent memory is normal then we need a barrier to prevent
+ * reordering.
+ */
 static inline void dma_coherent_pre_ops(void)
 {
 #if COHERENT_IS_NORMAL == 1
@@ -127,6 +130,12 @@
 		barrier();
 #endif
 }
+/*
+ * dma_coherent_post_ops - barrier functions for coherent memory after DMA.
+ * If the coherent memory is Strongly Ordered we don't need a barrier since
+ * there are no speculative fetches to Strongly Ordered memory.
+ * If coherent memory is normal then we need a barrier to prevent reordering.
+ */
 static inline void dma_coherent_post_ops(void)
 {
 #if COHERENT_IS_NORMAL == 1
@@ -139,103 +148,218 @@
 #endif
 }
 
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern int dma_supported(struct device *dev, u64 mask);
 
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+/**
+ * arm_dma_alloc - allocate consistent memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
+ *
+ * Allocate some memory for a device for performing DMA.  This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
+ */
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+			   gfp_t gfp, struct dma_attrs *attrs);
 
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-		void *, dma_addr_t, size_t);
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *cpu_addr;
+	BUG_ON(!ops);
+
+	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+	return cpu_addr;
+}
+
+/**
+ * arm_dma_free - free memory allocated by arm_dma_alloc
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: size of memory originally requested in dma_alloc_coherent
+ * @cpu_addr: CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
+ *
+ * Free (and unmap) a DMA buffer previously allocated by
+ * arm_dma_alloc().
+ *
+ * References to memory and mappings associated with cpu_addr/handle
+ * during and after this call are illegal.
+ */
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+			 dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+/**
+ * arm_dma_mmap - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ * @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
+ * into user space.  The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			struct dma_attrs *attrs);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
+
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+static inline void *dma_alloc_stronglyordered(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
+
+static inline void dma_free_stronglyordered(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mmap_stronglyordered(struct device *dev,
+		struct vm_area_struct *vma, void *cpu_addr,
+		dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+
+static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
+
+static inline void dma_free_nonconsistent(struct device *dev, size_t size,
+				     void *cpu_addr, dma_addr_t dma_handle)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mmap_nonconsistent(struct device *dev,
+		struct vm_area_struct *vma, void *cpu_addr,
+		dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
 
 
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
-		gfp_t);
 
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
-	dma_free_coherent(dev,size,cpu_addr,handle)
-
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
-		void *, dma_addr_t, size_t);
-
+/*
+ * This can be called during boot to increase the size of the consistent
+ * DMA region above its default value of 2MB. It must be called before the
+ * memory allocator is initialised, i.e. before any core_initcall.
+ */
 extern void __init init_consistent_dma_size(unsigned long size);
 
+/*
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+ *
+ * On the SA-1111, a bug limits DMA to only certain regions of RAM.
+ * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
+ * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
+ *
+ * The following are helper functions used by the dmabounce subsystem.
+ *
+ */
 
-#ifdef CONFIG_DMABOUNCE
-
+/**
+ * dmabounce_register_dev
+ *
+ * @dev: valid struct device pointer
+ * @small_buf_size: size of buffers to use with small buffer pool
+ * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
+ *
+ * This function should be called by low-level platform code to register
+ * a device as requiring DMA buffer bouncing. The function will allocate
+ * appropriate DMA pools for the device.
+ */
 extern int dmabounce_register_dev(struct device *, unsigned long,
 		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
 
+/**
+ * dmabounce_unregister_dev
+ *
+ * @dev: valid struct device pointer
+ *
+ * This function should be called by low-level platform code when a device
+ * that was previously registered with dmabounce_register_dev is removed
+ * from the system.
+ *
+ */
 extern void dmabounce_unregister_dev(struct device *);
 
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
-		unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
 
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
-#endif 
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	unsigned long offset;
-	struct page *page;
-	dma_addr_t addr;
-
-	BUG_ON(!virt_addr_valid(cpu_addr));
-	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
-	BUG_ON(!valid_dma_direction(dir));
-
-	page = virt_to_page(cpu_addr);
-	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
-	return addr;
-}
 
 /**
  * dma_cache_pre_ops - clean or invalidate cache before dma transfer is
@@ -281,100 +405,24 @@
 
 	if (arch_has_speculative_dfetch() && !arch_is_coherent()
 	 && dir != DMA_TO_DEVICE)
+		/*
+		 * Treat DMA_BIDIRECTIONAL and DMA_FROM_DEVICE
+		 * identically: invalidate
+		 */
 		___dma_single_cpu_to_dev(virtual_addr,
 					 size, DMA_FROM_DEVICE);
 }
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page().
+/*
+ * The scatter list versions of the above methods.
  */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, false);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
-
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
 
-
-#endif 
+#endif /* __KERNEL__ */
 #endif
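
The net effect of the header rework is a single funnel: every alloc/free/mmap variant just sets one DMA attribute and calls through the device's dma_map_ops. A call such as dma_alloc_writecombine(dev, size, &handle, GFP_KERNEL) therefore boils down to (paraphrasing the inline wrappers above):

	DEFINE_DMA_ATTRS(attrs);
	void *cpu_addr;

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	/* dev->archdata.dma_ops if set (dmabounce, IOMMU), else &arm_dma_ops */
	cpu_addr = get_dma_ops(dev)->alloc(dev, size, &handle, GFP_KERNEL, &attrs);

The strongly-ordered and non-consistent variants differ only in the attribute bit they set.
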
diff --git a/arch/arm/mach-msm/board-m7-display.c b/arch/arm/mach-msm/board-m7-display.c
index cf6b44a..18d08fa 100644
--- a/arch/arm/mach-msm/board-m7-display.c
+++ b/arch/arm/mach-msm/board-m7-display.c
@@ -530,9 +530,6 @@
 
 static struct msm_panel_common_pdata mdp_pdata = {
 	.gpio = MDP_VSYNC_GPIO,
-	.mdp_core_clk_rate = 200000000,
-	.mdp_core_clk_table = mdp_core_clk_rate_table,
-	.num_mdp_clk = ARRAY_SIZE(mdp_core_clk_rate_table),
 #ifdef CONFIG_MSM_BUS_SCALING
 	.mdp_bus_scale_table = &mdp_bus_scale_pdata,
 #endif
@@ -543,7 +540,7 @@
 	.mem_hid = MEMTYPE_EBI1,
 #endif
 	.cont_splash_enabled = 0x00,
-	.mdp_gamma = m7_mdp_gamma,
+	/* .mdp_gamma = m7_mdp_gamma, */
 	.mdp_iommu_split_domain = 1,
 	.mdp_max_clk = 200000000,
 };
diff --git a/arch/arm/mach-msm/clock-8960.c b/arch/arm/mach-msm/clock-8960.c
index e523a7a..ebb4379 100644
--- a/arch/arm/mach-msm/clock-8960.c
+++ b/arch/arm/mach-msm/clock-8960.c
@@ -5276,7 +5276,18 @@
 	CLK_LOOKUP("mem_iface_clk",	imem_p_clk.c,	"kgsl-3d0.0"),
 	CLK_LOOKUP("iface_clk",		mdp_p_clk.c,		"mdp.0"),
 	CLK_LOOKUP("iface_clk",		mdp_p_clk.c,	"footswitch-8x60.4"),
-	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,		"msm_iommu"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.0"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.1"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.2"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.3"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.4"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.5"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.6"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.7"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.8"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.9"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.10"),
+	CLK_LOOKUP("iface_clk",		smmu_p_clk.c,	"msm_iommu-v0.11"),
 	CLK_LOOKUP("iface_clk",		rot_p_clk.c,	"msm_rotator.0"),
 	CLK_LOOKUP("iface_clk",		rot_p_clk.c,	"footswitch-8x60.6"),
 	CLK_LOOKUP("iface_clk",		vcodec_p_clk.c,		"msm_vidc.0"),
diff --git a/arch/arm/mach-msm/devices-iommu.c b/arch/arm/mach-msm/devices-iommu.c
index ebee124..b91e7fe 100644
--- a/arch/arm/mach-msm/devices-iommu.c
+++ b/arch/arm/mach-msm/devices-iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -312,11 +312,6 @@
 	},
 };
 
-static struct platform_device msm_root_iommu_dev = {
-	.name = "msm_iommu",
-	.id = -1,
-};
-
 static struct msm_iommu_dev jpegd_iommu = {
 	.name = "jpegd",
 	.ncb = 2,
@@ -365,25 +360,25 @@
 static struct msm_iommu_dev gfx3d_iommu = {
 	.name = "gfx3d",
 	.ncb = 3,
-	.ttbr_split = 1,
+	.ttbr_split = 0,
 };
 
 static struct msm_iommu_dev gfx3d1_iommu = {
 	.name = "gfx3d1",
 	.ncb = 3,
-	.ttbr_split = 1,
+	.ttbr_split = 0,
 };
 
 static struct msm_iommu_dev gfx2d0_iommu = {
 	.name = "gfx2d0",
 	.ncb = 2,
-	.ttbr_split = 1,
+	.ttbr_split = 0,
 };
 
 static struct msm_iommu_dev gfx2d1_iommu = {
 	.name = "gfx2d1",
 	.ncb = 2,
-	.ttbr_split = 1,
+	.ttbr_split = 0,
 };
 
 static struct msm_iommu_dev vcap_iommu = {
@@ -392,10 +387,9 @@
 };
 
 static struct platform_device msm_device_iommu_jpegd = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 0,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &jpegd_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_jpegd_resources),
@@ -403,10 +397,9 @@
 };
 
 static struct platform_device msm_device_iommu_vpe = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 1,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &vpe_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_vpe_resources),
@@ -414,10 +407,9 @@
 };
 
 static struct platform_device msm_device_iommu_mdp0 = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 2,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &mdp0_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_mdp0_resources),
@@ -425,10 +417,9 @@
 };
 
 static struct platform_device msm_device_iommu_mdp1 = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 3,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &mdp1_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_mdp1_resources),
@@ -436,10 +427,9 @@
 };
 
 static struct platform_device msm_device_iommu_rot = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 4,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &rot_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_rot_resources),
@@ -447,10 +437,9 @@
 };
 
 static struct platform_device msm_device_iommu_ijpeg = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 5,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &ijpeg_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_ijpeg_resources),
@@ -458,10 +447,9 @@
 };
 
 static struct platform_device msm_device_iommu_vfe = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 6,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &vfe_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_vfe_resources),
@@ -469,10 +457,9 @@
 };
 
 static struct platform_device msm_device_iommu_vcodec_a = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 7,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &vcodec_a_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_vcodec_a_resources),
@@ -480,10 +467,9 @@
 };
 
 static struct platform_device msm_device_iommu_vcodec_b = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 8,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &vcodec_b_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_vcodec_b_resources),
@@ -491,10 +477,9 @@
 };
 
 static struct platform_device msm_device_iommu_gfx3d = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 9,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &gfx3d_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_gfx3d_resources),
@@ -502,10 +487,9 @@
 };
 
 static struct platform_device msm_device_iommu_gfx3d1 = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 10,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &gfx3d1_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_gfx3d1_resources),
@@ -513,10 +497,9 @@
 };
 
 static struct platform_device msm_device_iommu_gfx2d0 = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 10,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &gfx2d0_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_gfx2d0_resources),
@@ -524,10 +507,9 @@
 };
 
 static struct platform_device msm_device_iommu_gfx2d1 = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 11,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &gfx2d1_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_gfx2d1_resources),
@@ -535,10 +517,9 @@
 };
 
 static struct platform_device msm_device_iommu_vcap = {
-	.name = "msm_iommu",
+	.name = "msm_iommu-v0",
 	.id = 11,
 	.dev = {
-		.parent = &msm_root_iommu_dev.dev,
 		.platform_data = &vcap_iommu,
 	},
 	.num_resources = ARRAY_SIZE(msm_iommu_vcap_resources),
@@ -939,8 +920,11 @@
 	&msm_device_iommu_gfx2d1,
 };
 
-static struct platform_device *msm_iommu_8064_devs[] = {
+static struct platform_device *msm_iommu_adreno3xx_gfx_devs[] = {
 	&msm_device_iommu_gfx3d1,
+};
+
+static struct platform_device *msm_iommu_vcap_devs[] = {
 	&msm_device_iommu_vcap,
 };
 
@@ -973,9 +957,12 @@
 	&msm_device_gfx2d1_2d1_ctx,
 };
 
-static struct platform_device *msm_iommu_8064_ctx_devs[] = {
+static struct platform_device *msm_iommu_adreno3xx_ctx_devs[] = {
 	&msm_device_gfx3d1_user_ctx,
 	&msm_device_gfx3d1_priv_ctx,
+};
+
+static struct platform_device *msm_iommu_vcap_ctx_devs[] = {
 	&msm_device_vcap_vc_ctx,
 	&msm_device_vcap_vp_ctx,
 };
@@ -988,22 +975,16 @@
 static int __init iommu_init(void)
 {
 	int ret;
-	if (!msm_soc_version_supports_iommu_v1()) {
-		pr_err("IOMMU v1 is not supported on this SoC version.\n");
+	if (!msm_soc_version_supports_iommu_v0()) {
+		pr_err("IOMMU v0 is not supported on this SoC version.\n");
 		return -ENODEV;
 	}
 
-	ret = platform_device_register(&msm_root_iommu_dev);
-	if (ret != 0) {
-		pr_err("Failed to register root IOMMU device!\n");
-		goto failure;
-	}
-
-	
+	/* Initialize common devs */
 	platform_add_devices(msm_iommu_common_devs,
 				ARRAY_SIZE(msm_iommu_common_devs));
 
-	
+	/* Initialize soc-specific devs */
 	if (cpu_is_msm8x60() || cpu_is_msm8960()) {
 		platform_add_devices(msm_iommu_jpegd_devs,
 				ARRAY_SIZE(msm_iommu_jpegd_devs));
@@ -1011,18 +992,21 @@
 				ARRAY_SIZE(msm_iommu_gfx2d_devs));
 	}
 
-	if (cpu_is_apq8064() || cpu_is_apq8064ab()) {
+	if (soc_class_is_apq8064() || cpu_is_msm8960ab()) {
 		platform_add_devices(msm_iommu_jpegd_devs,
 				ARRAY_SIZE(msm_iommu_jpegd_devs));
-		platform_add_devices(msm_iommu_8064_devs,
-				ARRAY_SIZE(msm_iommu_8064_devs));
+		platform_add_devices(msm_iommu_adreno3xx_gfx_devs,
+				ARRAY_SIZE(msm_iommu_adreno3xx_gfx_devs));
 	}
+	if (soc_class_is_apq8064())
+		platform_add_devices(msm_iommu_vcap_devs,
+				ARRAY_SIZE(msm_iommu_vcap_devs));
 
-	
+	/* Initialize common ctx_devs */
 	ret = platform_add_devices(msm_iommu_common_ctx_devs,
 				ARRAY_SIZE(msm_iommu_common_ctx_devs));
 
-	
+	/* Initialize soc-specific ctx_devs */
 	if (cpu_is_msm8x60() || cpu_is_msm8960()) {
 		platform_add_devices(msm_iommu_jpegd_ctx_devs,
 				ARRAY_SIZE(msm_iommu_jpegd_ctx_devs));
@@ -1030,28 +1014,29 @@
 				ARRAY_SIZE(msm_iommu_gfx2d_ctx_devs));
 	}
 
-	if (cpu_is_apq8064() || cpu_is_apq8064ab()) {
+	if (soc_class_is_apq8064() || cpu_is_msm8960ab()) {
 		platform_add_devices(msm_iommu_jpegd_ctx_devs,
 				ARRAY_SIZE(msm_iommu_jpegd_ctx_devs));
-		platform_add_devices(msm_iommu_8064_ctx_devs,
-				ARRAY_SIZE(msm_iommu_8064_ctx_devs));
+
+		platform_add_devices(msm_iommu_adreno3xx_ctx_devs,
+				ARRAY_SIZE(msm_iommu_adreno3xx_ctx_devs));
 	}
+	if (soc_class_is_apq8064())
+		platform_add_devices(msm_iommu_vcap_ctx_devs,
+			ARRAY_SIZE(msm_iommu_vcap_ctx_devs));
 
 	return 0;
-
-failure:
-	return ret;
 }
 
 static void __exit iommu_exit(void)
 {
 	int i;
 
-	
+	/* Common ctx_devs */
 	for (i = 0; i < ARRAY_SIZE(msm_iommu_common_ctx_devs); i++)
 		platform_device_unregister(msm_iommu_common_ctx_devs[i]);
 
-	
+	/* Common devs. */
 	for (i = 0; i < ARRAY_SIZE(msm_iommu_common_devs); ++i)
 		platform_device_unregister(msm_iommu_common_devs[i]);
 
@@ -1068,22 +1053,37 @@
 		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_devs); i++)
 			platform_device_unregister(msm_iommu_jpegd_devs[i]);
 	}
+	if (soc_class_is_apq8064()) {
+		for (i = 0; i < ARRAY_SIZE(msm_iommu_vcap_ctx_devs); i++)
+			platform_device_unregister(msm_iommu_vcap_ctx_devs[i]);
+	}
 
-	if (cpu_is_apq8064() || cpu_is_apq8064ab()) {
-		for (i = 0; i < ARRAY_SIZE(msm_iommu_8064_ctx_devs); i++)
-			platform_device_unregister(msm_iommu_8064_ctx_devs[i]);
+	if (soc_class_is_apq8064() || cpu_is_msm8960ab()) {
+		for (i = 0; i < ARRAY_SIZE(msm_iommu_adreno3xx_ctx_devs);
+			   i++)
+			platform_device_unregister(
+				msm_iommu_adreno3xx_ctx_devs[i]);
 
-		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_ctx_devs); i++)
-			platform_device_unregister(msm_iommu_jpegd_ctx_devs[i]);
+		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_ctx_devs);
+			   i++)
+			platform_device_unregister(
+				msm_iommu_jpegd_ctx_devs[i]);
 
-		for (i = 0; i < ARRAY_SIZE(msm_iommu_8064_devs); i++)
-			platform_device_unregister(msm_iommu_8064_devs[i]);
+		if (soc_class_is_apq8064()) {
+			for (i = 0; i < ARRAY_SIZE(msm_iommu_vcap_devs);
+			   i++)
+				platform_device_unregister(
+				msm_iommu_vcap_devs[i]);
+		}
+
+		for (i = 0; i < ARRAY_SIZE(msm_iommu_adreno3xx_gfx_devs);
+			   i++)
+			platform_device_unregister(
+				msm_iommu_adreno3xx_gfx_devs[i]);
 
 		for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_devs); i++)
 			platform_device_unregister(msm_iommu_jpegd_devs[i]);
 	}
-
-	platform_device_unregister(&msm_root_iommu_dev);
 }
 
 subsys_initcall(iommu_init);
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 101e65a..6fb7401 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -524,21 +524,19 @@
 	void (*panel_config_gpio)(int);
 	int (*vga_switch)(int select_vga);
 	int *gpio_num;
-	int mdp_core_clk_rate;
-	unsigned num_mdp_clk;
-	int *mdp_core_clk_table;
 	u32 mdp_max_clk;
 #ifdef CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *mdp_bus_scale_table;
 #endif
 	int mdp_rev;
-	u32 ov0_wb_size;  
-	u32 ov1_wb_size;  
+	u32 ov0_wb_size;  /* overlay0 writeback size */
+	u32 ov1_wb_size;  /* overlay1 writeback size */
 	u32 mem_hid;
 	char cont_splash_enabled;
+	u32 splash_screen_addr;
+	u32 splash_screen_size;
 	char mdp_iommu_split_domain;
-	int (*mdp_color_enhance)(void);
-	int (*mdp_gamma)(void);
+	u32 avtimer_phy;
 };
 
 
@@ -602,7 +600,7 @@
 	char dlane_swap;
 	void (*dsi_pwm_cfg)(void);
 	char enable_wled_bl_ctrl;
-	unsigned char (*shrink_pwm)(int val);
+	void (*gpio_set_backlight)(int bl_level);
 };
 
 struct lvds_panel_platform_data {
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index 3c44fad..f750dc8 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,14 +15,16 @@
 
 #include <linux/interrupt.h>
 #include <linux/clk.h>
+#include <linux/list.h>
 #include <linux/regulator/consumer.h>
 #include <mach/socinfo.h>
 
 extern pgprot_t     pgprot_kernel;
-extern struct platform_device *msm_iommu_root_dev;
+extern struct bus_type msm_iommu_sec_bus_type;
 
 /* Domain attributes */
 #define MSM_IOMMU_DOMAIN_PT_CACHEABLE	0x1
+#define MSM_IOMMU_DOMAIN_PT_SECURE	0x2
 
 /* Mask for the cache policy attribute */
 #define MSM_IOMMU_CP_MASK		0x03
@@ -38,6 +40,8 @@
 /* Maximum number of SMT entries allowed by the system */
 #define MAX_NUM_SMR	128
 
+#define MAX_NUM_BFB_REGS	32
+
 /**
  * struct msm_iommu_dev - a single IOMMU hardware instance
  * name		Human-readable name given to this IOMMU HW instance
@@ -64,10 +68,22 @@
 	int mids[MAX_NUM_MIDS];
 };
 
+/**
+ * struct msm_iommu_bfb_settings - a set of IOMMU BFB tuning parameters
+ * regs		An array of register offsets to configure
+ * data		Values to write to corresponding registers
+ * length	Number of valid entries in the offset/val arrays
+ */
+struct msm_iommu_bfb_settings {
+	unsigned int regs[MAX_NUM_BFB_REGS];
+	unsigned int data[MAX_NUM_BFB_REGS];
+	int length;
+};
 
 /**
  * struct msm_iommu_drvdata - A single IOMMU hardware instance
  * @base:	IOMMU config port base address (VA)
+ * @glb_base:	IOMMU config port base address for global register space (VA)
  * @ncb		The number of contexts on this IOMMU
  * @irq:	Interrupt number
  * @clk:	The bus clock for this IOMMU hardware instance
@@ -75,13 +91,20 @@
  * @aclk:	Alternate clock for this IOMMU core, if any
  * @name:	Human-readable name of this IOMMU device
  * @gdsc:	Regulator needed to power this HW block (v2 only)
- * @nsmr:	Size of the SMT on this HW block (v2 only)
+ * @bfb_settings: Optional BFB performance tuning parameters
+ * @dev:	Struct device this hardware instance is tied to
+ * @list:	List head to link all iommus together
+ * @clk_reg_virt: Optional clock register virtual address.
+ * @halt_enabled: Set to 1 if IOMMU halt is supported in the IOMMU, 0 otherwise.
+ * @asid:         List of ASIDs and their usage counts (index is ASID value).
+ * @ctx_attach_count: Count of how many contexts are attached.
  *
  * A msm_iommu_drvdata holds the global driver data about a single piece
  * of an IOMMU hardware instance.
  */
 struct msm_iommu_drvdata {
 	void __iomem *base;
+	void __iomem *glb_base;
 	int ncb;
 	int ttbr_split;
 	struct clk *clk;
@@ -89,10 +112,43 @@
 	struct clk *aclk;
 	const char *name;
 	struct regulator *gdsc;
-	unsigned int nsmr;
+	struct regulator *alt_gdsc;
+	struct msm_iommu_bfb_settings *bfb_settings;
+	int sec_id;
+	struct device *dev;
+	struct list_head list;
+	void __iomem *clk_reg_virt;
+	int halt_enabled;
+	int *asid;
+	unsigned int ctx_attach_count;
 };
 
 /**
+ * struct iommu_access_ops - Callbacks for accessing IOMMU
+ * @iommu_power_on:     Turn on power to unit
+ * @iommu_power_off:    Turn off power to unit
+ * @iommu_clk_on:       Turn on clks to unit
+ * @iommu_clk_off:      Turn off clks to unit
+ * @iommu_lock_acquire: Acquire any locks needed
+ * @iommu_lock_release: Release locks needed
+ */
+struct iommu_access_ops {
+	int (*iommu_power_on)(struct msm_iommu_drvdata *);
+	void (*iommu_power_off)(struct msm_iommu_drvdata *);
+	int (*iommu_clk_on)(struct msm_iommu_drvdata *);
+	void (*iommu_clk_off)(struct msm_iommu_drvdata *);
+	void (*iommu_lock_acquire)(void);
+	void (*iommu_lock_release)(void);
+};
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv);
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv);
+void program_iommu_bfb_settings(void __iomem *base,
+			const struct msm_iommu_bfb_settings *bfb_settings);
+void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata);
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata);
+
+/**
  * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
  * @num:		Hardware context number of this context
  * @pdev:		Platform device associated wit this HW instance
@@ -100,8 +156,12 @@
  *			attached to them
  * @attached_domain	Domain currently attached to this context (if any)
  * @name		Human-readable name of this context device
- * @sids		List of Stream IDs mapped to this context (v2 only)
- * @nsid		Number of Stream IDs mapped to this context (v2 only)
+ * @sids		List of Stream IDs mapped to this context
+ * @nsid		Number of Stream IDs mapped to this context
+ * @secure_context	true if this is a secure context programmed by
+ *			the secure environment, false otherwise
+ * @asid		ASID used with this context.
+ * @attach_count	Number of times this context has been attached.
  *
  * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
  * within each IOMMU hardware instance
@@ -114,6 +174,9 @@
 	const char *name;
 	u32 sids[MAX_NUM_SMR];
 	unsigned int nsid;
+	unsigned int secure_context;
+	int asid;
+	int attach_count;
 };
 
 /*
@@ -191,17 +254,30 @@
 }
 #endif
 
+/*
+ * Function to program the global registers of an IOMMU securely.
+ * This should only be called on IOMMUs for which kernel programming
+ * of global registers is not possible.
+ */
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops);
+int msm_iommu_sec_program_iommu(int sec_id);
 
-static inline int msm_soc_version_supports_iommu_v1(void)
+static inline int msm_soc_version_supports_iommu_v0(void)
 {
 #ifdef CONFIG_OF
 	struct device_node *node;
 
-	node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v2");
+	node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v1");
 	if (node) {
 		of_node_put(node);
 		return 0;
 	}
+
+	node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v0");
+	if (node) {
+		of_node_put(node);
+		return 1;
+	}
 #endif
 	if (cpu_is_msm8960() &&
 	    SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2)
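
struct iommu_access_ops exists so the secure programming path (msm_iommu_sec_program_iommu()) can borrow the regular driver's power, clock and locking helpers without a hard link between the two. The provider fills in the table once at init; a minimal sketch, where every my_* callback is a placeholder rather than a real msm_iommu-v0 symbol:

	static struct iommu_access_ops my_access_ops = {
		.iommu_power_on     = my_power_on,	/* e.g. regulator_enable(gdsc) */
		.iommu_power_off    = my_power_off,
		.iommu_clk_on       = my_clk_on,	/* pclk, core clk, aclk */
		.iommu_clk_off      = my_clk_off,
		.iommu_lock_acquire = my_lock_acquire,
		.iommu_lock_release = my_lock_release,
	};

	msm_iommu_sec_set_access_ops(&my_access_ops);
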
diff --git a/arch/arm/mach-msm/include/mach/iommu_domains.h b/arch/arm/mach-msm/include/mach/iommu_domains.h
index 59d430f..9a89508 100644
--- a/arch/arm/mach-msm/include/mach/iommu_domains.h
+++ b/arch/arm/mach-msm/include/mach/iommu_domains.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,8 +13,11 @@
 #ifndef _ARCH_IOMMU_DOMAINS_H
 #define _ARCH_IOMMU_DOMAINS_H
 
+#include <linux/errno.h>
 #include <linux/memory_alloc.h>
 
+#define MSM_IOMMU_DOMAIN_SECURE	0x1
+
 enum {
 	VIDEO_DOMAIN,
 	CAMERA_DOMAIN,
@@ -69,11 +72,15 @@
 	int npartitions;
 	const char *client_name;
 	unsigned int domain_flags;
+	unsigned int is_secure;
 };
 
 #if defined(CONFIG_MSM_IOMMU)
 
+extern void msm_iommu_set_client_name(struct iommu_domain *domain,
+				      char const *name);
 extern struct iommu_domain *msm_get_iommu_domain(int domain_num);
+extern int msm_find_domain_no(const struct iommu_domain *domain);
 
 extern int msm_allocate_iova_address(unsigned int iommu_domain,
 					unsigned int partition_no,
@@ -90,6 +97,7 @@
 
 extern int msm_iommu_map_extra(struct iommu_domain *domain,
 						unsigned long start_iova,
+						phys_addr_t phys_addr,
 						unsigned long size,
 						unsigned long page_size,
 						int cached);
@@ -99,7 +107,7 @@
 						unsigned long size,
 						unsigned long page_size);
 
-extern int msm_iommu_map_contig_buffer(unsigned long phys,
+extern int msm_iommu_map_contig_buffer(phys_addr_t phys,
 				unsigned int domain_no,
 				unsigned int partition_no,
 				unsigned long size,
@@ -116,10 +124,19 @@
 extern int msm_register_domain(struct msm_iova_layout *layout);
 
 #else
+static inline void msm_iommu_set_client_name(struct iommu_domain *domain,
+					     char const *name)
+{
+}
+
 static inline struct iommu_domain
 	*msm_get_iommu_domain(int subsys_id) { return NULL; }
 
 
+static inline int msm_find_domain_no(const struct iommu_domain *domain)
+{
+	return -EINVAL;
+}
 
 static inline int msm_allocate_iova_address(unsigned int iommu_domain,
 					unsigned int partition_no,
@@ -139,6 +156,7 @@
 
 static inline int msm_iommu_map_extra(struct iommu_domain *domain,
 						unsigned long start_iova,
+						phys_addr_t phys_addr,
 						unsigned long size,
 						unsigned long page_size,
 						int cached)
@@ -153,7 +171,7 @@
 {
 }
 
-static inline int msm_iommu_map_contig_buffer(unsigned long phys,
+static inline int msm_iommu_map_contig_buffer(phys_addr_t phys,
 				unsigned int domain_no,
 				unsigned int partition_no,
 				unsigned long size,
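
Note the signature change rippling through here: msm_iommu_map_extra() now takes the physical address explicitly instead of deriving it, and msm_iommu_map_contig_buffer() takes a phys_addr_t so physical addresses above 32 bits survive. A hedged example of the new msm_iommu_map_extra() call, with addresses and sizes invented for illustration:

	struct iommu_domain *dom = msm_get_iommu_domain(VIDEO_DOMAIN);

	if (dom)
		/* 2MB at IOVA 0x40000000 mapped to PA 0x40000000,
		 * built from 4K pages, uncached */
		msm_iommu_map_extra(dom, 0x40000000, 0x40000000,
				    SZ_2M, SZ_4K, 0);
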
diff --git a/arch/arm/mach-msm/include/mach/iommu_hw-v0.h b/arch/arm/mach-msm/include/mach/iommu_hw-v0.h
new file mode 100644
index 0000000..68dec79
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/iommu_hw-v0.h
@@ -0,0 +1,1868 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_HW_8XXX_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_HW_8XXX_H
+
+#define CTX_SHIFT 12
+
+#define GET_GLOBAL_REG(reg, base) (readl_relaxed((base) + (reg)))
+#define GET_CTX_REG(reg, base, ctx) \
+			(readl_relaxed((base) + (reg) + ((ctx) << CTX_SHIFT)))
+
+#define SET_GLOBAL_REG(reg, base, val)	writel_relaxed((val), ((base) + (reg)))
+
+#define SET_CTX_REG(reg, base, ctx, val) \
+		writel_relaxed((val), ((base) + (reg) + ((ctx) << CTX_SHIFT)))
+
+/* Wrappers for numbered registers */
+#define SET_GLOBAL_REG_N(b, n, r, v) SET_GLOBAL_REG(b, ((r) + (n << 2)), (v))
+#define GET_GLOBAL_REG_N(b, n, r)    GET_GLOBAL_REG(b, ((r) + (n << 2)))
+
+/* Field wrappers */
+#define GET_GLOBAL_FIELD(b, r, F)    GET_FIELD(((b) + (r)), F##_MASK, F##_SHIFT)
+#define GET_CONTEXT_FIELD(b, c, r, F)	\
+	GET_FIELD(((b) + (r) + ((c) << CTX_SHIFT)), F##_MASK, F##_SHIFT)
+
+#define SET_GLOBAL_FIELD(b, r, F, v) \
+	SET_FIELD(((b) + (r)), F##_MASK, F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD(b, c, r, F, v)	\
+	SET_FIELD(((b) + (r) + ((c) << CTX_SHIFT)), F##_MASK, F##_SHIFT, (v))
+
+#define GET_FIELD(addr, mask, shift) ((readl_relaxed(addr) >> (shift)) & (mask))
+
+#define SET_FIELD(addr, mask, shift, v) \
+do { \
+	int t = readl_relaxed(addr); \
+	writel_relaxed((t & ~((mask) << (shift))) + (((v) & \
+		       (mask)) << (shift)), addr);\
+} while (0)
+
+
+#define NUM_FL_PTE	4096
+#define NUM_SL_PTE	256
+#define NUM_TEX_CLASS	8
+
+/* First-level page table bits */
+#define FL_BASE_MASK		0xFFFFFC00
+#define FL_TYPE_TABLE		(1 << 0)
+#define FL_TYPE_SECT		(2 << 0)
+#define FL_SUPERSECTION		(1 << 18)
+#define FL_AP0			(1 << 10)
+#define FL_AP1			(1 << 11)
+#define FL_AP2			(1 << 15)
+#define FL_SHARED		(1 << 16)
+#define FL_BUFFERABLE		(1 << 2)
+#define FL_CACHEABLE		(1 << 3)
+#define FL_TEX0			(1 << 12)
+#define FL_OFFSET(va)		(((va) & 0xFFF00000) >> 20)
+#define FL_NG			(1 << 17)
+
+/* Second-level page table bits */
+#define SL_BASE_MASK_LARGE	0xFFFF0000
+#define SL_BASE_MASK_SMALL	0xFFFFF000
+#define SL_TYPE_LARGE		(1 << 0)
+#define SL_TYPE_SMALL		(2 << 0)
+#define SL_AP0			(1 << 4)
+#define SL_AP1			(2 << 4)
+#define SL_AP2			(1 << 9)
+#define SL_SHARED		(1 << 10)
+#define SL_BUFFERABLE		(1 << 2)
+#define SL_CACHEABLE		(1 << 3)
+#define SL_TEX0			(1 << 6)
+#define SL_OFFSET(va)		(((va) & 0xFF000) >> 12)
+#define SL_NG			(1 << 11)
+
+/* Memory type and cache policy attributes */
+#define MT_SO			0
+#define MT_DEV			1
+#define MT_NORMAL		2
+#define CP_NONCACHED		0
+#define CP_WB_WA		1
+#define CP_WT			2
+#define CP_WB_NWA		3
+
+/* Global register setters / getters */
+#define SET_M2VCBR_N(b, N, v)	 SET_GLOBAL_REG_N(M2VCBR_N, N, (b), (v))
+#define SET_CBACR_N(b, N, v)	 SET_GLOBAL_REG_N(CBACR_N, N, (b), (v))
+#define SET_TLBRSW(b, v)	 SET_GLOBAL_REG(TLBRSW, (b), (v))
+#define SET_TLBTR0(b, v)	 SET_GLOBAL_REG(TLBTR0, (b), (v))
+#define SET_TLBTR1(b, v)	 SET_GLOBAL_REG(TLBTR1, (b), (v))
+#define SET_TLBTR2(b, v)	 SET_GLOBAL_REG(TLBTR2, (b), (v))
+#define SET_TESTBUSCR(b, v)	 SET_GLOBAL_REG(TESTBUSCR, (b), (v))
+#define SET_GLOBAL_TLBIALL(b, v) SET_GLOBAL_REG(GLOBAL_TLBIALL, (b), (v))
+#define SET_TLBIVMID(b, v)	 SET_GLOBAL_REG(TLBIVMID, (b), (v))
+#define SET_CR(b, v)		 SET_GLOBAL_REG(CR, (b), (v))
+#define SET_EAR(b, v)		 SET_GLOBAL_REG(EAR, (b), (v))
+#define SET_ESR(b, v)		 SET_GLOBAL_REG(ESR, (b), (v))
+#define SET_ESRRESTORE(b, v)	 SET_GLOBAL_REG(ESRRESTORE, (b), (v))
+#define SET_ESYNR0(b, v)	 SET_GLOBAL_REG(ESYNR0, (b), (v))
+#define SET_ESYNR1(b, v)	 SET_GLOBAL_REG(ESYNR1, (b), (v))
+#define SET_RPU_ACR(b, v)	 SET_GLOBAL_REG(RPU_ACR, (b), (v))
+
+#define GET_M2VCBR_N(b, N)	 GET_GLOBAL_REG_N(M2VCBR_N, N, (b))
+#define GET_CBACR_N(b, N)	 GET_GLOBAL_REG_N(CBACR_N, N, (b))
+#define GET_TLBTR0(b)		 GET_GLOBAL_REG(TLBTR0, (b))
+#define GET_TLBTR1(b)		 GET_GLOBAL_REG(TLBTR1, (b))
+#define GET_TLBTR2(b)		 GET_GLOBAL_REG(TLBTR2, (b))
+#define GET_TESTBUSCR(b)	 GET_GLOBAL_REG(TESTBUSCR, (b))
+#define GET_GLOBAL_TLBIALL(b)	 GET_GLOBAL_REG(GLOBAL_TLBIALL, (b))
+#define GET_TLBIVMID(b)		 GET_GLOBAL_REG(TLBIVMID, (b))
+#define GET_CR(b)		 GET_GLOBAL_REG(CR, (b))
+#define GET_EAR(b)		 GET_GLOBAL_REG(EAR, (b))
+#define GET_ESR(b)		 GET_GLOBAL_REG(ESR, (b))
+#define GET_ESRRESTORE(b)	 GET_GLOBAL_REG(ESRRESTORE, (b))
+#define GET_ESYNR0(b)		 GET_GLOBAL_REG(ESYNR0, (b))
+#define GET_ESYNR1(b)		 GET_GLOBAL_REG(ESYNR1, (b))
+#define GET_REV(b)		 GET_GLOBAL_REG(REV, (b))
+#define GET_IDR(b)		 GET_GLOBAL_REG(IDR, (b))
+#define GET_RPU_ACR(b)		 GET_GLOBAL_REG(RPU_ACR, (b))
+
+
+/* Context register setters/getters */
+#define SET_SCTLR(b, c, v)	 SET_CTX_REG(SCTLR, (b), (c), (v))
+#define SET_ACTLR(b, c, v)	 SET_CTX_REG(ACTLR, (b), (c), (v))
+#define SET_CONTEXTIDR(b, c, v)	 SET_CTX_REG(CONTEXTIDR, (b), (c), (v))
+#define SET_TTBR0(b, c, v)	 SET_CTX_REG(TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v)	 SET_CTX_REG(TTBR1, (b), (c), (v))
+#define SET_TTBCR(b, c, v)	 SET_CTX_REG(TTBCR, (b), (c), (v))
+#define SET_PAR(b, c, v)	 SET_CTX_REG(PAR, (b), (c), (v))
+#define SET_FSR(b, c, v)	 SET_CTX_REG(FSR, (b), (c), (v))
+#define SET_FSRRESTORE(b, c, v)	 SET_CTX_REG(FSRRESTORE, (b), (c), (v))
+#define SET_FAR(b, c, v)	 SET_CTX_REG(FAR, (b), (c), (v))
+#define SET_FSYNR0(b, c, v)	 SET_CTX_REG(FSYNR0, (b), (c), (v))
+#define SET_FSYNR1(b, c, v)	 SET_CTX_REG(FSYNR1, (b), (c), (v))
+#define SET_PRRR(b, c, v)	 SET_CTX_REG(PRRR, (b), (c), (v))
+#define SET_NMRR(b, c, v)	 SET_CTX_REG(NMRR, (b), (c), (v))
+#define SET_TLBLKCR(b, c, v)	 SET_CTX_REG(TLBLCKR, (b), (c), (v))
+#define SET_V2PSR(b, c, v)	 SET_CTX_REG(V2PSR, (b), (c), (v))
+#define SET_TLBFLPTER(b, c, v)	 SET_CTX_REG(TLBFLPTER, (b), (c), (v))
+#define SET_TLBSLPTER(b, c, v)	 SET_CTX_REG(TLBSLPTER, (b), (c), (v))
+#define SET_BFBCR(b, c, v)	 SET_CTX_REG(BFBCR, (b), (c), (v))
+#define SET_CTX_TLBIALL(b, c, v) SET_CTX_REG(CTX_TLBIALL, (b), (c), (v))
+#define SET_TLBIASID(b, c, v)	 SET_CTX_REG(TLBIASID, (b), (c), (v))
+#define SET_TLBIVA(b, c, v)	 SET_CTX_REG(TLBIVA, (b), (c), (v))
+#define SET_TLBIVAA(b, c, v)	 SET_CTX_REG(TLBIVAA, (b), (c), (v))
+#define SET_V2PPR(b, c, v)	 SET_CTX_REG(V2PPR, (b), (c), (v))
+#define SET_V2PPW(b, c, v)	 SET_CTX_REG(V2PPW, (b), (c), (v))
+#define SET_V2PUR(b, c, v)	 SET_CTX_REG(V2PUR, (b), (c), (v))
+#define SET_V2PUW(b, c, v)	 SET_CTX_REG(V2PUW, (b), (c), (v))
+#define SET_RESUME(b, c, v)	 SET_CTX_REG(RESUME, (b), (c), (v))
+
+#define GET_SCTLR(b, c)		 GET_CTX_REG(SCTLR, (b), (c))
+#define GET_ACTLR(b, c)		 GET_CTX_REG(ACTLR, (b), (c))
+#define GET_CONTEXTIDR(b, c)	 GET_CTX_REG(CONTEXTIDR, (b), (c))
+#define GET_TTBR0(b, c)		 GET_CTX_REG(TTBR0, (b), (c))
+#define GET_TTBR1(b, c)		 GET_CTX_REG(TTBR1, (b), (c))
+#define GET_TTBCR(b, c)		 GET_CTX_REG(TTBCR, (b), (c))
+#define GET_PAR(b, c)		 GET_CTX_REG(PAR, (b), (c))
+#define GET_FSR(b, c)		 GET_CTX_REG(FSR, (b), (c))
+#define GET_FSRRESTORE(b, c)	 GET_CTX_REG(FSRRESTORE, (b), (c))
+#define GET_FAR(b, c)		 GET_CTX_REG(FAR, (b), (c))
+#define GET_FSYNR0(b, c)	 GET_CTX_REG(FSYNR0, (b), (c))
+#define GET_FSYNR1(b, c)	 GET_CTX_REG(FSYNR1, (b), (c))
+#define GET_PRRR(b, c)		 GET_CTX_REG(PRRR, (b), (c))
+#define GET_NMRR(b, c)		 GET_CTX_REG(NMRR, (b), (c))
+#define GET_TLBLCKR(b, c)	 GET_CTX_REG(TLBLCKR, (b), (c))
+#define GET_V2PSR(b, c)		 GET_CTX_REG(V2PSR, (b), (c))
+#define GET_TLBFLPTER(b, c)	 GET_CTX_REG(TLBFLPTER, (b), (c))
+#define GET_TLBSLPTER(b, c)	 GET_CTX_REG(TLBSLPTER, (b), (c))
+#define GET_BFBCR(b, c)		 GET_CTX_REG(BFBCR, (b), (c))
+#define GET_CTX_TLBIALL(b, c)	 GET_CTX_REG(CTX_TLBIALL, (b), (c))
+#define GET_TLBIASID(b, c)	 GET_CTX_REG(TLBIASID, (b), (c))
+#define GET_TLBIVA(b, c)	 GET_CTX_REG(TLBIVA, (b), (c))
+#define GET_TLBIVAA(b, c)	 GET_CTX_REG(TLBIVAA, (b), (c))
+#define GET_V2PPR(b, c)		 GET_CTX_REG(V2PPR, (b), (c))
+#define GET_V2PPW(b, c)		 GET_CTX_REG(V2PPW, (b), (c))
+#define GET_V2PUR(b, c)		 GET_CTX_REG(V2PUR, (b), (c))
+#define GET_V2PUW(b, c)		 GET_CTX_REG(V2PUW, (b), (c))
+#define GET_RESUME(b, c)	 GET_CTX_REG(RESUME, (b), (c))
+
+
+/* Global field setters / getters */
+/* Global Field Setters */
+/* CBACR_N */
+#define SET_RWVMID(b, n, v)  SET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), RWVMID, v)
+#define SET_RWE(b, n, v)     SET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), RWE, v)
+#define SET_RWGE(b, n, v)    SET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), RWGE, v)
+#define SET_CBVMID(b, n, v)  SET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), CBVMID, v)
+#define SET_IRPTNDX(b, n, v) SET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), IRPTNDX, v)
+
+
+/* M2VCBR_N */
+#define SET_VMID(b, n, v)    SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), VMID, v)
+#define SET_CBNDX(b, n, v)   SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), CBNDX, v)
+#define SET_BYPASSD(b, n, v) SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BYPASSD, v)
+#define SET_BPRCOSH(b, n, v) SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPRCOSH, v)
+#define SET_BPRCISH(b, n, v) SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPRCISH, v)
+#define SET_BPRCNSH(b, n, v) SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPRCNSH, v)
+#define SET_BPSHCFG(b, n, v) SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPSHCFG, v)
+#define SET_NSCFG(b, n, v)   SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), NSCFG, v)
+#define SET_BPMTCFG(b, n, v) SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPMTCFG, v)
+#define SET_BPMEMTYPE(b, n, v) \
+	SET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPMEMTYPE, v)
+
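+/*
+ * Sketch (an addition for illustration): each M2VCBR_N entry steers one
+ * master/VMID to a context bank.  Routing entry "n" to context bank "c"
+ * with bypass disabled might look like:
+ *
+ *	SET_CBNDX(base, n, c);
+ *	SET_BYPASSD(base, n, 0);
+ */
+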
+
+/* CR */
+#define SET_RPUE(b, v)		 SET_GLOBAL_FIELD(b, CR, RPUE, v)
+#define SET_RPUERE(b, v)	 SET_GLOBAL_FIELD(b, CR, RPUERE, v)
+#define SET_RPUEIE(b, v)	 SET_GLOBAL_FIELD(b, CR, RPUEIE, v)
+#define SET_DCDEE(b, v)		 SET_GLOBAL_FIELD(b, CR, DCDEE, v)
+#define SET_CLIENTPD(b, v)       SET_GLOBAL_FIELD(b, CR, CLIENTPD, v)
+#define SET_STALLD(b, v)	 SET_GLOBAL_FIELD(b, CR, STALLD, v)
+#define SET_TLBLKCRWE(b, v)      SET_GLOBAL_FIELD(b, CR, TLBLKCRWE, v)
+#define SET_CR_TLBIALLCFG(b, v)  SET_GLOBAL_FIELD(b, CR, CR_TLBIALLCFG, v)
+#define SET_TLBIVMIDCFG(b, v)    SET_GLOBAL_FIELD(b, CR, TLBIVMIDCFG, v)
+#define SET_CR_HUME(b, v)        SET_GLOBAL_FIELD(b, CR, CR_HUME, v)
+
+
+/* ESR */
+#define SET_CFG(b, v)		 SET_GLOBAL_FIELD(b, ESR, CFG, v)
+#define SET_BYPASS(b, v)	 SET_GLOBAL_FIELD(b, ESR, BYPASS, v)
+#define SET_ESR_MULTI(b, v)      SET_GLOBAL_FIELD(b, ESR, ESR_MULTI, v)
+
+
+/* ESYNR0 */
+#define SET_ESYNR0_AMID(b, v)    SET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_AMID, v)
+#define SET_ESYNR0_APID(b, v)    SET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_APID, v)
+#define SET_ESYNR0_ABID(b, v)    SET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_ABID, v)
+#define SET_ESYNR0_AVMID(b, v)   SET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_AVMID, v)
+#define SET_ESYNR0_ATID(b, v)    SET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_ATID, v)
+
+
+/* ESYNR1 */
+#define SET_ESYNR1_AMEMTYPE(b, v) \
+			SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AMEMTYPE, v)
+#define SET_ESYNR1_ASHARED(b, v)  SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ASHARED, v)
+#define SET_ESYNR1_AINNERSHARED(b, v) \
+			SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AINNERSHARED, v)
+#define SET_ESYNR1_APRIV(b, v)   SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_APRIV, v)
+#define SET_ESYNR1_APROTNS(b, v) SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_APROTNS, v)
+#define SET_ESYNR1_AINST(b, v)   SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AINST, v)
+#define SET_ESYNR1_AWRITE(b, v)  SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AWRITE, v)
+#define SET_ESYNR1_ABURST(b, v)  SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ABURST, v)
+#define SET_ESYNR1_ALEN(b, v)    SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ALEN, v)
+#define SET_ESYNR1_ASIZE(b, v)   SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ASIZE, v)
+#define SET_ESYNR1_ALOCK(b, v)   SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ALOCK, v)
+#define SET_ESYNR1_AOOO(b, v)    SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AOOO, v)
+#define SET_ESYNR1_AFULL(b, v)   SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AFULL, v)
+#define SET_ESYNR1_AC(b, v)      SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AC, v)
+#define SET_ESYNR1_DCD(b, v)     SET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_DCD, v)
+
+
+/* TESTBUSCR */
+#define SET_TBE(b, v)		 SET_GLOBAL_FIELD(b, TESTBUSCR, TBE, v)
+#define SET_SPDMBE(b, v)	 SET_GLOBAL_FIELD(b, TESTBUSCR, SPDMBE, v)
+#define SET_WGSEL(b, v)		 SET_GLOBAL_FIELD(b, TESTBUSCR, WGSEL, v)
+#define SET_TBLSEL(b, v)	 SET_GLOBAL_FIELD(b, TESTBUSCR, TBLSEL, v)
+#define SET_TBHSEL(b, v)	 SET_GLOBAL_FIELD(b, TESTBUSCR, TBHSEL, v)
+#define SET_SPDM0SEL(b, v)       SET_GLOBAL_FIELD(b, TESTBUSCR, SPDM0SEL, v)
+#define SET_SPDM1SEL(b, v)       SET_GLOBAL_FIELD(b, TESTBUSCR, SPDM1SEL, v)
+#define SET_SPDM2SEL(b, v)       SET_GLOBAL_FIELD(b, TESTBUSCR, SPDM2SEL, v)
+#define SET_SPDM3SEL(b, v)       SET_GLOBAL_FIELD(b, TESTBUSCR, SPDM3SEL, v)
+
+
+/* TLBIVMID */
+#define SET_TLBIVMID_VMID(b, v)  SET_GLOBAL_FIELD(b, TLBIVMID, TLBIVMID_VMID, v)
+
+
+/* TLBRSW */
+#define SET_TLBRSW_INDEX(b, v)   SET_GLOBAL_FIELD(b, TLBRSW, TLBRSW_INDEX, v)
+#define SET_TLBBFBS(b, v)	 SET_GLOBAL_FIELD(b, TLBRSW, TLBBFBS, v)
+
+
+/* TLBTR0 */
+#define SET_PR(b, v)		 SET_GLOBAL_FIELD(b, TLBTR0, PR, v)
+#define SET_PW(b, v)		 SET_GLOBAL_FIELD(b, TLBTR0, PW, v)
+#define SET_UR(b, v)		 SET_GLOBAL_FIELD(b, TLBTR0, UR, v)
+#define SET_UW(b, v)		 SET_GLOBAL_FIELD(b, TLBTR0, UW, v)
+#define SET_XN(b, v)		 SET_GLOBAL_FIELD(b, TLBTR0, XN, v)
+#define SET_NSDESC(b, v)	 SET_GLOBAL_FIELD(b, TLBTR0, NSDESC, v)
+#define SET_ISH(b, v)		 SET_GLOBAL_FIELD(b, TLBTR0, ISH, v)
+#define SET_SH(b, v)		 SET_GLOBAL_FIELD(b, TLBTR0, SH, v)
+#define SET_MT(b, v)		 SET_GLOBAL_FIELD(b, TLBTR0, MT, v)
+#define SET_DPSIZR(b, v)	 SET_GLOBAL_FIELD(b, TLBTR0, DPSIZR, v)
+#define SET_DPSIZC(b, v)	 SET_GLOBAL_FIELD(b, TLBTR0, DPSIZC, v)
+
+
+/* TLBTR1 */
+#define SET_TLBTR1_VMID(b, v)    SET_GLOBAL_FIELD(b, TLBTR1, TLBTR1_VMID, v)
+#define SET_TLBTR1_PA(b, v)      SET_GLOBAL_FIELD(b, TLBTR1, TLBTR1_PA, v)
+
+
+/* TLBTR2 */
+#define SET_TLBTR2_ASID(b, v)    SET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_ASID, v)
+#define SET_TLBTR2_V(b, v)       SET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_V, v)
+#define SET_TLBTR2_NSTID(b, v)   SET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_NSTID, v)
+#define SET_TLBTR2_NV(b, v)      SET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_NV, v)
+#define SET_TLBTR2_VA(b, v)      SET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_VA, v)
+
+
+/* Global Field Getters */
+/* CBACR_N */
+#define GET_RWVMID(b, n)	 GET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), RWVMID)
+#define GET_RWE(b, n)		 GET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), RWE)
+#define GET_RWGE(b, n)		 GET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), RWGE)
+#define GET_CBVMID(b, n)	 GET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), CBVMID)
+#define GET_IRPTNDX(b, n)	 GET_GLOBAL_FIELD(b, ((n)<<2)|(CBACR_N), IRPTNDX)
+
+
+/* M2VCBR_N */
+#define GET_VMID(b, n)       GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), VMID)
+#define GET_CBNDX(b, n)      GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), CBNDX)
+#define GET_BYPASSD(b, n)    GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BYPASSD)
+#define GET_BPRCOSH(b, n)    GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPRCOSH)
+#define GET_BPRCISH(b, n)    GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPRCISH)
+#define GET_BPRCNSH(b, n)    GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPRCNSH)
+#define GET_BPSHCFG(b, n)    GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPSHCFG)
+#define GET_NSCFG(b, n)      GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), NSCFG)
+#define GET_BPMTCFG(b, n)    GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPMTCFG)
+#define GET_BPMEMTYPE(b, n)  GET_GLOBAL_FIELD(b, ((n)<<2)|(M2VCBR_N), BPMEMTYPE)
+
+
+/* CR */
+#define GET_RPUE(b)		 GET_GLOBAL_FIELD(b, CR, RPUE)
+#define GET_RPUERE(b)		 GET_GLOBAL_FIELD(b, CR, RPUERE)
+#define GET_RPUEIE(b)		 GET_GLOBAL_FIELD(b, CR, RPUEIE)
+#define GET_DCDEE(b)		 GET_GLOBAL_FIELD(b, CR, DCDEE)
+#define GET_CLIENTPD(b)		 GET_GLOBAL_FIELD(b, CR, CLIENTPD)
+#define GET_STALLD(b)		 GET_GLOBAL_FIELD(b, CR, STALLD)
+#define GET_TLBLKCRWE(b)	 GET_GLOBAL_FIELD(b, CR, TLBLKCRWE)
+#define GET_CR_TLBIALLCFG(b)	 GET_GLOBAL_FIELD(b, CR, CR_TLBIALLCFG)
+#define GET_TLBIVMIDCFG(b)	 GET_GLOBAL_FIELD(b, CR, TLBIVMIDCFG)
+#define GET_CR_HUME(b)		 GET_GLOBAL_FIELD(b, CR, CR_HUME)
+
+
+/* ESR */
+#define GET_CFG(b)		 GET_GLOBAL_FIELD(b, ESR, CFG)
+#define GET_BYPASS(b)		 GET_GLOBAL_FIELD(b, ESR, BYPASS)
+#define GET_ESR_MULTI(b)	 GET_GLOBAL_FIELD(b, ESR, ESR_MULTI)
+
+
+/* ESYNR0 */
+#define GET_ESYNR0_AMID(b)	 GET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_AMID)
+#define GET_ESYNR0_APID(b)	 GET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_APID)
+#define GET_ESYNR0_ABID(b)	 GET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_ABID)
+#define GET_ESYNR0_AVMID(b)	 GET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_AVMID)
+#define GET_ESYNR0_ATID(b)	 GET_GLOBAL_FIELD(b, ESYNR0, ESYNR0_ATID)
+
+
+/* ESYNR1 */
+#define GET_ESYNR1_AMEMTYPE(b)   GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AMEMTYPE)
+#define GET_ESYNR1_ASHARED(b)    GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ASHARED)
+#define GET_ESYNR1_AINNERSHARED(b) \
+			GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AINNERSHARED)
+#define GET_ESYNR1_APRIV(b)      GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_APRIV)
+#define GET_ESYNR1_APROTNS(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_APROTNS)
+#define GET_ESYNR1_AINST(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AINST)
+#define GET_ESYNR1_AWRITE(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AWRITE)
+#define GET_ESYNR1_ABURST(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ABURST)
+#define GET_ESYNR1_ALEN(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ALEN)
+#define GET_ESYNR1_ASIZE(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ASIZE)
+#define GET_ESYNR1_ALOCK(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_ALOCK)
+#define GET_ESYNR1_AOOO(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AOOO)
+#define GET_ESYNR1_AFULL(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AFULL)
+#define GET_ESYNR1_AC(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_AC)
+#define GET_ESYNR1_DCD(b)	 GET_GLOBAL_FIELD(b, ESYNR1, ESYNR1_DCD)
+
+
+/* IDR */
+#define GET_NM2VCBMT(b)		 GET_GLOBAL_FIELD(b, IDR, NM2VCBMT)
+#define GET_HTW(b)		 GET_GLOBAL_FIELD(b, IDR, HTW)
+#define GET_HUM(b)		 GET_GLOBAL_FIELD(b, IDR, HUM)
+#define GET_TLBSIZE(b)		 GET_GLOBAL_FIELD(b, IDR, TLBSIZE)
+#define GET_NCB(b)		 GET_GLOBAL_FIELD(b, IDR, NCB)
+#define GET_NIRPT(b)		 GET_GLOBAL_FIELD(b, IDR, NIRPT)
+
+
+/* REV */
+#define GET_MAJOR(b)		 GET_GLOBAL_FIELD(b, REV, MAJOR)
+#define GET_MINOR(b)		 GET_GLOBAL_FIELD(b, REV, MINOR)
+
+
+/* TESTBUSCR */
+#define GET_TBE(b)		 GET_GLOBAL_FIELD(b, TESTBUSCR, TBE)
+#define GET_SPDMBE(b)		 GET_GLOBAL_FIELD(b, TESTBUSCR, SPDMBE)
+#define GET_WGSEL(b)		 GET_GLOBAL_FIELD(b, TESTBUSCR, WGSEL)
+#define GET_TBLSEL(b)		 GET_GLOBAL_FIELD(b, TESTBUSCR, TBLSEL)
+#define GET_TBHSEL(b)		 GET_GLOBAL_FIELD(b, TESTBUSCR, TBHSEL)
+#define GET_SPDM0SEL(b)		 GET_GLOBAL_FIELD(b, TESTBUSCR, SPDM0SEL)
+#define GET_SPDM1SEL(b)		 GET_GLOBAL_FIELD(b, TESTBUSCR, SPDM1SEL)
+#define GET_SPDM2SEL(b)		 GET_GLOBAL_FIELD(b, TESTBUSCR, SPDM2SEL)
+#define GET_SPDM3SEL(b)		 GET_GLOBAL_FIELD(b, TESTBUSCR, SPDM3SEL)
+
+
+/* TLBIVMID */
+#define GET_TLBIVMID_VMID(b)	 GET_GLOBAL_FIELD(b, TLBIVMID, TLBIVMID_VMID)
+
+
+/* TLBTR0 */
+#define GET_PR(b)		 GET_GLOBAL_FIELD(b, TLBTR0, PR)
+#define GET_PW(b)		 GET_GLOBAL_FIELD(b, TLBTR0, PW)
+#define GET_UR(b)		 GET_GLOBAL_FIELD(b, TLBTR0, UR)
+#define GET_UW(b)		 GET_GLOBAL_FIELD(b, TLBTR0, UW)
+#define GET_XN(b)		 GET_GLOBAL_FIELD(b, TLBTR0, XN)
+#define GET_NSDESC(b)		 GET_GLOBAL_FIELD(b, TLBTR0, NSDESC)
+#define GET_ISH(b)		 GET_GLOBAL_FIELD(b, TLBTR0, ISH)
+#define GET_SH(b)		 GET_GLOBAL_FIELD(b, TLBTR0, SH)
+#define GET_MT(b)		 GET_GLOBAL_FIELD(b, TLBTR0, MT)
+#define GET_DPSIZR(b)		 GET_GLOBAL_FIELD(b, TLBTR0, DPSIZR)
+#define GET_DPSIZC(b)		 GET_GLOBAL_FIELD(b, TLBTR0, DPSIZC)
+
+
+/* TLBTR1 */
+#define GET_TLBTR1_VMID(b)	 GET_GLOBAL_FIELD(b, TLBTR1, TLBTR1_VMID)
+#define GET_TLBTR1_PA(b)	 GET_GLOBAL_FIELD(b, TLBTR1, TLBTR1_PA)
+
+
+/* TLBTR2 */
+#define GET_TLBTR2_ASID(b)	 GET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_ASID)
+#define GET_TLBTR2_V(b)		 GET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_V)
+#define GET_TLBTR2_NSTID(b)	 GET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_NSTID)
+#define GET_TLBTR2_NV(b)	 GET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_NV)
+#define GET_TLBTR2_VA(b)	 GET_GLOBAL_FIELD(b, TLBTR2, TLBTR2_VA)
+
+
+/* Context register field setters / getters */
+/* Context register field setters */
+/* ACTLR */
+#define SET_CFERE(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, CFERE, v)
+#define SET_CFEIE(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, CFEIE, v)
+#define SET_PTSHCFG(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, PTSHCFG, v)
+#define SET_RCOSH(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, RCOSH, v)
+#define SET_RCISH(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, RCISH, v)
+#define SET_RCNSH(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, RCNSH, v)
+#define SET_PRIVCFG(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, PRIVCFG, v)
+#define SET_DNA(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, DNA, v)
+#define SET_DNLV2PA(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, DNLV2PA, v)
+#define SET_TLBMCFG(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, TLBMCFG, v)
+#define SET_CFCFG(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, CFCFG, v)
+#define SET_TIPCF(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, TIPCF, v)
+#define SET_V2PCFG(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, V2PCFG, v)
+#define SET_HUME(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, HUME, v)
+#define SET_PTMTCFG(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, PTMTCFG, v)
+#define SET_PTMEMTYPE(b, c, v)	 SET_CONTEXT_FIELD(b, c, ACTLR, PTMEMTYPE, v)
+
+
+/* BFBCR */
+#define SET_BFBDFE(b, c, v)	 SET_CONTEXT_FIELD(b, c, BFBCR, BFBDFE, v)
+#define SET_BFBSFE(b, c, v)	 SET_CONTEXT_FIELD(b, c, BFBCR, BFBSFE, v)
+#define SET_SFVS(b, c, v)	 SET_CONTEXT_FIELD(b, c, BFBCR, SFVS, v)
+#define SET_FLVIC(b, c, v)	 SET_CONTEXT_FIELD(b, c, BFBCR, FLVIC, v)
+#define SET_SLVIC(b, c, v)	 SET_CONTEXT_FIELD(b, c, BFBCR, SLVIC, v)
+
+
+/* CONTEXTIDR */
+#define SET_CONTEXTIDR_ASID(b, c, v)   \
+		SET_CONTEXT_FIELD(b, c, CONTEXTIDR, CONTEXTIDR_ASID, v)
+#define SET_CONTEXTIDR_PROCID(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CONTEXTIDR, PROCID, v)
+
+
+/* FSR */
+#define SET_TF(b, c, v)		 SET_CONTEXT_FIELD(b, c, FSR, TF, v)
+#define SET_AFF(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSR, AFF, v)
+#define SET_APF(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSR, APF, v)
+#define SET_TLBMF(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSR, TLBMF, v)
+#define SET_HTWDEEF(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSR, HTWDEEF, v)
+#define SET_HTWSEEF(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSR, HTWSEEF, v)
+#define SET_MHF(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSR, MHF, v)
+#define SET_SL(b, c, v)		 SET_CONTEXT_FIELD(b, c, FSR, SL, v)
+#define SET_SS(b, c, v)		 SET_CONTEXT_FIELD(b, c, FSR, SS, v)
+#define SET_MULTI(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSR, MULTI, v)
+
+
+/* FSYNR0 */
+#define SET_AMID(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR0, AMID, v)
+#define SET_APID(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR0, APID, v)
+#define SET_ABID(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR0, ABID, v)
+#define SET_ATID(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR0, ATID, v)
+
+
+/* FSYNR1 */
+#define SET_AMEMTYPE(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, AMEMTYPE, v)
+#define SET_ASHARED(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, ASHARED, v)
+#define SET_AINNERSHARED(b, c, v)  \
+				SET_CONTEXT_FIELD(b, c, FSYNR1, AINNERSHARED, v)
+#define SET_APRIV(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, APRIV, v)
+#define SET_APROTNS(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, APROTNS, v)
+#define SET_AINST(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, AINST, v)
+#define SET_AWRITE(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, AWRITE, v)
+#define SET_ABURST(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, ABURST, v)
+#define SET_ALEN(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, ALEN, v)
+#define SET_FSYNR1_ASIZE(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, FSYNR1, FSYNR1_ASIZE, v)
+#define SET_ALOCK(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, ALOCK, v)
+#define SET_AFULL(b, c, v)	 SET_CONTEXT_FIELD(b, c, FSYNR1, AFULL, v)
+
+
+/* NMRR */
+#define SET_ICPC0(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, ICPC0, v)
+#define SET_ICPC1(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, ICPC1, v)
+#define SET_ICPC2(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, ICPC2, v)
+#define SET_ICPC3(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, ICPC3, v)
+#define SET_ICPC4(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, ICPC4, v)
+#define SET_ICPC5(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, ICPC5, v)
+#define SET_ICPC6(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, ICPC6, v)
+#define SET_ICPC7(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, ICPC7, v)
+#define SET_OCPC0(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, OCPC0, v)
+#define SET_OCPC1(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, OCPC1, v)
+#define SET_OCPC2(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, OCPC2, v)
+#define SET_OCPC3(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, OCPC3, v)
+#define SET_OCPC4(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, OCPC4, v)
+#define SET_OCPC5(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, OCPC5, v)
+#define SET_OCPC6(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, OCPC6, v)
+#define SET_OCPC7(b, c, v)	 SET_CONTEXT_FIELD(b, c, NMRR, OCPC7, v)
+
+
+/* PAR */
+#define SET_FAULT(b, c, v)	 SET_CONTEXT_FIELD(b, c, PAR, FAULT, v)
+
+#define SET_FAULT_TF(b, c, v)	 SET_CONTEXT_FIELD(b, c, PAR, FAULT_TF, v)
+#define SET_FAULT_AFF(b, c, v)	 SET_CONTEXT_FIELD(b, c, PAR, FAULT_AFF, v)
+#define SET_FAULT_APF(b, c, v)	 SET_CONTEXT_FIELD(b, c, PAR, FAULT_APF, v)
+#define SET_FAULT_TLBMF(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, FAULT_TLBMF, v)
+#define SET_FAULT_HTWDEEF(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, PAR, FAULT_HTWDEEF, v)
+#define SET_FAULT_HTWSEEF(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, PAR, FAULT_HTWSEEF, v)
+#define SET_FAULT_MHF(b, c, v)	 SET_CONTEXT_FIELD(b, c, PAR, FAULT_MHF, v)
+#define SET_FAULT_SL(b, c, v)	 SET_CONTEXT_FIELD(b, c, PAR, FAULT_SL, v)
+#define SET_FAULT_SS(b, c, v)	 SET_CONTEXT_FIELD(b, c, PAR, FAULT_SS, v)
+
+#define SET_NOFAULT_SS(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_SS, v)
+#define SET_NOFAULT_MT(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_MT, v)
+#define SET_NOFAULT_SH(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_SH, v)
+#define SET_NOFAULT_NS(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_NS, v)
+#define SET_NOFAULT_NOS(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_NOS, v)
+#define SET_NPFAULT_PA(b, c, v) SET_CONTEXT_FIELD(b, c, PAR, PAR_NPFAULT_PA, v)
+
+
+/* PRRR */
+#define SET_MTC0(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, MTC0, v)
+#define SET_MTC1(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, MTC1, v)
+#define SET_MTC2(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, MTC2, v)
+#define SET_MTC3(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, MTC3, v)
+#define SET_MTC4(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, MTC4, v)
+#define SET_MTC5(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, MTC5, v)
+#define SET_MTC6(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, MTC6, v)
+#define SET_MTC7(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, MTC7, v)
+#define SET_SHDSH0(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, SHDSH0, v)
+#define SET_SHDSH1(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, SHDSH1, v)
+#define SET_SHNMSH0(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, SHNMSH0, v)
+#define SET_SHNMSH1(b, c, v)     SET_CONTEXT_FIELD(b, c, PRRR, SHNMSH1, v)
+#define SET_NOS0(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, NOS0, v)
+#define SET_NOS1(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, NOS1, v)
+#define SET_NOS2(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, NOS2, v)
+#define SET_NOS3(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, NOS3, v)
+#define SET_NOS4(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, NOS4, v)
+#define SET_NOS5(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, NOS5, v)
+#define SET_NOS6(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, NOS6, v)
+#define SET_NOS7(b, c, v)	 SET_CONTEXT_FIELD(b, c, PRRR, NOS7, v)
+
+
+/* RESUME */
+#define SET_TNR(b, c, v)	 SET_CONTEXT_FIELD(b, c, RESUME, TNR, v)
+
+
+/* SCTLR */
+#define SET_M(b, c, v)		 SET_CONTEXT_FIELD(b, c, SCTLR, M, v)
+#define SET_TRE(b, c, v)	 SET_CONTEXT_FIELD(b, c, SCTLR, TRE, v)
+#define SET_AFE(b, c, v)	 SET_CONTEXT_FIELD(b, c, SCTLR, AFE, v)
+#define SET_HAF(b, c, v)	 SET_CONTEXT_FIELD(b, c, SCTLR, HAF, v)
+#define SET_BE(b, c, v)		 SET_CONTEXT_FIELD(b, c, SCTLR, BE, v)
+#define SET_AFFD(b, c, v)	 SET_CONTEXT_FIELD(b, c, SCTLR, AFFD, v)
+
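+/*
+ * Illustrative sequence (a sketch, not the driver's authoritative init
+ * path): bringing up a context bank with the field setters in this
+ * section, where "base", "ctx", "pgtable" and "asid" are assumed to
+ * come from the caller:
+ *
+ *	SET_TTBCR(base, ctx, 0);
+ *	SET_TTBR0_PA(base, ctx, pgtable >> TTBR0_PA_SHIFT);
+ *	SET_CONTEXTIDR_ASID(base, ctx, asid);
+ *	SET_M(base, ctx, 1);
+ */
+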
+
+/* TLBLCKR */
+#define SET_LKE(b, c, v)	   SET_CONTEXT_FIELD(b, c, TLBLCKR, LKE, v)
+#define SET_TLBLKCR_TLBIALLCFG(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, TLBLCKR, TLBLCKR_TLBIALLCFG, v)
+#define SET_TLBIASIDCFG(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, TLBLCKR, TLBIASIDCFG, v)
+#define SET_TLBIVAACFG(b, c, v)	SET_CONTEXT_FIELD(b, c, TLBLCKR, TLBIVAACFG, v)
+#define SET_FLOOR(b, c, v)	SET_CONTEXT_FIELD(b, c, TLBLCKR, FLOOR, v)
+#define SET_VICTIM(b, c, v)	SET_CONTEXT_FIELD(b, c, TLBLCKR, VICTIM, v)
+
+
+/* TTBCR */
+#define SET_N(b, c, v)	         SET_CONTEXT_FIELD(b, c, TTBCR, N, v)
+#define SET_PD0(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBCR, PD0, v)
+#define SET_PD1(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBCR, PD1, v)
+
+
+/* TTBR0 */
+#define SET_TTBR0_IRGNH(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_IRGNH, v)
+#define SET_TTBR0_SH(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_SH, v)
+#define SET_TTBR0_ORGN(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_ORGN, v)
+#define SET_TTBR0_NOS(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_NOS, v)
+#define SET_TTBR0_IRGNL(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_IRGNL, v)
+#define SET_TTBR0_PA(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_PA, v)
+
+
+/* TTBR1 */
+#define SET_TTBR1_IRGNH(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_IRGNH, v)
+#define SET_TTBR1_SH(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_SH, v)
+#define SET_TTBR1_ORGN(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_ORGN, v)
+#define SET_TTBR1_NOS(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_NOS, v)
+#define SET_TTBR1_IRGNL(b, c, v) SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_IRGNL, v)
+#define SET_TTBR1_PA(b, c, v)	 SET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_PA, v)
+
+
+/* V2PSR */
+#define SET_HIT(b, c, v)	 SET_CONTEXT_FIELD(b, c, V2PSR, HIT, v)
+#define SET_INDEX(b, c, v)	 SET_CONTEXT_FIELD(b, c, V2PSR, INDEX, v)
+
+
+/* Context register field getters */
+/* ACTLR */
+#define GET_CFERE(b, c)	        GET_CONTEXT_FIELD(b, c, ACTLR, CFERE)
+#define GET_CFEIE(b, c)	        GET_CONTEXT_FIELD(b, c, ACTLR, CFEIE)
+#define GET_PTSHCFG(b, c)       GET_CONTEXT_FIELD(b, c, ACTLR, PTSHCFG)
+#define GET_RCOSH(b, c)	        GET_CONTEXT_FIELD(b, c, ACTLR, RCOSH)
+#define GET_RCISH(b, c)	        GET_CONTEXT_FIELD(b, c, ACTLR, RCISH)
+#define GET_RCNSH(b, c)	        GET_CONTEXT_FIELD(b, c, ACTLR, RCNSH)
+#define GET_PRIVCFG(b, c)       GET_CONTEXT_FIELD(b, c, ACTLR, PRIVCFG)
+#define GET_DNA(b, c)	        GET_CONTEXT_FIELD(b, c, ACTLR, DNA)
+#define GET_DNLV2PA(b, c)       GET_CONTEXT_FIELD(b, c, ACTLR, DNLV2PA)
+#define GET_TLBMCFG(b, c)       GET_CONTEXT_FIELD(b, c, ACTLR, TLBMCFG)
+#define GET_CFCFG(b, c)	        GET_CONTEXT_FIELD(b, c, ACTLR, CFCFG)
+#define GET_TIPCF(b, c)	        GET_CONTEXT_FIELD(b, c, ACTLR, TIPCF)
+#define GET_V2PCFG(b, c)        GET_CONTEXT_FIELD(b, c, ACTLR, V2PCFG)
+#define GET_HUME(b, c)	        GET_CONTEXT_FIELD(b, c, ACTLR, HUME)
+#define GET_PTMTCFG(b, c)       GET_CONTEXT_FIELD(b, c, ACTLR, PTMTCFG)
+#define GET_PTMEMTYPE(b, c)     GET_CONTEXT_FIELD(b, c, ACTLR, PTMEMTYPE)
+
+/* BFBCR */
+#define GET_BFBDFE(b, c)	GET_CONTEXT_FIELD(b, c, BFBCR, BFBDFE)
+#define GET_BFBSFE(b, c)	GET_CONTEXT_FIELD(b, c, BFBCR, BFBSFE)
+#define GET_SFVS(b, c)		GET_CONTEXT_FIELD(b, c, BFBCR, SFVS)
+#define GET_FLVIC(b, c)		GET_CONTEXT_FIELD(b, c, BFBCR, FLVIC)
+#define GET_SLVIC(b, c)		GET_CONTEXT_FIELD(b, c, BFBCR, SLVIC)
+
+
+/* CONTEXTIDR */
+#define GET_CONTEXTIDR_ASID(b, c) \
+			GET_CONTEXT_FIELD(b, c, CONTEXTIDR, CONTEXTIDR_ASID)
+#define GET_CONTEXTIDR_PROCID(b, c) GET_CONTEXT_FIELD(b, c, CONTEXTIDR, PROCID)
+
+
+/* FSR */
+#define GET_TF(b, c)		GET_CONTEXT_FIELD(b, c, FSR, TF)
+#define GET_AFF(b, c)		GET_CONTEXT_FIELD(b, c, FSR, AFF)
+#define GET_APF(b, c)		GET_CONTEXT_FIELD(b, c, FSR, APF)
+#define GET_TLBMF(b, c)		GET_CONTEXT_FIELD(b, c, FSR, TLBMF)
+#define GET_HTWDEEF(b, c)	GET_CONTEXT_FIELD(b, c, FSR, HTWDEEF)
+#define GET_HTWSEEF(b, c)	GET_CONTEXT_FIELD(b, c, FSR, HTWSEEF)
+#define GET_MHF(b, c)		GET_CONTEXT_FIELD(b, c, FSR, MHF)
+#define GET_SL(b, c)		GET_CONTEXT_FIELD(b, c, FSR, SL)
+#define GET_SS(b, c)		GET_CONTEXT_FIELD(b, c, FSR, SS)
+#define GET_MULTI(b, c)		GET_CONTEXT_FIELD(b, c, FSR, MULTI)
+
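+/*
+ * Sketch of a per-context fault handler using these getters ("base" and
+ * "ctx" are assumed from the caller, handle_translation_fault() is a
+ * hypothetical helper, and the write-back-to-clear step is an
+ * assumption of this sketch):
+ *
+ *	u32 fsr = GET_FSR(base, ctx);
+ *	if (GET_TF(base, ctx))
+ *		handle_translation_fault(base, ctx);
+ *	SET_FSR(base, ctx, fsr);
+ *	SET_RESUME(base, ctx, 1);
+ */
+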
+
+/* FSYNR0 */
+#define GET_AMID(b, c)		GET_CONTEXT_FIELD(b, c, FSYNR0, AMID)
+#define GET_APID(b, c)		GET_CONTEXT_FIELD(b, c, FSYNR0, APID)
+#define GET_ABID(b, c)		GET_CONTEXT_FIELD(b, c, FSYNR0, ABID)
+#define GET_ATID(b, c)		GET_CONTEXT_FIELD(b, c, FSYNR0, ATID)
+
+
+/* FSYNR1 */
+#define GET_AMEMTYPE(b, c)	GET_CONTEXT_FIELD(b, c, FSYNR1, AMEMTYPE)
+#define GET_ASHARED(b, c)	GET_CONTEXT_FIELD(b, c, FSYNR1, ASHARED)
+#define GET_AINNERSHARED(b, c)  GET_CONTEXT_FIELD(b, c, FSYNR1, AINNERSHARED)
+#define GET_APRIV(b, c)		GET_CONTEXT_FIELD(b, c, FSYNR1, APRIV)
+#define GET_APROTNS(b, c)	GET_CONTEXT_FIELD(b, c, FSYNR1, APROTNS)
+#define GET_AINST(b, c)		GET_CONTEXT_FIELD(b, c, FSYNR1, AINST)
+#define GET_AWRITE(b, c)	GET_CONTEXT_FIELD(b, c, FSYNR1, AWRITE)
+#define GET_ABURST(b, c)	GET_CONTEXT_FIELD(b, c, FSYNR1, ABURST)
+#define GET_ALEN(b, c)		GET_CONTEXT_FIELD(b, c, FSYNR1, ALEN)
+#define GET_FSYNR1_ASIZE(b, c)	GET_CONTEXT_FIELD(b, c, FSYNR1, FSYNR1_ASIZE)
+#define GET_ALOCK(b, c)		GET_CONTEXT_FIELD(b, c, FSYNR1, ALOCK)
+#define GET_AFULL(b, c)		GET_CONTEXT_FIELD(b, c, FSYNR1, AFULL)
+
+
+/* NMRR */
+#define GET_ICPC0(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, ICPC0)
+#define GET_ICPC1(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, ICPC1)
+#define GET_ICPC2(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, ICPC2)
+#define GET_ICPC3(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, ICPC3)
+#define GET_ICPC4(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, ICPC4)
+#define GET_ICPC5(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, ICPC5)
+#define GET_ICPC6(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, ICPC6)
+#define GET_ICPC7(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, ICPC7)
+#define GET_OCPC0(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, OCPC0)
+#define GET_OCPC1(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, OCPC1)
+#define GET_OCPC2(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, OCPC2)
+#define GET_OCPC3(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, OCPC3)
+#define GET_OCPC4(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, OCPC4)
+#define GET_OCPC5(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, OCPC5)
+#define GET_OCPC6(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, OCPC6)
+#define GET_OCPC7(b, c)		GET_CONTEXT_FIELD(b, c, NMRR, OCPC7)
+#define NMRR_ICP(nmrr, n)	(((nmrr) & (3 << ((n) * 2))) >> ((n) * 2))
+#define NMRR_OCP(nmrr, n)	(((nmrr) & (3 << ((n) * 2 + 16))) >> \
+								((n) * 2 + 16))
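+/*
+ * Worked example: with the NMRR value 0x40e040e0 that Linux's ARMv7
+ * setup code programs for TEX remap, NMRR_ICP(0x40e040e0, 3) extracts
+ * bits [7:6] = 0b11 = 3, and NMRR_OCP(0x40e040e0, 3) extracts bits
+ * [23:22], which are also 0b11 = 3.
+ */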
+
+/* PAR */
+#define GET_FAULT(b, c)		GET_CONTEXT_FIELD(b, c, PAR, FAULT)
+
+#define GET_FAULT_TF(b, c)	GET_CONTEXT_FIELD(b, c, PAR, FAULT_TF)
+#define GET_FAULT_AFF(b, c)	GET_CONTEXT_FIELD(b, c, PAR, FAULT_AFF)
+#define GET_FAULT_APF(b, c)	GET_CONTEXT_FIELD(b, c, PAR, FAULT_APF)
+#define GET_FAULT_TLBMF(b, c)   GET_CONTEXT_FIELD(b, c, PAR, FAULT_TLBMF)
+#define GET_FAULT_HTWDEEF(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_HTWDEEF)
+#define GET_FAULT_HTWSEEF(b, c) GET_CONTEXT_FIELD(b, c, PAR, FAULT_HTWSEEF)
+#define GET_FAULT_MHF(b, c)	GET_CONTEXT_FIELD(b, c, PAR, FAULT_MHF)
+#define GET_FAULT_SL(b, c)	GET_CONTEXT_FIELD(b, c, PAR, FAULT_SL)
+#define GET_FAULT_SS(b, c)	GET_CONTEXT_FIELD(b, c, PAR, FAULT_SS)
+
+#define GET_NOFAULT_SS(b, c)	GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_SS)
+#define GET_NOFAULT_MT(b, c)	GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_MT)
+#define GET_NOFAULT_SH(b, c)	GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_SH)
+#define GET_NOFAULT_NS(b, c)	GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_NS)
+#define GET_NOFAULT_NOS(b, c)   GET_CONTEXT_FIELD(b, c, PAR, PAR_NOFAULT_NOS)
+#define GET_NPFAULT_PA(b, c)	GET_CONTEXT_FIELD(b, c, PAR, PAR_NPFAULT_PA)
+
+
+/* PRRR */
+#define GET_MTC0(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, MTC0)
+#define GET_MTC1(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, MTC1)
+#define GET_MTC2(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, MTC2)
+#define GET_MTC3(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, MTC3)
+#define GET_MTC4(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, MTC4)
+#define GET_MTC5(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, MTC5)
+#define GET_MTC6(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, MTC6)
+#define GET_MTC7(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, MTC7)
+#define GET_SHDSH0(b, c)	GET_CONTEXT_FIELD(b, c, PRRR, SHDSH0)
+#define GET_SHDSH1(b, c)	GET_CONTEXT_FIELD(b, c, PRRR, SHDSH1)
+#define GET_SHNMSH0(b, c)	GET_CONTEXT_FIELD(b, c, PRRR, SHNMSH0)
+#define GET_SHNMSH1(b, c)	GET_CONTEXT_FIELD(b, c, PRRR, SHNMSH1)
+#define GET_NOS0(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, NOS0)
+#define GET_NOS1(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, NOS1)
+#define GET_NOS2(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, NOS2)
+#define GET_NOS3(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, NOS3)
+#define GET_NOS4(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, NOS4)
+#define GET_NOS5(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, NOS5)
+#define GET_NOS6(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, NOS6)
+#define GET_NOS7(b, c)		GET_CONTEXT_FIELD(b, c, PRRR, NOS7)
+#define PRRR_NOS(prrr, n)	 ((prrr) & (1 << ((n) + 24)) ? 1 : 0)
+#define PRRR_MT(prrr, n)	 (((prrr) & (3 << ((n) * 2))) >> ((n) * 2))
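+/*
+ * Worked example: with the PRRR value 0xff0a81a8 that Linux's ARMv7
+ * setup code programs, PRRR_MT(0xff0a81a8, 4) extracts bits [9:8] =
+ * 0b01 = 1, and PRRR_NOS(0xff0a81a8, 0) tests bit 24, which is set, so
+ * it evaluates to 1.
+ */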
+
+
+/* RESUME */
+#define GET_TNR(b, c)		GET_CONTEXT_FIELD(b, c, RESUME, TNR)
+
+
+/* SCTLR */
+#define GET_M(b, c)		GET_CONTEXT_FIELD(b, c, SCTLR, M)
+#define GET_TRE(b, c)		GET_CONTEXT_FIELD(b, c, SCTLR, TRE)
+#define GET_AFE(b, c)		GET_CONTEXT_FIELD(b, c, SCTLR, AFE)
+#define GET_HAF(b, c)		GET_CONTEXT_FIELD(b, c, SCTLR, HAF)
+#define GET_BE(b, c)		GET_CONTEXT_FIELD(b, c, SCTLR, BE)
+#define GET_AFFD(b, c)		GET_CONTEXT_FIELD(b, c, SCTLR, AFFD)
+
+
+/* TLBLCKR */
+#define GET_LKE(b, c)		GET_CONTEXT_FIELD(b, c, TLBLCKR, LKE)
+#define GET_TLBLCKR_TLBIALLCFG(b, c) \
+			GET_CONTEXT_FIELD(b, c, TLBLCKR, TLBLCKR_TLBIALLCFG)
+#define GET_TLBIASIDCFG(b, c)   GET_CONTEXT_FIELD(b, c, TLBLCKR, TLBIASIDCFG)
+#define GET_TLBIVAACFG(b, c)	GET_CONTEXT_FIELD(b, c, TLBLCKR, TLBIVAACFG)
+#define GET_FLOOR(b, c)		GET_CONTEXT_FIELD(b, c, TLBLCKR, FLOOR)
+#define GET_VICTIM(b, c)	GET_CONTEXT_FIELD(b, c, TLBLCKR, VICTIM)
+
+
+/* TTBCR */
+#define GET_N(b, c)		GET_CONTEXT_FIELD(b, c, TTBCR, N)
+#define GET_PD0(b, c)		GET_CONTEXT_FIELD(b, c, TTBCR, PD0)
+#define GET_PD1(b, c)		GET_CONTEXT_FIELD(b, c, TTBCR, PD1)
+
+
+/* TTBR0 */
+#define GET_TTBR0_IRGNH(b, c)	GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_IRGNH)
+#define GET_TTBR0_SH(b, c)	GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_SH)
+#define GET_TTBR0_ORGN(b, c)	GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_ORGN)
+#define GET_TTBR0_NOS(b, c)	GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_NOS)
+#define GET_TTBR0_IRGNL(b, c)	GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_IRGNL)
+#define GET_TTBR0_PA(b, c)	GET_CONTEXT_FIELD(b, c, TTBR0, TTBR0_PA)
+
+
+/* TTBR1 */
+#define GET_TTBR1_IRGNH(b, c)	GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_IRGNH)
+#define GET_TTBR1_SH(b, c)	GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_SH)
+#define GET_TTBR1_ORGN(b, c)	GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_ORGN)
+#define GET_TTBR1_NOS(b, c)	GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_NOS)
+#define GET_TTBR1_IRGNL(b, c)	GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_IRGNL)
+#define GET_TTBR1_PA(b, c)	GET_CONTEXT_FIELD(b, c, TTBR1, TTBR1_PA)
+
+
+/* V2PSR */
+#define GET_HIT(b, c)		GET_CONTEXT_FIELD(b, c, V2PSR, HIT)
+#define GET_INDEX(b, c)		GET_CONTEXT_FIELD(b, c, V2PSR, INDEX)
+
+
+/* Global Registers */
+#define M2VCBR_N	(0x000)
+#define CBACR_N		(0x800)
+#define TLBRSW		(0xE00)
+#define TLBTR0		(0xE80)
+#define TLBTR1		(0xE84)
+#define TLBTR2		(0xE88)
+#define TESTBUSCR	(0xE8C)
+#define GLOBAL_TLBIALL	(0xF00)
+#define TLBIVMID	(0xF04)
+#define CR		(0xF80)
+#define EAR		(0xF84)
+#define ESR		(0xF88)
+#define ESRRESTORE	(0xF8C)
+#define ESYNR0		(0xF90)
+#define ESYNR1		(0xF94)
+#define REV		(0xFF4)
+#define IDR		(0xFF8)
+#define RPU_ACR		(0xFFC)
+
+/* Event Monitor (EM) Registers */
+#define EMMC		(0xE000)
+#define EMCS		(0xE004)
+#define EMCC_N		(0xE100)
+#define EMC_N		(0xE200)
+
+/* Context Bank Registers */
+#define SCTLR		(0x000)
+#define ACTLR		(0x004)
+#define CONTEXTIDR	(0x008)
+#define TTBR0		(0x010)
+#define TTBR1		(0x014)
+#define TTBCR		(0x018)
+#define PAR		(0x01C)
+#define FSR		(0x020)
+#define FSRRESTORE	(0x024)
+#define FAR		(0x028)
+#define FSYNR0		(0x02C)
+#define FSYNR1		(0x030)
+#define PRRR		(0x034)
+#define NMRR		(0x038)
+#define TLBLCKR		(0x03C)
+#define V2PSR		(0x040)
+#define TLBFLPTER	(0x044)
+#define TLBSLPTER	(0x048)
+#define BFBCR		(0x04C)
+#define CTX_TLBIALL	(0x800)
+#define TLBIASID	(0x804)
+#define TLBIVA		(0x808)
+#define TLBIVAA		(0x80C)
+#define V2PPR		(0x810)
+#define V2PPW		(0x814)
+#define V2PUR		(0x818)
+#define V2PUW		(0x81C)
+#define RESUME		(0x820)
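+/*
+ * Note (assumption of this sketch): the CTX_REG accessors earlier in
+ * this header compose a context-bank address as roughly
+ * base + (ctx << CTX_SHIFT) + offset, so e.g. the FSR of context 1
+ * would be read at base + (1 << CTX_SHIFT) + 0x020.
+ */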
+
+
+/* Global Register Fields */
+/* CBACRn */
+#define RWVMID        (RWVMID_MASK       << RWVMID_SHIFT)
+#define RWE           (RWE_MASK          << RWE_SHIFT)
+#define RWGE          (RWGE_MASK         << RWGE_SHIFT)
+#define CBVMID        (CBVMID_MASK       << CBVMID_SHIFT)
+#define IRPTNDX       (IRPTNDX_MASK      << IRPTNDX_SHIFT)
+
+
+/* CR */
+#define RPUE          (RPUE_MASK          << RPUE_SHIFT)
+#define RPUERE        (RPUERE_MASK        << RPUERE_SHIFT)
+#define RPUEIE        (RPUEIE_MASK        << RPUEIE_SHIFT)
+#define DCDEE         (DCDEE_MASK         << DCDEE_SHIFT)
+#define CLIENTPD      (CLIENTPD_MASK      << CLIENTPD_SHIFT)
+#define STALLD        (STALLD_MASK        << STALLD_SHIFT)
+#define TLBLKCRWE     (TLBLKCRWE_MASK     << TLBLKCRWE_SHIFT)
+#define CR_TLBIALLCFG (CR_TLBIALLCFG_MASK << CR_TLBIALLCFG_SHIFT)
+#define TLBIVMIDCFG   (TLBIVMIDCFG_MASK   << TLBIVMIDCFG_SHIFT)
+#define CR_HUME       (CR_HUME_MASK       << CR_HUME_SHIFT)
+
+
+/* ESR */
+#define CFG           (CFG_MASK          << CFG_SHIFT)
+#define BYPASS        (BYPASS_MASK       << BYPASS_SHIFT)
+#define ESR_MULTI     (ESR_MULTI_MASK    << ESR_MULTI_SHIFT)
+
+
+/* ESYNR0 */
+#define ESYNR0_AMID   (ESYNR0_AMID_MASK  << ESYNR0_AMID_SHIFT)
+#define ESYNR0_APID   (ESYNR0_APID_MASK  << ESYNR0_APID_SHIFT)
+#define ESYNR0_ABID   (ESYNR0_ABID_MASK  << ESYNR0_ABID_SHIFT)
+#define ESYNR0_AVMID  (ESYNR0_AVMID_MASK << ESYNR0_AVMID_SHIFT)
+#define ESYNR0_ATID   (ESYNR0_ATID_MASK  << ESYNR0_ATID_SHIFT)
+
+
+/* ESYNR1 */
+#define ESYNR1_AMEMTYPE      (ESYNR1_AMEMTYPE_MASK    << ESYNR1_AMEMTYPE_SHIFT)
+#define ESYNR1_ASHARED       (ESYNR1_ASHARED_MASK     << ESYNR1_ASHARED_SHIFT)
+#define ESYNR1_AINNERSHARED  (ESYNR1_AINNERSHARED_MASK << \
+						ESYNR1_AINNERSHARED_SHIFT)
+#define ESYNR1_APRIV         (ESYNR1_APRIV_MASK       << ESYNR1_APRIV_SHIFT)
+#define ESYNR1_APROTNS       (ESYNR1_APROTNS_MASK     << ESYNR1_APROTNS_SHIFT)
+#define ESYNR1_AINST         (ESYNR1_AINST_MASK       << ESYNR1_AINST_SHIFT)
+#define ESYNR1_AWRITE        (ESYNR1_AWRITE_MASK      << ESYNR1_AWRITE_SHIFT)
+#define ESYNR1_ABURST        (ESYNR1_ABURST_MASK      << ESYNR1_ABURST_SHIFT)
+#define ESYNR1_ALEN          (ESYNR1_ALEN_MASK        << ESYNR1_ALEN_SHIFT)
+#define ESYNR1_ASIZE         (ESYNR1_ASIZE_MASK       << ESYNR1_ASIZE_SHIFT)
+#define ESYNR1_ALOCK         (ESYNR1_ALOCK_MASK       << ESYNR1_ALOCK_SHIFT)
+#define ESYNR1_AOOO          (ESYNR1_AOOO_MASK        << ESYNR1_AOOO_SHIFT)
+#define ESYNR1_AFULL         (ESYNR1_AFULL_MASK       << ESYNR1_AFULL_SHIFT)
+#define ESYNR1_AC            (ESYNR1_AC_MASK          << ESYNR1_AC_SHIFT)
+#define ESYNR1_DCD           (ESYNR1_DCD_MASK         << ESYNR1_DCD_SHIFT)
+
+
+/* IDR */
+#define NM2VCBMT      (NM2VCBMT_MASK     << NM2VCBMT_SHIFT)
+#define HTW           (HTW_MASK          << HTW_SHIFT)
+#define HUM           (HUM_MASK          << HUM_SHIFT)
+#define TLBSIZE       (TLBSIZE_MASK      << TLBSIZE_SHIFT)
+#define NCB           (NCB_MASK          << NCB_SHIFT)
+#define NIRPT         (NIRPT_MASK        << NIRPT_SHIFT)
+
+
+/* M2VCBRn */
+#define VMID          (VMID_MASK         << VMID_SHIFT)
+#define CBNDX         (CBNDX_MASK        << CBNDX_SHIFT)
+#define BYPASSD       (BYPASSD_MASK      << BYPASSD_SHIFT)
+#define BPRCOSH       (BPRCOSH_MASK      << BPRCOSH_SHIFT)
+#define BPRCISH       (BPRCISH_MASK      << BPRCISH_SHIFT)
+#define BPRCNSH       (BPRCNSH_MASK      << BPRCNSH_SHIFT)
+#define BPSHCFG       (BPSHCFG_MASK      << BPSHCFG_SHIFT)
+#define NSCFG         (NSCFG_MASK        << NSCFG_SHIFT)
+#define BPMTCFG       (BPMTCFG_MASK      << BPMTCFG_SHIFT)
+#define BPMEMTYPE     (BPMEMTYPE_MASK    << BPMEMTYPE_SHIFT)
+
+
+/* REV */
+#define IDR_MINOR     (MINOR_MASK        << MINOR_SHIFT)
+#define IDR_MAJOR     (MAJOR_MASK        << MAJOR_SHIFT)
+
+
+/* TESTBUSCR */
+#define TBE           (TBE_MASK          << TBE_SHIFT)
+#define SPDMBE        (SPDMBE_MASK       << SPDMBE_SHIFT)
+#define WGSEL         (WGSEL_MASK        << WGSEL_SHIFT)
+#define TBLSEL        (TBLSEL_MASK       << TBLSEL_SHIFT)
+#define TBHSEL        (TBHSEL_MASK       << TBHSEL_SHIFT)
+#define SPDM0SEL      (SPDM0SEL_MASK     << SPDM0SEL_SHIFT)
+#define SPDM1SEL      (SPDM1SEL_MASK     << SPDM1SEL_SHIFT)
+#define SPDM2SEL      (SPDM2SEL_MASK     << SPDM2SEL_SHIFT)
+#define SPDM3SEL      (SPDM3SEL_MASK     << SPDM3SEL_SHIFT)
+
+
+/* TLBIVMID */
+#define TLBIVMID_VMID (TLBIVMID_VMID_MASK << TLBIVMID_VMID_SHIFT)
+
+
+/* TLBRSW */
+#define TLBRSW_INDEX  (TLBRSW_INDEX_MASK << TLBRSW_INDEX_SHIFT)
+#define TLBBFBS       (TLBBFBS_MASK      << TLBBFBS_SHIFT)
+
+
+/* TLBTR0 */
+#define PR            (PR_MASK           << PR_SHIFT)
+#define PW            (PW_MASK           << PW_SHIFT)
+#define UR            (UR_MASK           << UR_SHIFT)
+#define UW            (UW_MASK           << UW_SHIFT)
+#define XN            (XN_MASK           << XN_SHIFT)
+#define NSDESC        (NSDESC_MASK       << NSDESC_SHIFT)
+#define ISH           (ISH_MASK          << ISH_SHIFT)
+#define SH            (SH_MASK           << SH_SHIFT)
+#define MT            (MT_MASK           << MT_SHIFT)
+#define DPSIZR        (DPSIZR_MASK       << DPSIZR_SHIFT)
+#define DPSIZC        (DPSIZC_MASK       << DPSIZC_SHIFT)
+
+
+/* TLBTR1 */
+#define TLBTR1_VMID   (TLBTR1_VMID_MASK  << TLBTR1_VMID_SHIFT)
+#define TLBTR1_PA     (TLBTR1_PA_MASK    << TLBTR1_PA_SHIFT)
+
+
+/* TLBTR2 */
+#define TLBTR2_ASID   (TLBTR2_ASID_MASK  << TLBTR2_ASID_SHIFT)
+#define TLBTR2_V      (TLBTR2_V_MASK     << TLBTR2_V_SHIFT)
+#define TLBTR2_NSTID  (TLBTR2_NSTID_MASK << TLBTR2_NSTID_SHIFT)
+#define TLBTR2_NV     (TLBTR2_NV_MASK    << TLBTR2_NV_SHIFT)
+#define TLBTR2_VA     (TLBTR2_VA_MASK    << TLBTR2_VA_SHIFT)
+
+
+/* Context Register Fields */
+/* ACTLR */
+#define CFERE              (CFERE_MASK              << CFERE_SHIFT)
+#define CFEIE              (CFEIE_MASK              << CFEIE_SHIFT)
+#define PTSHCFG            (PTSHCFG_MASK            << PTSHCFG_SHIFT)
+#define RCOSH              (RCOSH_MASK              << RCOSH_SHIFT)
+#define RCISH              (RCISH_MASK              << RCISH_SHIFT)
+#define RCNSH              (RCNSH_MASK              << RCNSH_SHIFT)
+#define PRIVCFG            (PRIVCFG_MASK            << PRIVCFG_SHIFT)
+#define DNA                (DNA_MASK                << DNA_SHIFT)
+#define DNLV2PA            (DNLV2PA_MASK            << DNLV2PA_SHIFT)
+#define TLBMCFG            (TLBMCFG_MASK            << TLBMCFG_SHIFT)
+#define CFCFG              (CFCFG_MASK              << CFCFG_SHIFT)
+#define TIPCF              (TIPCF_MASK              << TIPCF_SHIFT)
+#define V2PCFG             (V2PCFG_MASK             << V2PCFG_SHIFT)
+#define HUME               (HUME_MASK               << HUME_SHIFT)
+#define PTMTCFG            (PTMTCFG_MASK            << PTMTCFG_SHIFT)
+#define PTMEMTYPE          (PTMEMTYPE_MASK          << PTMEMTYPE_SHIFT)
+
+
+/* BFBCR */
+#define BFBDFE             (BFBDFE_MASK             << BFBDFE_SHIFT)
+#define BFBSFE             (BFBSFE_MASK             << BFBSFE_SHIFT)
+#define SFVS               (SFVS_MASK               << SFVS_SHIFT)
+#define FLVIC              (FLVIC_MASK              << FLVIC_SHIFT)
+#define SLVIC              (SLVIC_MASK              << SLVIC_SHIFT)
+
+
+/* CONTEXTIDR */
+#define CONTEXTIDR_ASID    (CONTEXTIDR_ASID_MASK    << CONTEXTIDR_ASID_SHIFT)
+#define PROCID             (PROCID_MASK             << PROCID_SHIFT)
+
+
+/* FSR */
+#define TF                 (TF_MASK                 << TF_SHIFT)
+#define AFF                (AFF_MASK                << AFF_SHIFT)
+#define APF                (APF_MASK                << APF_SHIFT)
+#define TLBMF              (TLBMF_MASK              << TLBMF_SHIFT)
+#define HTWDEEF            (HTWDEEF_MASK            << HTWDEEF_SHIFT)
+#define HTWSEEF            (HTWSEEF_MASK            << HTWSEEF_SHIFT)
+#define MHF                (MHF_MASK                << MHF_SHIFT)
+#define SL                 (SL_MASK                 << SL_SHIFT)
+#define SS                 (SS_MASK                 << SS_SHIFT)
+#define MULTI              (MULTI_MASK              << MULTI_SHIFT)
+
+
+/* FSYNR0 */
+#define AMID               (AMID_MASK               << AMID_SHIFT)
+#define APID               (APID_MASK               << APID_SHIFT)
+#define ABID               (ABID_MASK               << ABID_SHIFT)
+#define ATID               (ATID_MASK               << ATID_SHIFT)
+
+
+/* FSYNR1 */
+#define AMEMTYPE           (AMEMTYPE_MASK           << AMEMTYPE_SHIFT)
+#define ASHARED            (ASHARED_MASK            << ASHARED_SHIFT)
+#define AINNERSHARED       (AINNERSHARED_MASK       << AINNERSHARED_SHIFT)
+#define APRIV              (APRIV_MASK              << APRIV_SHIFT)
+#define APROTNS            (APROTNS_MASK            << APROTNS_SHIFT)
+#define AINST              (AINST_MASK              << AINST_SHIFT)
+#define AWRITE             (AWRITE_MASK             << AWRITE_SHIFT)
+#define ABURST             (ABURST_MASK             << ABURST_SHIFT)
+#define ALEN               (ALEN_MASK               << ALEN_SHIFT)
+#define FSYNR1_ASIZE       (FSYNR1_ASIZE_MASK       << FSYNR1_ASIZE_SHIFT)
+#define ALOCK              (ALOCK_MASK              << ALOCK_SHIFT)
+#define AFULL              (AFULL_MASK              << AFULL_SHIFT)
+
+
+/* NMRR */
+#define ICPC0              (ICPC0_MASK              << ICPC0_SHIFT)
+#define ICPC1              (ICPC1_MASK              << ICPC1_SHIFT)
+#define ICPC2              (ICPC2_MASK              << ICPC2_SHIFT)
+#define ICPC3              (ICPC3_MASK              << ICPC3_SHIFT)
+#define ICPC4              (ICPC4_MASK              << ICPC4_SHIFT)
+#define ICPC5              (ICPC5_MASK              << ICPC5_SHIFT)
+#define ICPC6              (ICPC6_MASK              << ICPC6_SHIFT)
+#define ICPC7              (ICPC7_MASK              << ICPC7_SHIFT)
+#define OCPC0              (OCPC0_MASK              << OCPC0_SHIFT)
+#define OCPC1              (OCPC1_MASK              << OCPC1_SHIFT)
+#define OCPC2              (OCPC2_MASK              << OCPC2_SHIFT)
+#define OCPC3              (OCPC3_MASK              << OCPC3_SHIFT)
+#define OCPC4              (OCPC4_MASK              << OCPC4_SHIFT)
+#define OCPC5              (OCPC5_MASK              << OCPC5_SHIFT)
+#define OCPC6              (OCPC6_MASK              << OCPC6_SHIFT)
+#define OCPC7              (OCPC7_MASK              << OCPC7_SHIFT)
+
+
+/* PAR */
+#define FAULT              (FAULT_MASK              << FAULT_SHIFT)
+/* If a fault is present, these are the
+ * same as the fault fields in the FSR */
+#define FAULT_TF           (FAULT_TF_MASK           << FAULT_TF_SHIFT)
+#define FAULT_AFF          (FAULT_AFF_MASK          << FAULT_AFF_SHIFT)
+#define FAULT_APF          (FAULT_APF_MASK          << FAULT_APF_SHIFT)
+#define FAULT_TLBMF        (FAULT_TLBMF_MASK        << FAULT_TLBMF_SHIFT)
+#define FAULT_HTWDEEF      (FAULT_HTWDEEF_MASK      << FAULT_HTWDEEF_SHIFT)
+#define FAULT_HTWSEEF      (FAULT_HTWSEEF_MASK      << FAULT_HTWSEEF_SHIFT)
+#define FAULT_MHF          (FAULT_MHF_MASK          << FAULT_MHF_SHIFT)
+#define FAULT_SL           (FAULT_SL_MASK           << FAULT_SL_SHIFT)
+#define FAULT_SS           (FAULT_SS_MASK           << FAULT_SS_SHIFT)
+
+/* If NO fault is present, the following
+ * fields are in effect
+ * (FAULT remains as before) */
+#define PAR_NOFAULT_SS     (PAR_NOFAULT_SS_MASK     << PAR_NOFAULT_SS_SHIFT)
+#define PAR_NOFAULT_MT     (PAR_NOFAULT_MT_MASK     << PAR_NOFAULT_MT_SHIFT)
+#define PAR_NOFAULT_SH     (PAR_NOFAULT_SH_MASK     << PAR_NOFAULT_SH_SHIFT)
+#define PAR_NOFAULT_NS     (PAR_NOFAULT_NS_MASK     << PAR_NOFAULT_NS_SHIFT)
+#define PAR_NOFAULT_NOS    (PAR_NOFAULT_NOS_MASK    << PAR_NOFAULT_NOS_SHIFT)
+#define PAR_NPFAULT_PA     (PAR_NPFAULT_PA_MASK     << PAR_NPFAULT_PA_SHIFT)
+
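+/*
+ * Decode sketch (illustrative, loosely following how an iova-to-phys
+ * lookup might use the PAR, and assuming a small 4K page mapping):
+ * after kicking off a V2P operation, either report the fault or
+ * extract the translated address:
+ *
+ *	SET_V2PPR(base, ctx, va & V2Pxx_VA);
+ *	par = GET_PAR(base, ctx);
+ *	if (GET_FAULT(base, ctx))
+ *		return 0;
+ *	return (par & 0xFFFFF000) | (va & 0x00000FFF);
+ */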
+
+/* PRRR */
+#define MTC0               (MTC0_MASK               << MTC0_SHIFT)
+#define MTC1               (MTC1_MASK               << MTC1_SHIFT)
+#define MTC2               (MTC2_MASK               << MTC2_SHIFT)
+#define MTC3               (MTC3_MASK               << MTC3_SHIFT)
+#define MTC4               (MTC4_MASK               << MTC4_SHIFT)
+#define MTC5               (MTC5_MASK               << MTC5_SHIFT)
+#define MTC6               (MTC6_MASK               << MTC6_SHIFT)
+#define MTC7               (MTC7_MASK               << MTC7_SHIFT)
+#define SHDSH0             (SHDSH0_MASK             << SHDSH0_SHIFT)
+#define SHDSH1             (SHDSH1_MASK             << SHDSH1_SHIFT)
+#define SHNMSH0            (SHNMSH0_MASK            << SHNMSH0_SHIFT)
+#define SHNMSH1            (SHNMSH1_MASK            << SHNMSH1_SHIFT)
+#define NOS0               (NOS0_MASK               << NOS0_SHIFT)
+#define NOS1               (NOS1_MASK               << NOS1_SHIFT)
+#define NOS2               (NOS2_MASK               << NOS2_SHIFT)
+#define NOS3               (NOS3_MASK               << NOS3_SHIFT)
+#define NOS4               (NOS4_MASK               << NOS4_SHIFT)
+#define NOS5               (NOS5_MASK               << NOS5_SHIFT)
+#define NOS6               (NOS6_MASK               << NOS6_SHIFT)
+#define NOS7               (NOS7_MASK               << NOS7_SHIFT)
+
+
+/* RESUME */
+#define TNR                (TNR_MASK                << TNR_SHIFT)
+
+
+/* SCTLR */
+#define M                  (M_MASK                  << M_SHIFT)
+#define TRE                (TRE_MASK                << TRE_SHIFT)
+#define AFE                (AFE_MASK                << AFE_SHIFT)
+#define HAF                (HAF_MASK                << HAF_SHIFT)
+#define BE                 (BE_MASK                 << BE_SHIFT)
+#define AFFD               (AFFD_MASK               << AFFD_SHIFT)
+
+
+/* TLBIASID */
+#define TLBIASID_ASID      (TLBIASID_ASID_MASK      << TLBIASID_ASID_SHIFT)
+
+
+/* TLBIVA */
+#define TLBIVA_ASID        (TLBIVA_ASID_MASK        << TLBIVA_ASID_SHIFT)
+#define TLBIVA_VA          (TLBIVA_VA_MASK          << TLBIVA_VA_SHIFT)
+
+
+/* TLBIVAA */
+#define TLBIVAA_VA         (TLBIVAA_VA_MASK         << TLBIVAA_VA_SHIFT)
+
+
+/* TLBLCKR */
+#define LKE                (LKE_MASK                << LKE_SHIFT)
+#define TLBLCKR_TLBIALLCFG (TLBLCKR_TLBIALLCFG_MASK << TLBLCKR_TLBIALLCFG_SHIFT)
+#define TLBIASIDCFG        (TLBIASIDCFG_MASK        << TLBIASIDCFG_SHIFT)
+#define TLBIVAACFG         (TLBIVAACFG_MASK         << TLBIVAACFG_SHIFT)
+#define FLOOR              (FLOOR_MASK              << FLOOR_SHIFT)
+#define VICTIM             (VICTIM_MASK             << VICTIM_SHIFT)
+
+
+/* TTBCR */
+#define N                  (N_MASK                  << N_SHIFT)
+#define PD0                (PD0_MASK                << PD0_SHIFT)
+#define PD1                (PD1_MASK                << PD1_SHIFT)
+
+
+/* TTBR0 */
+#define TTBR0_IRGNH        (TTBR0_IRGNH_MASK        << TTBR0_IRGNH_SHIFT)
+#define TTBR0_SH           (TTBR0_SH_MASK           << TTBR0_SH_SHIFT)
+#define TTBR0_ORGN         (TTBR0_ORGN_MASK         << TTBR0_ORGN_SHIFT)
+#define TTBR0_NOS          (TTBR0_NOS_MASK          << TTBR0_NOS_SHIFT)
+#define TTBR0_IRGNL        (TTBR0_IRGNL_MASK        << TTBR0_IRGNL_SHIFT)
+#define TTBR0_PA           (TTBR0_PA_MASK           << TTBR0_PA_SHIFT)
+
+
+/* TTBR1 */
+#define TTBR1_IRGNH        (TTBR1_IRGNH_MASK        << TTBR1_IRGNH_SHIFT)
+#define TTBR1_SH           (TTBR1_SH_MASK           << TTBR1_SH_SHIFT)
+#define TTBR1_ORGN         (TTBR1_ORGN_MASK         << TTBR1_ORGN_SHIFT)
+#define TTBR1_NOS          (TTBR1_NOS_MASK          << TTBR1_NOS_SHIFT)
+#define TTBR1_IRGNL        (TTBR1_IRGNL_MASK        << TTBR1_IRGNL_SHIFT)
+#define TTBR1_PA           (TTBR1_PA_MASK           << TTBR1_PA_SHIFT)
+
+
+/* V2PSR */
+#define HIT                (HIT_MASK                << HIT_SHIFT)
+#define INDEX              (INDEX_MASK              << INDEX_SHIFT)
+
+
+/* V2Pxx */
+#define V2Pxx_INDEX        (V2Pxx_INDEX_MASK        << V2Pxx_INDEX_SHIFT)
+#define V2Pxx_VA           (V2Pxx_VA_MASK           << V2Pxx_VA_SHIFT)
+
+
+/* Global Register Masks */
+/* CBACRn */
+#define RWVMID_MASK               0x1F
+#define RWE_MASK                  0x01
+#define RWGE_MASK                 0x01
+#define CBVMID_MASK               0x1F
+#define IRPTNDX_MASK              0xFF
+
+
+/* CR */
+#define RPUE_MASK                 0x01
+#define RPUERE_MASK               0x01
+#define RPUEIE_MASK               0x01
+#define DCDEE_MASK                0x01
+#define CLIENTPD_MASK             0x01
+#define STALLD_MASK               0x01
+#define TLBLKCRWE_MASK            0x01
+#define CR_TLBIALLCFG_MASK        0x01
+#define TLBIVMIDCFG_MASK          0x01
+#define CR_HUME_MASK              0x01
+
+
+/* ESR */
+#define CFG_MASK                  0x01
+#define BYPASS_MASK               0x01
+#define ESR_MULTI_MASK            0x01
+
+
+/* ESYNR0 */
+#define ESYNR0_AMID_MASK          0xFF
+#define ESYNR0_APID_MASK          0x1F
+#define ESYNR0_ABID_MASK          0x07
+#define ESYNR0_AVMID_MASK         0x1F
+#define ESYNR0_ATID_MASK          0xFF
+
+
+/* ESYNR1 */
+#define ESYNR1_AMEMTYPE_MASK             0x07
+#define ESYNR1_ASHARED_MASK              0x01
+#define ESYNR1_AINNERSHARED_MASK         0x01
+#define ESYNR1_APRIV_MASK                0x01
+#define ESYNR1_APROTNS_MASK              0x01
+#define ESYNR1_AINST_MASK                0x01
+#define ESYNR1_AWRITE_MASK               0x01
+#define ESYNR1_ABURST_MASK               0x01
+#define ESYNR1_ALEN_MASK                 0x0F
+#define ESYNR1_ASIZE_MASK                0x01
+#define ESYNR1_ALOCK_MASK                0x03
+#define ESYNR1_AOOO_MASK                 0x01
+#define ESYNR1_AFULL_MASK                0x01
+#define ESYNR1_AC_MASK                   0x01
+#define ESYNR1_DCD_MASK                  0x01
+
+
+/* IDR */
+#define NM2VCBMT_MASK             0x1FF
+#define HTW_MASK                  0x01
+#define HUM_MASK                  0x01
+#define TLBSIZE_MASK              0x0F
+#define NCB_MASK                  0xFF
+#define NIRPT_MASK                0xFF
+
+
+/* M2VCBRn */
+#define VMID_MASK                 0x1F
+#define CBNDX_MASK                0xFF
+#define BYPASSD_MASK              0x01
+#define BPRCOSH_MASK              0x01
+#define BPRCISH_MASK              0x01
+#define BPRCNSH_MASK              0x01
+#define BPSHCFG_MASK              0x03
+#define NSCFG_MASK                0x03
+#define BPMTCFG_MASK              0x01
+#define BPMEMTYPE_MASK            0x07
+
+
+/* REV */
+#define MINOR_MASK                0x0F
+#define MAJOR_MASK                0x0F
+
+
+/* TESTBUSCR */
+#define TBE_MASK                  0x01
+#define SPDMBE_MASK               0x01
+#define WGSEL_MASK                0x03
+#define TBLSEL_MASK               0x03
+#define TBHSEL_MASK               0x03
+#define SPDM0SEL_MASK             0x0F
+#define SPDM1SEL_MASK             0x0F
+#define SPDM2SEL_MASK             0x0F
+#define SPDM3SEL_MASK             0x0F
+
+
+/* TLBIVMID */
+#define TLBIVMID_VMID_MASK        0x1F
+
+
+/* TLBRSW */
+#define TLBRSW_INDEX_MASK         0xFF
+#define TLBBFBS_MASK              0x03
+
+
+/* TLBTR0 */
+#define PR_MASK                   0x01
+#define PW_MASK                   0x01
+#define UR_MASK                   0x01
+#define UW_MASK                   0x01
+#define XN_MASK                   0x01
+#define NSDESC_MASK               0x01
+#define ISH_MASK                  0x01
+#define SH_MASK                   0x01
+#define MT_MASK                   0x07
+#define DPSIZR_MASK               0x07
+#define DPSIZC_MASK               0x07
+
+
+/* TLBTR1 */
+#define TLBTR1_VMID_MASK          0x1F
+#define TLBTR1_PA_MASK            0x000FFFFF
+
+
+/* TLBTR2 */
+#define TLBTR2_ASID_MASK          0xFF
+#define TLBTR2_V_MASK             0x01
+#define TLBTR2_NSTID_MASK         0x01
+#define TLBTR2_NV_MASK            0x01
+#define TLBTR2_VA_MASK            0x000FFFFF
+
+
+/* Global Register Shifts */
+/* CBACRn */
+#define RWVMID_SHIFT             0
+#define RWE_SHIFT                8
+#define RWGE_SHIFT               9
+#define CBVMID_SHIFT             16
+#define IRPTNDX_SHIFT            24
+
+
+/* CR */
+#define RPUE_SHIFT               0
+#define RPUERE_SHIFT             1
+#define RPUEIE_SHIFT             2
+#define DCDEE_SHIFT              3
+#define CLIENTPD_SHIFT           4
+#define STALLD_SHIFT             5
+#define TLBLKCRWE_SHIFT          6
+#define CR_TLBIALLCFG_SHIFT      7
+#define TLBIVMIDCFG_SHIFT        8
+#define CR_HUME_SHIFT            9
+
+
+/* ESR */
+#define CFG_SHIFT                0
+#define BYPASS_SHIFT             1
+#define ESR_MULTI_SHIFT          31
+
+
+/* ESYNR0 */
+#define ESYNR0_AMID_SHIFT        0
+#define ESYNR0_APID_SHIFT        8
+#define ESYNR0_ABID_SHIFT        13
+#define ESYNR0_AVMID_SHIFT       16
+#define ESYNR0_ATID_SHIFT        24
+
+
+/* ESYNR1 */
+#define ESYNR1_AMEMTYPE_SHIFT           0
+#define ESYNR1_ASHARED_SHIFT            3
+#define ESYNR1_AINNERSHARED_SHIFT       4
+#define ESYNR1_APRIV_SHIFT              5
+#define ESYNR1_APROTNS_SHIFT            6
+#define ESYNR1_AINST_SHIFT              7
+#define ESYNR1_AWRITE_SHIFT             8
+#define ESYNR1_ABURST_SHIFT             10
+#define ESYNR1_ALEN_SHIFT               12
+#define ESYNR1_ASIZE_SHIFT              16
+#define ESYNR1_ALOCK_SHIFT              20
+#define ESYNR1_AOOO_SHIFT               22
+#define ESYNR1_AFULL_SHIFT              24
+#define ESYNR1_AC_SHIFT                 30
+#define ESYNR1_DCD_SHIFT                31
+
+
+/* IDR */
+#define NM2VCBMT_SHIFT           0
+#define HTW_SHIFT                9
+#define HUM_SHIFT                10
+#define TLBSIZE_SHIFT            12
+#define NCB_SHIFT                16
+#define NIRPT_SHIFT              24
+
+
+/* M2VCBRn */
+#define VMID_SHIFT               0
+#define CBNDX_SHIFT              8
+#define BYPASSD_SHIFT            16
+#define BPRCOSH_SHIFT            17
+#define BPRCISH_SHIFT            18
+#define BPRCNSH_SHIFT            19
+#define BPSHCFG_SHIFT            20
+#define NSCFG_SHIFT              22
+#define BPMTCFG_SHIFT            24
+#define BPMEMTYPE_SHIFT          25
+
+
+/* REV */
+#define MINOR_SHIFT              0
+#define MAJOR_SHIFT              4
+
+
+/* TESTBUSCR */
+#define TBE_SHIFT                0
+#define SPDMBE_SHIFT             1
+#define WGSEL_SHIFT              8
+#define TBLSEL_SHIFT             12
+#define TBHSEL_SHIFT             14
+#define SPDM0SEL_SHIFT           16
+#define SPDM1SEL_SHIFT           20
+#define SPDM2SEL_SHIFT           24
+#define SPDM3SEL_SHIFT           28
+
+
+/* TLBIVMID */
+#define TLBIVMID_VMID_SHIFT      0
+
+
+/* TLBRSW */
+#define TLBRSW_INDEX_SHIFT       0
+#define TLBBFBS_SHIFT            8
+
+
+/* TLBTR0 */
+#define PR_SHIFT                 0
+#define PW_SHIFT                 1
+#define UR_SHIFT                 2
+#define UW_SHIFT                 3
+#define XN_SHIFT                 4
+#define NSDESC_SHIFT             6
+#define ISH_SHIFT                7
+#define SH_SHIFT                 8
+#define MT_SHIFT                 9
+#define DPSIZR_SHIFT             16
+#define DPSIZC_SHIFT             20
+
+
+/* TLBTR1 */
+#define TLBTR1_VMID_SHIFT        0
+#define TLBTR1_PA_SHIFT          12
+
+
+/* TLBTR2 */
+#define TLBTR2_ASID_SHIFT        0
+#define TLBTR2_V_SHIFT           8
+#define TLBTR2_NSTID_SHIFT       9
+#define TLBTR2_NV_SHIFT          10
+#define TLBTR2_VA_SHIFT          12
+
+
+/* Context Register Masks */
+/* ACTLR */
+#define CFERE_MASK                       0x01
+#define CFEIE_MASK                       0x01
+#define PTSHCFG_MASK                     0x03
+#define RCOSH_MASK                       0x01
+#define RCISH_MASK                       0x01
+#define RCNSH_MASK                       0x01
+#define PRIVCFG_MASK                     0x03
+#define DNA_MASK                         0x01
+#define DNLV2PA_MASK                     0x01
+#define TLBMCFG_MASK                     0x03
+#define CFCFG_MASK                       0x01
+#define TIPCF_MASK                       0x01
+#define V2PCFG_MASK                      0x03
+#define HUME_MASK                        0x01
+#define PTMTCFG_MASK                     0x01
+#define PTMEMTYPE_MASK                   0x07
+
+
+/* BFBCR */
+#define BFBDFE_MASK                      0x01
+#define BFBSFE_MASK                      0x01
+#define SFVS_MASK                        0x01
+#define FLVIC_MASK                       0x0F
+#define SLVIC_MASK                       0x0F
+
+
+/* CONTEXTIDR */
+#define CONTEXTIDR_ASID_MASK             0xFF
+#define PROCID_MASK                      0x00FFFFFF
+
+
+/* FSR */
+#define TF_MASK                          0x01
+#define AFF_MASK                         0x01
+#define APF_MASK                         0x01
+#define TLBMF_MASK                       0x01
+#define HTWDEEF_MASK                     0x01
+#define HTWSEEF_MASK                     0x01
+#define MHF_MASK                         0x01
+#define SL_MASK                          0x01
+#define SS_MASK                          0x01
+#define MULTI_MASK                       0x01
+
+
+/* FSYNR0 */
+#define AMID_MASK                        0xFF
+#define APID_MASK                        0x1F
+#define ABID_MASK                        0x07
+#define ATID_MASK                        0xFF
+
+
+/* FSYNR1 */
+#define AMEMTYPE_MASK                    0x07
+#define ASHARED_MASK                     0x01
+#define AINNERSHARED_MASK                0x01
+#define APRIV_MASK                       0x01
+#define APROTNS_MASK                     0x01
+#define AINST_MASK                       0x01
+#define AWRITE_MASK                      0x01
+#define ABURST_MASK                      0x01
+#define ALEN_MASK                        0x0F
+#define FSYNR1_ASIZE_MASK                0x07
+#define ALOCK_MASK                       0x03
+#define AFULL_MASK                       0x01
+
+
+/* NMRR */
+#define ICPC0_MASK                       0x03
+#define ICPC1_MASK                       0x03
+#define ICPC2_MASK                       0x03
+#define ICPC3_MASK                       0x03
+#define ICPC4_MASK                       0x03
+#define ICPC5_MASK                       0x03
+#define ICPC6_MASK                       0x03
+#define ICPC7_MASK                       0x03
+#define OCPC0_MASK                       0x03
+#define OCPC1_MASK                       0x03
+#define OCPC2_MASK                       0x03
+#define OCPC3_MASK                       0x03
+#define OCPC4_MASK                       0x03
+#define OCPC5_MASK                       0x03
+#define OCPC6_MASK                       0x03
+#define OCPC7_MASK                       0x03
+
+
+/* PAR */
+#define FAULT_MASK                       0x01
+/* If a fault is present, these are the
+ * same as the fault fields in the FSR */
+#define FAULT_TF_MASK                    0x01
+#define FAULT_AFF_MASK                   0x01
+#define FAULT_APF_MASK                   0x01
+#define FAULT_TLBMF_MASK                 0x01
+#define FAULT_HTWDEEF_MASK               0x01
+#define FAULT_HTWSEEF_MASK               0x01
+#define FAULT_MHF_MASK                   0x01
+#define FAULT_SL_MASK                    0x01
+#define FAULT_SS_MASK                    0x01
+
+/* If NO fault is present, the following
+ * fields are in effect
+ * (FAULT remains as before) */
+#define PAR_NOFAULT_SS_MASK              0x01
+#define PAR_NOFAULT_MT_MASK              0x07
+#define PAR_NOFAULT_SH_MASK              0x01
+#define PAR_NOFAULT_NS_MASK              0x01
+#define PAR_NOFAULT_NOS_MASK             0x01
+#define PAR_NPFAULT_PA_MASK              0x000FFFFF
+
+
+/* PRRR */
+#define MTC0_MASK                        0x03
+#define MTC1_MASK                        0x03
+#define MTC2_MASK                        0x03
+#define MTC3_MASK                        0x03
+#define MTC4_MASK                        0x03
+#define MTC5_MASK                        0x03
+#define MTC6_MASK                        0x03
+#define MTC7_MASK                        0x03
+#define SHDSH0_MASK                      0x01
+#define SHDSH1_MASK                      0x01
+#define SHNMSH0_MASK                     0x01
+#define SHNMSH1_MASK                     0x01
+#define NOS0_MASK                        0x01
+#define NOS1_MASK                        0x01
+#define NOS2_MASK                        0x01
+#define NOS3_MASK                        0x01
+#define NOS4_MASK                        0x01
+#define NOS5_MASK                        0x01
+#define NOS6_MASK                        0x01
+#define NOS7_MASK                        0x01
+
+
+/* RESUME */
+#define TNR_MASK                         0x01
+
+
+/* SCTLR */
+#define M_MASK                           0x01
+#define TRE_MASK                         0x01
+#define AFE_MASK                         0x01
+#define HAF_MASK                         0x01
+#define BE_MASK                          0x01
+#define AFFD_MASK                        0x01
+
+
+/* TLBIASID */
+#define TLBIASID_ASID_MASK               0xFF
+
+
+/* TLBIVA */
+#define TLBIVA_ASID_MASK                 0xFF
+#define TLBIVA_VA_MASK                   0x000FFFFF
+
+
+/* TLBIVAA */
+#define TLBIVAA_VA_MASK                  0x000FFFFF
+
+
+/* TLBLCKR */
+#define LKE_MASK                         0x01
+#define TLBLCKR_TLBIALLCFG_MASK          0x01
+#define TLBIASIDCFG_MASK                 0x01
+#define TLBIVAACFG_MASK                  0x01
+#define FLOOR_MASK                       0xFF
+#define VICTIM_MASK                      0xFF
+
+
+/* TTBCR */
+#define N_MASK                           0x07
+#define PD0_MASK                         0x01
+#define PD1_MASK                         0x01
+
+
+/* TTBR0 */
+#define TTBR0_IRGNH_MASK                 0x01
+#define TTBR0_SH_MASK                    0x01
+#define TTBR0_ORGN_MASK                  0x03
+#define TTBR0_NOS_MASK                   0x01
+#define TTBR0_IRGNL_MASK                 0x01
+#define TTBR0_PA_MASK                    0x0003FFFF
+
+
+/* TTBR1 */
+#define TTBR1_IRGNH_MASK                 0x01
+#define TTBR1_SH_MASK                    0x01
+#define TTBR1_ORGN_MASK                  0x03
+#define TTBR1_NOS_MASK                   0x01
+#define TTBR1_IRGNL_MASK                 0x01
+#define TTBR1_PA_MASK                    0x0003FFFF
+
+
+/* V2PSR */
+#define HIT_MASK                         0x01
+#define INDEX_MASK                       0xFF
+
+
+/* V2Pxx */
+#define V2Pxx_INDEX_MASK                 0xFF
+#define V2Pxx_VA_MASK                    0x000FFFFF
+
+
+/* Context Register Shifts */
+/* ACTLR */
+#define CFERE_SHIFT                    0
+#define CFEIE_SHIFT                    1
+#define PTSHCFG_SHIFT                  2
+#define RCOSH_SHIFT                    4
+#define RCISH_SHIFT                    5
+#define RCNSH_SHIFT                    6
+#define PRIVCFG_SHIFT                  8
+#define DNA_SHIFT                      10
+#define DNLV2PA_SHIFT                  11
+#define TLBMCFG_SHIFT                  12
+#define CFCFG_SHIFT                    14
+#define TIPCF_SHIFT                    15
+#define V2PCFG_SHIFT                   16
+#define HUME_SHIFT                     18
+#define PTMTCFG_SHIFT                  20
+#define PTMEMTYPE_SHIFT                21
+
+
+/* BFBCR */
+#define BFBDFE_SHIFT                   0
+#define BFBSFE_SHIFT                   1
+#define SFVS_SHIFT                     2
+#define FLVIC_SHIFT                    4
+#define SLVIC_SHIFT                    8
+
+
+/* CONTEXTIDR */
+#define CONTEXTIDR_ASID_SHIFT          0
+#define PROCID_SHIFT                   8
+
+
+/* FSR */
+#define TF_SHIFT                       1
+#define AFF_SHIFT                      2
+#define APF_SHIFT                      3
+#define TLBMF_SHIFT                    4
+#define HTWDEEF_SHIFT                  5
+#define HTWSEEF_SHIFT                  6
+#define MHF_SHIFT                      7
+#define SL_SHIFT                       16
+#define SS_SHIFT                       30
+#define MULTI_SHIFT                    31
+
+
+/* FSYNR0 */
+#define AMID_SHIFT                     0
+#define APID_SHIFT                     8
+#define ABID_SHIFT                     13
+#define ATID_SHIFT                     24
+
+
+/* FSYNR1 */
+#define AMEMTYPE_SHIFT                 0
+#define ASHARED_SHIFT                  3
+#define AINNERSHARED_SHIFT             4
+#define APRIV_SHIFT                    5
+#define APROTNS_SHIFT                  6
+#define AINST_SHIFT                    7
+#define AWRITE_SHIFT                   8
+#define ABURST_SHIFT                   10
+#define ALEN_SHIFT                     12
+#define FSYNR1_ASIZE_SHIFT             16
+#define ALOCK_SHIFT                    20
+#define AFULL_SHIFT                    24
+
+
+/* NMRR */
+#define ICPC0_SHIFT                    0
+#define ICPC1_SHIFT                    2
+#define ICPC2_SHIFT                    4
+#define ICPC3_SHIFT                    6
+#define ICPC4_SHIFT                    8
+#define ICPC5_SHIFT                    10
+#define ICPC6_SHIFT                    12
+#define ICPC7_SHIFT                    14
+#define OCPC0_SHIFT                    16
+#define OCPC1_SHIFT                    18
+#define OCPC2_SHIFT                    20
+#define OCPC3_SHIFT                    22
+#define OCPC4_SHIFT                    24
+#define OCPC5_SHIFT                    26
+#define OCPC6_SHIFT                    28
+#define OCPC7_SHIFT                    30
+
+
+/* PAR */
+#define FAULT_SHIFT                    0
+/* If a fault is present, these are the
+ * same as the fault fields in the FSR */
+#define FAULT_TF_SHIFT                 1
+#define FAULT_AFF_SHIFT                2
+#define FAULT_APF_SHIFT                3
+#define FAULT_TLBMF_SHIFT              4
+#define FAULT_HTWDEEF_SHIFT            5
+#define FAULT_HTWSEEF_SHIFT            6
+#define FAULT_MHF_SHIFT                7
+#define FAULT_SL_SHIFT                 16
+#define FAULT_SS_SHIFT                 30
+
+/* If NO fault is present, the following
+ * fields are in effect
+ * (FAULT remains as before) */
+#define PAR_NOFAULT_SS_SHIFT           1
+#define PAR_NOFAULT_MT_SHIFT           4
+#define PAR_NOFAULT_SH_SHIFT           7
+#define PAR_NOFAULT_NS_SHIFT           9
+#define PAR_NOFAULT_NOS_SHIFT          10
+#define PAR_NPFAULT_PA_SHIFT           12
+
+
+/* PRRR */
+#define MTC0_SHIFT                     0
+#define MTC1_SHIFT                     2
+#define MTC2_SHIFT                     4
+#define MTC3_SHIFT                     6
+#define MTC4_SHIFT                     8
+#define MTC5_SHIFT                     10
+#define MTC6_SHIFT                     12
+#define MTC7_SHIFT                     14
+#define SHDSH0_SHIFT                   16
+#define SHDSH1_SHIFT                   17
+#define SHNMSH0_SHIFT                  18
+#define SHNMSH1_SHIFT                  19
+#define NOS0_SHIFT                     24
+#define NOS1_SHIFT                     25
+#define NOS2_SHIFT                     26
+#define NOS3_SHIFT                     27
+#define NOS4_SHIFT                     28
+#define NOS5_SHIFT                     29
+#define NOS6_SHIFT                     30
+#define NOS7_SHIFT                     31
+
+
+/* RESUME */
+#define TNR_SHIFT                      0
+
+
+/* SCTLR */
+#define M_SHIFT                        0
+#define TRE_SHIFT                      1
+#define AFE_SHIFT                      2
+#define HAF_SHIFT                      3
+#define BE_SHIFT                       4
+#define AFFD_SHIFT                     5
+
+
+/* TLBIASID */
+#define TLBIASID_ASID_SHIFT            0
+
+
+/* TLBIVA */
+#define TLBIVA_ASID_SHIFT              0
+#define TLBIVA_VA_SHIFT                12
+
+
+/* TLBIVAA */
+#define TLBIVAA_VA_SHIFT               12
+
+
+/* TLBLCKR */
+#define LKE_SHIFT                      0
+#define TLBLCKR_TLBIALLCFG_SHIFT       1
+#define TLBIASIDCFG_SHIFT              2
+#define TLBIVAACFG_SHIFT               3
+#define FLOOR_SHIFT                    8
+#define VICTIM_SHIFT                   16
+
+
+/* TTBCR */
+#define N_SHIFT                        3
+#define PD0_SHIFT                      4
+#define PD1_SHIFT                      5
+
+
+/* TTBR0 */
+#define TTBR0_IRGNH_SHIFT              0
+#define TTBR0_SH_SHIFT                 1
+#define TTBR0_ORGN_SHIFT               3
+#define TTBR0_NOS_SHIFT                5
+#define TTBR0_IRGNL_SHIFT              6
+#define TTBR0_PA_SHIFT                 14
+
+
+/* TTBR1 */
+#define TTBR1_IRGNH_SHIFT              0
+#define TTBR1_SH_SHIFT                 1
+#define TTBR1_ORGN_SHIFT               3
+#define TTBR1_NOS_SHIFT                5
+#define TTBR1_IRGNL_SHIFT              6
+#define TTBR1_PA_SHIFT                 14
+
+
+/* V2PSR */
+#define HIT_SHIFT                      0
+#define INDEX_SHIFT                    8
+
+
+/* V2Pxx */
+#define V2Pxx_INDEX_SHIFT              0
+#define V2Pxx_VA_SHIFT                 12
+
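+/*
+ * Note: the *_MASK values in this header are pre-shift masks, i.e. a field
+ * is read as (reg >> FIELD_SHIFT) & FIELD_MASK and cleared by masking off
+ * (FIELD_MASK << FIELD_SHIFT); e.g. the TTBR0 table base is recovered as
+ * (ttbr0 >> TTBR0_PA_SHIFT) & TTBR0_PA_MASK.
+ */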
+#endif
diff --git a/arch/arm/mach-msm/include/mach/iommu_hw-v1.h b/arch/arm/mach-msm/include/mach/iommu_hw-v1.h
new file mode 100644
index 0000000..554f7e0
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/iommu_hw-v1.h
@@ -0,0 +1,2122 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+
+#define CTX_SHIFT  12
+#define CTX_OFFSET 0x8000
+
+#define GET_GLOBAL_REG(reg, base) (readl_relaxed((base) + (reg)))
+#define GET_CTX_REG(reg, base, ctx) \
+	(readl_relaxed((base) + CTX_OFFSET + (reg) + ((ctx) << CTX_SHIFT)))
+
+#define SET_GLOBAL_REG(reg, base, val)	writel_relaxed((val), ((base) + (reg)))
+
+#define SET_CTX_REG(reg, base, ctx, val) \
+	writel_relaxed((val), \
+		((base) + CTX_OFFSET + (reg) + ((ctx) << CTX_SHIFT)))
+
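+/*
+ * Address map sketch (derived from the macros above; the base address is a
+ * made-up example): each context bank occupies a 4K (1 << CTX_SHIFT) window
+ * starting CTX_OFFSET bytes into the IOMMU register space. With a base of
+ * 0xF9000000, context 2's copy of a register R is therefore accessed at
+ * 0xF9000000 + 0x8000 + (2 << 12) + R = 0xF900A000 + R.
+ */
+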
+/* Wrappers for numbered registers */
+#define SET_GLOBAL_REG_N(r, n, b, v) \
+	SET_GLOBAL_REG(((r) + ((n) << 2)), (b), (v))
+#define GET_GLOBAL_REG_N(r, n, b) \
+	GET_GLOBAL_REG(((r) + ((n) << 2)), (b))
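+/* For instance, GET_GLOBAL_REG_N(SMR, 2, base) reads the third SMR instance
+ * at base + SMR + 8, the per-instance stride being one 32-bit word.
+ */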
+
+/* Field wrappers */
+#define GET_GLOBAL_FIELD(b, r, F) \
+	GET_FIELD(((b) + (r)), r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD(b, c, r, F) \
+	GET_FIELD(((b) + CTX_OFFSET + (r) + ((c) << CTX_SHIFT)), \
+			r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define SET_GLOBAL_FIELD(b, r, F, v) \
+	SET_FIELD(((b) + (r)), r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD(b, c, r, F, v) \
+	SET_FIELD(((b) + CTX_OFFSET + (r) + ((c) << CTX_SHIFT)), \
+			r##_##F##_MASK, r##_##F##_SHIFT, (v))
+
+/* Wrappers for numbered field registers */
+#define SET_GLOBAL_FIELD_N(b, n, r, F, v) \
+	SET_FIELD(((b) + ((n) << 2) + (r)), r##_##F##_MASK, r##_##F##_SHIFT, v)
+#define GET_GLOBAL_FIELD_N(b, n, r, F) \
+	GET_FIELD(((b) + ((n) << 2) + (r)), r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define GET_FIELD(addr, mask, shift) ((readl_relaxed(addr) >> (shift)) & (mask))
+
+#define SET_FIELD(addr, mask, shift, v) \
+do { \
+	int t = readl_relaxed(addr); \
+	writel_relaxed((t & ~((mask) << (shift))) + (((v) & \
+			(mask)) << (shift)), addr); \
+} while (0)
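+
+/*
+ * Expansion sketch (CR0/CLIENTPD is just one example pair): the field
+ * wrappers paste the register and field names together to pick up the
+ * corresponding _MASK/_SHIFT pair defined elsewhere in this header, e.g.
+ *
+ *	SET_GLOBAL_FIELD(base, CR0, CLIENTPD, 1)
+ *	  -> SET_FIELD(base + CR0, CR0_CLIENTPD_MASK, CR0_CLIENTPD_SHIFT, 1)
+ *
+ * Note that SET_FIELD is a non-atomic read-modify-write; serialization
+ * against concurrent updates of the same register is the caller's problem.
+ */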
+
+
+/* Global register space 0 setters / getters */
+#define SET_CR0(b, v)            SET_GLOBAL_REG(CR0, (b), (v))
+#define SET_SCR1(b, v)           SET_GLOBAL_REG(SCR1, (b), (v))
+#define SET_CR2(b, v)            SET_GLOBAL_REG(CR2, (b), (v))
+#define SET_ACR(b, v)            SET_GLOBAL_REG(ACR, (b), (v))
+#define SET_IDR0(b, N, v)        SET_GLOBAL_REG(IDR0, (b), (v))
+#define SET_IDR1(b, N, v)        SET_GLOBAL_REG(IDR1, (b), (v))
+#define SET_IDR2(b, N, v)        SET_GLOBAL_REG(IDR2, (b), (v))
+#define SET_IDR7(b, N, v)        SET_GLOBAL_REG(IDR7, (b), (v))
+#define SET_GFAR(b, v)           SET_GLOBAL_REG(GFAR, (b), (v))
+#define SET_GFSR(b, v)           SET_GLOBAL_REG(GFSR, (b), (v))
+#define SET_GFSRRESTORE(b, v)    SET_GLOBAL_REG(GFSRRESTORE, (b), (v))
+#define SET_GFSYNR0(b, v)        SET_GLOBAL_REG(GFSYNR0, (b), (v))
+#define SET_GFSYNR1(b, v)        SET_GLOBAL_REG(GFSYNR1, (b), (v))
+#define SET_GFSYNR2(b, v)        SET_GLOBAL_REG(GFSYNR2, (b), (v))
+#define SET_TLBIVMID(b, v)       SET_GLOBAL_REG(TLBIVMID, (b), (v))
+#define SET_TLBIALLNSNH(b, v)    SET_GLOBAL_REG(TLBIALLNSNH, (b), (v))
+#define SET_TLBIALLH(b, v)       SET_GLOBAL_REG(TLBIALLH, (b), (v))
+#define SET_TLBGSYNC(b, v)       SET_GLOBAL_REG(TLBGSYNC, (b), (v))
+#define SET_TLBGSTATUS(b, v)     SET_GLOBAL_REG(TLBGSTATUS, (b), (v))
+#define SET_TLBIVAH(b, v)        SET_GLOBAL_REG(TLBIVAH, (b), (v))
+#define SET_GATS1UR(b, v)        SET_GLOBAL_REG(GATS1UR, (b), (v))
+#define SET_GATS1UW(b, v)        SET_GLOBAL_REG(GATS1UW, (b), (v))
+#define SET_GATS1PR(b, v)        SET_GLOBAL_REG(GATS1PR, (b), (v))
+#define SET_GATS1PW(b, v)        SET_GLOBAL_REG(GATS1PW, (b), (v))
+#define SET_GATS12UR(b, v)       SET_GLOBAL_REG(GATS12UR, (b), (v))
+#define SET_GATS12UW(b, v)       SET_GLOBAL_REG(GATS12UW, (b), (v))
+#define SET_GATS12PR(b, v)       SET_GLOBAL_REG(GATS12PR, (b), (v))
+#define SET_GATS12PW(b, v)       SET_GLOBAL_REG(GATS12PW, (b), (v))
+#define SET_GPAR(b, v)           SET_GLOBAL_REG(GPAR, (b), (v))
+#define SET_GATSR(b, v)          SET_GLOBAL_REG(GATSR, (b), (v))
+#define SET_NSCR0(b, v)          SET_GLOBAL_REG(NSCR0, (b), (v))
+#define SET_NSCR2(b, v)          SET_GLOBAL_REG(NSCR2, (b), (v))
+#define SET_NSACR(b, v)          SET_GLOBAL_REG(NSACR, (b), (v))
+#define SET_PMCR(b, v)           SET_GLOBAL_REG(PMCR, (b), (v))
+#define SET_SMR_N(b, N, v)       SET_GLOBAL_REG_N(SMR, N, (b), (v))
+#define SET_S2CR_N(b, N, v)      SET_GLOBAL_REG_N(S2CR, N, (b), (v))
+
+#define GET_CR0(b)               GET_GLOBAL_REG(CR0, (b))
+#define GET_SCR1(b)              GET_GLOBAL_REG(SCR1, (b))
+#define GET_CR2(b)               GET_GLOBAL_REG(CR2, (b))
+#define GET_ACR(b)               GET_GLOBAL_REG(ACR, (b))
+#define GET_IDR0(b, N)           GET_GLOBAL_REG(IDR0, (b))
+#define GET_IDR1(b, N)           GET_GLOBAL_REG(IDR1, (b))
+#define GET_IDR2(b, N)           GET_GLOBAL_REG(IDR2, (b))
+#define GET_IDR7(b, N)           GET_GLOBAL_REG(IDR7, (b))
+#define GET_GFAR(b)              GET_GLOBAL_REG(GFAR, (b))
+#define GET_GFSR(b)              GET_GLOBAL_REG(GFSR, (b))
+#define GET_GFSRRESTORE(b)       GET_GLOBAL_REG(GFSRRESTORE, (b))
+#define GET_GFSYNR0(b)           GET_GLOBAL_REG(GFSYNR0, (b))
+#define GET_GFSYNR1(b)           GET_GLOBAL_REG(GFSYNR1, (b))
+#define GET_GFSYNR2(b)           GET_GLOBAL_REG(GFSYNR2, (b))
+#define GET_TLBIVMID(b)          GET_GLOBAL_REG(TLBIVMID, (b))
+#define GET_TLBIALLNSNH(b)       GET_GLOBAL_REG(TLBIALLNSNH, (b))
+#define GET_TLBIALLH(b)          GET_GLOBAL_REG(TLBIALLH, (b))
+#define GET_TLBGSYNC(b)          GET_GLOBAL_REG(TLBGSYNC, (b))
+#define GET_TLBGSTATUS(b)        GET_GLOBAL_REG(TLBGSTATUS, (b))
+#define GET_TLBIVAH(b)           GET_GLOBAL_REG(TLBIVAH, (b))
+#define GET_GATS1UR(b)           GET_GLOBAL_REG(GATS1UR, (b))
+#define GET_GATS1UW(b)           GET_GLOBAL_REG(GATS1UW, (b))
+#define GET_GATS1PR(b)           GET_GLOBAL_REG(GATS1PR, (b))
+#define GET_GATS1PW(b)           GET_GLOBAL_REG(GATS1PW, (b))
+#define GET_GATS12UR(b)          GET_GLOBAL_REG(GATS12UR, (b))
+#define GET_GATS12UW(b)          GET_GLOBAL_REG(GATS12UW, (b))
+#define GET_GATS12PR(b)          GET_GLOBAL_REG(GATS12PR, (b))
+#define GET_GATS12PW(b)          GET_GLOBAL_REG(GATS12PW, (b))
+#define GET_GPAR(b)              GET_GLOBAL_REG(GPAR, (b))
+#define GET_GATSR(b)             GET_GLOBAL_REG(GATSR, (b))
+#define GET_NSCR0(b)             GET_GLOBAL_REG(NSCR0, (b))
+#define GET_NSCR2(b)             GET_GLOBAL_REG(NSCR2, (b))
+#define GET_NSACR(b)             GET_GLOBAL_REG(NSACR, (b))
+#define GET_PMCR(b, v)           GET_GLOBAL_REG(PMCR, (b))
+#define GET_SMR_N(b, N)          GET_GLOBAL_REG_N(SMR, N, (b))
+#define GET_S2CR_N(b, N)         GET_GLOBAL_REG_N(S2CR, N, (b))
+
+/* Global register space 1 setters / getters */
+#define SET_CBAR_N(b, N, v)      SET_GLOBAL_REG_N(CBAR, N, (b), (v))
+#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v))
+
+#define GET_CBAR_N(b, N)         GET_GLOBAL_REG_N(CBAR, N, (b))
+#define GET_CBFRSYNRA_N(b, N)    GET_GLOBAL_REG_N(CBFRSYNRA, N, (b))
+
+/* Implementation defined register setters/getters */
+#define SET_MICRO_MMU_CTRL_HALT_REQ(b, v) \
+				SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, HALT_REQ, v)
+#define GET_MICRO_MMU_CTRL_IDLE(b) \
+				GET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, IDLE)
+#define SET_PREDICTIONDIS0(b, v) SET_GLOBAL_REG(PREDICTIONDIS0, (b), (v))
+#define SET_PREDICTIONDIS1(b, v) SET_GLOBAL_REG(PREDICTIONDIS1, (b), (v))
+#define SET_S1L1BFBLP0(b, v)     SET_GLOBAL_REG(S1L1BFBLP0, (b), (v))
+
+/* SSD register setters/getters */
+#define SET_SSDR_N(b, N, v)      SET_GLOBAL_REG_N(SSDR_N, N, (b), (v))
+
+#define GET_SSDR_N(b, N)         GET_GLOBAL_REG_N(SSDR_N, N, (b))
+
+/* Context bank register setters/getters */
+#define SET_SCTLR(b, c, v)       SET_CTX_REG(CB_SCTLR, (b), (c), (v))
+#define SET_ACTLR(b, c, v)       SET_CTX_REG(CB_ACTLR, (b), (c), (v))
+#define SET_RESUME(b, c, v)      SET_CTX_REG(CB_RESUME, (b), (c), (v))
+#define SET_TTBR0(b, c, v)       SET_CTX_REG(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v)       SET_CTX_REG(CB_TTBR1, (b), (c), (v))
+#define SET_TTBCR(b, c, v)       SET_CTX_REG(CB_TTBCR, (b), (c), (v))
+#define SET_CONTEXTIDR(b, c, v)  SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v))
+#define SET_PRRR(b, c, v)        SET_CTX_REG(CB_PRRR, (b), (c), (v))
+#define SET_NMRR(b, c, v)        SET_CTX_REG(CB_NMRR, (b), (c), (v))
+#define SET_PAR(b, c, v)         SET_CTX_REG(CB_PAR, (b), (c), (v))
+#define SET_FSR(b, c, v)         SET_CTX_REG(CB_FSR, (b), (c), (v))
+#define SET_FSRRESTORE(b, c, v)  SET_CTX_REG(CB_FSRRESTORE, (b), (c), (v))
+#define SET_FAR(b, c, v)         SET_CTX_REG(CB_FAR, (b), (c), (v))
+#define SET_FSYNR0(b, c, v)      SET_CTX_REG(CB_FSYNR0, (b), (c), (v))
+#define SET_FSYNR1(b, c, v)      SET_CTX_REG(CB_FSYNR1, (b), (c), (v))
+#define SET_TLBIVA(b, c, v)      SET_CTX_REG(CB_TLBIVA, (b), (c), (v))
+#define SET_TLBIVAA(b, c, v)     SET_CTX_REG(CB_TLBIVAA, (b), (c), (v))
+#define SET_TLBIASID(b, c, v)    SET_CTX_REG(CB_TLBIASID, (b), (c), (v))
+#define SET_TLBIALL(b, c, v)     SET_CTX_REG(CB_TLBIALL, (b), (c), (v))
+#define SET_TLBIVAL(b, c, v)     SET_CTX_REG(CB_TLBIVAL, (b), (c), (v))
+#define SET_TLBIVAAL(b, c, v)    SET_CTX_REG(CB_TLBIVAAL, (b), (c), (v))
+#define SET_TLBSYNC(b, c, v)     SET_CTX_REG(CB_TLBSYNC, (b), (c), (v))
+#define SET_TLBSTATUS(b, c, v)   SET_CTX_REG(CB_TLBSTATUS, (b), (c), (v))
+#define SET_ATS1PR(b, c, v)      SET_CTX_REG(CB_ATS1PR, (b), (c), (v))
+#define SET_ATS1PW(b, c, v)      SET_CTX_REG(CB_ATS1PW, (b), (c), (v))
+#define SET_ATS1UR(b, c, v)      SET_CTX_REG(CB_ATS1UR, (b), (c), (v))
+#define SET_ATS1UW(b, c, v)      SET_CTX_REG(CB_ATS1UW, (b), (c), (v))
+#define SET_ATSR(b, c, v)        SET_CTX_REG(CB_ATSR, (b), (c), (v))
+
+#define GET_SCTLR(b, c)          GET_CTX_REG(CB_SCTLR, (b), (c))
+#define GET_ACTLR(b, c)          GET_CTX_REG(CB_ACTLR, (b), (c))
+#define GET_RESUME(b, c)         GET_CTX_REG(CB_RESUME, (b), (c))
+#define GET_TTBR0(b, c)          GET_CTX_REG(CB_TTBR0, (b), (c))
+#define GET_TTBR1(b, c)          GET_CTX_REG(CB_TTBR1, (b), (c))
+#define GET_TTBCR(b, c)          GET_CTX_REG(CB_TTBCR, (b), (c))
+#define GET_CONTEXTIDR(b, c)     GET_CTX_REG(CB_CONTEXTIDR, (b), (c))
+#define GET_PRRR(b, c)           GET_CTX_REG(CB_PRRR, (b), (c))
+#define GET_NMRR(b, c)           GET_CTX_REG(CB_NMRR, (b), (c))
+#define GET_PAR(b, c)            GET_CTX_REG(CB_PAR, (b), (c))
+#define GET_FSR(b, c)            GET_CTX_REG(CB_FSR, (b), (c))
+#define GET_FSRRESTORE(b, c)     GET_CTX_REG(CB_FSRRESTORE, (b), (c))
+#define GET_FAR(b, c)            GET_CTX_REG(CB_FAR, (b), (c))
+#define GET_FSYNR0(b, c)         GET_CTX_REG(CB_FSYNR0, (b), (c))
+#define GET_FSYNR1(b, c)         GET_CTX_REG(CB_FSYNR1, (b), (c))
+#define GET_TLBIVA(b, c)         GET_CTX_REG(CB_TLBIVA, (b), (c))
+#define GET_TLBIVAA(b, c)        GET_CTX_REG(CB_TLBIVAA, (b), (c))
+#define GET_TLBIASID(b, c)       GET_CTX_REG(CB_TLBIASID, (b), (c))
+#define GET_TLBIALL(b, c)        GET_CTX_REG(CB_TLBIALL, (b), (c))
+#define GET_TLBIVAL(b, c)        GET_CTX_REG(CB_TLBIVAL, (b), (c))
+#define GET_TLBIVAAL(b, c)       GET_CTX_REG(CB_TLBIVAAL, (b), (c))
+#define GET_TLBSYNC(b, c)        GET_CTX_REG(CB_TLBSYNC, (b), (c))
+#define GET_TLBSTATUS(b, c)      GET_CTX_REG(CB_TLBSTATUS, (b), (c))
+#define GET_ATS1PR(b, c)         GET_CTX_REG(CB_ATS1PR, (b), (c))
+#define GET_ATS1PW(b, c)         GET_CTX_REG(CB_ATS1PW, (b), (c))
+#define GET_ATS1UR(b, c)         GET_CTX_REG(CB_ATS1UR, (b), (c))
+#define GET_ATS1UW(b, c)         GET_CTX_REG(CB_ATS1UW, (b), (c))
+#define GET_ATSR(b, c)           GET_CTX_REG(CB_ATSR, (b), (c))
+
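+/*
+ * Usage sketch (illustrative only; pgtable_pa and asid are assumed caller
+ * values): a minimal stage-1 context bring-up using the wrappers above
+ * might program the page tables and then enable translation:
+ *
+ *	SET_TTBR0(base, ctx, pgtable_pa);  // first-level table address
+ *	SET_TTBCR(base, ctx, 0);           // N = 0: TTBR0 maps the whole VA
+ *	SET_CONTEXTIDR(base, ctx, asid);   // tag TLB entries with this ASID
+ *	SET_CB_SCTLR_M(base, ctx, 1);      // M = 1: enable translation
+ */
+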
+/* Global Register field setters / getters */
+/* Configuration Register: CR0 */
+#define SET_CR0_NSCFG(b, v)        SET_GLOBAL_FIELD(b, CR0, NSCFG, v)
+#define SET_CR0_WACFG(b, v)        SET_GLOBAL_FIELD(b, CR0, WACFG, v)
+#define SET_CR0_RACFG(b, v)        SET_GLOBAL_FIELD(b, CR0, RACFG, v)
+#define SET_CR0_SHCFG(b, v)        SET_GLOBAL_FIELD(b, CR0, SHCFG, v)
+#define SET_CR0_SMCFCFG(b, v)      SET_GLOBAL_FIELD(b, CR0, SMCFCFG, v)
+#define SET_CR0_MTCFG(b, v)        SET_GLOBAL_FIELD(b, CR0, MTCFG, v)
+#define SET_CR0_BSU(b, v)          SET_GLOBAL_FIELD(b, CR0, BSU, v)
+#define SET_CR0_FB(b, v)           SET_GLOBAL_FIELD(b, CR0, FB, v)
+#define SET_CR0_PTM(b, v)          SET_GLOBAL_FIELD(b, CR0, PTM, v)
+#define SET_CR0_VMIDPNE(b, v)      SET_GLOBAL_FIELD(b, CR0, VMIDPNE, v)
+#define SET_CR0_USFCFG(b, v)       SET_GLOBAL_FIELD(b, CR0, USFCFG, v)
+#define SET_CR0_GSE(b, v)          SET_GLOBAL_FIELD(b, CR0, GSE, v)
+#define SET_CR0_STALLD(b, v)       SET_GLOBAL_FIELD(b, CR0, STALLD, v)
+#define SET_CR0_TRANSIENTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG, v)
+#define SET_CR0_GCFGFIE(b, v)      SET_GLOBAL_FIELD(b, CR0, GCFGFIE, v)
+#define SET_CR0_GCFGFRE(b, v)      SET_GLOBAL_FIELD(b, CR0, GCFGFRE, v)
+#define SET_CR0_GFIE(b, v)         SET_GLOBAL_FIELD(b, CR0, GFIE, v)
+#define SET_CR0_GFRE(b, v)         SET_GLOBAL_FIELD(b, CR0, GFRE, v)
+#define SET_CR0_CLIENTPD(b, v)     SET_GLOBAL_FIELD(b, CR0, CLIENTPD, v)
+
+#define GET_CR0_NSCFG(b)           GET_GLOBAL_FIELD(b, CR0, NSCFG)
+#define GET_CR0_WACFG(b)           GET_GLOBAL_FIELD(b, CR0, WACFG)
+#define GET_CR0_RACFG(b)           GET_GLOBAL_FIELD(b, CR0, RACFG)
+#define GET_CR0_SHCFG(b)           GET_GLOBAL_FIELD(b, CR0, SHCFG)
+#define GET_CR0_SMCFCFG(b)         GET_GLOBAL_FIELD(b, CR0, SMCFCFG)
+#define GET_CR0_MTCFG(b)           GET_GLOBAL_FIELD(b, CR0, MTCFG)
+#define GET_CR0_BSU(b)             GET_GLOBAL_FIELD(b, CR0, BSU)
+#define GET_CR0_FB(b)              GET_GLOBAL_FIELD(b, CR0, FB)
+#define GET_CR0_PTM(b)             GET_GLOBAL_FIELD(b, CR0, PTM)
+#define GET_CR0_VMIDPNE(b)         GET_GLOBAL_FIELD(b, CR0, VMIDPNE)
+#define GET_CR0_USFCFG(b)          GET_GLOBAL_FIELD(b, CR0, USFCFG)
+#define GET_CR0_GSE(b)             GET_GLOBAL_FIELD(b, CR0, GSE)
+#define GET_CR0_STALLD(b)          GET_GLOBAL_FIELD(b, CR0, STALLD)
+#define GET_CR0_TRANSIENTCFG(b)    GET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG)
+#define GET_CR0_GCFGFIE(b)         GET_GLOBAL_FIELD(b, CR0, GCFGFIE)
+#define GET_CR0_GCFGFRE(b)         GET_GLOBAL_FIELD(b, CR0, GCFGFRE)
+#define GET_CR0_GFIE(b)            GET_GLOBAL_FIELD(b, CR0, GFIE)
+#define GET_CR0_GFRE(b)            GET_GLOBAL_FIELD(b, CR0, GFRE)
+#define GET_CR0_CLIENTPD(b)        GET_GLOBAL_FIELD(b, CR0, CLIENTPD)
+
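+/*
+ * Usage sketch (assumed bring-up values): a driver enabling global fault
+ * reporting while keeping client traffic translated might program CR0 as:
+ *
+ *	SET_CR0_GFRE(base, 1);      // record global faults
+ *	SET_CR0_GFIE(base, 1);      // interrupt on global faults
+ *	SET_CR0_CLIENTPD(base, 0);  // keep client ports enabled
+ */
+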
+/* Configuration Register: CR2 */
+#define SET_CR2_BPVMID(b, v)     SET_GLOBAL_FIELD(b, CR2, BPVMID, v)
+
+#define GET_CR2_BPVMID(b)        GET_GLOBAL_FIELD(b, CR2, BPVMID)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define SET_GATS1PR_ADDR(b, v)   SET_GLOBAL_FIELD(b, GATS1PR, ADDR, v)
+#define SET_GATS1PR_NDX(b, v)    SET_GLOBAL_FIELD(b, GATS1PR, NDX, v)
+
+#define GET_GATS1PR_ADDR(b)      GET_GLOBAL_FIELD(b, GATS1PR, ADDR)
+#define GET_GATS1PR_NDX(b)       GET_GLOBAL_FIELD(b, GATS1PR, NDX)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define SET_GATS1PW_ADDR(b, v)   SET_GLOBAL_FIELD(b, GATS1PW, ADDR, v)
+#define SET_GATS1PW_NDX(b, v)    SET_GLOBAL_FIELD(b, GATS1PW, NDX, v)
+
+#define GET_GATS1PW_ADDR(b)      GET_GLOBAL_FIELD(b, GATS1PW, ADDR)
+#define GET_GATS1PW_NDX(b)       GET_GLOBAL_FIELD(b, GATS1PW, NDX)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define SET_GATS1UR_ADDR(b, v)   SET_GLOBAL_FIELD(b, GATS1UR, ADDR, v)
+#define SET_GATS1UR_NDX(b, v)    SET_GLOBAL_FIELD(b, GATS1UR, NDX, v)
+
+#define GET_GATS1UR_ADDR(b)      GET_GLOBAL_FIELD(b, GATS1UR, ADDR)
+#define GET_GATS1UR_NDX(b)       GET_GLOBAL_FIELD(b, GATS1UR, NDX)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define SET_GATS1UW_ADDR(b, v)   SET_GLOBAL_FIELD(b, GATS1UW, ADDR, v)
+#define SET_GATS1UW_NDX(b, v)    SET_GLOBAL_FIELD(b, GATS1UW, NDX, v)
+
+#define GET_GATS1UW_ADDR(b)      GET_GLOBAL_FIELD(b, GATS1UW, ADDR)
+#define GET_GATS1UW_NDX(b)       GET_GLOBAL_FIELD(b, GATS1UW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define SET_GATS12PR_ADDR(b, v)  SET_GLOBAL_FIELD(b, GATS12PR, ADDR, v)
+#define SET_GATS12PR_NDX(b, v)   SET_GLOBAL_FIELD(b, GATS12PR, NDX, v)
+
+#define GET_GATS12PR_ADDR(b)     GET_GLOBAL_FIELD(b, GATS12PR, ADDR)
+#define GET_GATS12PR_NDX(b)      GET_GLOBAL_FIELD(b, GATS12PR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define SET_GATS12PW_ADDR(b, v)  SET_GLOBAL_FIELD(b, GATS12PW, ADDR, v)
+#define SET_GATS12PW_NDX(b, v)   SET_GLOBAL_FIELD(b, GATS12PW, NDX, v)
+
+#define GET_GATS12PW_ADDR(b)     GET_GLOBAL_FIELD(b, GATS12PW, ADDR)
+#define GET_GATS12PW_NDX(b)      GET_GLOBAL_FIELD(b, GATS12PW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define SET_GATS12UR_ADDR(b, v)  SET_GLOBAL_FIELD(b, GATS12UR, ADDR, v)
+#define SET_GATS12UR_NDX(b, v)   SET_GLOBAL_FIELD(b, GATS12UR, NDX, v)
+
+#define GET_GATS12UR_ADDR(b)     GET_GLOBAL_FIELD(b, GATS12UR, ADDR)
+#define GET_GATS12UR_NDX(b)      GET_GLOBAL_FIELD(b, GATS12UR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define SET_GATS12UW_ADDR(b, v)  SET_GLOBAL_FIELD(b, GATS12UW, ADDR, v)
+#define SET_GATS12UW_NDX(b, v)   SET_GLOBAL_FIELD(b, GATS12UW, NDX, v)
+
+#define GET_GATS12UW_ADDR(b)     GET_GLOBAL_FIELD(b, GATS12UW, ADDR)
+#define GET_GATS12UW_NDX(b)      GET_GLOBAL_FIELD(b, GATS12UW, NDX)
+
+/* Global Address Translation Status Register: GATSR */
+#define SET_GATSR_ACTIVE(b, v)   SET_GLOBAL_FIELD(b, GATSR, ACTIVE, v)
+
+#define GET_GATSR_ACTIVE(b)      GET_GLOBAL_FIELD(b, GATSR, ACTIVE)
+
+/* Global Fault Address Register: GFAR */
+#define SET_GFAR_FADDR(b, v)     SET_GLOBAL_FIELD(b, GFAR, FADDR, v)
+
+#define GET_GFAR_FADDR(b)        GET_GLOBAL_FIELD(b, GFAR, FADDR)
+
+/* Global Fault Status Register: GFSR */
+#define SET_GFSR_ICF(b, v)        SET_GLOBAL_FIELD(b, GFSR, ICF, v)
+#define SET_GFSR_USF(b, v)        SET_GLOBAL_FIELD(b, GFSR, USF, v)
+#define SET_GFSR_SMCF(b, v)       SET_GLOBAL_FIELD(b, GFSR, SMCF, v)
+#define SET_GFSR_UCBF(b, v)       SET_GLOBAL_FIELD(b, GFSR, UCBF, v)
+#define SET_GFSR_UCIF(b, v)       SET_GLOBAL_FIELD(b, GFSR, UCIF, v)
+#define SET_GFSR_CAF(b, v)        SET_GLOBAL_FIELD(b, GFSR, CAF, v)
+#define SET_GFSR_EF(b, v)         SET_GLOBAL_FIELD(b, GFSR, EF, v)
+#define SET_GFSR_PF(b, v)         SET_GLOBAL_FIELD(b, GFSR, PF, v)
+#define SET_GFSR_MULTI(b, v)      SET_GLOBAL_FIELD(b, GFSR, MULTI, v)
+
+#define GET_GFSR_ICF(b)           GET_GLOBAL_FIELD(b, GFSR, ICF)
+#define GET_GFSR_USF(b)           GET_GLOBAL_FIELD(b, GFSR, USF)
+#define GET_GFSR_SMCF(b)          GET_GLOBAL_FIELD(b, GFSR, SMCF)
+#define GET_GFSR_UCBF(b)          GET_GLOBAL_FIELD(b, GFSR, UCBF)
+#define GET_GFSR_UCIF(b)          GET_GLOBAL_FIELD(b, GFSR, UCIF)
+#define GET_GFSR_CAF(b)           GET_GLOBAL_FIELD(b, GFSR, CAF)
+#define GET_GFSR_EF(b)            GET_GLOBAL_FIELD(b, GFSR, EF)
+#define GET_GFSR_PF(b)            GET_GLOBAL_FIELD(b, GFSR, PF)
+#define GET_GFSR_MULTI(b)         GET_GLOBAL_FIELD(b, GFSR, MULTI)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define SET_GFSYNR0_NESTED(b, v)  SET_GLOBAL_FIELD(b, GFSYNR0, NESTED, v)
+#define SET_GFSYNR0_WNR(b, v)     SET_GLOBAL_FIELD(b, GFSYNR0, WNR, v)
+#define SET_GFSYNR0_PNU(b, v)     SET_GLOBAL_FIELD(b, GFSYNR0, PNU, v)
+#define SET_GFSYNR0_IND(b, v)     SET_GLOBAL_FIELD(b, GFSYNR0, IND, v)
+#define SET_GFSYNR0_NSSTATE(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE, v)
+#define SET_GFSYNR0_NSATTR(b, v)  SET_GLOBAL_FIELD(b, GFSYNR0, NSATTR, v)
+
+#define GET_GFSYNR0_NESTED(b)     GET_GLOBAL_FIELD(b, GFSYNR0, NESTED)
+#define GET_GFSYNR0_WNR(b)        GET_GLOBAL_FIELD(b, GFSYNR0, WNR)
+#define GET_GFSYNR0_PNU(b)        GET_GLOBAL_FIELD(b, GFSYNR0, PNU)
+#define GET_GFSYNR0_IND(b)        GET_GLOBAL_FIELD(b, GFSYNR0, IND)
+#define GET_GFSYNR0_NSSTATE(b)    GET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE)
+#define GET_GFSYNR0_NSATTR(b)     GET_GLOBAL_FIELD(b, GFSYNR0, NSATTR)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define SET_GFSYNR1_SID(b, v)     SET_GLOBAL_FIELD(b, GFSYNR1, SID, v)
+
+#define GET_GFSYNR1_SID(b)        GET_GLOBAL_FIELD(b, GFSYNR1, SID)
+
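+/*
+ * Handler sketch (assumed flow; treats GFSR as write-one-to-clear): a
+ * global fault ISR would typically capture the syndrome before
+ * acknowledging the status bits:
+ *
+ *	faddr = GET_GFAR_FADDR(base);    // faulting address
+ *	wnr   = GET_GFSYNR0_WNR(base);   // write-not-read indicator
+ *	sid   = GET_GFSYNR1_SID(base);   // offending stream ID
+ *	SET_GFSR(base, GET_GFSR(base));  // clear the reported faults
+ */
+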
+/* Global Physical Address Register: GPAR */
+#define SET_GPAR_F(b, v)          SET_GLOBAL_FIELD(b, GPAR, F, v)
+#define SET_GPAR_SS(b, v)         SET_GLOBAL_FIELD(b, GPAR, SS, v)
+#define SET_GPAR_OUTER(b, v)      SET_GLOBAL_FIELD(b, GPAR, OUTER, v)
+#define SET_GPAR_INNER(b, v)      SET_GLOBAL_FIELD(b, GPAR, INNER, v)
+#define SET_GPAR_SH(b, v)         SET_GLOBAL_FIELD(b, GPAR, SH, v)
+#define SET_GPAR_NS(b, v)         SET_GLOBAL_FIELD(b, GPAR, NS, v)
+#define SET_GPAR_NOS(b, v)        SET_GLOBAL_FIELD(b, GPAR, NOS, v)
+#define SET_GPAR_PA(b, v)         SET_GLOBAL_FIELD(b, GPAR, PA, v)
+#define SET_GPAR_TF(b, v)         SET_GLOBAL_FIELD(b, GPAR, TF, v)
+#define SET_GPAR_AFF(b, v)        SET_GLOBAL_FIELD(b, GPAR, AFF, v)
+#define SET_GPAR_PF(b, v)         SET_GLOBAL_FIELD(b, GPAR, PF, v)
+#define SET_GPAR_EF(b, v)         SET_GLOBAL_FIELD(b, GPAR, EF, v)
+#define SET_GPAR_TLCMCF(b, v)     SET_GLOBAL_FIELD(b, GPAR, TLCMCF, v)
+#define SET_GPAR_TLBLKF(b, v)     SET_GLOBAL_FIELD(b, GPAR, TLBLKF, v)
+#define SET_GPAR_UCBF(b, v)       SET_GLOBAL_FIELD(b, GPAR, UCBF, v)
+
+#define GET_GPAR_F(b)             GET_GLOBAL_FIELD(b, GPAR, F)
+#define GET_GPAR_SS(b)            GET_GLOBAL_FIELD(b, GPAR, SS)
+#define GET_GPAR_OUTER(b)         GET_GLOBAL_FIELD(b, GPAR, OUTER)
+#define GET_GPAR_INNER(b)         GET_GLOBAL_FIELD(b, GPAR, INNER)
+#define GET_GPAR_SH(b)            GET_GLOBAL_FIELD(b, GPAR, SH)
+#define GET_GPAR_NS(b)            GET_GLOBAL_FIELD(b, GPAR, NS)
+#define GET_GPAR_NOS(b)           GET_GLOBAL_FIELD(b, GPAR, NOS)
+#define GET_GPAR_PA(b)            GET_GLOBAL_FIELD(b, GPAR, PA)
+#define GET_GPAR_TF(b)            GET_GLOBAL_FIELD(b, GPAR, TF)
+#define GET_GPAR_AFF(b)           GET_GLOBAL_FIELD(b, GPAR, AFF)
+#define GET_GPAR_PF(b)            GET_GLOBAL_FIELD(b, GPAR, PF)
+#define GET_GPAR_EF(b)            GET_GLOBAL_FIELD(b, GPAR, EF)
+#define GET_GPAR_TLCMCF(b)        GET_GLOBAL_FIELD(b, GPAR, TLCMCF)
+#define GET_GPAR_TLBLKF(b)        GET_GLOBAL_FIELD(b, GPAR, TLBLKF)
+#define GET_GPAR_UCBF(b)          GET_GLOBAL_FIELD(b, GPAR, UCBF)
+
+/* Identification Register: IDR0 */
+#define SET_IDR0_NUMSMRG(b, v)    SET_GLOBAL_FIELD(b, IDR0, NUMSMRG, v)
+#define SET_IDR0_NUMSIDB(b, v)    SET_GLOBAL_FIELD(b, IDR0, NUMSIDB, v)
+#define SET_IDR0_BTM(b, v)        SET_GLOBAL_FIELD(b, IDR0, BTM, v)
+#define SET_IDR0_CTTW(b, v)       SET_GLOBAL_FIELD(b, IDR0, CTTW, v)
+#define SET_IDR0_NUMIRPT(b, v)    SET_GLOBAL_FIELD(b, IDR0, NUMIRPT, v)
+#define SET_IDR0_PTFS(b, v)       SET_GLOBAL_FIELD(b, IDR0, PTFS, v)
+#define SET_IDR0_SMS(b, v)        SET_GLOBAL_FIELD(b, IDR0, SMS, v)
+#define SET_IDR0_NTS(b, v)        SET_GLOBAL_FIELD(b, IDR0, NTS, v)
+#define SET_IDR0_S2TS(b, v)       SET_GLOBAL_FIELD(b, IDR0, S2TS, v)
+#define SET_IDR0_S1TS(b, v)       SET_GLOBAL_FIELD(b, IDR0, S1TS, v)
+#define SET_IDR0_SES(b, v)        SET_GLOBAL_FIELD(b, IDR0, SES, v)
+
+#define GET_IDR0_NUMSMRG(b)       GET_GLOBAL_FIELD(b, IDR0, NUMSMRG)
+#define GET_IDR0_NUMSIDB(b)       GET_GLOBAL_FIELD(b, IDR0, NUMSIDB)
+#define GET_IDR0_BTM(b)           GET_GLOBAL_FIELD(b, IDR0, BTM)
+#define GET_IDR0_CTTW(b)          GET_GLOBAL_FIELD(b, IDR0, CTTW)
+#define GET_IDR0_NUMIRPT(b)       GET_GLOBAL_FIELD(b, IDR0, NUMIRPT)
+#define GET_IDR0_PTFS(b)          GET_GLOBAL_FIELD(b, IDR0, PTFS)
+#define GET_IDR0_SMS(b)           GET_GLOBAL_FIELD(b, IDR0, SMS)
+#define GET_IDR0_NTS(b)           GET_GLOBAL_FIELD(b, IDR0, NTS)
+#define GET_IDR0_S2TS(b)          GET_GLOBAL_FIELD(b, IDR0, S2TS)
+#define GET_IDR0_S1TS(b)          GET_GLOBAL_FIELD(b, IDR0, S1TS)
+#define GET_IDR0_SES(b)           GET_GLOBAL_FIELD(b, IDR0, SES)
+
+/* Identification Register: IDR1 */
+#define SET_IDR1_NUMCB(b, v)       SET_GLOBAL_FIELD(b, IDR1, NUMCB, v)
+#define SET_IDR1_NUMSSDNDXB(b, v)  SET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB, v)
+#define SET_IDR1_SSDTP(b, v)       SET_GLOBAL_FIELD(b, IDR1, SSDTP, v)
+#define SET_IDR1_SMCD(b, v)        SET_GLOBAL_FIELD(b, IDR1, SMCD, v)
+#define SET_IDR1_NUMS2CB(b, v)     SET_GLOBAL_FIELD(b, IDR1, NUMS2CB, v)
+#define SET_IDR1_NUMPAGENDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB, v)
+#define SET_IDR1_PAGESIZE(b, v)    SET_GLOBAL_FIELD(b, IDR1, PAGESIZE, v)
+
+#define GET_IDR1_NUMCB(b)          GET_GLOBAL_FIELD(b, IDR1, NUMCB)
+#define GET_IDR1_NUMSSDNDXB(b)     GET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB)
+#define GET_IDR1_SSDTP(b)          GET_GLOBAL_FIELD(b, IDR1, SSDTP)
+#define GET_IDR1_SMCD(b)           GET_GLOBAL_FIELD(b, IDR1, SMCD)
+#define GET_IDR1_NUMS2CB(b)        GET_GLOBAL_FIELD(b, IDR1, NUMS2CB)
+#define GET_IDR1_NUMPAGENDXB(b)    GET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB)
+#define GET_IDR1_PAGESIZE(b)       GET_GLOBAL_FIELD(b, IDR1, PAGESIZE)
+
+/* Identification Register: IDR2 */
+#define SET_IDR2_IAS(b, v)       SET_GLOBAL_FIELD(b, IDR2, IAS, v)
+#define SET_IDR2_OAS(b, v)       SET_GLOBAL_FIELD(b, IDR2, OAS, v)
+
+#define GET_IDR2_IAS(b)          GET_GLOBAL_FIELD(b, IDR2, IAS)
+#define GET_IDR2_OAS(b)          GET_GLOBAL_FIELD(b, IDR2, OAS)
+
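+/*
+ * Probe sketch (illustrative): the IDR registers describe the instance, so
+ * a driver would typically size its bookkeeping from them at probe time:
+ *
+ *	nsmr = GET_IDR0_NUMSMRG(base);  // number of stream-match groups
+ *	ncb  = GET_IDR1_NUMCB(base);    // number of context banks
+ */
+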
+/* Identification Register: IDR7 */
+#define SET_IDR7_MINOR(b, v)     SET_GLOBAL_FIELD(b, IDR7, MINOR, v)
+#define SET_IDR7_MAJOR(b, v)     SET_GLOBAL_FIELD(b, IDR7, MAJOR, v)
+
+#define GET_IDR7_MINOR(b)        GET_GLOBAL_FIELD(b, IDR7, MINOR)
+#define GET_IDR7_MAJOR(b)        GET_GLOBAL_FIELD(b, IDR7, MAJOR)
+
+/* Stream to Context Register: S2CR_N */
+#define SET_S2CR_CBNDX(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX, v)
+#define SET_S2CR_SHCFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG, v)
+#define SET_S2CR_MTCFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG, v)
+#define SET_S2CR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR, v)
+#define SET_S2CR_TYPE(b, n, v)    SET_GLOBAL_FIELD_N(b, n, S2CR, TYPE, v)
+#define SET_S2CR_NSCFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG, v)
+#define SET_S2CR_RACFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, RACFG, v)
+#define SET_S2CR_WACFG(b, n, v)   SET_GLOBAL_FIELD_N(b, n, S2CR, WACFG, v)
+#define SET_S2CR_PRIVCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG, v)
+#define SET_S2CR_INSTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG, v)
+#define SET_S2CR_TRANSIENTCFG(b, n, v) \
+				SET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG, v)
+#define SET_S2CR_VMID(b, n, v)    SET_GLOBAL_FIELD_N(b, n, S2CR, VMID, v)
+#define SET_S2CR_BSU(b, n, v)     SET_GLOBAL_FIELD_N(b, n, S2CR, BSU, v)
+#define SET_S2CR_FB(b, n, v)      SET_GLOBAL_FIELD_N(b, n, S2CR, FB, v)
+
+#define GET_S2CR_CBNDX(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX)
+#define GET_S2CR_SHCFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG)
+#define GET_S2CR_MTCFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG)
+#define GET_S2CR_MEMATTR(b, n)    GET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR)
+#define GET_S2CR_TYPE(b, n)       GET_GLOBAL_FIELD_N(b, n, S2CR, TYPE)
+#define GET_S2CR_NSCFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG)
+#define GET_S2CR_RACFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, RACFG)
+#define GET_S2CR_WACFG(b, n)      GET_GLOBAL_FIELD_N(b, n, S2CR, WACFG)
+#define GET_S2CR_PRIVCFG(b, n)    GET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG)
+#define GET_S2CR_INSTCFG(b, n)    GET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG)
+#define GET_S2CR_TRANSIENTCFG(b, n) \
+				GET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG)
+#define GET_S2CR_VMID(b, n)       GET_GLOBAL_FIELD_N(b, n, S2CR, VMID)
+#define GET_S2CR_BSU(b, n)        GET_GLOBAL_FIELD_N(b, n, S2CR, BSU)
+#define GET_S2CR_FB(b, n)         GET_GLOBAL_FIELD_N(b, n, S2CR, FB)
+
+/* Stream Match Register: SMR_N */
+#define SET_SMR_ID(b, n, v)       SET_GLOBAL_FIELD_N(b, n, SMR, ID, v)
+#define SET_SMR_MASK(b, n, v)     SET_GLOBAL_FIELD_N(b, n, SMR, MASK, v)
+#define SET_SMR_VALID(b, n, v)    SET_GLOBAL_FIELD_N(b, n, SMR, VALID, v)
+
+#define GET_SMR_ID(b, n)          GET_GLOBAL_FIELD_N(b, n, SMR, ID)
+#define GET_SMR_MASK(b, n)        GET_GLOBAL_FIELD_N(b, n, SMR, MASK)
+#define GET_SMR_VALID(b, n)       GET_GLOBAL_FIELD_N(b, n, SMR, VALID)
+
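+/*
+ * Usage sketch (assumed stream ID and indices): SMR[n] and S2CR[n] pair up,
+ * so routing stream ID 0x21 (all ID bits compared) to context bank 3 via
+ * match entry n could look like:
+ *
+ *	SET_SMR_ID(base, n, 0x21);   // stream ID to match
+ *	SET_SMR_MASK(base, n, 0);    // ignored-bit mask; 0 = exact match
+ *	SET_SMR_VALID(base, n, 1);   // activate the entry
+ *	SET_S2CR_CBNDX(base, n, 3);  // route matches to context bank 3
+ */
+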
+/* Global TLB Status: TLBGSTATUS */
+#define SET_TLBGSTATUS_GSACTIVE(b, v) \
+				SET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE, v)
+
+#define GET_TLBGSTATUS_GSACTIVE(b)    \
+				GET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE)
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define SET_TLBIVAH_ADDR(b, v)  SET_GLOBAL_FIELD(b, TLBIVAH, ADDR, v)
+
+#define GET_TLBIVAH_ADDR(b)     GET_GLOBAL_FIELD(b, TLBIVAH, ADDR)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define SET_TLBIVMID_VMID(b, v) SET_GLOBAL_FIELD(b, TLBIVMID, VMID, v)
+
+#define GET_TLBIVMID_VMID(b)    GET_GLOBAL_FIELD(b, TLBIVMID, VMID)
+
+/* Global Register Space 1 Field setters/getters*/
+/* Context Bank Attribute Register: CBAR_N */
+#define SET_CBAR_VMID(b, n, v)     SET_GLOBAL_FIELD_N(b, n, CBAR, VMID, v)
+#define SET_CBAR_CBNDX(b, n, v)    SET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX, v)
+#define SET_CBAR_BPSHCFG(b, n, v)  SET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG, v)
+#define SET_CBAR_HYPC(b, n, v)     SET_GLOBAL_FIELD_N(b, n, CBAR, HYPC, v)
+#define SET_CBAR_FB(b, n, v)       SET_GLOBAL_FIELD_N(b, n, CBAR, FB, v)
+#define SET_CBAR_MEMATTR(b, n, v)  SET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR, v)
+#define SET_CBAR_TYPE(b, n, v)     SET_GLOBAL_FIELD_N(b, n, CBAR, TYPE, v)
+#define SET_CBAR_BSU(b, n, v)      SET_GLOBAL_FIELD_N(b, n, CBAR, BSU, v)
+#define SET_CBAR_RACFG(b, n, v)    SET_GLOBAL_FIELD_N(b, n, CBAR, RACFG, v)
+#define SET_CBAR_WACFG(b, n, v)    SET_GLOBAL_FIELD_N(b, n, CBAR, WACFG, v)
+#define SET_CBAR_IRPTNDX(b, n, v)  SET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX, v)
+
+#define GET_CBAR_VMID(b, n)        GET_GLOBAL_FIELD_N(b, n, CBAR, VMID)
+#define GET_CBAR_CBNDX(b, n)       GET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX)
+#define GET_CBAR_BPSHCFG(b, n)     GET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG)
+#define GET_CBAR_HYPC(b, n)        GET_GLOBAL_FIELD_N(b, n, CBAR, HYPC)
+#define GET_CBAR_FB(b, n)          GET_GLOBAL_FIELD_N(b, n, CBAR, FB)
+#define GET_CBAR_MEMATTR(b, n)     GET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR)
+#define GET_CBAR_TYPE(b, n)        GET_GLOBAL_FIELD_N(b, n, CBAR, TYPE)
+#define GET_CBAR_BSU(b, n)         GET_GLOBAL_FIELD_N(b, n, CBAR, BSU)
+#define GET_CBAR_RACFG(b, n)       GET_GLOBAL_FIELD_N(b, n, CBAR, RACFG)
+#define GET_CBAR_WACFG(b, n)       GET_GLOBAL_FIELD_N(b, n, CBAR, WACFG)
+#define GET_CBAR_IRPTNDX(b, n)     GET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA_N */
+#define SET_CBFRSYNRA_SID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID, v)
+
+#define GET_CBFRSYNRA_SID(b, n)    GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID)
+
+/* Stage 1 Context Bank Format Fields */
+#define SET_CB_ACTLR_REQPRIORITY(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v)
+#define SET_CB_ACTLR_REQPRIORITYCFG(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG, v)
+#define SET_CB_ACTLR_PRIVCFG(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG, v)
+#define SET_CB_ACTLR_BPRCOSH(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH, v)
+#define SET_CB_ACTLR_BPRCISH(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH, v)
+#define SET_CB_ACTLR_BPRCNSH(b, c, v) \
+		SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH, v)
+
+#define GET_CB_ACTLR_REQPRIORITY(b, c) \
+		GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY)
+#define GET_CB_ACTLR_REQPRIORITYCFG(b, c) \
+		GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG)
+#define GET_CB_ACTLR_PRIVCFG(b, c)  GET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG)
+#define GET_CB_ACTLR_BPRCOSH(b, c)  GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH)
+#define GET_CB_ACTLR_BPRCISH(b, c)  GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH)
+#define GET_CB_ACTLR_BPRCNSH(b, c)  GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define SET_CB_ATS1PR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR, v)
+
+#define GET_CB_ATS1PR_ADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define SET_CB_ATS1PW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR, v)
+
+#define GET_CB_ATS1PW_ADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define SET_CB_ATS1UR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR, v)
+
+#define GET_CB_ATS1UR_ADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define SET_CB_ATS1UW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR, v)
+
+#define GET_CB_ATS1UW_ADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR)
+
+/* Address Translation Status Register: CB_ATSR */
+#define SET_CB_ATSR_ACTIVE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE, v)
+
+#define GET_CB_ATSR_ACTIVE(b, c)    GET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define SET_CB_CONTEXTIDR_ASID(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID, v)
+#define SET_CB_CONTEXTIDR_PROCID(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID, v)
+
+#define GET_CB_CONTEXTIDR_ASID(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID)
+#define GET_CB_CONTEXTIDR_PROCID(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID)
+
+/* Fault Address Register: CB_FAR */
+#define SET_CB_FAR_FADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FAR, FADDR, v)
+
+#define GET_CB_FAR_FADDR(b, c)    GET_CONTEXT_FIELD(b, c, CB_FAR, FADDR)
+
+/* Fault Status Register: CB_FSR */
+#define SET_CB_FSR_TF(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_FSR, TF, v)
+#define SET_CB_FSR_AFF(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_FSR, AFF, v)
+#define SET_CB_FSR_PF(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_FSR, PF, v)
+#define SET_CB_FSR_EF(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_FSR, EF, v)
+#define SET_CB_FSR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF, v)
+#define SET_CB_FSR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF, v)
+#define SET_CB_FSR_SS(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_FSR, SS, v)
+#define SET_CB_FSR_MULTI(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSR, MULTI, v)
+
+#define GET_CB_FSR_TF(b, c)        GET_CONTEXT_FIELD(b, c, CB_FSR, TF)
+#define GET_CB_FSR_AFF(b, c)       GET_CONTEXT_FIELD(b, c, CB_FSR, AFF)
+#define GET_CB_FSR_PF(b, c)        GET_CONTEXT_FIELD(b, c, CB_FSR, PF)
+#define GET_CB_FSR_EF(b, c)        GET_CONTEXT_FIELD(b, c, CB_FSR, EF)
+#define GET_CB_FSR_TLBMCF(b, c)    GET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF)
+#define GET_CB_FSR_TLBLKF(b, c)    GET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF)
+#define GET_CB_FSR_SS(b, c)        GET_CONTEXT_FIELD(b, c, CB_FSR, SS)
+#define GET_CB_FSR_MULTI(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSR, MULTI)
+
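+/*
+ * Handler sketch (assumed flow, mirroring the global fault case): a context
+ * fault ISR would capture the syndrome and then acknowledge the status bits
+ * by writing them back:
+ *
+ *	faddr = GET_CB_FAR_FADDR(base, ctx);     // faulting virtual address
+ *	wnr   = GET_CB_FSYNR0_WNR(base, ctx);    // write-not-read indicator
+ *	SET_FSR(base, ctx, GET_FSR(base, ctx));  // clear the reported faults
+ */
+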
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define SET_CB_FSYNR0_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL, v)
+#define SET_CB_FSYNR0_S1PTWF(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF, v)
+#define SET_CB_FSYNR0_WNR(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR, v)
+#define SET_CB_FSYNR0_PNU(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU, v)
+#define SET_CB_FSYNR0_IND(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND, v)
+#define SET_CB_FSYNR0_NSSTATE(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE, v)
+#define SET_CB_FSYNR0_NSATTR(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR, v)
+#define SET_CB_FSYNR0_ATOF(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF, v)
+#define SET_CB_FSYNR0_PTWF(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF, v)
+#define SET_CB_FSYNR0_AFR(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR, v)
+#define SET_CB_FSYNR0_S1CBNDX(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX, v)
+
+#define GET_CB_FSYNR0_PLVL(b, c)    GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL)
+#define GET_CB_FSYNR0_S1PTWF(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF)
+#define GET_CB_FSYNR0_WNR(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR)
+#define GET_CB_FSYNR0_PNU(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU)
+#define GET_CB_FSYNR0_IND(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND)
+#define GET_CB_FSYNR0_NSSTATE(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE)
+#define GET_CB_FSYNR0_NSATTR(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR)
+#define GET_CB_FSYNR0_ATOF(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF)
+#define GET_CB_FSYNR0_PTWF(b, c)     GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF)
+#define GET_CB_FSYNR0_AFR(b, c)      GET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR)
+#define GET_CB_FSYNR0_S1CBNDX(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define SET_CB_NMRR_IR0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR0, v)
+#define SET_CB_NMRR_IR1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR1, v)
+#define SET_CB_NMRR_IR2(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR2, v)
+#define SET_CB_NMRR_IR3(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR3, v)
+#define SET_CB_NMRR_IR4(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR4, v)
+#define SET_CB_NMRR_IR5(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR5, v)
+#define SET_CB_NMRR_IR6(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR6, v)
+#define SET_CB_NMRR_IR7(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, IR7, v)
+#define SET_CB_NMRR_OR0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR0, v)
+#define SET_CB_NMRR_OR1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR1, v)
+#define SET_CB_NMRR_OR2(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR2, v)
+#define SET_CB_NMRR_OR3(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR3, v)
+#define SET_CB_NMRR_OR4(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR4, v)
+#define SET_CB_NMRR_OR5(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR5, v)
+#define SET_CB_NMRR_OR6(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR6, v)
+#define SET_CB_NMRR_OR7(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_NMRR, OR7, v)
+
+#define GET_CB_NMRR_IR0(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR0)
+#define GET_CB_NMRR_IR1(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR1)
+#define GET_CB_NMRR_IR2(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR2)
+#define GET_CB_NMRR_IR3(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR3)
+#define GET_CB_NMRR_IR4(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR4)
+#define GET_CB_NMRR_IR5(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR5)
+#define GET_CB_NMRR_IR6(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR6)
+#define GET_CB_NMRR_IR7(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, IR7)
+#define GET_CB_NMRR_OR0(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR0)
+#define GET_CB_NMRR_OR1(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR1)
+#define GET_CB_NMRR_OR2(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR2)
+#define GET_CB_NMRR_OR3(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR3)
+#define GET_CB_NMRR_OR4(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR4)
+#define GET_CB_NMRR_OR5(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR5)
+#define GET_CB_NMRR_OR6(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR6)
+#define GET_CB_NMRR_OR7(b, c)       GET_CONTEXT_FIELD(b, c, CB_NMRR, OR7)
+
+/* Physical Address Register: CB_PAR */
+#define SET_CB_PAR_F(b, c, v)       SET_CONTEXT_FIELD(b, c, CB_PAR, F, v)
+#define SET_CB_PAR_SS(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, SS, v)
+#define SET_CB_PAR_OUTER(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PAR, OUTER, v)
+#define SET_CB_PAR_INNER(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PAR, INNER, v)
+#define SET_CB_PAR_SH(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, SH, v)
+#define SET_CB_PAR_NS(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, NS, v)
+#define SET_CB_PAR_NOS(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_PAR, NOS, v)
+#define SET_CB_PAR_PA(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, PA, v)
+#define SET_CB_PAR_TF(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, TF, v)
+#define SET_CB_PAR_AFF(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_PAR, AFF, v)
+#define SET_CB_PAR_PF(b, c, v)      SET_CONTEXT_FIELD(b, c, CB_PAR, PF, v)
+#define SET_CB_PAR_TLBMCF(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF, v)
+#define SET_CB_PAR_TLBLKF(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF, v)
+#define SET_CB_PAR_ATOT(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PAR, ATOT, v)
+#define SET_CB_PAR_PLVL(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PAR, PLVL, v)
+#define SET_CB_PAR_STAGE(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PAR, STAGE, v)
+
+#define GET_CB_PAR_F(b, c)          GET_CONTEXT_FIELD(b, c, CB_PAR, F)
+#define GET_CB_PAR_SS(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, SS)
+#define GET_CB_PAR_OUTER(b, c)      GET_CONTEXT_FIELD(b, c, CB_PAR, OUTER)
+#define GET_CB_PAR_INNER(b, c)      GET_CONTEXT_FIELD(b, c, CB_PAR, INNER)
+#define GET_CB_PAR_SH(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, SH)
+#define GET_CB_PAR_NS(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, NS)
+#define GET_CB_PAR_NOS(b, c)        GET_CONTEXT_FIELD(b, c, CB_PAR, NOS)
+#define GET_CB_PAR_PA(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, PA)
+#define GET_CB_PAR_TF(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, TF)
+#define GET_CB_PAR_AFF(b, c)        GET_CONTEXT_FIELD(b, c, CB_PAR, AFF)
+#define GET_CB_PAR_PF(b, c)         GET_CONTEXT_FIELD(b, c, CB_PAR, PF)
+#define GET_CB_PAR_TLBMCF(b, c)     GET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF)
+#define GET_CB_PAR_TLBLKF(b, c)     GET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF)
+#define GET_CB_PAR_ATOT(b, c)       GET_CONTEXT_FIELD(b, c, CB_PAR, ATOT)
+#define GET_CB_PAR_PLVL(b, c)       GET_CONTEXT_FIELD(b, c, CB_PAR, PLVL)
+#define GET_CB_PAR_STAGE(b, c)      GET_CONTEXT_FIELD(b, c, CB_PAR, STAGE)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define SET_CB_PRRR_TR0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR0, v)
+#define SET_CB_PRRR_TR1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR1, v)
+#define SET_CB_PRRR_TR2(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR2, v)
+#define SET_CB_PRRR_TR3(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR3, v)
+#define SET_CB_PRRR_TR4(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR4, v)
+#define SET_CB_PRRR_TR5(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR5, v)
+#define SET_CB_PRRR_TR6(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR6, v)
+#define SET_CB_PRRR_TR7(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, TR7, v)
+#define SET_CB_PRRR_DS0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, DS0, v)
+#define SET_CB_PRRR_DS1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, DS1, v)
+#define SET_CB_PRRR_NS0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, NS0, v)
+#define SET_CB_PRRR_NS1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_PRRR, NS1, v)
+#define SET_CB_PRRR_NOS0(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0, v)
+#define SET_CB_PRRR_NOS1(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1, v)
+#define SET_CB_PRRR_NOS2(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2, v)
+#define SET_CB_PRRR_NOS3(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3, v)
+#define SET_CB_PRRR_NOS4(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4, v)
+#define SET_CB_PRRR_NOS5(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5, v)
+#define SET_CB_PRRR_NOS6(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6, v)
+#define SET_CB_PRRR_NOS7(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7, v)
+
+#define GET_CB_PRRR_TR0(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR0)
+#define GET_CB_PRRR_TR1(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR1)
+#define GET_CB_PRRR_TR2(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR2)
+#define GET_CB_PRRR_TR3(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR3)
+#define GET_CB_PRRR_TR4(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR4)
+#define GET_CB_PRRR_TR5(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR5)
+#define GET_CB_PRRR_TR6(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR6)
+#define GET_CB_PRRR_TR7(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, TR7)
+#define GET_CB_PRRR_DS0(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, DS0)
+#define GET_CB_PRRR_DS1(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, DS1)
+#define GET_CB_PRRR_NS0(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, NS0)
+#define GET_CB_PRRR_NS1(b, c)       GET_CONTEXT_FIELD(b, c, CB_PRRR, NS1)
+#define GET_CB_PRRR_NOS0(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0)
+#define GET_CB_PRRR_NOS1(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1)
+#define GET_CB_PRRR_NOS2(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2)
+#define GET_CB_PRRR_NOS3(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3)
+#define GET_CB_PRRR_NOS4(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4)
+#define GET_CB_PRRR_NOS5(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5)
+#define GET_CB_PRRR_NOS6(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6)
+#define GET_CB_PRRR_NOS7(b, c)      GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7)
+
+/* Transaction Resume: CB_RESUME */
+#define SET_CB_RESUME_TNR(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_RESUME, TNR, v)
+
+#define GET_CB_RESUME_TNR(b, c)     GET_CONTEXT_FIELD(b, c, CB_RESUME, TNR)
+
+/* System Control Register: CB_SCTLR */
+#define SET_CB_SCTLR_M(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_SCTLR, M, v)
+#define SET_CB_SCTLR_TRE(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE, v)
+#define SET_CB_SCTLR_AFE(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE, v)
+#define SET_CB_SCTLR_AFFD(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD, v)
+#define SET_CB_SCTLR_E(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_SCTLR, E, v)
+#define SET_CB_SCTLR_CFRE(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE, v)
+#define SET_CB_SCTLR_CFIE(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE, v)
+#define SET_CB_SCTLR_CFCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG, v)
+#define SET_CB_SCTLR_HUPCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF, v)
+#define SET_CB_SCTLR_WXN(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN, v)
+#define SET_CB_SCTLR_UWXN(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN, v)
+#define SET_CB_SCTLR_ASIDPNE(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE, v)
+#define SET_CB_SCTLR_TRANSIENTCFG(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG, v)
+#define SET_CB_SCTLR_MEMATTR(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR, v)
+#define SET_CB_SCTLR_MTCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG, v)
+#define SET_CB_SCTLR_SHCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG, v)
+#define SET_CB_SCTLR_RACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG, v)
+#define SET_CB_SCTLR_WACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG, v)
+#define SET_CB_SCTLR_NSCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG, v)
+
+#define GET_CB_SCTLR_M(b, c)        GET_CONTEXT_FIELD(b, c, CB_SCTLR, M)
+#define GET_CB_SCTLR_TRE(b, c)      GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE)
+#define GET_CB_SCTLR_AFE(b, c)      GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE)
+#define GET_CB_SCTLR_AFFD(b, c)     GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD)
+#define GET_CB_SCTLR_E(b, c)        GET_CONTEXT_FIELD(b, c, CB_SCTLR, E)
+#define GET_CB_SCTLR_CFRE(b, c)     GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE)
+#define GET_CB_SCTLR_CFIE(b, c)     GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE)
+#define GET_CB_SCTLR_CFCFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG)
+#define GET_CB_SCTLR_HUPCF(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF)
+#define GET_CB_SCTLR_WXN(b, c)      GET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN)
+#define GET_CB_SCTLR_UWXN(b, c)     GET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN)
+#define GET_CB_SCTLR_ASIDPNE(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE)
+#define GET_CB_SCTLR_TRANSIENTCFG(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG)
+#define GET_CB_SCTLR_MEMATTR(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR)
+#define GET_CB_SCTLR_MTCFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG)
+#define GET_CB_SCTLR_SHCFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG)
+#define GET_CB_SCTLR_RACFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG)
+#define GET_CB_SCTLR_WACFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG)
+#define GET_CB_SCTLR_NSCFG(b, c)    GET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define SET_CB_TLBIASID_ASID(b, c, v) \
+				SET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID, v)
+
+#define GET_CB_TLBIASID_ASID(b, c)    \
+				GET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define SET_CB_TLBIVA_ASID(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID, v)
+#define SET_CB_TLBIVA_VA(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA, v)
+
+#define GET_CB_TLBIVA_ASID(b, c)    GET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID)
+#define GET_CB_TLBIVA_VA(b, c)      GET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define SET_CB_TLBIVAA_VA(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA, v)
+
+#define GET_CB_TLBIVAA_VA(b, c)     GET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define SET_CB_TLBIVAAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA, v)
+
+#define GET_CB_TLBIVAAL_VA(b, c)    GET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define SET_CB_TLBIVAL_ASID(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID, v)
+#define SET_CB_TLBIVAL_VA(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA, v)
+
+#define GET_CB_TLBIVAL_ASID(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID)
+#define GET_CB_TLBIVAL_VA(b, c)      GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA)
+
+/* TLB Status: CB_TLBSTATUS */
+#define SET_CB_TLBSTATUS_SACTIVE(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE, v)
+
+#define GET_CB_TLBSTATUS_SACTIVE(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define SET_CB_TTBCR_T0SZ(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v)
+#define SET_CB_TTBCR_PD0(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0, v)
+#define SET_CB_TTBCR_PD1(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1, v)
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+			SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+#define SET_CB_TTBCR_EAE(b, c, v)    SET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE, v)
+
+#define GET_CB_TTBCR_T0SZ(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ)
+#define GET_CB_TTBCR_PD0(b, c)       GET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0)
+#define GET_CB_TTBCR_PD1(b, c)       GET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1)
+#define GET_CB_TTBCR_NSCFG0(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0)
+#define GET_CB_TTBCR_NSCFG1(b, c)    \
+			GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1)
+#define GET_CB_TTBCR_EAE(b, c)       GET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE)
+
+/* Translation Table Base Register 0: CB_TTBR0 */
+#define SET_CB_TTBR0_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1, v)
+#define SET_CB_TTBR0_S(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_TTBR0, S, v)
+#define SET_CB_TTBR0_RGN(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN, v)
+#define SET_CB_TTBR0_NOS(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS, v)
+#define SET_CB_TTBR0_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0, v)
+#define SET_CB_TTBR0_ADDR(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_IRGN1(b, c)    GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1)
+#define GET_CB_TTBR0_S(b, c)        GET_CONTEXT_FIELD(b, c, CB_TTBR0, S)
+#define GET_CB_TTBR0_RGN(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN)
+#define GET_CB_TTBR0_NOS(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS)
+#define GET_CB_TTBR0_IRGN0(b, c)    GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0)
+#define GET_CB_TTBR0_ADDR(b, c)     GET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR)
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define SET_CB_TTBR1_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1, v)
+#define SET_CB_TTBR1_S(b, c, v)     SET_CONTEXT_FIELD(b, c, CB_TTBR1, S, v)
+#define SET_CB_TTBR1_RGN(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN, v)
+#define SET_CB_TTBR1_NOS(b, c, v)   SET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS, v)
+#define SET_CB_TTBR1_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0, v)
+#define SET_CB_TTBR1_ADDR(b, c, v)  SET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR, v)
+
+#define GET_CB_TTBR1_IRGN1(b, c)    GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1)
+#define GET_CB_TTBR1_S(b, c)        GET_CONTEXT_FIELD(b, c, CB_TTBR1, S)
+#define GET_CB_TTBR1_RGN(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN)
+#define GET_CB_TTBR1_NOS(b, c)      GET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS)
+#define GET_CB_TTBR1_IRGN0(b, c)    GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0)
+#define GET_CB_TTBR1_ADDR(b, c)     GET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR)
+
+/* Global Register Space 0 */
+#define CR0		(0x0000)
+#define SCR1		(0x0004)
+#define CR2		(0x0008)
+#define ACR		(0x0010)
+#define IDR0		(0x0020)
+#define IDR1		(0x0024)
+#define IDR2		(0x0028)
+#define IDR7		(0x003C)
+#define GFAR		(0x0040)
+#define GFSR		(0x0044)
+#define GFSRRESTORE	(0x004C)
+#define GFSYNR0		(0x0050)
+#define GFSYNR1		(0x0054)
+#define GFSYNR2		(0x0058)
+#define TLBIVMID	(0x0064)
+#define TLBIALLNSNH	(0x0068)
+#define TLBIALLH	(0x006C)
+#define TLBGSYNC	(0x0070)
+#define TLBGSTATUS	(0x0074)
+#define TLBIVAH		(0x0078)
+#define GATS1UR		(0x0100)
+#define GATS1UW		(0x0108)
+#define GATS1PR		(0x0110)
+#define GATS1PW		(0x0118)
+#define GATS12UR	(0x0120)
+#define GATS12UW	(0x0128)
+#define GATS12PR	(0x0130)
+#define GATS12PW	(0x0138)
+#define GPAR		(0x0180)
+#define GATSR		(0x0188)
+#define NSCR0		(0x0400)
+#define NSCR2		(0x0408)
+#define NSACR		(0x0410)
+#define SMR		(0x0800)
+#define S2CR		(0x0C00)
+
+/* Global Register Space 1 */
+#define CBAR		(0x1000)
+#define CBFRSYNRA	(0x1400)
+
+/* Implementation defined Register Space */
+#define MICRO_MMU_CTRL	(0x2000)
+#define PREDICTIONDIS0	(0x204C)
+#define PREDICTIONDIS1	(0x2050)
+#define S1L1BFBLP0	(0x215C)
+
+/* Performance Monitoring Register Space */
+#define PMEVCNTR_N	(0x3000)
+#define PMEVTYPER_N	(0x3400)
+#define PMCGCR_N	(0x3800)
+#define PMCGSMR_N	(0x3A00)
+#define PMCNTENSET_N	(0x3C00)
+#define PMCNTENCLR_N	(0x3C20)
+#define PMINTENSET_N	(0x3C40)
+#define PMINTENCLR_N	(0x3C60)
+#define PMOVSCLR_N	(0x3C80)
+#define PMOVSSET_N	(0x3CC0)
+#define PMCFGR		(0x3E00)
+#define PMCR		(0x3E04)
+#define PMCEID0		(0x3E20)
+#define PMCEID1		(0x3E24)
+#define PMAUTHSTATUS	(0x3FB8)
+#define PMDEVTYPE	(0x3FCC)
+
+/* Secure Status Determination Address Space */
+#define SSDR_N		(0x4000)
+
+/* Stage 1 Context Bank Format */
+#define CB_SCTLR	(0x000)
+#define CB_ACTLR	(0x004)
+#define CB_RESUME	(0x008)
+#define CB_TTBR0	(0x020)
+#define CB_TTBR1	(0x028)
+#define CB_TTBCR	(0x030)
+#define CB_CONTEXTIDR	(0x034)
+#define CB_PRRR		(0x038)
+#define CB_NMRR		(0x03C)
+#define CB_PAR		(0x050)
+#define CB_FSR		(0x058)
+#define CB_FSRRESTORE	(0x05C)
+#define CB_FAR		(0x060)
+#define CB_FSYNR0	(0x068)
+#define CB_FSYNR1	(0x06C)
+#define CB_TLBIVA	(0x600)
+#define CB_TLBIVAA	(0x608)
+#define CB_TLBIASID	(0x610)
+#define CB_TLBIALL	(0x618)
+#define CB_TLBIVAL	(0x620)
+#define CB_TLBIVAAL	(0x628)
+#define CB_TLBSYNC	(0x7F0)
+#define CB_TLBSTATUS	(0x7F4)
+#define CB_ATS1PR	(0x800)
+#define CB_ATS1PW	(0x808)
+#define CB_ATS1UR	(0x810)
+#define CB_ATS1UW	(0x818)
+#define CB_ATSR		(0x8F0)
+#define CB_PMXEVCNTR_N	(0xE00)
+#define CB_PMXEVTYPER_N	(0xE80)
+#define CB_PMCFGR	(0xF00)
+#define CB_PMCR		(0xF04)
+#define CB_PMCEID0	(0xF20)
+#define CB_PMCEID1	(0xF24)
+#define CB_PMCNTENSET	(0xF40)
+#define CB_PMCNTENCLR	(0xF44)
+#define CB_PMINTENSET	(0xF48)
+#define CB_PMINTENCLR	(0xF4C)
+#define CB_PMOVSCLR	(0xF50)
+#define CB_PMOVSSET	(0xF58)
+#define CB_PMAUTHSTATUS	(0xFB8)
+
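+/*
+ * Illustrative note (assumed layout, not a definition from this header):
+ * each context bank occupies a fixed-size window within the SMMU register
+ * space, so a context-bank register is typically addressed as
+ *
+ *	addr = cb_base + (ctx_num * cb_size) + CB_FSR;
+ *
+ * where cb_size is implementation defined (discoverable through
+ * IDR1_PAGESIZE and IDR1_NUMPAGENDXB) and the offsets above are relative
+ * to the start of the context bank.
+ */
+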
+/* Global Register Fields */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG         (CR0_NSCFG_MASK         << CR0_NSCFG_SHIFT)
+#define CR0_WACFG         (CR0_WACFG_MASK         << CR0_WACFG_SHIFT)
+#define CR0_RACFG         (CR0_RACFG_MASK         << CR0_RACFG_SHIFT)
+#define CR0_SHCFG         (CR0_SHCFG_MASK         << CR0_SHCFG_SHIFT)
+#define CR0_SMCFCFG       (CR0_SMCFCFG_MASK       << CR0_SMCFCFG_SHIFT)
+#define CR0_MTCFG         (CR0_MTCFG_MASK         << CR0_MTCFG_SHIFT)
+#define CR0_MEMATTR       (CR0_MEMATTR_MASK       << CR0_MEMATTR_SHIFT)
+#define CR0_BSU           (CR0_BSU_MASK           << CR0_BSU_SHIFT)
+#define CR0_FB            (CR0_FB_MASK            << CR0_FB_SHIFT)
+#define CR0_PTM           (CR0_PTM_MASK           << CR0_PTM_SHIFT)
+#define CR0_VMIDPNE       (CR0_VMIDPNE_MASK       << CR0_VMIDPNE_SHIFT)
+#define CR0_USFCFG        (CR0_USFCFG_MASK        << CR0_USFCFG_SHIFT)
+#define CR0_GSE           (CR0_GSE_MASK           << CR0_GSE_SHIFT)
+#define CR0_STALLD        (CR0_STALLD_MASK        << CR0_STALLD_SHIFT)
+#define CR0_TRANSIENTCFG  (CR0_TRANSIENTCFG_MASK  << CR0_TRANSIENTCFG_SHIFT)
+#define CR0_GCFGFIE       (CR0_GCFGFIE_MASK       << CR0_GCFGFIE_SHIFT)
+#define CR0_GCFGFRE       (CR0_GCFGFRE_MASK       << CR0_GCFGFRE_SHIFT)
+#define CR0_GFIE          (CR0_GFIE_MASK          << CR0_GFIE_SHIFT)
+#define CR0_GFRE          (CR0_GFRE_MASK          << CR0_GFRE_SHIFT)
+#define CR0_CLIENTPD      (CR0_CLIENTPD_MASK      << CR0_CLIENTPD_SHIFT)
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID        (CR2_BPVMID_MASK << CR2_BPVMID_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR  (GATS1PR_ADDR_MASK  << GATS1PR_ADDR_SHIFT)
+#define GATS1PR_NDX   (GATS1PR_NDX_MASK   << GATS1PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR  (GATS1PW_ADDR_MASK  << GATS1PW_ADDR_SHIFT)
+#define GATS1PW_NDX   (GATS1PW_NDX_MASK   << GATS1PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR  (GATS1UR_ADDR_MASK  << GATS1UR_ADDR_SHIFT)
+#define GATS1UR_NDX   (GATS1UR_NDX_MASK   << GATS1UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR  (GATS1UW_ADDR_MASK  << GATS1UW_ADDR_SHIFT)
+#define GATS1UW_NDX   (GATS1UW_NDX_MASK   << GATS1UW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR (GATS12PR_ADDR_MASK << GATS12PR_ADDR_SHIFT)
+#define GATS12PR_NDX  (GATS12PR_NDX_MASK  << GATS12PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR (GATS12PW_ADDR_MASK << GATS12PW_ADDR_SHIFT)
+#define GATS12PW_NDX  (GATS12PW_NDX_MASK  << GATS12PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR (GATS12UR_ADDR_MASK << GATS12UR_ADDR_SHIFT)
+#define GATS12UR_NDX  (GATS12UR_NDX_MASK  << GATS12UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR (GATS12UW_ADDR_MASK << GATS12UW_ADDR_SHIFT)
+#define GATS12UW_NDX  (GATS12UW_NDX_MASK  << GATS12UW_NDX_SHIFT)
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE  (GATSR_ACTIVE_MASK  << GATSR_ACTIVE_SHIFT)
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR    (GFAR_FADDR_MASK << GFAR_FADDR_SHIFT)
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF      (GFSR_ICF_MASK   << GFSR_ICF_SHIFT)
+#define GFSR_USF      (GFSR_USF_MASK   << GFSR_USF_SHIFT)
+#define GFSR_SMCF     (GFSR_SMCF_MASK  << GFSR_SMCF_SHIFT)
+#define GFSR_UCBF     (GFSR_UCBF_MASK  << GFSR_UCBF_SHIFT)
+#define GFSR_UCIF     (GFSR_UCIF_MASK  << GFSR_UCIF_SHIFT)
+#define GFSR_CAF      (GFSR_CAF_MASK   << GFSR_CAF_SHIFT)
+#define GFSR_EF       (GFSR_EF_MASK    << GFSR_EF_SHIFT)
+#define GFSR_PF       (GFSR_PF_MASK    << GFSR_PF_SHIFT)
+#define GFSR_MULTI    (GFSR_MULTI_MASK << GFSR_MULTI_SHIFT)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED  (GFSYNR0_NESTED_MASK  << GFSYNR0_NESTED_SHIFT)
+#define GFSYNR0_WNR     (GFSYNR0_WNR_MASK     << GFSYNR0_WNR_SHIFT)
+#define GFSYNR0_PNU     (GFSYNR0_PNU_MASK     << GFSYNR0_PNU_SHIFT)
+#define GFSYNR0_IND     (GFSYNR0_IND_MASK     << GFSYNR0_IND_SHIFT)
+#define GFSYNR0_NSSTATE (GFSYNR0_NSSTATE_MASK << GFSYNR0_NSSTATE_SHIFT)
+#define GFSYNR0_NSATTR  (GFSYNR0_NSATTR_MASK  << GFSYNR0_NSATTR_SHIFT)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID     (GFSYNR1_SID_MASK     << GFSYNR1_SID_SHIFT)
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F          (GPAR_F_MASK      << GPAR_F_SHIFT)
+#define GPAR_SS         (GPAR_SS_MASK     << GPAR_SS_SHIFT)
+#define GPAR_OUTER      (GPAR_OUTER_MASK  << GPAR_OUTER_SHIFT)
+#define GPAR_INNER      (GPAR_INNER_MASK  << GPAR_INNER_SHIFT)
+#define GPAR_SH         (GPAR_SH_MASK     << GPAR_SH_SHIFT)
+#define GPAR_NS         (GPAR_NS_MASK     << GPAR_NS_SHIFT)
+#define GPAR_NOS        (GPAR_NOS_MASK    << GPAR_NOS_SHIFT)
+#define GPAR_PA         (GPAR_PA_MASK     << GPAR_PA_SHIFT)
+#define GPAR_TF         (GPAR_TF_MASK     << GPAR_TF_SHIFT)
+#define GPAR_AFF        (GPAR_AFF_MASK    << GPAR_AFF_SHIFT)
+#define GPAR_PF         (GPAR_PF_MASK     << GPAR_PF_SHIFT)
+#define GPAR_EF         (GPAR_EF_MASK     << GPAR_EF_SHIFT)
+#define GPAR_TLBMCF     (GPAR_TLBMCF_MASK << GPAR_TLBMCF_SHIFT)
+#define GPAR_TLBLKF     (GPAR_TLBLKF_MASK << GPAR_TLBLKF_SHIFT)
+#define GPAR_UCBF       (GPAR_UCBF_MASK   << GPAR_UCBF_SHIFT)
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG    (IDR0_NUMSMRG_MASK  << IDR0_NUMSMRG_SHIFT)
+#define IDR0_NUMSIDB    (IDR0_NUMSIDB_MASK  << IDR0_NUMSIDB_SHIFT)
+#define IDR0_BTM        (IDR0_BTM_MASK      << IDR0_BTM_SHIFT)
+#define IDR0_CTTW       (IDR0_CTTW_MASK     << IDR0_CTTW_SHIFT)
+#define IDR0_NUMIRPT    (IDR0_NUMIRPT_MASK  << IDR0_NUMIRPT_SHIFT)
+#define IDR0_PTFS       (IDR0_PTFS_MASK     << IDR0_PTFS_SHIFT)
+#define IDR0_SMS        (IDR0_SMS_MASK      << IDR0_SMS_SHIFT)
+#define IDR0_NTS        (IDR0_NTS_MASK      << IDR0_NTS_SHIFT)
+#define IDR0_S2TS       (IDR0_S2TS_MASK     << IDR0_S2TS_SHIFT)
+#define IDR0_S1TS       (IDR0_S1TS_MASK     << IDR0_S1TS_SHIFT)
+#define IDR0_SES        (IDR0_SES_MASK      << IDR0_SES_SHIFT)
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB       (IDR1_NUMCB_MASK       << IDR1_NUMCB_SHIFT)
+#define IDR1_NUMSSDNDXB  (IDR1_NUMSSDNDXB_MASK  << IDR1_NUMSSDNDXB_SHIFT)
+#define IDR1_SSDTP       (IDR1_SSDTP_MASK       << IDR1_SSDTP_SHIFT)
+#define IDR1_SMCD        (IDR1_SMCD_MASK        << IDR1_SMCD_SHIFT)
+#define IDR1_NUMS2CB     (IDR1_NUMS2CB_MASK     << IDR1_NUMS2CB_SHIFT)
+#define IDR1_NUMPAGENDXB (IDR1_NUMPAGENDXB_MASK << IDR1_NUMPAGENDXB_SHIFT)
+#define IDR1_PAGESIZE    (IDR1_PAGESIZE_MASK    << IDR1_PAGESIZE_SHIFT)
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS         (IDR2_IAS_MASK << IDR2_IAS_SHIFT)
+#define IDR2_OAS         (IDR2_OAS_MASK << IDR2_OAS_SHIFT)
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR       (IDR7_MINOR_MASK << IDR7_MINOR_SHIFT)
+#define IDR7_MAJOR       (IDR7_MAJOR_MASK << IDR7_MAJOR_SHIFT)
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX        (S2CR_CBNDX_MASK         << S2CR_CBNDX_SHIFT)
+#define S2CR_SHCFG        (S2CR_SHCFG_MASK         << S2CR_SHCFG_SHIFT)
+#define S2CR_MTCFG        (S2CR_MTCFG_MASK         << S2CR_MTCFG_SHIFT)
+#define S2CR_MEMATTR      (S2CR_MEMATTR_MASK       << S2CR_MEMATTR_SHIFT)
+#define S2CR_TYPE         (S2CR_TYPE_MASK          << S2CR_TYPE_SHIFT)
+#define S2CR_NSCFG        (S2CR_NSCFG_MASK         << S2CR_NSCFG_SHIFT)
+#define S2CR_RACFG        (S2CR_RACFG_MASK         << S2CR_RACFG_SHIFT)
+#define S2CR_WACFG        (S2CR_WACFG_MASK         << S2CR_WACFG_SHIFT)
+#define S2CR_PRIVCFG      (S2CR_PRIVCFG_MASK       << S2CR_PRIVCFG_SHIFT)
+#define S2CR_INSTCFG      (S2CR_INSTCFG_MASK       << S2CR_INSTCFG_SHIFT)
+#define S2CR_TRANSIENTCFG (S2CR_TRANSIENTCFG_MASK  << S2CR_TRANSIENTCFG_SHIFT)
+#define S2CR_VMID         (S2CR_VMID_MASK          << S2CR_VMID_SHIFT)
+#define S2CR_BSU          (S2CR_BSU_MASK           << S2CR_BSU_SHIFT)
+#define S2CR_FB           (S2CR_FB_MASK            << S2CR_FB_SHIFT)
+
+/* Stream Match Register: SMR */
+#define SMR_ID            (SMR_ID_MASK    << SMR_ID_SHIFT)
+#define SMR_MASK          (SMR_MASK_MASK  << SMR_MASK_SHIFT)
+#define SMR_VALID         (SMR_VALID_MASK << SMR_VALID_SHIFT)
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE (TLBGSTATUS_GSACTIVE_MASK << \
+					TLBGSTATUS_GSACTIVE_SHIFT)
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR  (TLBIVAH_ADDR_MASK << TLBIVAH_ADDR_SHIFT)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID (TLBIVMID_VMID_MASK << TLBIVMID_VMID_SHIFT)
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID       (CBAR_VMID_MASK    << CBAR_VMID_SHIFT)
+#define CBAR_CBNDX      (CBAR_CBNDX_MASK   << CBAR_CBNDX_SHIFT)
+#define CBAR_BPSHCFG    (CBAR_BPSHCFG_MASK << CBAR_BPSHCFG_SHIFT)
+#define CBAR_HYPC       (CBAR_HYPC_MASK    << CBAR_HYPC_SHIFT)
+#define CBAR_FB         (CBAR_FB_MASK      << CBAR_FB_SHIFT)
+#define CBAR_MEMATTR    (CBAR_MEMATTR_MASK << CBAR_MEMATTR_SHIFT)
+#define CBAR_TYPE       (CBAR_TYPE_MASK    << CBAR_TYPE_SHIFT)
+#define CBAR_BSU        (CBAR_BSU_MASK     << CBAR_BSU_SHIFT)
+#define CBAR_RACFG      (CBAR_RACFG_MASK   << CBAR_RACFG_SHIFT)
+#define CBAR_WACFG      (CBAR_WACFG_MASK   << CBAR_WACFG_SHIFT)
+#define CBAR_IRPTNDX    (CBAR_IRPTNDX_MASK << CBAR_IRPTNDX_SHIFT)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID   (CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT)
+
+/* Performance Monitoring Register Fields */
+
+/* Stage 1 Context Bank Format Fields */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY \
+		(CB_ACTLR_REQPRIORITY_MASK << CB_ACTLR_REQPRIORITY_SHIFT)
+#define CB_ACTLR_REQPRIORITYCFG \
+		(CB_ACTLR_REQPRIORITYCFG_MASK << CB_ACTLR_REQPRIORITYCFG_SHIFT)
+#define CB_ACTLR_PRIVCFG (CB_ACTLR_PRIVCFG_MASK << CB_ACTLR_PRIVCFG_SHIFT)
+#define CB_ACTLR_BPRCOSH (CB_ACTLR_BPRCOSH_MASK << CB_ACTLR_BPRCOSH_SHIFT)
+#define CB_ACTLR_BPRCISH (CB_ACTLR_BPRCISH_MASK << CB_ACTLR_BPRCISH_SHIFT)
+#define CB_ACTLR_BPRCNSH (CB_ACTLR_BPRCNSH_MASK << CB_ACTLR_BPRCNSH_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR  (CB_ATS1PR_ADDR_MASK << CB_ATS1PR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR  (CB_ATS1PW_ADDR_MASK << CB_ATS1PW_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR  (CB_ATS1UR_ADDR_MASK << CB_ATS1UR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR  (CB_ATS1UW_ADDR_MASK << CB_ATS1UW_ADDR_SHIFT)
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE  (CB_ATSR_ACTIVE_MASK << CB_ATSR_ACTIVE_SHIFT)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID    (CB_CONTEXTIDR_ASID_MASK << \
+				CB_CONTEXTIDR_ASID_SHIFT)
+#define CB_CONTEXTIDR_PROCID  (CB_CONTEXTIDR_PROCID_MASK << \
+				CB_CONTEXTIDR_PROCID_SHIFT)
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR  (CB_FAR_FADDR_MASK << CB_FAR_FADDR_SHIFT)
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF     (CB_FSR_TF_MASK     << CB_FSR_TF_SHIFT)
+#define CB_FSR_AFF    (CB_FSR_AFF_MASK    << CB_FSR_AFF_SHIFT)
+#define CB_FSR_PF     (CB_FSR_PF_MASK     << CB_FSR_PF_SHIFT)
+#define CB_FSR_EF     (CB_FSR_EF_MASK     << CB_FSR_EF_SHIFT)
+#define CB_FSR_TLBMCF (CB_FSR_TLBMCF_MASK << CB_FSR_TLBMCF_SHIFT)
+#define CB_FSR_TLBLKF (CB_FSR_TLBLKF_MASK << CB_FSR_TLBLKF_SHIFT)
+#define CB_FSR_SS     (CB_FSR_SS_MASK     << CB_FSR_SS_SHIFT)
+#define CB_FSR_MULTI  (CB_FSR_MULTI_MASK  << CB_FSR_MULTI_SHIFT)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL     (CB_FSYNR0_PLVL_MASK    << CB_FSYNR0_PLVL_SHIFT)
+#define CB_FSYNR0_S1PTWF   (CB_FSYNR0_S1PTWF_MASK  << CB_FSYNR0_S1PTWF_SHIFT)
+#define CB_FSYNR0_WNR      (CB_FSYNR0_WNR_MASK     << CB_FSYNR0_WNR_SHIFT)
+#define CB_FSYNR0_PNU      (CB_FSYNR0_PNU_MASK     << CB_FSYNR0_PNU_SHIFT)
+#define CB_FSYNR0_IND      (CB_FSYNR0_IND_MASK     << CB_FSYNR0_IND_SHIFT)
+#define CB_FSYNR0_NSSTATE  (CB_FSYNR0_NSSTATE_MASK << CB_FSYNR0_NSSTATE_SHIFT)
+#define CB_FSYNR0_NSATTR   (CB_FSYNR0_NSATTR_MASK  << CB_FSYNR0_NSATTR_SHIFT)
+#define CB_FSYNR0_ATOF     (CB_FSYNR0_ATOF_MASK    << CB_FSYNR0_ATOF_SHIFT)
+#define CB_FSYNR0_PTWF     (CB_FSYNR0_PTWF_MASK    << CB_FSYNR0_PTWF_SHIFT)
+#define CB_FSYNR0_AFR      (CB_FSYNR0_AFR_MASK     << CB_FSYNR0_AFR_SHIFT)
+#define CB_FSYNR0_S1CBNDX  (CB_FSYNR0_S1CBNDX_MASK << CB_FSYNR0_S1CBNDX_SHIFT)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0        (CB_NMRR_IR0_MASK   << CB_NMRR_IR0_SHIFT)
+#define CB_NMRR_IR1        (CB_NMRR_IR1_MASK   << CB_NMRR_IR1_SHIFT)
+#define CB_NMRR_IR2        (CB_NMRR_IR2_MASK   << CB_NMRR_IR2_SHIFT)
+#define CB_NMRR_IR3        (CB_NMRR_IR3_MASK   << CB_NMRR_IR3_SHIFT)
+#define CB_NMRR_IR4        (CB_NMRR_IR4_MASK   << CB_NMRR_IR4_SHIFT)
+#define CB_NMRR_IR5        (CB_NMRR_IR5_MASK   << CB_NMRR_IR5_SHIFT)
+#define CB_NMRR_IR6        (CB_NMRR_IR6_MASK   << CB_NMRR_IR6_SHIFT)
+#define CB_NMRR_IR7        (CB_NMRR_IR7_MASK   << CB_NMRR_IR7_SHIFT)
+#define CB_NMRR_OR0        (CB_NMRR_OR0_MASK   << CB_NMRR_OR0_SHIFT)
+#define CB_NMRR_OR1        (CB_NMRR_OR1_MASK   << CB_NMRR_OR1_SHIFT)
+#define CB_NMRR_OR2        (CB_NMRR_OR2_MASK   << CB_NMRR_OR2_SHIFT)
+#define CB_NMRR_OR3        (CB_NMRR_OR3_MASK   << CB_NMRR_OR3_SHIFT)
+#define CB_NMRR_OR4        (CB_NMRR_OR4_MASK   << CB_NMRR_OR4_SHIFT)
+#define CB_NMRR_OR5        (CB_NMRR_OR5_MASK   << CB_NMRR_OR5_SHIFT)
+#define CB_NMRR_OR6        (CB_NMRR_OR6_MASK   << CB_NMRR_OR6_SHIFT)
+#define CB_NMRR_OR7        (CB_NMRR_OR7_MASK   << CB_NMRR_OR7_SHIFT)
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F           (CB_PAR_F_MASK      << CB_PAR_F_SHIFT)
+#define CB_PAR_SS          (CB_PAR_SS_MASK     << CB_PAR_SS_SHIFT)
+#define CB_PAR_OUTER       (CB_PAR_OUTER_MASK  << CB_PAR_OUTER_SHIFT)
+#define CB_PAR_INNER       (CB_PAR_INNER_MASK  << CB_PAR_INNER_SHIFT)
+#define CB_PAR_SH          (CB_PAR_SH_MASK     << CB_PAR_SH_SHIFT)
+#define CB_PAR_NS          (CB_PAR_NS_MASK     << CB_PAR_NS_SHIFT)
+#define CB_PAR_NOS         (CB_PAR_NOS_MASK    << CB_PAR_NOS_SHIFT)
+#define CB_PAR_PA          (CB_PAR_PA_MASK     << CB_PAR_PA_SHIFT)
+#define CB_PAR_TF          (CB_PAR_TF_MASK     << CB_PAR_TF_SHIFT)
+#define CB_PAR_AFF         (CB_PAR_AFF_MASK    << CB_PAR_AFF_SHIFT)
+#define CB_PAR_PF          (CB_PAR_PF_MASK     << CB_PAR_PF_SHIFT)
+#define CB_PAR_TLBMCF      (CB_PAR_TLBMCF_MASK << CB_PAR_TLBMCF_SHIFT)
+#define CB_PAR_TLBLKF      (CB_PAR_TLBLKF_MASK << CB_PAR_TLBLKF_SHIFT)
+#define CB_PAR_ATOT        (CB_PAR_ATOT_MASK   << CB_PAR_ATOT_SHIFT)
+#define CB_PAR_PLVL        (CB_PAR_PLVL_MASK   << CB_PAR_PLVL_SHIFT)
+#define CB_PAR_STAGE       (CB_PAR_STAGE_MASK  << CB_PAR_STAGE_SHIFT)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0        (CB_PRRR_TR0_MASK   << CB_PRRR_TR0_SHIFT)
+#define CB_PRRR_TR1        (CB_PRRR_TR1_MASK   << CB_PRRR_TR1_SHIFT)
+#define CB_PRRR_TR2        (CB_PRRR_TR2_MASK   << CB_PRRR_TR2_SHIFT)
+#define CB_PRRR_TR3        (CB_PRRR_TR3_MASK   << CB_PRRR_TR3_SHIFT)
+#define CB_PRRR_TR4        (CB_PRRR_TR4_MASK   << CB_PRRR_TR4_SHIFT)
+#define CB_PRRR_TR5        (CB_PRRR_TR5_MASK   << CB_PRRR_TR5_SHIFT)
+#define CB_PRRR_TR6        (CB_PRRR_TR6_MASK   << CB_PRRR_TR6_SHIFT)
+#define CB_PRRR_TR7        (CB_PRRR_TR7_MASK   << CB_PRRR_TR7_SHIFT)
+#define CB_PRRR_DS0        (CB_PRRR_DS0_MASK   << CB_PRRR_DS0_SHIFT)
+#define CB_PRRR_DS1        (CB_PRRR_DS1_MASK   << CB_PRRR_DS1_SHIFT)
+#define CB_PRRR_NS0        (CB_PRRR_NS0_MASK   << CB_PRRR_NS0_SHIFT)
+#define CB_PRRR_NS1        (CB_PRRR_NS1_MASK   << CB_PRRR_NS1_SHIFT)
+#define CB_PRRR_NOS0       (CB_PRRR_NOS0_MASK  << CB_PRRR_NOS0_SHIFT)
+#define CB_PRRR_NOS1       (CB_PRRR_NOS1_MASK  << CB_PRRR_NOS1_SHIFT)
+#define CB_PRRR_NOS2       (CB_PRRR_NOS2_MASK  << CB_PRRR_NOS2_SHIFT)
+#define CB_PRRR_NOS3       (CB_PRRR_NOS3_MASK  << CB_PRRR_NOS3_SHIFT)
+#define CB_PRRR_NOS4       (CB_PRRR_NOS4_MASK  << CB_PRRR_NOS4_SHIFT)
+#define CB_PRRR_NOS5       (CB_PRRR_NOS5_MASK  << CB_PRRR_NOS5_SHIFT)
+#define CB_PRRR_NOS6       (CB_PRRR_NOS6_MASK  << CB_PRRR_NOS6_SHIFT)
+#define CB_PRRR_NOS7       (CB_PRRR_NOS7_MASK  << CB_PRRR_NOS7_SHIFT)
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR      (CB_RESUME_TNR_MASK << CB_RESUME_TNR_SHIFT)
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M           (CB_SCTLR_M_MASK       << CB_SCTLR_M_SHIFT)
+#define CB_SCTLR_TRE         (CB_SCTLR_TRE_MASK     << CB_SCTLR_TRE_SHIFT)
+#define CB_SCTLR_AFE         (CB_SCTLR_AFE_MASK     << CB_SCTLR_AFE_SHIFT)
+#define CB_SCTLR_AFFD        (CB_SCTLR_AFFD_MASK    << CB_SCTLR_AFFD_SHIFT)
+#define CB_SCTLR_E           (CB_SCTLR_E_MASK       << CB_SCTLR_E_SHIFT)
+#define CB_SCTLR_CFRE        (CB_SCTLR_CFRE_MASK    << CB_SCTLR_CFRE_SHIFT)
+#define CB_SCTLR_CFIE        (CB_SCTLR_CFIE_MASK    << CB_SCTLR_CFIE_SHIFT)
+#define CB_SCTLR_CFCFG       (CB_SCTLR_CFCFG_MASK   << CB_SCTLR_CFCFG_SHIFT)
+#define CB_SCTLR_HUPCF       (CB_SCTLR_HUPCF_MASK   << CB_SCTLR_HUPCF_SHIFT)
+#define CB_SCTLR_WXN         (CB_SCTLR_WXN_MASK     << CB_SCTLR_WXN_SHIFT)
+#define CB_SCTLR_UWXN        (CB_SCTLR_UWXN_MASK    << CB_SCTLR_UWXN_SHIFT)
+#define CB_SCTLR_ASIDPNE     (CB_SCTLR_ASIDPNE_MASK << CB_SCTLR_ASIDPNE_SHIFT)
+#define CB_SCTLR_TRANSIENTCFG (CB_SCTLR_TRANSIENTCFG_MASK << \
+						CB_SCTLR_TRANSIENTCFG_SHIFT)
+#define CB_SCTLR_MEMATTR     (CB_SCTLR_MEMATTR_MASK << CB_SCTLR_MEMATTR_SHIFT)
+#define CB_SCTLR_MTCFG       (CB_SCTLR_MTCFG_MASK   << CB_SCTLR_MTCFG_SHIFT)
+#define CB_SCTLR_SHCFG       (CB_SCTLR_SHCFG_MASK   << CB_SCTLR_SHCFG_SHIFT)
+#define CB_SCTLR_RACFG       (CB_SCTLR_RACFG_MASK   << CB_SCTLR_RACFG_SHIFT)
+#define CB_SCTLR_WACFG       (CB_SCTLR_WACFG_MASK   << CB_SCTLR_WACFG_SHIFT)
+#define CB_SCTLR_NSCFG       (CB_SCTLR_NSCFG_MASK   << CB_SCTLR_NSCFG_SHIFT)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID     (CB_TLBIASID_ASID_MASK << CB_TLBIASID_ASID_SHIFT)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID       (CB_TLBIVA_ASID_MASK   << CB_TLBIVA_ASID_SHIFT)
+#define CB_TLBIVA_VA         (CB_TLBIVA_VA_MASK     << CB_TLBIVA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA        (CB_TLBIVAA_VA_MASK    << CB_TLBIVAA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA       (CB_TLBIVAAL_VA_MASK   << CB_TLBIVAAL_VA_SHIFT)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID      (CB_TLBIVAL_ASID_MASK  << CB_TLBIVAL_ASID_SHIFT)
+#define CB_TLBIVAL_VA        (CB_TLBIVAL_VA_MASK    << CB_TLBIVAL_VA_SHIFT)
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE (CB_TLBSTATUS_SACTIVE_MASK << \
+						CB_TLBSTATUS_SACTIVE_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ        (CB_TTBCR_T0SZ_MASK    << CB_TTBCR_T0SZ_SHIFT)
+#define CB_TTBCR_PD0         (CB_TTBCR_PD0_MASK     << CB_TTBCR_PD0_SHIFT)
+#define CB_TTBCR_PD1         (CB_TTBCR_PD1_MASK     << CB_TTBCR_PD1_SHIFT)
+#define CB_TTBCR_NSCFG0      (CB_TTBCR_NSCFG0_MASK  << CB_TTBCR_NSCFG0_SHIFT)
+#define CB_TTBCR_NSCFG1      (CB_TTBCR_NSCFG1_MASK  << CB_TTBCR_NSCFG1_SHIFT)
+#define CB_TTBCR_EAE         (CB_TTBCR_EAE_MASK     << CB_TTBCR_EAE_SHIFT)
+
+/* Translation Table Base Register 0: CB_TTBR0 */
+#define CB_TTBR0_IRGN1       (CB_TTBR0_IRGN1_MASK   << CB_TTBR0_IRGN1_SHIFT)
+#define CB_TTBR0_S           (CB_TTBR0_S_MASK       << CB_TTBR0_S_SHIFT)
+#define CB_TTBR0_RGN         (CB_TTBR0_RGN_MASK     << CB_TTBR0_RGN_SHIFT)
+#define CB_TTBR0_NOS         (CB_TTBR0_NOS_MASK     << CB_TTBR0_NOS_SHIFT)
+#define CB_TTBR0_IRGN0       (CB_TTBR0_IRGN0_MASK   << CB_TTBR0_IRGN0_SHIFT)
+#define CB_TTBR0_ADDR        (CB_TTBR0_ADDR_MASK    << CB_TTBR0_ADDR_SHIFT)
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define CB_TTBR1_IRGN1       (CB_TTBR1_IRGN1_MASK   << CB_TTBR1_IRGN1_SHIFT)
+#define CB_TTBR1_S           (CB_TTBR1_S_MASK       << CB_TTBR1_S_SHIFT)
+#define CB_TTBR1_RGN         (CB_TTBR1_RGN_MASK     << CB_TTBR1_RGN_SHIFT)
+#define CB_TTBR1_NOS         (CB_TTBR1_NOS_MASK     << CB_TTBR1_NOS_SHIFT)
+#define CB_TTBR1_IRGN0       (CB_TTBR1_IRGN0_MASK   << CB_TTBR1_IRGN0_SHIFT)
+#define CB_TTBR1_ADDR        (CB_TTBR1_ADDR_MASK    << CB_TTBR1_ADDR_SHIFT)
+
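+/*
+ * Usage sketch (hypothetical helper, not part of this interface): each
+ * field macro above expands to (MASK << SHIFT), so a field can be updated
+ * read-modify-write without disturbing its neighbours, e.g.:
+ *
+ *	static inline u32 set_field(u32 reg, u32 field, u32 shift, u32 v)
+ *	{
+ *		return (reg & ~field) | ((v << shift) & field);
+ *	}
+ *
+ *	ttbr = set_field(ttbr, CB_TTBR0_ADDR, CB_TTBR0_ADDR_SHIFT, pa >> 14);
+ */
+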
+/* Global Register Masks */
+/* Configuration Register 0 */
+#define CR0_NSCFG_MASK          0x03
+#define CR0_WACFG_MASK          0x03
+#define CR0_RACFG_MASK          0x03
+#define CR0_SHCFG_MASK          0x03
+#define CR0_SMCFCFG_MASK        0x01
+#define CR0_MTCFG_MASK          0x01
+#define CR0_MEMATTR_MASK        0x0F
+#define CR0_BSU_MASK            0x03
+#define CR0_FB_MASK             0x01
+#define CR0_PTM_MASK            0x01
+#define CR0_VMIDPNE_MASK        0x01
+#define CR0_USFCFG_MASK         0x01
+#define CR0_GSE_MASK            0x01
+#define CR0_STALLD_MASK         0x01
+#define CR0_TRANSIENTCFG_MASK   0x03
+#define CR0_GCFGFIE_MASK        0x01
+#define CR0_GCFGFRE_MASK        0x01
+#define CR0_GFIE_MASK           0x01
+#define CR0_GFRE_MASK           0x01
+#define CR0_CLIENTPD_MASK       0x01
+
+/* Configuration Register 2 */
+#define CR2_BPVMID_MASK         0xFF
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_MASK       0xFFFFF
+#define GATS1PR_NDX_MASK        0xFF
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_MASK       0xFFFFF
+#define GATS1PW_NDX_MASK        0xFF
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_MASK       0xFFFFF
+#define GATS1UR_NDX_MASK        0xFF
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_MASK       0xFFFFF
+#define GATS1UW_NDX_MASK        0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_MASK      0xFFFFF
+#define GATS12PR_NDX_MASK       0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_MASK      0xFFFFF
+#define GATS12PW_NDX_MASK       0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_MASK      0xFFFFF
+#define GATS12UR_NDX_MASK       0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_MASK      0xFFFFF
+#define GATS12UW_NDX_MASK       0xFF
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_MASK       0x01
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_MASK         0xFFFFFFFF
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_MASK           0x01
+#define GFSR_USF_MASK           0x01
+#define GFSR_SMCF_MASK          0x01
+#define GFSR_UCBF_MASK          0x01
+#define GFSR_UCIF_MASK          0x01
+#define GFSR_CAF_MASK           0x01
+#define GFSR_EF_MASK            0x01
+#define GFSR_PF_MASK            0x01
+#define GFSR_MULTI_MASK         0x01
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_MASK     0x01
+#define GFSYNR0_WNR_MASK        0x01
+#define GFSYNR0_PNU_MASK        0x01
+#define GFSYNR0_IND_MASK        0x01
+#define GFSYNR0_NSSTATE_MASK    0x01
+#define GFSYNR0_NSATTR_MASK     0x01
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_MASK        0x7FFF
+#define GFSYNR1_SSD_IDX_MASK    0x7FFF
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_MASK             0x01
+#define GPAR_SS_MASK            0x01
+#define GPAR_OUTER_MASK         0x03
+#define GPAR_INNER_MASK         0x03
+#define GPAR_SH_MASK            0x01
+#define GPAR_NS_MASK            0x01
+#define GPAR_NOS_MASK           0x01
+#define GPAR_PA_MASK            0xFFFFF
+#define GPAR_TF_MASK            0x01
+#define GPAR_AFF_MASK           0x01
+#define GPAR_PF_MASK            0x01
+#define GPAR_EF_MASK            0x01
+#define GPAR_TLBMCF_MASK        0x01
+#define GPAR_TLBLKF_MASK        0x01
+#define GPAR_UCBF_MASK          0x01
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_MASK       0xFF
+#define IDR0_NUMSIDB_MASK       0x0F
+#define IDR0_BTM_MASK           0x01
+#define IDR0_CTTW_MASK          0x01
+#define IDR0_NUMIRPT_MASK       0xFF
+#define IDR0_PTFS_MASK          0x01
+#define IDR0_SMS_MASK           0x01
+#define IDR0_NTS_MASK           0x01
+#define IDR0_S2TS_MASK          0x01
+#define IDR0_S1TS_MASK          0x01
+#define IDR0_SES_MASK           0x01
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_MASK         0xFF
+#define IDR1_NUMSSDNDXB_MASK    0x0F
+#define IDR1_SSDTP_MASK         0x01
+#define IDR1_SMCD_MASK          0x01
+#define IDR1_NUMS2CB_MASK       0xFF
+#define IDR1_NUMPAGENDXB_MASK   0x07
+#define IDR1_PAGESIZE_MASK      0x01
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_MASK           0x0F
+#define IDR2_OAS_MASK           0x0F
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_MASK         0x0F
+#define IDR7_MAJOR_MASK         0x0F
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_MASK         0xFF
+#define S2CR_SHCFG_MASK         0x03
+#define S2CR_MTCFG_MASK         0x01
+#define S2CR_MEMATTR_MASK       0x0F
+#define S2CR_TYPE_MASK          0x03
+#define S2CR_NSCFG_MASK         0x03
+#define S2CR_RACFG_MASK         0x03
+#define S2CR_WACFG_MASK         0x03
+#define S2CR_PRIVCFG_MASK       0x03
+#define S2CR_INSTCFG_MASK       0x03
+#define S2CR_TRANSIENTCFG_MASK  0x03
+#define S2CR_VMID_MASK          0xFF
+#define S2CR_BSU_MASK           0x03
+#define S2CR_FB_MASK            0x01
+
+/* Stream Match Register: SMR */
+#define SMR_ID_MASK             0x7FFF
+#define SMR_MASK_MASK           0x7FFF
+#define SMR_VALID_MASK          0x01
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_MASK 0x01
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_MASK       0xFFFFF
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_MASK      0xFF
+
+/* Global Register Space 1 Mask */
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_MASK          0xFF
+#define CBAR_CBNDX_MASK         0x03
+#define CBAR_BPSHCFG_MASK       0x03
+#define CBAR_HYPC_MASK          0x01
+#define CBAR_FB_MASK            0x01
+#define CBAR_MEMATTR_MASK       0x0F
+#define CBAR_TYPE_MASK          0x03
+#define CBAR_BSU_MASK           0x03
+#define CBAR_RACFG_MASK         0x03
+#define CBAR_WACFG_MASK         0x03
+#define CBAR_IRPTNDX_MASK       0xFF
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_MASK      0x7FFF
+
+/* Implementation defined register space masks */
+#define MICRO_MMU_CTRL_HALT_REQ_MASK          0x01
+#define MICRO_MMU_CTRL_IDLE_MASK              0x01
+
+/* Stage 1 Context Bank Format Masks */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_MASK    0x3
+#define CB_ACTLR_REQPRIORITYCFG_MASK 0x1
+#define CB_ACTLR_PRIVCFG_MASK        0x3
+#define CB_ACTLR_BPRCOSH_MASK        0x1
+#define CB_ACTLR_BPRCISH_MASK        0x1
+#define CB_ACTLR_BPRCNSH_MASK        0x1
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_MASK     0xFFFFF
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_MASK     0xFFFFF
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_MASK     0xFFFFF
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_MASK     0xFFFFF
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_MASK     0x01
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_MASK   0xFF
+#define CB_CONTEXTIDR_PROCID_MASK 0xFFFFFF
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_MASK       0xFFFFFFFF
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_MASK          0x01
+#define CB_FSR_AFF_MASK         0x01
+#define CB_FSR_PF_MASK          0x01
+#define CB_FSR_EF_MASK          0x01
+#define CB_FSR_TLBMCF_MASK      0x01
+#define CB_FSR_TLBLKF_MASK      0x01
+#define CB_FSR_SS_MASK          0x01
+#define CB_FSR_MULTI_MASK       0x01
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_MASK     0x03
+#define CB_FSYNR0_S1PTWF_MASK   0x01
+#define CB_FSYNR0_WNR_MASK      0x01
+#define CB_FSYNR0_PNU_MASK      0x01
+#define CB_FSYNR0_IND_MASK      0x01
+#define CB_FSYNR0_NSSTATE_MASK  0x01
+#define CB_FSYNR0_NSATTR_MASK   0x01
+#define CB_FSYNR0_ATOF_MASK     0x01
+#define CB_FSYNR0_PTWF_MASK     0x01
+#define CB_FSYNR0_AFR_MASK      0x01
+#define CB_FSYNR0_S1CBNDX_MASK  0xFF
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_MASK        0x03
+#define CB_NMRR_IR1_MASK        0x03
+#define CB_NMRR_IR2_MASK        0x03
+#define CB_NMRR_IR3_MASK        0x03
+#define CB_NMRR_IR4_MASK        0x03
+#define CB_NMRR_IR5_MASK        0x03
+#define CB_NMRR_IR6_MASK        0x03
+#define CB_NMRR_IR7_MASK        0x03
+#define CB_NMRR_OR0_MASK        0x03
+#define CB_NMRR_OR1_MASK        0x03
+#define CB_NMRR_OR2_MASK        0x03
+#define CB_NMRR_OR3_MASK        0x03
+#define CB_NMRR_OR4_MASK        0x03
+#define CB_NMRR_OR5_MASK        0x03
+#define CB_NMRR_OR6_MASK        0x03
+#define CB_NMRR_OR7_MASK        0x03
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_MASK           0x01
+#define CB_PAR_SS_MASK          0x01
+#define CB_PAR_OUTER_MASK       0x03
+#define CB_PAR_INNER_MASK       0x07
+#define CB_PAR_SH_MASK          0x01
+#define CB_PAR_NS_MASK          0x01
+#define CB_PAR_NOS_MASK         0x01
+#define CB_PAR_PA_MASK          0xFFFFF
+#define CB_PAR_TF_MASK          0x01
+#define CB_PAR_AFF_MASK         0x01
+#define CB_PAR_PF_MASK          0x01
+#define CB_PAR_TLBMCF_MASK      0x01
+#define CB_PAR_TLBLKF_MASK      0x01
+#define CB_PAR_ATOT_MASK        0x01
+#define CB_PAR_PLVL_MASK        0x03
+#define CB_PAR_STAGE_MASK       0x01
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_MASK        0x03
+#define CB_PRRR_TR1_MASK        0x03
+#define CB_PRRR_TR2_MASK        0x03
+#define CB_PRRR_TR3_MASK        0x03
+#define CB_PRRR_TR4_MASK        0x03
+#define CB_PRRR_TR5_MASK        0x03
+#define CB_PRRR_TR6_MASK        0x03
+#define CB_PRRR_TR7_MASK        0x03
+#define CB_PRRR_DS0_MASK        0x01
+#define CB_PRRR_DS1_MASK        0x01
+#define CB_PRRR_NS0_MASK        0x01
+#define CB_PRRR_NS1_MASK        0x01
+#define CB_PRRR_NOS0_MASK       0x01
+#define CB_PRRR_NOS1_MASK       0x01
+#define CB_PRRR_NOS2_MASK       0x01
+#define CB_PRRR_NOS3_MASK       0x01
+#define CB_PRRR_NOS4_MASK       0x01
+#define CB_PRRR_NOS5_MASK       0x01
+#define CB_PRRR_NOS6_MASK       0x01
+#define CB_PRRR_NOS7_MASK       0x01
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_MASK      0x01
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_MASK            0x01
+#define CB_SCTLR_TRE_MASK          0x01
+#define CB_SCTLR_AFE_MASK          0x01
+#define CB_SCTLR_AFFD_MASK         0x01
+#define CB_SCTLR_E_MASK            0x01
+#define CB_SCTLR_CFRE_MASK         0x01
+#define CB_SCTLR_CFIE_MASK         0x01
+#define CB_SCTLR_CFCFG_MASK        0x01
+#define CB_SCTLR_HUPCF_MASK        0x01
+#define CB_SCTLR_WXN_MASK          0x01
+#define CB_SCTLR_UWXN_MASK         0x01
+#define CB_SCTLR_ASIDPNE_MASK      0x01
+#define CB_SCTLR_TRANSIENTCFG_MASK 0x03
+#define CB_SCTLR_MEMATTR_MASK      0x0F
+#define CB_SCTLR_MTCFG_MASK        0x01
+#define CB_SCTLR_SHCFG_MASK        0x03
+#define CB_SCTLR_RACFG_MASK        0x03
+#define CB_SCTLR_WACFG_MASK        0x03
+#define CB_SCTLR_NSCFG_MASK        0x03
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_MASK      0xFF
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_MASK        0xFF
+#define CB_TLBIVA_VA_MASK          0xFFFFF
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_MASK         0xFFFFF
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_MASK        0xFFFFF
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_MASK       0xFF
+#define CB_TLBIVAL_VA_MASK         0xFFFFF
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_MASK  0x01
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_MASK         0x07
+#define CB_TTBCR_PD0_MASK          0x01
+#define CB_TTBCR_PD1_MASK          0x01
+#define CB_TTBCR_NSCFG0_MASK       0x01
+#define CB_TTBCR_NSCFG1_MASK       0x01
+#define CB_TTBCR_EAE_MASK          0x01
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#define CB_TTBR0_IRGN1_MASK        0x01
+#define CB_TTBR0_S_MASK            0x01
+#define CB_TTBR0_RGN_MASK          0x01
+#define CB_TTBR0_NOS_MASK          0x01
+#define CB_TTBR0_IRGN0_MASK        0x01
+#define CB_TTBR0_ADDR_MASK         0xFFFFFF
+
+#define CB_TTBR1_IRGN1_MASK        0x01
+#define CB_TTBR1_S_MASK            0x01
+#define CB_TTBR1_RGN_MASK          0x01
+#define CB_TTBR1_NOS_MASK          0x01
+#define CB_TTBR1_IRGN0_MASK        0x01
+#define CB_TTBR1_ADDR_MASK         0xFFFFFF
+
+/* Global Register Shifts */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG_SHIFT            28
+#define CR0_WACFG_SHIFT            26
+#define CR0_RACFG_SHIFT            24
+#define CR0_SHCFG_SHIFT            22
+#define CR0_SMCFCFG_SHIFT          21
+#define CR0_MTCFG_SHIFT            20
+#define CR0_MEMATTR_SHIFT          16
+#define CR0_BSU_SHIFT              14
+#define CR0_FB_SHIFT               13
+#define CR0_PTM_SHIFT              12
+#define CR0_VMIDPNE_SHIFT          11
+#define CR0_USFCFG_SHIFT           10
+#define CR0_GSE_SHIFT              9
+#define CR0_STALLD_SHIFT           8
+#define CR0_TRANSIENTCFG_SHIFT     6
+#define CR0_GCFGFIE_SHIFT          5
+#define CR0_GCFGFRE_SHIFT          4
+#define CR0_GFIE_SHIFT             2
+#define CR0_GFRE_SHIFT             1
+#define CR0_CLIENTPD_SHIFT         0
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID_SHIFT           0
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_SHIFT         12
+#define GATS1PR_NDX_SHIFT          0
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_SHIFT         12
+#define GATS1PW_NDX_SHIFT          0
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_SHIFT         12
+#define GATS1UR_NDX_SHIFT          0
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_SHIFT         12
+#define GATS1UW_NDX_SHIFT          0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_SHIFT        12
+#define GATS12PR_NDX_SHIFT         0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_SHIFT        12
+#define GATS12PW_NDX_SHIFT         0
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_SHIFT        12
+#define GATS12UR_NDX_SHIFT         0
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_SHIFT        12
+#define GATS12UW_NDX_SHIFT         0
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_SHIFT         0
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_SHIFT           0
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_SHIFT             0
+#define GFSR_USF_SHIFT             1
+#define GFSR_SMCF_SHIFT            2
+#define GFSR_UCBF_SHIFT            3
+#define GFSR_UCIF_SHIFT            4
+#define GFSR_CAF_SHIFT             5
+#define GFSR_EF_SHIFT              6
+#define GFSR_PF_SHIFT              7
+#define GFSR_MULTI_SHIFT           31
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_SHIFT       0
+#define GFSYNR0_WNR_SHIFT          1
+#define GFSYNR0_PNU_SHIFT          2
+#define GFSYNR0_IND_SHIFT          3
+#define GFSYNR0_NSSTATE_SHIFT      4
+#define GFSYNR0_NSATTR_SHIFT       5
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_SHIFT          0
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_SHIFT               0
+#define GPAR_SS_SHIFT              1
+#define GPAR_OUTER_SHIFT           2
+#define GPAR_INNER_SHIFT           4
+#define GPAR_SH_SHIFT              7
+#define GPAR_NS_SHIFT              9
+#define GPAR_NOS_SHIFT             10
+#define GPAR_PA_SHIFT              12
+#define GPAR_TF_SHIFT              1
+#define GPAR_AFF_SHIFT             2
+#define GPAR_PF_SHIFT              3
+#define GPAR_EF_SHIFT              4
+#define GPAR_TLBMCF_SHIFT          5
+#define GPAR_TLBLKF_SHIFT          6
+#define GPAR_UCBF_SHIFT            30
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_SHIFT         0
+#define IDR0_NUMSIDB_SHIFT         9
+#define IDR0_BTM_SHIFT             13
+#define IDR0_CTTW_SHIFT            14
+#define IDR0_NUMIRPT_SHIFT         16
+#define IDR0_PTFS_SHIFT            24
+#define IDR0_SMS_SHIFT             27
+#define IDR0_NTS_SHIFT             28
+#define IDR0_S2TS_SHIFT            29
+#define IDR0_S1TS_SHIFT            30
+#define IDR0_SES_SHIFT             31
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_SHIFT           0
+#define IDR1_NUMSSDNDXB_SHIFT      8
+#define IDR1_SSDTP_SHIFT           12
+#define IDR1_SMCD_SHIFT            15
+#define IDR1_NUMS2CB_SHIFT         16
+#define IDR1_NUMPAGENDXB_SHIFT     28
+#define IDR1_PAGESIZE_SHIFT        31
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_SHIFT             0
+#define IDR2_OAS_SHIFT             4
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_SHIFT           0
+#define IDR7_MAJOR_SHIFT           4
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_SHIFT           0
+#define S2CR_SHCFG_SHIFT           8
+#define S2CR_MTCFG_SHIFT           11
+#define S2CR_MEMATTR_SHIFT         12
+#define S2CR_TYPE_SHIFT            16
+#define S2CR_NSCFG_SHIFT           18
+#define S2CR_RACFG_SHIFT           20
+#define S2CR_WACFG_SHIFT           22
+#define S2CR_PRIVCFG_SHIFT         24
+#define S2CR_INSTCFG_SHIFT         26
+#define S2CR_TRANSIENTCFG_SHIFT    28
+#define S2CR_VMID_SHIFT            0
+#define S2CR_BSU_SHIFT             24
+#define S2CR_FB_SHIFT              26
+
+/* Stream Match Register: SMR */
+#define SMR_ID_SHIFT               0
+#define SMR_MASK_SHIFT             16
+#define SMR_VALID_SHIFT            31
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_SHIFT  0
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_SHIFT         12
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_SHIFT        0
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_SHIFT            0
+#define CBAR_CBNDX_SHIFT           8
+#define CBAR_BPSHCFG_SHIFT         8
+#define CBAR_HYPC_SHIFT            10
+#define CBAR_FB_SHIFT              11
+#define CBAR_MEMATTR_SHIFT         12
+#define CBAR_TYPE_SHIFT            16
+#define CBAR_BSU_SHIFT             18
+#define CBAR_RACFG_SHIFT           20
+#define CBAR_WACFG_SHIFT           22
+#define CBAR_IRPTNDX_SHIFT         24
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_SHIFT        0
+
+/* Implementation defined register space shift */
+#define MICRO_MMU_CTRL_HALT_REQ_SHIFT         2
+#define MICRO_MMU_CTRL_IDLE_SHIFT             3
+
+/* Stage 1 Context Bank Format Shifts */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_SHIFT     0
+#define CB_ACTLR_REQPRIORITYCFG_SHIFT  4
+#define CB_ACTLR_PRIVCFG_SHIFT         8
+#define CB_ACTLR_BPRCOSH_SHIFT         28
+#define CB_ACTLR_BPRCISH_SHIFT         29
+#define CB_ACTLR_BPRCNSH_SHIFT         30
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_SHIFT       12
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_SHIFT       12
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_SHIFT       12
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_SHIFT       12
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_SHIFT       0
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_SHIFT   0
+#define CB_CONTEXTIDR_PROCID_SHIFT 8
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_SHIFT         0
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_SHIFT            1
+#define CB_FSR_AFF_SHIFT           2
+#define CB_FSR_PF_SHIFT            3
+#define CB_FSR_EF_SHIFT            4
+#define CB_FSR_TLBMCF_SHIFT        5
+#define CB_FSR_TLBLKF_SHIFT        6
+#define CB_FSR_SS_SHIFT            30
+#define CB_FSR_MULTI_SHIFT         31
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_SHIFT       0
+#define CB_FSYNR0_S1PTWF_SHIFT     3
+#define CB_FSYNR0_WNR_SHIFT        4
+#define CB_FSYNR0_PNU_SHIFT        5
+#define CB_FSYNR0_IND_SHIFT        6
+#define CB_FSYNR0_NSSTATE_SHIFT    7
+#define CB_FSYNR0_NSATTR_SHIFT     8
+#define CB_FSYNR0_ATOF_SHIFT       9
+#define CB_FSYNR0_PTWF_SHIFT       10
+#define CB_FSYNR0_AFR_SHIFT        11
+#define CB_FSYNR0_S1CBNDX_SHIFT    16
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_SHIFT          0
+#define CB_NMRR_IR1_SHIFT          2
+#define CB_NMRR_IR2_SHIFT          4
+#define CB_NMRR_IR3_SHIFT          6
+#define CB_NMRR_IR4_SHIFT          8
+#define CB_NMRR_IR5_SHIFT          10
+#define CB_NMRR_IR6_SHIFT          12
+#define CB_NMRR_IR7_SHIFT          14
+#define CB_NMRR_OR0_SHIFT          16
+#define CB_NMRR_OR1_SHIFT          18
+#define CB_NMRR_OR2_SHIFT          20
+#define CB_NMRR_OR3_SHIFT          22
+#define CB_NMRR_OR4_SHIFT          24
+#define CB_NMRR_OR5_SHIFT          26
+#define CB_NMRR_OR6_SHIFT          28
+#define CB_NMRR_OR7_SHIFT          30
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_SHIFT             0
+#define CB_PAR_SS_SHIFT            1
+#define CB_PAR_OUTER_SHIFT         2
+#define CB_PAR_INNER_SHIFT         4
+#define CB_PAR_SH_SHIFT            7
+#define CB_PAR_NS_SHIFT            9
+#define CB_PAR_NOS_SHIFT           10
+#define CB_PAR_PA_SHIFT            12
+#define CB_PAR_TF_SHIFT            1
+#define CB_PAR_AFF_SHIFT           2
+#define CB_PAR_PF_SHIFT            3
+#define CB_PAR_TLBMCF_SHIFT        5
+#define CB_PAR_TLBLKF_SHIFT        6
+#define CB_PAR_ATOT_SHIFT          31
+#define CB_PAR_PLVL_SHIFT          0
+#define CB_PAR_STAGE_SHIFT         3
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_SHIFT          0
+#define CB_PRRR_TR1_SHIFT          2
+#define CB_PRRR_TR2_SHIFT          4
+#define CB_PRRR_TR3_SHIFT          6
+#define CB_PRRR_TR4_SHIFT          8
+#define CB_PRRR_TR5_SHIFT          10
+#define CB_PRRR_TR6_SHIFT          12
+#define CB_PRRR_TR7_SHIFT          14
+#define CB_PRRR_DS0_SHIFT          16
+#define CB_PRRR_DS1_SHIFT          17
+#define CB_PRRR_NS0_SHIFT          18
+#define CB_PRRR_NS1_SHIFT          19
+#define CB_PRRR_NOS0_SHIFT         24
+#define CB_PRRR_NOS1_SHIFT         25
+#define CB_PRRR_NOS2_SHIFT         26
+#define CB_PRRR_NOS3_SHIFT         27
+#define CB_PRRR_NOS4_SHIFT         28
+#define CB_PRRR_NOS5_SHIFT         29
+#define CB_PRRR_NOS6_SHIFT         30
+#define CB_PRRR_NOS7_SHIFT         31
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_SHIFT        0
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_SHIFT            0
+#define CB_SCTLR_TRE_SHIFT          1
+#define CB_SCTLR_AFE_SHIFT          2
+#define CB_SCTLR_AFFD_SHIFT         3
+#define CB_SCTLR_E_SHIFT            4
+#define CB_SCTLR_CFRE_SHIFT         5
+#define CB_SCTLR_CFIE_SHIFT         6
+#define CB_SCTLR_CFCFG_SHIFT        7
+#define CB_SCTLR_HUPCF_SHIFT        8
+#define CB_SCTLR_WXN_SHIFT          9
+#define CB_SCTLR_UWXN_SHIFT         10
+#define CB_SCTLR_ASIDPNE_SHIFT      12
+#define CB_SCTLR_TRANSIENTCFG_SHIFT 14
+#define CB_SCTLR_MEMATTR_SHIFT      16
+#define CB_SCTLR_MTCFG_SHIFT        20
+#define CB_SCTLR_SHCFG_SHIFT        22
+#define CB_SCTLR_RACFG_SHIFT        24
+#define CB_SCTLR_WACFG_SHIFT        26
+#define CB_SCTLR_NSCFG_SHIFT        28
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_SHIFT      0
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_SHIFT        0
+#define CB_TLBIVA_VA_SHIFT          12
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_SHIFT         12
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_SHIFT        12
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_SHIFT       0
+#define CB_TLBIVAL_VA_SHIFT         12
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_SHIFT  0
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_SHIFT         0
+#define CB_TTBCR_PD0_SHIFT          4
+#define CB_TTBCR_PD1_SHIFT          5
+#define CB_TTBCR_NSCFG0_SHIFT       14
+#define CB_TTBCR_NSCFG1_SHIFT       30
+#define CB_TTBCR_EAE_SHIFT          31
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#define CB_TTBR0_IRGN1_SHIFT        0
+#define CB_TTBR0_S_SHIFT            1
+#define CB_TTBR0_RGN_SHIFT          3
+#define CB_TTBR0_NOS_SHIFT          5
+#define CB_TTBR0_IRGN0_SHIFT        6
+#define CB_TTBR0_ADDR_SHIFT         14
+
+#define CB_TTBR1_IRGN1_SHIFT        0
+#define CB_TTBR1_S_SHIFT            1
+#define CB_TTBR1_RGN_SHIFT          3
+#define CB_TTBR1_NOS_SHIFT          5
+#define CB_TTBR1_IRGN0_SHIFT        6
+#define CB_TTBR1_ADDR_SHIFT         14
+
+#endif
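
The *_SHIFT constants above are one half of a field accessor: when the driver programs a context-bank register it pairs each shift with a field mask. A minimal sketch of that pattern, assuming a hypothetical mask value and register offset (the real driver keeps its masks and register offsets in its own headers):

#include <linux/io.h>

/* Hypothetical mask for the 8-bit ASID field of CB_TLBIVA. */
#define CB_TLBIVA_ASID_MASK	0xFF

/* Invalidate one VA in one address space: VA in bits 31:12, ASID in 7:0. */
static void example_cb_tlbiva(void __iomem *cb_base, unsigned long va, u32 asid)
{
	u32 val = (va & ~0xFFFUL) |
		  ((asid & CB_TLBIVA_ASID_MASK) << CB_TLBIVA_ASID_SHIFT);

	writel_relaxed(val, cb_base + 0x600);	/* 0x600: illustrative offset */
}
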
diff --git a/arch/arm/mach-msm/include/mach/iommu_perfmon.h b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
new file mode 100644
index 0000000..dcae83b
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/iommu_perfmon.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+
+#ifndef MSM_IOMMU_PERFMON_H
+#define MSM_IOMMU_PERFMON_H
+
+/**
+ * struct iommu_pmon_counter - container for a performance counter.
+ * @counter_no:          counter number within the group
+ * @absolute_counter_no: counter number within IOMMU PMU
+ * @value:               cached counter value
+ * @overflow_count:      number of times the counter has overflowed
+ * @enabled:             indicates whether counter is enabled or not
+ * @current_event_class: current selected event class, -1 if none
+ * @counter_dir:         debugfs directory for this counter
+ * @cnt_group:           group this counter belongs to
+ */
+struct iommu_pmon_counter {
+	unsigned int counter_no;
+	unsigned int absolute_counter_no;
+	unsigned long value;
+	unsigned long overflow_count;
+	unsigned int enabled;
+	int current_event_class;
+	struct dentry *counter_dir;
+	struct iommu_pmon_cnt_group *cnt_group;
+};
+
+/**
+ * struct iommu_pmon_cnt_group - container for a perf mon counter group.
+ * @grp_no:       group number
+ * @num_counters: number of counters in this group
+ * @counters:     list of counter in this group
+ * @group_dir:    debugfs directory for this group
+ * @pmon:         pointer to the iommu_pmon object this group belongs to
+ */
+struct iommu_pmon_cnt_group {
+	unsigned int grp_no;
+	unsigned int num_counters;
+	struct iommu_pmon_counter *counters;
+	struct dentry *group_dir;
+	struct iommu_pmon *pmon;
+};
+
+/**
+ * struct iommu_info - container for perf mon iommu info.
+ * @iommu_name: name of the iommu from device tree
+ * @base:       virtual base address for this iommu
+ * @evt_irq:    irq number for event overflow interrupt
+ * @iommu_dev:  pointer to iommu device
+ * @ops:        iommu access operations pointer.
+ * @hw_ops:     iommu pm hw access operations pointer.
+ */
+struct iommu_info {
+	const char *iommu_name;
+	void *base;
+	int evt_irq;
+	struct device *iommu_dev;
+	struct iommu_access_ops *ops;
+	struct iommu_pm_hw_ops *hw_ops;
+};
+
+/**
+ * struct iommu_pmon - main container for perf mon data.
+ * @iommu_dir:            debugfs directory for this iommu
+ * @iommu:                iommu_info instance
+ * @iommu_list:           iommu_list head
+ * @cnt_grp:              list of counter groups
+ * @num_groups:           number of counter groups
+ * @num_counters:         number of counters per group
+ * @event_cls_supported:  an array of event classes supported for this PMU
+ * @nevent_cls_supported: number of event classes supported.
+ * @enabled:              Indicates whether perf. mon is enabled or not
+ * @iommu_attach_count:   Number of times the iommu has been attached.
+ * @lock:                 mutex used to synchronize access to shared data
+ */
+struct iommu_pmon {
+	struct dentry *iommu_dir;
+	struct iommu_info iommu;
+	struct list_head iommu_list;
+	struct iommu_pmon_cnt_group *cnt_grp;
+	u32 num_groups;
+	u32 num_counters;
+	u32 *event_cls_supported;
+	u32 nevent_cls_supported;
+	unsigned int enabled;
+	unsigned int iommu_attach_count;
+	struct mutex lock;
+};
+
+/**
+ * struct iommu_pm_hw_ops - Callbacks for accessing IOMMU HW
+ * @initialize_hw: Call to do any initialization before enabling ovf interrupts
+ * @is_hw_access_OK: Returns 1 if we can access HW, 0 otherwise
+ * @grp_enable: Call to enable a counter group
+ * @grp_disable: Call to disable a counter group
+ * @enable_pm: Call to enable PM
+ * @disable_pm: Call to disable PM
+ * @reset_counters:  Call to reset counters
+ * @check_for_overflow:  Call to check for overflow
+ * @evt_ovfl_int_handler: Overflow interrupt handler callback
+ * @counter_enable: Call to enable counters
+ * @counter_disable: Call to disable counters
+ * @ovfl_int_enable: Call to enable overflow interrupts
+ * @ovfl_int_disable: Call to disable overflow interrupts
+ * @set_event_class: Call to set event class
+ * @read_counter: Call to read a counter value
+ */
+struct iommu_pm_hw_ops {
+	void (*initialize_hw)(const struct iommu_pmon *);
+	unsigned int (*is_hw_access_OK)(const struct iommu_pmon *);
+	void (*grp_enable)(struct iommu_info *, unsigned int);
+	void (*grp_disable)(struct iommu_info *, unsigned int);
+	void (*enable_pm)(struct iommu_info *);
+	void (*disable_pm)(struct iommu_info *);
+	void (*reset_counters)(const struct iommu_info *);
+	void (*check_for_overflow)(struct iommu_pmon *);
+	irqreturn_t (*evt_ovfl_int_handler)(int, void *);
+	void (*counter_enable)(struct iommu_info *,
+			       struct iommu_pmon_counter *);
+	void (*counter_disable)(struct iommu_info *,
+			       struct iommu_pmon_counter *);
+	void (*ovfl_int_enable)(struct iommu_info *,
+				const struct iommu_pmon_counter *);
+	void (*ovfl_int_disable)(struct iommu_info *,
+				const struct iommu_pmon_counter *);
+	void (*set_event_class)(struct iommu_pmon *pmon, unsigned int,
+				unsigned int);
+	unsigned int (*read_counter)(struct iommu_pmon_counter *);
+};
+
+extern struct iommu_access_ops iommu_access_ops_v0;
+extern struct iommu_access_ops iommu_access_ops_v1;
+#define MSM_IOMMU_PMU_NO_EVENT_CLASS -1
+
+#ifdef CONFIG_MSM_IOMMU_PMON
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv0 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void);
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv1 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void);
+
+/**
+ * Allocate memory for the performance monitor structure. Must
+ * be called before msm_iommu_pm_iommu_register().
+ */
+struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev);
+
+/**
+ * Free memory previously allocated with msm_iommu_pm_alloc().
+ */
+void msm_iommu_pm_free(struct device *iommu_dev);
+
+/**
+ * Register iommu with the performance monitor module.
+ */
+int msm_iommu_pm_iommu_register(struct iommu_pmon *info);
+
+/**
+ * Unregister iommu with the performance monitor module.
+ */
+void msm_iommu_pm_iommu_unregister(struct device *dev);
+
+/**
+ * Called by the iommu driver when attaching is complete.
+ * Must NOT be called with IOMMU mutexes held.
+ * @param dev IOMMU device that is attached
+ */
+void msm_iommu_attached(struct device *dev);
+
+/**
+ * Called by iommu driver before detaching.
+ * Must NOT be called with IOMMU mutexes held.
+ * @param dev IOMMU device that is going to be detached
+ */
+void msm_iommu_detached(struct device *dev);
+#else
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void)
+{
+	return NULL;
+}
+
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void)
+{
+	return NULL;
+}
+
+static inline struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev)
+{
+	return NULL;
+}
+
+static inline void msm_iommu_pm_free(struct device *iommu_dev)
+{
+	return;
+}
+
+static inline int msm_iommu_pm_iommu_register(struct iommu_pmon *info)
+{
+	return -EIO;
+}
+
+static inline void msm_iommu_pm_iommu_unregister(struct device *dev)
+{
+}
+
+static inline void msm_iommu_attached(struct device *dev)
+{
+}
+
+static inline void msm_iommu_detached(struct device *dev)
+{
+}
+#endif
+#endif
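
Read together, the declarations above imply a fixed bring-up order for the perf mon: allocate, fill in the iommu_info fields, register, then report attach/detach events; unregister and free mirror that on teardown. A hedged sketch of the probe-side half (error handling trimmed; the exact field setup in the real driver may differ):

#include <linux/platform_device.h>
#include <mach/iommu_perfmon.h>

static int example_pmon_probe(struct platform_device *pdev,
			      void *base, int evt_irq)
{
	struct iommu_pmon *pmon = msm_iommu_pm_alloc(&pdev->dev);

	if (!pmon)	/* also hit when CONFIG_MSM_IOMMU_PMON is off */
		return -ENOMEM;

	pmon->iommu.iommu_name = dev_name(&pdev->dev);
	pmon->iommu.base = base;
	pmon->iommu.evt_irq = evt_irq;
	pmon->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
	pmon->iommu.ops = &iommu_access_ops_v1;

	return msm_iommu_pm_iommu_register(pmon);
}

At runtime the driver is then expected to call msm_iommu_attached()/msm_iommu_detached() around attach and detach, and msm_iommu_pm_iommu_unregister() plus msm_iommu_pm_free() on remove.
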
diff --git a/arch/arm/mach-msm/include/mach/ion.h b/arch/arm/mach-msm/include/mach/ion.h
index 9fbc720..b472d27 100644
--- a/arch/arm/mach-msm/include/mach/ion.h
+++ b/arch/arm/mach-msm/include/mach/ion.h
@@ -1,6 +1,6 @@
 /**
  *
- * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/arch/arm/mach-msm/include/mach/kgsl.h b/arch/arm/mach-msm/include/mach/kgsl.h
index a22b628..b68aff8 100644
--- a/arch/arm/mach-msm/include/mach/kgsl.h
+++ b/arch/arm/mach-msm/include/mach/kgsl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,12 +21,13 @@
 #define KGSL_CLK_MEM_IFACE 0x00000010
 #define KGSL_CLK_AXI	0x00000020
 
-#define KGSL_MAX_PWRLEVELS 5
+#define KGSL_MAX_PWRLEVELS 10
 
 #define KGSL_CONVERT_TO_MBPS(val) \
 	(val*1000*1000U)
 
 #define KGSL_3D0_REG_MEMORY	"kgsl_3d0_reg_memory"
+#define KGSL_3D0_SHADER_MEMORY	"kgsl_3d0_shader_memory"
 #define KGSL_3D0_IRQ		"kgsl_3d0_irq"
 #define KGSL_2D0_REG_MEMORY	"kgsl_2d0_reg_memory"
 #define KGSL_2D0_IRQ		"kgsl_2d0_irq"
@@ -72,6 +73,7 @@
 	unsigned int nap_allowed;
 	unsigned int clk_map;
 	unsigned int idle_needed;
+	unsigned int step_mul;
 	struct msm_bus_scale_pdata *bus_scale_table;
 	struct kgsl_device_iommu_data *iommu_data;
 	int iommu_count;
diff --git a/arch/arm/mach-msm/include/mach/msm_iommu_priv.h b/arch/arm/mach-msm/include/mach/msm_iommu_priv.h
new file mode 100644
index 0000000..a2f4836
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_iommu_priv.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_PRIV_H
+#define MSM_IOMMU_PRIV_H
+
+/**
+ * struct msm_iommu_pt - Container for first level page table and its
+ * attributes.
+ * fl_table: Pointer to the first level page table.
+ * redirect: Set to 1 if L2 redirect for page tables is enabled, 0 otherwise.
+ */
+struct msm_iommu_pt {
+	unsigned long *fl_table;
+	int redirect;
+};
+
+/**
+ * struct msm_iommu_priv - Container for page table attributes and other
+ * private iommu domain information.
+ * pt: Page table attribute structure
+ * list_attached: List of devices (contexts) attached to this domain.
+ * client_name: Name of the domain client.
+ */
+struct msm_iommu_priv {
+	struct msm_iommu_pt pt;
+	struct list_head list_attached;
+	const char *client_name;
+};
+
+#endif
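
A domain_init-style consumer of this structure allocates it per domain and prepares the attach list before touching the page table. A minimal sketch, with the first-level table allocation left out since that part is driver-specific:

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <mach/msm_iommu_priv.h>

static int example_domain_init(struct iommu_domain *domain, int cacheable)
{
	struct msm_iommu_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->list_attached);
	priv->pt.redirect = cacheable;	/* from MSM_IOMMU_DOMAIN_PT_CACHEABLE */
	/* priv->pt.fl_table allocation is omitted here */
	domain->priv = priv;
	return 0;
}
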
diff --git a/arch/arm/mach-msm/include/mach/scm.h b/arch/arm/mach-msm/include/mach/scm.h
index 6cccda5..fb08595 100644
--- a/arch/arm/mach-msm/include/mach/scm.h
+++ b/arch/arm/mach-msm/include/mach/scm.h
@@ -22,6 +22,7 @@
 #define SCM_SVC_FUSE			0x8
 #define SCM_SVC_PWR			0x9
 #define SCM_SVC_CP			0xC
+#define SCM_SVC_MP			0xC
 #define SCM_SVC_DCVS			0xD
 #define SCM_SVC_TZSCHEDULER		0xFC
 #define SCM_SVC_OEM				0xFE
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 726c888..b898fe8 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,29 +22,46 @@
 
 #include <asm/cputype.h>
 #include <asm/mach-types.h>
+/*
+ * SOC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits.  For example:
+ *   1.0 -> 0x00010000
+ *   2.3 -> 0x00020003
+ */
 #define SOCINFO_VERSION_MAJOR(ver) ((ver & 0xffff0000) >> 16)
 #define SOCINFO_VERSION_MINOR(ver) (ver & 0x0000ffff)
 
 #ifdef CONFIG_OF
-#define early_machine_is_msm8974()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8974")
-#define machine_is_msm8974()		\
-	of_machine_is_compatible("qcom,msm8974")
-#define machine_is_msm8974_sim()		\
-	of_machine_is_compatible("qcom,msm8974-sim")
-#define machine_is_msm8974_rumi()	\
-	of_machine_is_compatible("qcom,msm8974-rumi")
-#define early_machine_is_msm9625()	\
-	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm9625")
-#define machine_is_msm9625()		\
-	of_machine_is_compatible("qcom,msm9625")
+#define of_board_is_sim()	of_machine_is_compatible("qcom,sim")
+#define of_board_is_rumi()	of_machine_is_compatible("qcom,rumi")
+#define of_board_is_fluid()	of_machine_is_compatible("qcom,fluid")
+#define of_board_is_liquid()	of_machine_is_compatible("qcom,liquid")
+
+#define machine_is_msm8974()	of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm9625()	of_machine_is_compatible("qcom,msm9625")
+#define machine_is_msm8610()	of_machine_is_compatible("qcom,msm8610")
+#define machine_is_msm8226()	of_machine_is_compatible("qcom,msm8226")
+
+#define early_machine_is_msm8610()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
+#define early_machine_is_mpq8092()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mpq8092")
+#define early_machine_is_msmzinc()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzinc")
 #else
-#define early_machine_is_msm8974()	0
+#define of_board_is_sim()		0
+#define of_board_is_rumi()		0
+#define of_board_is_fluid()		0
+#define of_board_is_liquid()		0
+
 #define machine_is_msm8974()		0
-#define machine_is_msm8974_sim()	0
-#define machine_is_msm8974_rumi()	0
-#define early_machine_is_msm9625()	0
 #define machine_is_msm9625()		0
+#define machine_is_msm8610()		0
+#define machine_is_msm8226()		0
+
+#define early_machine_is_msm8610()	0
+#define early_machine_is_mpq8092()	0
+#define early_machine_is_msmzinc()	0
 #endif
 
 #define PLATFORM_SUBTYPE_SGLTE	6
@@ -68,14 +85,21 @@
 	MSM_CPU_7X25AB,
 	MSM_CPU_8064,
 	MSM_CPU_8064AB,
+	MSM_CPU_8064AA,
 	MSM_CPU_8930,
 	MSM_CPU_8930AA,
+	MSM_CPU_8930AB,
 	MSM_CPU_7X27AA,
 	MSM_CPU_9615,
 	MSM_CPU_8974,
 	MSM_CPU_8627,
 	MSM_CPU_8625,
-	MSM_CPU_9625
+	MSM_CPU_9625,
+	MSM_CPU_8092,
+	MSM_CPU_8226,
+	MSM_CPU_8610,
+	MSM_CPU_8625Q,
+	MSM_CPU_ZINC,
 };
 
 enum pmic_model {
@@ -108,7 +132,9 @@
 int __init socinfo_init(void) __must_check;
 const int read_msm_cpu_type(void);
 const int get_core_count(void);
+const int cpu_is_krait(void);
 const int cpu_is_krait_v1(void);
+const int cpu_is_krait_v2(void);
 const int cpu_is_krait_v3(void);
 
 static inline int cpu_is_msm7x01(void)
@@ -288,6 +314,15 @@
 #endif
 }
 
+static inline int cpu_is_apq8064aa(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+	return read_msm_cpu_type() == MSM_CPU_8064AA;
+#else
+	return 0;
+#endif
+}
+
 static inline int cpu_is_msm8930(void)
 {
 #ifdef CONFIG_ARCH_MSM8930
@@ -306,8 +341,18 @@
 #endif
 }
 
+static inline int cpu_is_msm8930ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+	return read_msm_cpu_type() == MSM_CPU_8930AB;
+#else
+	return 0;
+#endif
+}
+
 static inline int cpu_is_msm8627(void)
 {
+/* 8930 and 8627 will share the same CONFIG_ARCH type unless otherwise needed */
 #ifdef CONFIG_ARCH_MSM8930
 	return read_msm_cpu_type() == MSM_CPU_8627;
 #else
@@ -351,4 +396,81 @@
 #endif
 }
 
+static inline int cpu_is_msm8974(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+	enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+	BUG_ON(cpu == MSM_CPU_UNKNOWN);
+	return cpu == MSM_CPU_8974;
+#else
+	return 0;
+#endif
+}
+
+static inline int cpu_is_mpq8092(void)
+{
+#ifdef CONFIG_ARCH_MPQ8092
+	enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+	BUG_ON(cpu == MSM_CPU_UNKNOWN);
+	return cpu == MSM_CPU_8092;
+#else
+	return 0;
+#endif
+}
+
+static inline int cpu_is_msm8226(void)
+{
+#ifdef CONFIG_ARCH_MSM8226
+	enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+	BUG_ON(cpu == MSM_CPU_UNKNOWN);
+	return cpu == MSM_CPU_8226;
+#else
+	return 0;
+#endif
+}
+
+static inline int cpu_is_msm8610(void)
+{
+#ifdef CONFIG_ARCH_MSM8610
+	enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+	BUG_ON(cpu == MSM_CPU_UNKNOWN);
+	return cpu == MSM_CPU_8610;
+#else
+	return 0;
+#endif
+}
+
+static inline int cpu_is_msm8625q(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+	enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+	BUG_ON(cpu == MSM_CPU_UNKNOWN);
+	return cpu == MSM_CPU_8625Q;
+#else
+	return 0;
+#endif
+}
+
+static inline int soc_class_is_msm8960(void)
+{
+	return cpu_is_msm8960() || cpu_is_msm8960ab();
+}
+
+static inline int soc_class_is_apq8064(void)
+{
+	return cpu_is_apq8064() || cpu_is_apq8064ab() || cpu_is_apq8064aa();
+}
+
+static inline int soc_class_is_msm8930(void)
+{
+	return cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8930ab() ||
+	       cpu_is_msm8627();
+}
+
 #endif
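
The major/minor packing documented at the top of this header is easy to check by hand; a quick illustration using the two macros:

#include <linux/kernel.h>
#include <mach/socinfo.h>

static void example_version_decode(void)
{
	u32 ver = 0x00020003;	/* encodes 2.3 per the header comment */

	pr_info("SoC version %u.%u\n",
		SOCINFO_VERSION_MAJOR(ver),	/* prints 2 */
		SOCINFO_VERSION_MINOR(ver));	/* prints 3 */
}
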
diff --git a/arch/arm/mach-msm/iommu_domains.c b/arch/arm/mach-msm/iommu_domains.c
index 6053be3..18562a3 100644
--- a/arch/arm/mach-msm/iommu_domains.c
+++ b/arch/arm/mach-msm/iommu_domains.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,15 +17,16 @@
 #include <linux/platform_device.h>
 #include <linux/rbtree.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <asm/sizes.h>
 #include <asm/page.h>
 #include <mach/iommu.h>
 #include <mach/iommu_domains.h>
+#include <mach/msm_iommu_priv.h>
 #include <mach/socinfo.h>
-#include <mach/msm_subsystem_map.h>
-
-/* dummy 64K for overmapping */
-char iommu_dummy[2*SZ_64K-4];
 
 struct msm_iova_data {
 	struct rb_node node;
@@ -39,27 +40,45 @@
 DEFINE_MUTEX(domain_mutex);
 static atomic_t domain_nums = ATOMIC_INIT(-1);
 
+void msm_iommu_set_client_name(struct iommu_domain *domain, char const *name)
+{
+	struct msm_iommu_priv *priv = domain->priv;
+	priv->client_name = name;
+}
+
 int msm_use_iommu()
 {
 	return iommu_present(&platform_bus_type);
 }
 
+bool msm_iommu_page_size_is_supported(unsigned long page_size)
+{
+	return page_size == SZ_4K
+		|| page_size == SZ_64K
+		|| page_size == SZ_1M
+		|| page_size == SZ_16M;
+}
+
 int msm_iommu_map_extra(struct iommu_domain *domain,
 				unsigned long start_iova,
+				phys_addr_t phy_addr,
 				unsigned long size,
 				unsigned long page_size,
-				int cached)
+				int prot)
 {
 	int ret = 0;
 	int i = 0;
-	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
 	unsigned long temp_iova = start_iova;
-	if (page_size == SZ_4K) {
+	/*
+	 * The extra "padding" should never be written to; map it
+	 * read-only.
+	 */
+	prot &= ~IOMMU_WRITE;
+
+	if (msm_iommu_page_size_is_supported(page_size)) {
 		struct scatterlist *sglist;
 		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
 		struct page *dummy_page = phys_to_page(phy_addr);
 
-		sglist = kmalloc(sizeof(*sglist) * nrpages, GFP_KERNEL);
+		sglist = vmalloc(sizeof(*sglist) * nrpages);
 		if (!sglist) {
 			ret = -ENOMEM;
 			goto out;
@@ -70,13 +89,13 @@
 		for (i = 0; i < nrpages; i++)
 			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);
 
-		ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
+		ret = iommu_map_range(domain, temp_iova, sglist, size, prot);
 		if (ret) {
 			pr_err("%s: could not map extra %lx in domain %p\n",
 				__func__, start_iova, domain);
 		}
 
-		kfree(sglist);
+		vfree(sglist);
 	} else {
 		unsigned long order = get_order(page_size);
 		unsigned long aligned_size = ALIGN(size, page_size);
@@ -84,7 +103,7 @@
 
 		for (i = 0; i < nrpages; i++) {
 			ret = iommu_map(domain, temp_iova, phy_addr, page_size,
-						cached);
+						prot);
 			if (ret) {
 				pr_err("%s: could not map %lx in domain %p, error: %d\n",
 					__func__, start_iova, domain, ret);
@@ -122,7 +141,7 @@
 
 static int msm_iommu_map_iova_phys(struct iommu_domain *domain,
 				unsigned long iova,
-				unsigned long phys,
+				phys_addr_t phys,
 				unsigned long size,
 				int cached)
 {
@@ -131,7 +150,7 @@
 	int prot = IOMMU_WRITE | IOMMU_READ;
 	prot |= cached ? IOMMU_CACHE : 0;
 
-	sglist = kmalloc(sizeof(*sglist), GFP_KERNEL);
+	sglist = vmalloc(sizeof(*sglist));
 	if (!sglist) {
 		ret = -ENOMEM;
 		goto err1;
@@ -148,13 +167,13 @@
 			__func__, iova, domain);
 	}
 
-	kfree(sglist);
+	vfree(sglist);
 err1:
 	return ret;
 
 }
 
-int msm_iommu_map_contig_buffer(unsigned long phys,
+int msm_iommu_map_contig_buffer(phys_addr_t phys,
 				unsigned int domain_no,
 				unsigned int partition_no,
 				unsigned long size,
@@ -265,6 +284,28 @@
 	else
 		return NULL;
 }
+EXPORT_SYMBOL(msm_get_iommu_domain);
+
+int msm_find_domain_no(const struct iommu_domain *domain)
+{
+	struct rb_root *root = &domain_root;
+	struct rb_node *n;
+	struct msm_iova_data *node;
+	int domain_num = -EINVAL;
+
+	mutex_lock(&domain_mutex);
+
+	for (n = rb_first(root); n; n = rb_next(n)) {
+		node = rb_entry(n, struct msm_iova_data, node);
+		if (node->domain == domain) {
+			domain_num = node->domain_num;
+			break;
+		}
+	}
+	mutex_unlock(&domain_mutex);
+	return domain_num;
+}
+EXPORT_SYMBOL(msm_find_domain_no);
 
 int msm_allocate_iova_address(unsigned int iommu_domain,
 					unsigned int partition_no,
@@ -342,6 +383,7 @@
 	int i;
 	struct msm_iova_data *data;
 	struct mem_pool *pools;
+	struct bus_type *bus;
 
 	if (!layout)
 		return -EINVAL;
@@ -387,11 +429,18 @@
 		}
 	}
 
+	bus = layout->is_secure == MSM_IOMMU_DOMAIN_SECURE ?
+					&msm_iommu_sec_bus_type :
+					&platform_bus_type;
+
 	data->pools = pools;
 	data->npools = layout->npartitions;
 	data->domain_num = atomic_inc_return(&domain_nums);
-	data->domain = iommu_domain_alloc(&platform_bus_type,
-					  layout->domain_flags);
+	data->domain = iommu_domain_alloc(bus, layout->domain_flags);
+	if (!data->domain)
+		goto out;
+
+	msm_iommu_set_client_name(data->domain, layout->client_name);
 
 	add_domain(data);
 
@@ -404,7 +453,172 @@
 }
 EXPORT_SYMBOL(msm_register_domain);
 
-static int __init iommu_domain_probe(struct platform_device *pdev)
+static int find_and_add_contexts(struct iommu_group *group,
+				 const struct device_node *node,
+				 unsigned int num_contexts)
+{
+	unsigned int i;
+	struct device *ctx;
+	const char *name;
+	struct device_node *ctx_node;
+	int ret_val = 0;
+
+	for (i = 0; i < num_contexts; ++i) {
+		ctx_node = of_parse_phandle((struct device_node *) node,
+					    "qcom,iommu-contexts", i);
+		if (!ctx_node) {
+			pr_err("Unable to parse phandle #%u\n", i);
+			ret_val = -EINVAL;
+			goto out;
+		}
+		if (of_property_read_string(ctx_node, "label", &name)) {
+			pr_err("Could not find label property\n");
+			ret_val = -EINVAL;
+			goto out;
+		}
+		ctx = msm_iommu_get_ctx(name);
+		if (!ctx) {
+			pr_err("Unable to find context %s\n", name);
+			ret_val = -EINVAL;
+			goto out;
+		}
+		iommu_group_add_device(group, ctx);
+	}
+out:
+	return ret_val;
+}
+
+static int create_and_add_domain(struct iommu_group *group,
+				 struct device_node const *node,
+				 char const *name)
+{
+	unsigned int ret_val = 0;
+	unsigned int i, j;
+	struct msm_iova_layout l;
+	struct msm_iova_partition *part = NULL;
+	struct iommu_domain *domain = NULL;
+	unsigned int *addr_array;
+	unsigned int array_size;
+	int domain_no;
+	int secure_domain;
+	int l2_redirect;
+
+	if (of_get_property(node, "qcom,virtual-addr-pool", &array_size)) {
+		l.npartitions = array_size / sizeof(unsigned int) / 2;
+		part = kmalloc(
+			sizeof(struct msm_iova_partition) * l.npartitions,
+			       GFP_KERNEL);
+		if (!part) {
+			pr_err("%s: could not allocate space for partition\n",
+				__func__);
+			ret_val = -ENOMEM;
+			goto out;
+		}
+		addr_array = kmalloc(array_size, GFP_KERNEL);
+		if (!addr_array) {
+			pr_err("%s: could not allocate space for partition\n",
+				__func__);
+			ret_val = -ENOMEM;
+			goto free_mem;
+		}
+
+		ret_val = of_property_read_u32_array(node,
+					"qcom,virtual-addr-pool",
+					addr_array,
+					array_size/sizeof(unsigned int));
+		if (ret_val) {
+			ret_val = -EINVAL;
+			goto free_mem;
+		}
+
+		for (i = 0, j = 0; j < l.npartitions * 2; i++, j += 2) {
+			part[i].start = addr_array[j];
+			part[i].size = addr_array[j+1];
+		}
+	} else {
+		l.npartitions = 1;
+		part = kmalloc(
+			sizeof(struct msm_iova_partition) * l.npartitions,
+			       GFP_KERNEL);
+		if (!part) {
+			pr_err("%s: could not allocate space for partition\n",
+				__func__);
+			ret_val = -ENOMEM;
+			goto out;
+		}
+		part[0].start = 0x0;
+		part[0].size = 0xFFFFFFFF;
+	}
+
+	l.client_name = name;
+	l.partitions = part;
+
+	secure_domain = of_property_read_bool(node, "qcom,secure-domain");
+	l.is_secure = (secure_domain) ? MSM_IOMMU_DOMAIN_SECURE : 0;
+
+	l2_redirect = of_property_read_bool(node, "qcom,l2-redirect");
+	l.domain_flags = (l2_redirect) ? MSM_IOMMU_DOMAIN_PT_CACHEABLE : 0;
+
+	domain_no = msm_register_domain(&l);
+	if (domain_no >= 0)
+		domain = msm_get_iommu_domain(domain_no);
+	else
+		ret_val = domain_no;
+
+	iommu_group_set_iommudata(group, domain, NULL);
+
+free_mem:
+	kfree(part);
+out:
+	return ret_val;
+}
+
+static int iommu_domain_parse_dt(const struct device_node *dt_node)
+{
+	struct device_node *node;
+	int sz;
+	unsigned int num_contexts;
+	int ret_val = 0;
+	struct iommu_group *group = 0;
+	const char *name;
+
+	for_each_child_of_node(dt_node, node) {
+		group = iommu_group_alloc();
+		if (IS_ERR(group)) {
+			ret_val = PTR_ERR(group);
+			goto out;
+		}
+		if (of_property_read_string(node, "label", &name)) {
+			ret_val = -EINVAL;
+			goto free_group;
+		}
+		iommu_group_set_name(group, name);
+
+		if (!of_get_property(node, "qcom,iommu-contexts", &sz)) {
+			pr_err("Could not find qcom,iommu-contexts property\n");
+			ret_val = -EINVAL;
+			goto free_group;
+		}
+		num_contexts = sz / sizeof(unsigned int);
+
+		ret_val = find_and_add_contexts(group, node, num_contexts);
+		if (ret_val) {
+			ret_val = -EINVAL;
+			goto free_group;
+		}
+		ret_val = create_and_add_domain(group, node, name);
+		if (ret_val) {
+			ret_val = -EINVAL;
+			goto free_group;
+		}
+	}
+free_group:
+	/* No iommu_group_free() function */
+out:
+	return ret_val;
+}
+
+static int iommu_domain_probe(struct platform_device *pdev)
 {
 	struct iommu_domains_pdata *p  = pdev->dev.platform_data;
 	int i, j;
@@ -412,7 +626,9 @@
 	if (!msm_use_iommu())
 		return -ENODEV;
 
-	if (!p)
+	if (pdev->dev.of_node)
+		return iommu_domain_parse_dt(pdev->dev.of_node);
+	else if (!p)
 		return -ENODEV;
 
 	for (i = 0; i < p->ndomains; i++) {
@@ -458,26 +674,48 @@
 			continue;
 
 		if (iommu_attach_device(domain, ctx)) {
-			WARN(1, "%s: could not attach domain %p to context %s."
-				" iommu programming will not occur.\n",
-				__func__, domain,
-				p->domain_names[i].name);
+			WARN(1, "%s: could not attach domain %p to context %s. iommu programming will not occur.\n",
+				__func__, domain, p->domain_names[i].name);
 			continue;
 		}
 	}
-
 	return 0;
 }
 
+static int __devexit iommu_domain_exit(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static struct of_device_id msm_iommu_domain_match_table[] = {
+	{ .name = "qcom,iommu-domains", },
+	{}
+};
+
 static struct platform_driver iommu_domain_driver = {
 	.driver         = {
 		.name = "iommu_domains",
+		.of_match_table = msm_iommu_domain_match_table,
 		.owner = THIS_MODULE
 	},
+	.probe		= iommu_domain_probe,
+	.remove		= __devexit_p(iommu_domain_exit),
 };
 
 static int __init msm_subsystem_iommu_init(void)
 {
-	return platform_driver_probe(&iommu_domain_driver, iommu_domain_probe);
+	int ret;
+	ret = platform_driver_register(&iommu_domain_driver);
+	if (ret != 0)
+		pr_err("Failed to register IOMMU domain driver\n");
+	return ret;
 }
+
+static void __exit msm_subsystem_iommu_exit(void)
+{
+	platform_driver_unregister(&iommu_domain_driver);
+}
+
 device_initcall(msm_subsystem_iommu_init);
+module_exit(msm_subsystem_iommu_exit);
+
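
Non-device-tree clients drive the same registration path through struct msm_iova_layout. A sketch of registering a single-partition domain (the client name and pool bounds are illustrative):

#include <asm/sizes.h>
#include <mach/iommu_domains.h>

static int example_register_domain(void)
{
	struct msm_iova_partition partition = {
		.start = SZ_128K,
		.size = SZ_1G - SZ_128K,
	};
	struct msm_iova_layout layout = {
		.partitions = &partition,
		.npartitions = 1,
		.client_name = "example_client",
		.domain_flags = 0,	/* no L2-cacheable page tables */
	};

	/* Returns a domain number usable with msm_get_iommu_domain() */
	return msm_register_domain(&layout);
}
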
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 33b5099..a26b188 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -92,11 +92,6 @@
 	if (machine_is_apq8064_sim())
 		writel_relaxed(0xf0000, base_ptr+0x04);
 
-	if (machine_is_msm8974_sim()) {
-		writel_relaxed(0x800, base_ptr+0x04);
-		writel_relaxed(0x3FFF, base_ptr+0x14);
-	}
-
 	mb();
 	iounmap(base_ptr);
 	return 0;
@@ -156,9 +151,6 @@
 	    machine_is_apq8064_sim())
 		return krait_release_secondary_sim(0x02088000, cpu);
 
-	if (machine_is_msm8974_sim())
-		return krait_release_secondary_sim(0xf9088000, cpu);
-
 	if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
 	    cpu_is_apq8064() || cpu_is_msm8627() || cpu_is_apq8064ab())
 		return krait_release_secondary(0x02088000, cpu);
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 4a53c84..f3a7cf4 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -10,10 +10,22 @@
  * GNU General Public License for more details.
  *
  */
+/*
+ * SOC Info Routines
+ *
+ */
 
-#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
 #include <linux/sysdev.h>
+#include <linux/types.h>
+
 #include <asm/mach-types.h>
+
 #include <mach/socinfo.h>
 
 #include "smd_private.h"
@@ -29,8 +41,11 @@
 	HW_PLATFORM_SVLTE_SURF	= 5,
 	HW_PLATFORM_MTP  = 8,
 	HW_PLATFORM_LIQUID  = 9,
-	
+	/* Dragonboard platform id is assigned as 10 in CDT */
 	HW_PLATFORM_DRAGON	= 10,
+	HW_PLATFORM_QRD	= 11,
+	HW_PLATFORM_HRD	= 13,
+	HW_PLATFORM_DTV	= 14,
 	HW_PLATFORM_INVALID
 };
 
@@ -43,7 +58,10 @@
 	[HW_PLATFORM_SVLTE_SURF] = "SLVTE_SURF",
 	[HW_PLATFORM_MTP] = "MTP",
 	[HW_PLATFORM_LIQUID] = "Liquid",
-	[HW_PLATFORM_DRAGON] = "Dragon"
+	[HW_PLATFORM_DRAGON] = "Dragon",
+	[HW_PLATFORM_QRD] = "QRD",
+	[HW_PLATFORM_HRD] = "HRD",
+	[HW_PLATFORM_DTV] = "DTV",
 };
 
 enum {
@@ -66,6 +84,7 @@
 	[PLATFORM_SUBTYPE_STRANGE_2A] = "strange_2a,"
 };
 
+/* Used to parse shared memory.  Must match the modem. */
 struct socinfo_v1 {
 	uint32_t format;
 	uint32_t id;
@@ -76,7 +95,7 @@
 struct socinfo_v2 {
 	struct socinfo_v1 v1;
 
-	
+	/* only valid when format==2 */
 	uint32_t raw_id;
 	uint32_t raw_version;
 };
@@ -84,39 +103,49 @@
 struct socinfo_v3 {
 	struct socinfo_v2 v2;
 
-	
+	/* only valid when format==3 */
 	uint32_t hw_platform;
 };
 
 struct socinfo_v4 {
 	struct socinfo_v3 v3;
 
-	
+	/* only valid when format==4 */
 	uint32_t platform_version;
 };
 
 struct socinfo_v5 {
 	struct socinfo_v4 v4;
 
-	
+	/* only valid when format==5 */
 	uint32_t accessory_chip;
 };
 
 struct socinfo_v6 {
 	struct socinfo_v5 v5;
 
-	
+	/* only valid when format==6 */
 	uint32_t hw_platform_subtype;
 };
 
 struct socinfo_v7 {
 	struct socinfo_v6 v6;
 
-	
+	/* only valid when format==7 */
 	uint32_t pmic_model;
 	uint32_t pmic_die_revision;
 };
 
+struct socinfo_v8 {
+	struct socinfo_v7 v7;
+
+	/* only valid when format==8 */
+	uint32_t pmic_model_1;
+	uint32_t pmic_die_revision_1;
+	uint32_t pmic_model_2;
+	uint32_t pmic_die_revision_2;
+};
+
 static union {
 	struct socinfo_v1 v1;
 	struct socinfo_v2 v2;
@@ -125,11 +154,12 @@
 	struct socinfo_v5 v5;
 	struct socinfo_v6 v6;
 	struct socinfo_v7 v7;
+	struct socinfo_v8 v8;
 } *socinfo;
 
 static enum msm_cpu cpu_of_id[] = {
 
-	
+	/* 7x01 IDs */
 	[1]  = MSM_CPU_7X01,
 	[16] = MSM_CPU_7X01,
 	[17] = MSM_CPU_7X01,
@@ -143,135 +173,185 @@
 	[34] = MSM_CPU_7X01,
 	[35] = MSM_CPU_7X01,
 
-	
+	/* 7x25 IDs */
 	[20] = MSM_CPU_7X25,
-	[21] = MSM_CPU_7X25, 
-	[24] = MSM_CPU_7X25, 
-	[27] = MSM_CPU_7X25, 
+	[21] = MSM_CPU_7X25, /* 7225 */
+	[24] = MSM_CPU_7X25, /* 7525 */
+	[27] = MSM_CPU_7X25, /* 7625 */
 	[39] = MSM_CPU_7X25,
 	[40] = MSM_CPU_7X25,
 	[41] = MSM_CPU_7X25,
 	[42] = MSM_CPU_7X25,
-	[62] = MSM_CPU_7X25, 
-	[63] = MSM_CPU_7X25, 
-	[66] = MSM_CPU_7X25, 
+	[62] = MSM_CPU_7X25, /* 7625-1 */
+	[63] = MSM_CPU_7X25, /* 7225-1 */
+	[66] = MSM_CPU_7X25, /* 7225-2 */
 
 
-	
+	/* 7x27 IDs */
 	[43] = MSM_CPU_7X27,
 	[44] = MSM_CPU_7X27,
 	[61] = MSM_CPU_7X27,
-	[67] = MSM_CPU_7X27, 
-	[68] = MSM_CPU_7X27, 
-	[69] = MSM_CPU_7X27, 
+	[67] = MSM_CPU_7X27, /* 7227-1 */
+	[68] = MSM_CPU_7X27, /* 7627-1 */
+	[69] = MSM_CPU_7X27, /* 7627-2 */
 
 
-	
+	/* 8x50 IDs */
 	[30] = MSM_CPU_8X50,
 	[36] = MSM_CPU_8X50,
 	[37] = MSM_CPU_8X50,
 	[38] = MSM_CPU_8X50,
 
-	
+	/* 7x30 IDs */
 	[59] = MSM_CPU_7X30,
 	[60] = MSM_CPU_7X30,
 
-	
+	/* 8x55 IDs */
 	[74] = MSM_CPU_8X55,
 	[75] = MSM_CPU_8X55,
 	[85] = MSM_CPU_8X55,
 
-	
+	/* 8x60 IDs */
 	[70] = MSM_CPU_8X60,
 	[71] = MSM_CPU_8X60,
 	[86] = MSM_CPU_8X60,
 
-	
+	/* 8960 IDs */
 	[87] = MSM_CPU_8960,
 
-	
+	/* 7x25A IDs */
 	[88] = MSM_CPU_7X25A,
 	[89] = MSM_CPU_7X25A,
 	[96] = MSM_CPU_7X25A,
 
-	
+	/* 7x27A IDs */
 	[90] = MSM_CPU_7X27A,
 	[91] = MSM_CPU_7X27A,
 	[92] = MSM_CPU_7X27A,
 	[97] = MSM_CPU_7X27A,
 
-	
+	/* FSM9xxx ID */
 	[94] = FSM_CPU_9XXX,
 	[95] = FSM_CPU_9XXX,
 
-	
+	/*  7x25AA ID */
 	[98] = MSM_CPU_7X25AA,
 	[99] = MSM_CPU_7X25AA,
 	[100] = MSM_CPU_7X25AA,
 
-	
+	/*  7x27AA ID */
 	[101] = MSM_CPU_7X27AA,
 	[102] = MSM_CPU_7X27AA,
 	[103] = MSM_CPU_7X27AA,
 	[136] = MSM_CPU_7X27AA,
 
-	
+	/* 9x15 ID */
 	[104] = MSM_CPU_9615,
 	[105] = MSM_CPU_9615,
 	[106] = MSM_CPU_9615,
 	[107] = MSM_CPU_9615,
+	[171] = MSM_CPU_9615,
 
-	
+	/* 8064 IDs */
 	[109] = MSM_CPU_8064,
 
-	
+	/* 8930 IDs */
 	[116] = MSM_CPU_8930,
 	[117] = MSM_CPU_8930,
 	[118] = MSM_CPU_8930,
 	[119] = MSM_CPU_8930,
+	[179] = MSM_CPU_8930,
 
-	
+	/* 8627 IDs */
 	[120] = MSM_CPU_8627,
 	[121] = MSM_CPU_8627,
 
-	
+	/* 8660A ID */
 	[122] = MSM_CPU_8960,
 
-	
+	/* 8260A ID */
 	[123] = MSM_CPU_8960,
 
-	
+	/* 8060A ID */
 	[124] = MSM_CPU_8960,
 
-	
+	/* 8974 IDs */
 	[126] = MSM_CPU_8974,
+	[184] = MSM_CPU_8974,
 
-	
+	/* 8625 IDs */
 	[127] = MSM_CPU_8625,
 	[128] = MSM_CPU_8625,
 	[129] = MSM_CPU_8625,
 	[137] = MSM_CPU_8625,
+	[167] = MSM_CPU_8625,
 
-	
+	/* 8064 MPQ ID */
 	[130] = MSM_CPU_8064,
 
-	
+	/* 7x25AB IDs */
 	[131] = MSM_CPU_7X25AB,
 	[132] = MSM_CPU_7X25AB,
 	[133] = MSM_CPU_7X25AB,
 	[135] = MSM_CPU_7X25AB,
 
-	
+	/* 9625 IDs */
 	[134] = MSM_CPU_9625,
+	[148] = MSM_CPU_9625,
+	[149] = MSM_CPU_9625,
+	[150] = MSM_CPU_9625,
+	[151] = MSM_CPU_9625,
+	[152] = MSM_CPU_9625,
+	[173] = MSM_CPU_9625,
+	[174] = MSM_CPU_9625,
+	[175] = MSM_CPU_9625,
 
-	
+	/* 8960AB IDs */
+	[138] = MSM_CPU_8960AB,
+	[139] = MSM_CPU_8960AB,
+	[140] = MSM_CPU_8960AB,
+	[141] = MSM_CPU_8960AB,
+
+	/* 8930AA IDs */
 	[142] = MSM_CPU_8930AA,
 	[143] = MSM_CPU_8930AA,
 	[144] = MSM_CPU_8930AA,
+	[160] = MSM_CPU_8930AA,
+	[180] = MSM_CPU_8930AA,
 
-	
+	/* 8226 IDs */
+	[145] = MSM_CPU_8226,
+
+	/* 8092 IDs */
+	[146] = MSM_CPU_8092,
+
+	/* 8610 IDs */
+	[147] = MSM_CPU_8610,
+
+	/* 8064AB IDs */
 	[153] = MSM_CPU_8064AB,
 
+	/* 8930AB IDs */
+	[154] = MSM_CPU_8930AB,
+	[155] = MSM_CPU_8930AB,
+	[156] = MSM_CPU_8930AB,
+	[157] = MSM_CPU_8930AB,
+	[181] = MSM_CPU_8930AB,
+
+	/* 8625Q IDs */
+	[168] = MSM_CPU_8625Q,
+	[169] = MSM_CPU_8625Q,
+	[170] = MSM_CPU_8625Q,
+
+	/* 8064AA IDs */
+	[172] = MSM_CPU_8064AA,
+
+	/* zinc IDs */
+	[178] = MSM_CPU_ZINC,
+
+	/*
+	 * Uninitialized IDs are not known to run Linux.
+	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
+	 * treated as unknown CPUs.
+	 */
 };
 
 static enum msm_cpu cur_cpu;
@@ -326,6 +406,8 @@
 		: 0;
 }
 
+/*
+ * This information is directly encoded by the machine id, so no
+ * external callers rely on it at the moment.
+ */
 static uint32_t socinfo_get_accessory_chip(void)
 {
 	return socinfo ?
@@ -355,6 +437,11 @@
 		: 0;
 }
 
+static uint32_t socinfo_get_format(void)
+{
+	return socinfo ? socinfo->v1.format : 0;
+}
+
 enum msm_cpu socinfo_get_msm_cpu(void)
 {
 	return cur_cpu;
@@ -563,6 +650,100 @@
 		socinfo_get_pmic_die_revision());
 }
 
+static ssize_t
+msm_get_vendor(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "Qualcomm\n");
+}
+
+static ssize_t
+msm_get_raw_id(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_raw_id());
+}
+
+static ssize_t
+msm_get_raw_version(struct device *dev,
+		     struct device_attribute *attr,
+		     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_raw_version());
+}
+
+static ssize_t
+msm_get_build_id(struct device *dev,
+		   struct device_attribute *attr,
+		   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			socinfo_get_build_id());
+}
+
+static ssize_t
+msm_get_hw_platform(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_type;
+	hw_type = socinfo_get_platform_type();
+
+	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			hw_platform[hw_type]);
+}
+
+static ssize_t
+msm_get_platform_version(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_platform_version());
+}
+
+static ssize_t
+msm_get_accessory_chip(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_accessory_chip());
+}
+
+static ssize_t
+msm_get_platform_subtype(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_subtype;
+	hw_subtype = socinfo_get_platform_subtype();
+	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+		hw_platform_subtype[hw_subtype]);
+}
+
+static ssize_t
+msm_get_pmic_model(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_pmic_model());
+}
+
+static ssize_t
+msm_get_pmic_die_revision(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			 socinfo_get_pmic_die_revision());
+}
+
 static struct sysdev_attribute socinfo_v1_files[] = {
 	_SYSDEV_ATTR(id, 0444, socinfo_show_id, NULL),
 	_SYSDEV_ATTR(version, 0444, socinfo_show_version, NULL),
@@ -600,6 +781,42 @@
 			socinfo_show_pmic_die_revision, NULL),
 };
 
+static struct device_attribute msm_soc_attr_raw_version =
+	__ATTR(raw_version, S_IRUGO, msm_get_raw_version,  NULL);
+
+static struct device_attribute msm_soc_attr_raw_id =
+	__ATTR(raw_id, S_IRUGO, msm_get_raw_id,  NULL);
+
+static struct device_attribute msm_soc_attr_vendor =
+	__ATTR(vendor, S_IRUGO, msm_get_vendor,  NULL);
+
+static struct device_attribute msm_soc_attr_build_id =
+	__ATTR(build_id, S_IRUGO, msm_get_build_id, NULL);
+
+static struct device_attribute msm_soc_attr_hw_platform =
+	__ATTR(hw_platform, S_IRUGO, msm_get_hw_platform, NULL);
+
+static struct device_attribute msm_soc_attr_platform_version =
+	__ATTR(platform_version, S_IRUGO,
+			msm_get_platform_version, NULL);
+
+static struct device_attribute msm_soc_attr_accessory_chip =
+	__ATTR(accessory_chip, S_IRUGO,
+			msm_get_accessory_chip, NULL);
+
+static struct device_attribute msm_soc_attr_platform_subtype =
+	__ATTR(platform_subtype, S_IRUGO,
+			msm_get_platform_subtype, NULL);
+
+static struct device_attribute msm_soc_attr_pmic_model =
+	__ATTR(pmic_model, S_IRUGO,
+			msm_get_pmic_model, NULL);
+
+static struct device_attribute msm_soc_attr_pmic_die_revision =
+	__ATTR(pmic_die_revision, S_IRUGO,
+			msm_get_pmic_die_revision, NULL);
+
 static struct sysdev_class soc_sysdev_class = {
 	.name = "soc",
 };
@@ -625,15 +842,110 @@
 	return 0;
 }
 
+static void * __init setup_dummy_socinfo(void)
+{
+	if (early_machine_is_mpq8092()) {
+		dummy_socinfo.id = 146;
+		strlcpy(dummy_socinfo.build_id, "mpq8092 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8610()) {
+		dummy_socinfo.id = 147;
+		strlcpy(dummy_socinfo.build_id, "msm8610 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmzinc()) {
+		dummy_socinfo.id = 178;
+		strlcpy(dummy_socinfo.build_id, "msmzinc - ",
+			sizeof(dummy_socinfo.build_id));
+	}
+	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
+		sizeof(dummy_socinfo.build_id));
+	return (void *) &dummy_socinfo;
+}
+
+static void __init populate_soc_sysfs_files(struct device *msm_soc_device)
+{
+	uint32_t legacy_format = socinfo_get_format();
+
+	device_create_file(msm_soc_device, &msm_soc_attr_vendor);
+
+	switch (legacy_format) {
+	case 8:
+	case 7:
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_pmic_model);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_pmic_die_revision);
+	case 6:
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_platform_subtype);
+	case 5:
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_accessory_chip);
+	case 4:
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_platform_version);
+	case 3:
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_hw_platform);
+	case 2:
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_id);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_version);
+	case 1:
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_build_id);
+		break;
+	default:
+		pr_err("%s: Unknown socinfo format: %u\n", __func__,
+				legacy_format);
+		break;
+	}
+}
+
+static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr)
+{
+	uint32_t soc_version = socinfo_get_version();
+
+	soc_dev_attr->soc_id   = kasprintf(GFP_KERNEL, "%d", socinfo_get_id());
+	soc_dev_attr->machine  = "Snapdragon";
+	soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u.%u",
+			SOCINFO_VERSION_MAJOR(soc_version),
+			SOCINFO_VERSION_MINOR(soc_version));
+}
+
 static int __init socinfo_init_sysdev(void)
 {
 	int err;
+	struct device *msm_soc_device;
+	struct soc_device *soc_dev;
+	struct soc_device_attribute *soc_dev_attr;
 
 	if (!socinfo) {
 		pr_err("%s: No socinfo found!\n", __func__);
 		return -ENODEV;
 	}
 
+	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+	if (!soc_dev_attr) {
+		pr_err("%s: Soc Device alloc failed!\n", __func__);
+		return -ENOMEM;
+	}
+
+	soc_info_populate(soc_dev_attr);
+	soc_dev = soc_device_register(soc_dev_attr);
+	if (IS_ERR_OR_NULL(soc_dev)) {
+		kfree(soc_dev_attr);
+		pr_err("%s: Soc device register failed\n", __func__);
+		return -EIO;
+	}
+
+	msm_soc_device = soc_device_to_device(soc_dev);
+	populate_soc_sysfs_files(msm_soc_device);
 	err = sysdev_class_register(&soc_sysdev_class);
 	if (err) {
 		pr_err("%s: sysdev_class_register fail (%d)\n",
@@ -680,77 +992,16 @@
 	if (socinfo->v1.format < 7)
 		return err;
 
-	return socinfo_create_files(&soc_sys_device, socinfo_v7_files,
+	socinfo_create_files(&soc_sys_device, socinfo_v7_files,
 				ARRAY_SIZE(socinfo_v7_files));
+
+	return 0;
 }
 
 arch_initcall(socinfo_init_sysdev);
 
-static void * __init setup_dummy_socinfo(void)
+static void socinfo_print(void)
 {
-	if (machine_is_msm8960_rumi3() || machine_is_msm8960_sim() ||
-	    machine_is_msm8960_cdp())
-		dummy_socinfo.id = 87;
-	else if (machine_is_apq8064_rumi3() || machine_is_apq8064_sim())
-		dummy_socinfo.id = 109;
-	else if (machine_is_msm9615_mtp() || machine_is_msm9615_cdp())
-		dummy_socinfo.id = 104;
-	else if (early_machine_is_msm8974()) {
-		dummy_socinfo.id = 126;
-		strlcpy(dummy_socinfo.build_id, "msm8974 - ",
-			sizeof(dummy_socinfo.build_id));
-	} else if (early_machine_is_msm9625()) {
-		dummy_socinfo.id = 134;
-		strlcpy(dummy_socinfo.build_id, "msm9625 - ",
-			sizeof(dummy_socinfo.build_id));
-	} else if (machine_is_msm8625_rumi3())
-		dummy_socinfo.id = 127;
-	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
-		sizeof(dummy_socinfo.build_id));
-	return (void *) &dummy_socinfo;
-}
-
-int __init socinfo_init(void)
-{
-	socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID, sizeof(struct socinfo_v7));
-
-	if (!socinfo)
-		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
-				sizeof(struct socinfo_v6));
-
-	if (!socinfo)
-		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
-				sizeof(struct socinfo_v5));
-
-	if (!socinfo)
-		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
-				sizeof(struct socinfo_v4));
-
-	if (!socinfo)
-		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
-				sizeof(struct socinfo_v3));
-
-	if (!socinfo)
-		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
-				sizeof(struct socinfo_v2));
-
-	if (!socinfo)
-		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
-				sizeof(struct socinfo_v1));
-
-	if (!socinfo) {
-		pr_warn("%s: Can't find SMEM_HW_SW_BUILD_ID; falling back on "
-			"dummy values.\n", __func__);
-		socinfo = setup_dummy_socinfo();
-	}
-
-	WARN(!socinfo_get_id(), "Unknown SOC ID!\n");
-	WARN(socinfo_get_id() >= ARRAY_SIZE(cpu_of_id),
-		"New IDs added! ID => CPU mapping might need an update.\n");
-
-	if (socinfo->v1.id < ARRAY_SIZE(cpu_of_id))
-		cur_cpu = cpu_of_id[socinfo->v1.id];
-
 	switch (socinfo->v1.format) {
 	case 1:
 		pr_info("%s: v%u, id=%u, ver=%u.%u\n",
@@ -808,6 +1059,7 @@
 			socinfo->v5.accessory_chip,
 			socinfo->v6.hw_platform_subtype);
 		break;
+	case 8:
 	case 7:
 		pr_info("%s: v%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u\n",
 			__func__,
@@ -826,6 +1078,54 @@
 		pr_err("%s: Unknown format found\n", __func__);
 		break;
 	}
+}
+
+int __init socinfo_init(void)
+{
+	socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID, sizeof(struct socinfo_v8));
+
+	if (!socinfo)
+		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
+				sizeof(struct socinfo_v7));
+
+	if (!socinfo)
+		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
+				sizeof(struct socinfo_v6));
+
+	if (!socinfo)
+		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
+				sizeof(struct socinfo_v5));
+
+	if (!socinfo)
+		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
+				sizeof(struct socinfo_v4));
+
+	if (!socinfo)
+		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
+				sizeof(struct socinfo_v3));
+
+	if (!socinfo)
+		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
+				sizeof(struct socinfo_v2));
+
+	if (!socinfo)
+		socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
+				sizeof(struct socinfo_v1));
+
+	if (!socinfo) {
+		pr_warn("%s: Can't find SMEM_HW_SW_BUILD_ID; falling back on dummy values.\n",
+				__func__);
+		socinfo = setup_dummy_socinfo();
+	}
+
+	WARN(!socinfo_get_id(), "Unknown SOC ID!\n");
+	WARN(socinfo_get_id() >= ARRAY_SIZE(cpu_of_id),
+		"New IDs added! ID => CPU mapping might need an update.\n");
+
+	if (socinfo->v1.id < ARRAY_SIZE(cpu_of_id))
+		cur_cpu = cpu_of_id[socinfo->v1.id];
+
+	socinfo_print();
 
 	return 0;
 }
@@ -835,20 +1135,15 @@
 	if (!(read_cpuid_mpidr() & BIT(31)))
 		return 1;
 
-	if (read_cpuid_mpidr() & BIT(30) &&
-		!machine_is_msm8960_sim() &&
-		!machine_is_apq8064_sim())
+	if (read_cpuid_mpidr() & BIT(30))
 		return 1;
 
-	
+	/* 1 + the PART[1:0] field of MIDR */
 	return ((read_cpuid_id() >> 4) & 3) + 1;
 }
 
 const int read_msm_cpu_type(void)
 {
-	if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3())
-		return MSM_CPU_8960;
-
 	if (socinfo_get_msm_cpu() != MSM_CPU_UNKNOWN)
 		return socinfo_get_msm_cpu();
 
@@ -865,17 +1160,27 @@
 	case 0x512F04D0:
 		return MSM_CPU_8960;
 
-	case 0x51404D11: 
+	case 0x51404D11: /* We can't get here unless we are in bringup */
 		return MSM_CPU_8930;
 
 	case 0x510F06F0:
 		return MSM_CPU_8064;
 
+	case 0x511F06F1:
+	case 0x511F06F2:
+	case 0x512F06F0:
+		return MSM_CPU_8974;
+
 	default:
 		return MSM_CPU_UNKNOWN;
 	};
 }
 
+const int cpu_is_krait(void)
+{
+	return ((read_cpuid_id() & 0xFF00FC00) == 0x51000400);
+}
+
 const int cpu_is_krait_v1(void)
 {
 	switch (read_cpuid_id()) {
@@ -889,12 +1194,32 @@
 	};
 }
 
+const int cpu_is_krait_v2(void)
+{
+	switch (read_cpuid_id()) {
+	case 0x511F04D0:
+	case 0x511F04D1:
+	case 0x511F04D2:
+	case 0x511F04D3:
+	case 0x511F04D4:
+
+	case 0x510F06F0:
+	case 0x510F06F1:
+	case 0x510F06F2:
+		return 1;
+
+	default:
+		return 0;
+	};
+}
+
 const int cpu_is_krait_v3(void)
 {
 	switch (read_cpuid_id()) {
 	case 0x512F04D0:
 	case 0x511F06F0:
 	case 0x511F06F1:
+	case 0x511F06F2:
 	case 0x510F05D0:
 		return 1;
 
diff --git a/arch/arm/mach-msm/subsystem_map.c b/arch/arm/mach-msm/subsystem_map.c
index 27382f1..0354ffc 100644
--- a/arch/arm/mach-msm/subsystem_map.c
+++ b/arch/arm/mach-msm/subsystem_map.c
@@ -403,7 +403,7 @@
 
 			if (flags & MSM_SUBSYSTEM_MAP_IOMMU_2X)
 				msm_iommu_map_extra
-					(d, temp_va, length, SZ_4K,
+					(d, temp_va, phys, length, SZ_4K,
 					(IOMMU_READ | IOMMU_WRITE));
 		}
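
The caller-side change is mechanical but worth spelling out: the padding physical address now comes from the client instead of the old static iommu_dummy buffer. A hedged sketch of the 2X-padding call as used above (names illustrative):

#include <asm/sizes.h>
#include <linux/iommu.h>
#include <mach/iommu_domains.h>

/* Back 'length' bytes of read-only padding at 'extra_va' with one dummy
 * page owned by the client; IOMMU_WRITE is stripped inside the callee. */
static int example_map_2x_padding(struct iommu_domain *d,
				  unsigned long extra_va, phys_addr_t phys,
				  unsigned long length)
{
	return msm_iommu_map_extra(d, extra_va, phys, length, SZ_4K,
				   IOMMU_READ | IOMMU_WRITE);
}
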
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cf378a3..afaa39d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -21,6 +21,8 @@
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 #include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/vmalloc.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -31,9 +33,109 @@
 #include <asm/mach/map.h>
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
+#include <asm/dma-iommu.h>
 
 #include "mm.h"
 
+/*
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
+ *
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ */
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+/**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed.  The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+/**
+ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Unmap a page streaming mode DMA translation.  The handle and size
+ * must match what was provided in the previous dma_map_page() call.
+ * All other usages are undefined.
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+				      handle & ~PAGE_MASK, size, dir);
+}
+
+static void arm_dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned int offset = handle & (PAGE_SIZE - 1);
+	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+struct dma_map_ops arm_dma_ops = {
+	.alloc			= arm_dma_alloc,
+	.free			= arm_dma_free,
+	.mmap			= arm_dma_mmap,
+	.map_page		= arm_dma_map_page,
+	.unmap_page		= arm_dma_unmap_page,
+	.map_sg			= arm_dma_map_sg,
+	.unmap_sg		= arm_dma_unmap_sg,
+	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
+	.sync_single_for_device	= arm_dma_sync_single_for_device,
+	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.set_dma_mask		= arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_dma_ops);
+
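
From a driver's point of view, the ownership model in the comment above surfaces as the two transition points of the streaming API; a sketch for a device that writes into a kernel buffer (error handling elided):

#include <linux/dma-mapping.h>

static void example_stream_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: the buffer is handed to the device for writing */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* ... device fills the buffer ... */

	/* device -> CPU: stale lines speculatively fetched during the
	 * transfer are invalidated here, so CPU reads see device data */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}
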
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)arm_dma_limit;
@@ -41,6 +143,10 @@
 	if (dev) {
 		mask = dev->coherent_dma_mask;
 
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
 		if (mask == 0) {
 			dev_warn(dev, "coherent DMA mask is unset\n");
 			return 0;
@@ -60,12 +166,22 @@
 static void __dma_clear_buffer(struct page *page, size_t size)
 {
 	void *ptr;
+	/*
+	 * Ensure that the allocated pages are zeroed, and that any data
+	 * lurking in the kernel direct-mapped region is invalidated.
+	 */
 	ptr = page_address(page);
-	memset(ptr, 0, size);
-	dmac_flush_range(ptr, ptr + size);
-	outer_flush_range(__pa(ptr), __pa(ptr) + size);
+	if (ptr) {
+		memset(ptr, 0, size);
+		dmac_flush_range(ptr, ptr + size);
+		outer_flush_range(__pa(ptr), __pa(ptr) + size);
+	}
 }
 
+/*
+ * Allocate a DMA buffer for 'dev' of size 'size' using the
+ * specified gfp mask.  Note that 'size' must be page aligned.
+ */
 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
 {
 	unsigned long order = get_order(size);
@@ -75,6 +191,9 @@
 	if (!page)
 		return NULL;
 
+	/*
+	 * Now split the huge page and free the excess pages
+	 */
 	split_page(page, order);
 	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
 		__free_page(p);
@@ -84,6 +203,9 @@
 	return page;
 }
 
+/*
+ * Free a DMA buffer.  'size' must be page aligned.
+ */
 static void __dma_free_buffer(struct page *page, size_t size)
 {
 	struct page *e = page + (size >> PAGE_SHIFT);
@@ -99,20 +221,23 @@
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
 
+/*
+ * These are the page tables (2MB each) covering uncached, DMA consistent allocations
+ */
 static pte_t **consistent_pte;
 
 #define DEFAULT_CONSISTENT_DMA_SIZE (7*SZ_2M)
 
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
 
 void __init init_consistent_dma_size(unsigned long size)
 {
 	unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
 
-	BUG_ON(consistent_pte); 
+	BUG_ON(consistent_pte); /* Check we're called before DMA region init */
 	BUG_ON(base < VMALLOC_END);
 
-	
+	/* Grow the region to accommodate the specified size */
 	if (base < consistent_base)
 		consistent_base = base;
 }
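
A platform that needs a larger consistent region must call this before
consistent_init() runs; a minimal sketch (hypothetical board code, growing
the region from the 14MB default to 16MB):

	static void __init foo_map_io(void)
	{
		init_consistent_dma_size(SZ_16M);
		/* ... iotable_init() and friends ... */
	}
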
@@ -129,6 +254,9 @@
 #error ARM Coherent DMA allocator does not (yet) support huge TLB
 #endif
 
+/*
+ * Initialise the consistent memory allocation.
+ */
 static int __init consistent_init(void)
 {
 	int ret = 0;
@@ -157,14 +285,14 @@
 
 		pud = pud_alloc(&init_mm, pgd, base);
 		if (!pud) {
-			printk(KERN_ERR "%s: no pud tables\n", __func__);
+			pr_err("%s: no pud tables\n", __func__);
 			ret = -ENOMEM;
 			break;
 		}
 
 		pmd = pmd_alloc(&init_mm, pud, base);
 		if (!pmd) {
-			printk(KERN_ERR "%s: no pmd tables\n", __func__);
+			pr_err("%s: no pmd tables\n", __func__);
 			ret = -ENOMEM;
 			break;
 		}
@@ -172,7 +300,7 @@
 
 		pte = pte_alloc_kernel(pmd, base);
 		if (!pte) {
-			printk(KERN_ERR "%s: no pte tables\n", __func__);
+			pr_err("%s: no pte tables\n", __func__);
 			ret = -ENOMEM;
 			break;
 		}
@@ -186,14 +314,15 @@
 core_initcall(consistent_init);
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-				     pgprot_t prot, struct page **ret_page);
+				     pgprot_t prot, struct page **ret_page,
+				     bool no_kernel_mapping);
 
 static struct arm_vmregion_head coherent_head = {
 	.vm_lock	= __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
 	.vm_list	= LIST_HEAD_INIT(coherent_head.vm_list),
 };
 
-size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
 
 static int __init early_coherent_pool(char *p)
 {
@@ -202,6 +331,9 @@
 }
 early_param("coherent_pool", early_coherent_pool);
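
Assuming the handler parses a size with memparse(), as early parameters
conventionally do, the atomic pool can be resized from the command line
without a rebuild, e.g.:

	coherent_pool=4M

which replaces the DEFAULT_CONSISTENT_DMA_SIZE / 8 (1.75MB) default.
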
 
+/*
+ * Initialise the coherent pool for atomic allocations.
+ */
 static int __init coherent_init(void)
 {
 	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
@@ -212,7 +344,7 @@
 	if (!IS_ENABLED(CONFIG_CMA))
 		return 0;
 
-	ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+	ptr = __alloc_from_contiguous(NULL, size, prot, &page, false);
 	if (ptr) {
 		coherent_head.vm_start = (unsigned long) ptr;
 		coherent_head.vm_end = (unsigned long) ptr + size;
@@ -224,6 +356,9 @@
 	       (unsigned)size / 1024);
 	return -ENOMEM;
 }
+/*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
 postcore_initcall(coherent_init);
 
 struct dma_contig_early_reserve {
@@ -261,8 +396,11 @@
 		map.length = end - start;
 		map.type = MT_MEMORY_DMA_READY;
 
+		/*
+		 * Clear previous low-memory mapping
+		 */
 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
-		     addr += PGDIR_SIZE)
+		     addr += PMD_SIZE)
 			pmd_clear(pmd_off_k(addr));
 
 		iotable_init(&map, 1);
@@ -278,16 +416,25 @@
 	int bit;
 
 	if (!consistent_pte) {
-		printk(KERN_ERR "%s: not initialised\n", __func__);
+		pr_err("%s: not initialised\n", __func__);
 		dump_stack();
 		return NULL;
 	}
 
+	/*
+	 * Align the virtual region allocation - maximum alignment is
+	 * a section size, minimum is a page size.  This helps reduce
+	 * fragmentation of the DMA space, and also prevents allocations
+	 * smaller than a section from crossing a section boundary.
+	 */
 	bit = fls(size - 1);
 	if (bit > SECTION_SHIFT)
 		bit = SECTION_SHIFT;
 	align = 1 << bit;
 
+	/*
+	 * Allocate a virtual address in the consistent mapping region.
+	 */
 	c = arm_vmregion_alloc(&consistent_head, align, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
 	if (c) {
@@ -296,7 +443,7 @@
 		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
 		pte = consistent_pte[idx] + off;
-		c->vm_pages = page;
+		c->priv = page;
 
 		do {
 			BUG_ON(!pte_none(*pte));
@@ -328,14 +475,14 @@
 
 	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
 	if (!c) {
-		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+		pr_err("%s: trying to free invalid coherent area: %p\n",
 		       __func__, cpu_addr);
 		dump_stack();
 		return;
 	}
 
 	if ((c->vm_end - c->vm_start) != size) {
-		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
 		       __func__, c->vm_end - c->vm_start, size);
 		dump_stack();
 		size = c->vm_end - c->vm_start;
@@ -357,8 +504,8 @@
 		}
 
 		if (pte_none(pte) || !pte_present(pte))
-			printk(KERN_CRIT "%s: bad page in kernel page table\n",
-			       __func__);
+			pr_crit("%s: bad page in kernel page table\n",
+				__func__);
 	} while (size -= PAGE_SIZE);
 
 	flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -376,12 +523,27 @@
 	return 0;
 }
 
-static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	pte_clear(&init_mm, addr, pte);
+	return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
+			bool no_kernel_map)
 {
 	unsigned long start = (unsigned long) page_address(page);
 	unsigned end = start + size;
+	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data);
 
-	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+	if (no_kernel_map)
+		func = __dma_clear_pte;
+	else
+		func = __dma_update_pte;
+
+	apply_to_page_range(&init_mm, start, size, func, &prot);
 	dsb();
 	flush_tlb_kernel_range(start, end);
 }
@@ -419,6 +581,11 @@
 		return NULL;
 	}
 
+	/*
+	 * Align the region allocation - allocations from pool are rather
+	 * small, so align them to their order in pages, minimum is a page
+	 * size. This helps reduce fragmentation of the DMA space.
+	 */
 	align = PAGE_SIZE << get_order(size);
 	c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
 	if (c) {
@@ -453,7 +620,8 @@
 }
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-				     pgprot_t prot, struct page **ret_page)
+				     pgprot_t prot, struct page **ret_page,
+				     bool no_kernel_mapping)
 {
 	unsigned long order = get_order(size);
 	size_t count = size >> PAGE_SHIFT;
@@ -464,7 +632,7 @@
 		return NULL;
 
 	__dma_clear_buffer(page, size);
-	__dma_remap(page, size, prot);
+	__dma_remap(page, size, prot, no_kernel_mapping);
 
 	*ret_page = page;
 	return page_address(page);
@@ -473,24 +641,38 @@
 static void __free_from_contiguous(struct device *dev, struct page *page,
 				   size_t size)
 {
-	__dma_remap(page, size, pgprot_kernel);
+	__dma_remap(page, size, pgprot_kernel, false);
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
+static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+{
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		prot = pgprot_writecombine(prot);
+	else if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+		prot = pgprot_stronglyordered(prot);
+	/* if non-consistent just pass back what was given */
+	else if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+		prot = pgprot_dmacoherent(prot);
+
+	return prot;
+}
+
 #define nommu() 0
 
-#else	
+#else	/* !CONFIG_MMU */
 
 #define nommu() 1
 
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
 #define __alloc_from_pool(dev, size, ret_page, c)		NULL
-#define __alloc_from_contiguous(dev, size, prot, ret)		NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, w)	NULL
 #define __free_from_pool(cpu_addr, size)			0
 #define __free_from_contiguous(dev, page, size)			do { } while (0)
 #define __dma_free_remap(cpu_addr, size)			do { } while (0)
+#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
 
-#endif	
+#endif	/* CONFIG_MMU */
 
 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 				   struct page **ret_page)
@@ -507,7 +689,8 @@
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp, pgprot_t prot, const void *caller)
+			 gfp_t gfp, pgprot_t prot, const void *caller,
+			 bool no_kernel_mapping)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page;
@@ -528,9 +711,16 @@
 	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 
+	/*
+	 * Following is a work-around (a.k.a. hack) to prevent pages
+	 * with __GFP_COMP being passed to split_page() which cannot
+	 * handle them.  The real problem is that this flag probably
+	 * should be 0 on ARM as it is not supported on this
+	 * platform; see CONFIG_HUGETLBFS.
+	 */
 	gfp &= ~(__GFP_COMP);
 
-	*handle = ~0;
+	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
 	if (arch_is_coherent() || nommu())
@@ -540,7 +730,8 @@
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(dev, size, &page, caller);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page);
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+						no_kernel_mapping);
 
 	if (addr)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -548,62 +739,54 @@
 	return addr;
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp)
+/*
+ * Allocate DMA-coherent memory space and return both the kernel remapped
+ * virtual and bus address for that space.
+ */
+void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		    gfp_t gfp, struct dma_attrs *attrs)
 {
+	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
 	void *memory;
+	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+					attrs);
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_dmacoherent(pgprot_kernel),
-			   __builtin_return_address(0));
+	return __dma_alloc(dev, size, handle, gfp, prot,
+			   __builtin_return_address(0), no_kernel_mapping);
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void *
-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
-{
-	return __dma_alloc(dev, size, handle, gfp,
-			   pgprot_writecombine(pgprot_kernel),
-			   __builtin_return_address(0));
-}
-EXPORT_SYMBOL(dma_alloc_writecombine);
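
Callers of the removed dma_alloc_writecombine() now express the memory
type as an attribute instead; a minimal sketch (hypothetical caller,
assuming the dma_alloc_attrs() wrapper that HAVE_DMA_ATTRS provides):

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	static void *foo_alloc_wc(struct device *dev, size_t size,
				  dma_addr_t *handle)
	{
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
	}
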
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		 struct dma_attrs *attrs)
 {
 	int ret = -ENXIO;
 #ifdef CONFIG_MMU
 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
 	ret = remap_pfn_range(vma, vma->vm_start,
 			      pfn + vma->vm_pgoff,
 			      vma->vm_end - vma->vm_start,
 			      vma->vm_page_prot);
-#endif	
+#endif	/* CONFIG_MMU */
 
 	return ret;
 }
 
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
-		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
-	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
-
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+/*
+ * Free a buffer as defined by the above mapping.
+ */
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle, struct dma_attrs *attrs)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
@@ -620,56 +803,24 @@
 	} else {
 		if (__free_from_pool(cpu_addr, size))
 			return;
+		/*
+		 * Non-atomic allocations cannot be freed with IRQs disabled
+		 */
 		WARN_ON(irqs_disabled());
 		__free_from_contiguous(dev, page, size);
 	}
 }
-EXPORT_SYMBOL(dma_free_coherent);
-
-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-#ifdef CONFIG_OUTER_CACHE
-	unsigned long paddr;
-
-	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-#endif
-
-	dmac_map_area(kaddr, size, dir);
-
-#ifdef CONFIG_OUTER_CACHE
-	paddr = __pa(kaddr);
-	if (dir == DMA_FROM_DEVICE) {
-		outer_inv_range(paddr, paddr + size);
-	} else {
-		outer_clean_range(paddr, paddr + size);
-	}
-#endif
-	
-}
-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-
-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-#ifdef CONFIG_OUTER_CACHE
-	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
-
-	
-	
-	if (dir != DMA_TO_DEVICE) {
-		unsigned long paddr = __pa(kaddr);
-		outer_inv_range(paddr, paddr + size);
-	}
-#endif
-	dmac_unmap_area(kaddr, size, dir);
-}
-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
 
 static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
+	/*
+	 * A single sg entry may refer to multiple physically contiguous
+	 * pages.  But we still need to process highmem pages individually.
+	 * If highmem is not configured then the bulk of this loop gets
+	 * optimized out.
+	 */
 	size_t left = size;
 	do {
 		size_t len = left;
@@ -689,7 +840,7 @@
 				op(vaddr, len, dir);
 				kunmap_high(page);
 			} else if (cache_is_vipt()) {
-				
+				/* unmapped pages might still be cached */
 				vaddr = kmap_atomic(page);
 				op(vaddr + offset, len, dir);
 				kunmap_atomic(vaddr);
@@ -704,7 +855,13 @@
 	} while (left);
 }
 
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr;
@@ -717,102 +874,134 @@
 	} else {
 		outer_clean_range(paddr, paddr + size);
 	}
-	
+	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
 
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr = page_to_phys(page) + off;
 
-	
-	
+	/* FIXME: non-speculating: not required */
+	/* don't bother invalidating if DMA to device */
 	if (dir != DMA_TO_DEVICE)
 		outer_inv_range(paddr, paddr + size);
 
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
 
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
 	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
 		set_bit(PG_dcache_clean, &page->flags);
 }
-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+/**
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the dma_map_single interface.
+ * Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length.  They are obtained via
+ * sg_dma_{address,length}.
+ *
+ * Device ownership issues as mentioned for dma_map_single are the same
+ * here.
+ */
+int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i, j;
 
-	BUG_ON(!valid_dma_direction(dir));
-
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
-						s->length, dir);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		s->dma_length = s->length;
+#endif
+		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+						s->length, dir, attrs);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
-	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
  bad_mapping:
 	for_each_sg(sg, s, i, j)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 	return 0;
 }
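
Driver usage is unchanged by the indirection through ops->map_page; a
sketch (hypothetical driver, where foo_hw_queue() stands in for whatever
programs the device's DMA engine):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	void foo_hw_queue(dma_addr_t addr, unsigned int len);	/* hypothetical */

	static int foo_submit(struct device *dev, struct scatterlist *sgl,
			      int nents)
	{
		struct scatterlist *s;
		int i, mapped;

		mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
		if (!mapped)		/* 0 means the whole list failed */
			return -ENOMEM;

		for_each_sg(sgl, s, mapped, i)
			foo_hw_queue(sg_dma_address(s), sg_dma_len(s));

		/* ... once the transfer has completed ... */
		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
		return 0;
	}
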
-EXPORT_SYMBOL(dma_map_sg);
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir)
+/**
+ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, struct dma_attrs *attrs)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-
 	for_each_sg(sg, s, nents, i)
-		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
 }
-EXPORT_SYMBOL(dma_unmap_sg);
 
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+/**
+ * arm_dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
-					    sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_dev_to_cpu(sg_page(s), s->offset,
-				      s->length, dir);
-	}
-
-	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
+					 dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+/**
+ * arm_dma_sync_sg_for_device
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			int nents, enum dma_data_direction dir)
 {
+	struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
 	int i;
 
-	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
-					sg_dma_len(s), dir))
-			continue;
-
-		__dma_page_cpu_to_dev(sg_page(s), s->offset,
-				      s->length, dir);
-	}
-
-	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+	for_each_sg(sg, s, nents, i)
+		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
+					    dir);
 }
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
 int dma_supported(struct device *dev, u64 mask)
 {
 	if (mask < (u64)arm_dma_limit)
@@ -821,18 +1010,15 @@
 }
 EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 dma_mask)
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
 		return -EIO;
 
-#ifndef CONFIG_DMABOUNCE
 	*dev->dma_mask = dma_mask;
-#endif
 
 	return 0;
 }
-EXPORT_SYMBOL(dma_set_mask);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
 
@@ -845,3 +1031,679 @@
 	return 0;
 }
 fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+
+/* IOMMU */
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+				      size_t size)
+{
+	unsigned int order = get_order(size);
+	unsigned int align = 0;
+	unsigned int count, start;
+	unsigned long flags;
+
+	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+		 (1 << mapping->order) - 1) >> mapping->order;
+
+	if (order > mapping->order)
+		align = (1 << (order - mapping->order)) - 1;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+					   count, align);
+	if (start > mapping->bits) {
+		spin_unlock_irqrestore(&mapping->lock, flags);
+		return DMA_ERROR_CODE;
+	}
+
+	bitmap_set(mapping->bitmap, start, count);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+			       dma_addr_t addr, size_t size)
+{
+	unsigned int start = (addr - mapping->base) >>
+			     (mapping->order + PAGE_SHIFT);
+	unsigned int count = ((size >> PAGE_SHIFT) +
+			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	bitmap_clear(mapping->bitmap, start, count);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
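
The order/count arithmetic is easiest to follow with numbers; a worked
example, assuming 4K pages and mapping->order = 1 (one bitmap bit per 8K
of IO space):

	/*
	 * __alloc_iova(mapping, 20K):
	 *   PAGE_ALIGN(20K) >> PAGE_SHIFT = 5 pages
	 *   count = (5 + (1 << 1) - 1) >> 1 = 3 bits    -> 24K reserved
	 *   get_order(20K) = 3 > mapping->order, so
	 *   align = (1 << (3 - 1)) - 1 = 3              -> the area starts on
	 *                                                  a 4-bit (32K)
	 *                                                  boundary
	 */
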
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+	struct page **pages;
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i = 0;
+
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+	if (!pages)
+		return NULL;
+
+	while (count) {
+		int j, order = __fls(count);
+
+		pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+		while (!pages[i] && order)
+			pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+		if (!pages[i])
+			goto error;
+
+		if (order)
+			split_page(pages[i], order);
+		j = 1 << order;
+		while (--j)
+			pages[i + j] = pages[i] + j;
+
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
+		i += 1 << order;
+		count -= 1 << order;
+	}
+
+	return pages;
+error:
+	while (i--)
+		if (pages[i])
+			__free_pages(pages[i], 0);
+	if (array_size <= PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+{
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i;
+	for (i = 0; i < count; i++)
+		if (pages[i])
+			__free_pages(pages[i], 0);
+	if (array_size <= PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
+{
+	struct arm_vmregion *c;
+	size_t align;
+	size_t count = size >> PAGE_SHIFT;
+	int bit;
+
+	if (!consistent_pte[0]) {
+		pr_err("%s: not initialised\n", __func__);
+		dump_stack();
+		return NULL;
+	}
+
+	/*
+	 * Align the virtual region allocation - maximum alignment is
+	 * a section size, minimum is a page size.  This helps reduce
+	 * fragmentation of the DMA space, and also prevents allocations
+	 * smaller than a section from crossing a section boundary.
+	 */
+	bit = fls(size - 1);
+	if (bit > SECTION_SHIFT)
+		bit = SECTION_SHIFT;
+	align = 1 << bit;
+
+	/*
+	 * Allocate a virtual address in the consistent mapping region.
+	 */
+	c = arm_vmregion_alloc(&consistent_head, align, size,
+			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+	if (c) {
+		pte_t *pte;
+		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+		int i = 0;
+		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+		pte = consistent_pte[idx] + off;
+		c->priv = pages;
+
+		do {
+			BUG_ON(!pte_none(*pte));
+
+			set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+			pte++;
+			off++;
+			i++;
+			if (off >= PTRS_PER_PTE) {
+				off = 0;
+				pte = consistent_pte[++idx];
+			}
+		} while (i < count);
+
+		dsb();
+
+		return (void *)c->vm_start;
+	}
+	return NULL;
+}
+
+/*
+ * Create a mapping in device IO address space for the specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	dma_addr_t dma_addr, iova;
+	int i, ret = DMA_ERROR_CODE;
+
+	dma_addr = __alloc_iova(mapping, size);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	iova = dma_addr;
+	for (i = 0; i < count; ) {
+		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+		phys_addr_t phys = page_to_phys(pages[i]);
+		unsigned int len, j;
+
+		for (j = i + 1; j < count; j++, next_pfn++)
+			if (page_to_pfn(pages[j]) != next_pfn)
+				break;
+
+		len = (j - i) << PAGE_SHIFT;
+		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		if (ret < 0)
+			goto fail;
+		iova += len;
+		i = j;
+	}
+	return dma_addr;
+fail:
+	iommu_unmap(mapping->domain, dma_addr, iova - dma_addr);
+	__free_iova(mapping, dma_addr, size);
+	return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+	/*
+	 * add optional in-page offset from iova to size and align
+	 * result to page size
+	 */
+	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, size);
+	__free_iova(mapping, iova, size);
+	return 0;
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	struct page **pages;
+	void *addr = NULL;
+
+	*handle = DMA_ERROR_CODE;
+	size = PAGE_ALIGN(size);
+
+	pages = __iommu_alloc_buffer(dev, size, gfp);
+	if (!pages)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, pages, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_buffer;
+
+	addr = __iommu_alloc_remap(pages, size, gfp, prot);
+	if (!addr)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+	__iommu_free_buffer(dev, pages, size);
+	return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		    struct dma_attrs *attrs)
+{
+	struct arm_vmregion *c;
+
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+	if (c) {
+		struct page **pages = c->priv;
+
+		unsigned long uaddr = vma->vm_start;
+		unsigned long usize = vma->vm_end - vma->vm_start;
+		int i = 0;
+
+		do {
+			int ret;
+
+			ret = vm_insert_page(vma, uaddr, pages[i++]);
+			if (ret) {
+				pr_err("Remapping memory failed: %d\n", ret);
+				return ret;
+			}
+
+			uaddr += PAGE_SIZE;
+			usize -= PAGE_SIZE;
+		} while (usize > 0);
+	}
+	return 0;
+}
+
+/*
+ * Free a buffer as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+			  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	struct arm_vmregion *c;
+	size = PAGE_ALIGN(size);
+
+	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+	if (c) {
+		struct page **pages = c->priv;
+		__dma_free_remap(cpu_addr, size);
+		__iommu_remove_mapping(dev, handle, size);
+		__iommu_free_buffer(dev, pages, size);
+	}
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+			  size_t size, dma_addr_t *handle,
+			  enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova, iova_base;
+	int ret = 0;
+	unsigned int count;
+	struct scatterlist *s;
+
+	size = PAGE_ALIGN(size);
+	*handle = DMA_ERROR_CODE;
+
+	iova_base = iova = __alloc_iova(mapping, size);
+	if (iova == DMA_ERROR_CODE)
+		return -ENOMEM;
+
+	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+		phys_addr_t phys = page_to_phys(sg_page(s));
+		unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+		if (!arch_is_coherent())
+			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+
+		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		if (ret < 0)
+			goto fail;
+		count += len >> PAGE_SHIFT;
+		iova += len;
+	}
+	*handle = iova_base;
+
+	return 0;
+fail:
+	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+	__free_iova(mapping, iova_base, size);
+	return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	struct scatterlist *s = sg, *dma = sg, *start = sg;
+	int i, count = 0;
+	unsigned int offset = s->offset;
+	unsigned int size = s->offset + s->length;
+	unsigned int max = dma_get_max_seg_size(dev);
+
+	for (i = 1; i < nents; i++) {
+		s = sg_next(s);
+
+		s->dma_address = DMA_ERROR_CODE;
+		s->dma_length = 0;
+
+		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
+			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+			    dir) < 0)
+				goto bad_mapping;
+
+			dma->dma_address += offset;
+			dma->dma_length = size - offset;
+
+			size = offset = s->offset;
+			start = s;
+			dma = sg_next(dma);
+			count += 1;
+		}
+		size += s->length;
+	}
+	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
+		goto bad_mapping;
+
+	dma->dma_address += offset;
+	dma->dma_length = size - offset;
+
+	return count + 1;
+
+bad_mapping:
+	for_each_sg(sg, s, count, i)
+		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+	return 0;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		if (sg_dma_len(s))
+			__iommu_remove_mapping(dev, sg_dma_address(s),
+					       sg_dma_len(s));
+		if (!arch_is_coherent())
+			__dma_page_dev_to_cpu(sg_page(s), s->offset,
+					      s->length, dir);
+	}
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		if (!arch_is_coherent())
+			__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		if (!arch_is_coherent())
+			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t dma_addr;
+	int ret, len = PAGE_ALIGN(size + offset);
+
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+
+	dma_addr = __alloc_iova(mapping, len);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+	if (ret < 0)
+		goto fail;
+
+	return dma_addr + offset;
+fail:
+	__free_iova(mapping, dma_addr, len);
+	return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	int offset = handle & ~PAGE_MASK;
+	int len = PAGE_ALIGN(size + offset);
+
+	if (!iova)
+		return;
+
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+
+	iommu_unmap(mapping->domain, iova, len);
+	__free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	unsigned int offset = handle & ~PAGE_MASK;
+
+	if (!iova)
+		return;
+
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	unsigned int offset = handle & ~PAGE_MASK;
+
+	if (!iova)
+		return;
+
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+struct dma_map_ops iommu_ops = {
+	.alloc		= arm_iommu_alloc_attrs,
+	.free		= arm_iommu_free_attrs,
+	.mmap		= arm_iommu_mmap_attrs,
+
+	.map_page		= arm_iommu_map_page,
+	.unmap_page		= arm_iommu_unmap_page,
+	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
+	.sync_single_for_device	= arm_iommu_sync_single_for_device,
+
+	.map_sg			= arm_iommu_map_sg,
+	.unmap_sg		= arm_iommu_unmap_sg,
+	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: granularity of the IO address allocations (2^order pages per bitmap bit)
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device() function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+			 int order)
+{
+	unsigned int count = size >> (PAGE_SHIFT + order);
+	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	struct dma_iommu_mapping *mapping;
+	int err = -ENOMEM;
+
+	if (!count)
+		return ERR_PTR(-EINVAL);
+
+	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+	if (!mapping)
+		goto err;
+
+	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmap)
+		goto err2;
+
+	mapping->base = base;
+	mapping->bits = BITS_PER_BYTE * bitmap_size;
+	mapping->order = order;
+	spin_lock_init(&mapping->lock);
+
+	mapping->domain = iommu_domain_alloc(bus);
+	if (!mapping->domain)
+		goto err3;
+
+	kref_init(&mapping->kref);
+	return mapping;
+err3:
+	kfree(mapping->bitmap);
+err2:
+	kfree(mapping);
+err:
+	return ERR_PTR(err);
+}
+
+static void release_iommu_mapping(struct kref *kref)
+{
+	struct dma_iommu_mapping *mapping =
+		container_of(kref, struct dma_iommu_mapping, kref);
+
+	iommu_domain_free(mapping->domain);
+	kfree(mapping->bitmap);
+	kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+	if (mapping)
+		kref_put(&mapping->kref, release_iommu_mapping);
+}
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *	arm_iommu_create_mapping)
+ *
+ * Attaches specified io address space mapping to the provided device,
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int err;
+
+	err = iommu_attach_device(mapping->domain, dev);
+	if (err)
+		return err;
+
+	kref_get(&mapping->kref);
+	dev->archdata.mapping = mapping;
+	set_dma_ops(dev, &iommu_ops);
+
+	pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+	return 0;
+}
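
Putting the two halves together, client-side setup reduces to a few
lines; a minimal sketch (hypothetical board code: a 128MB IO window at
0x80000000, one page per bitmap bit):

	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <asm/dma-iommu.h>

	static int foo_enable_iommu(struct device *dev)
	{
		struct dma_iommu_mapping *mapping;
		int err;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   0x80000000, SZ_128M, 0);
		if (IS_ERR(mapping))
			return PTR_ERR(mapping);

		err = arm_iommu_attach_device(dev, mapping);
		if (err)
			arm_iommu_release_mapping(mapping);
		return err;
	}
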
+
+#endif
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f3fe30d..7aefddf 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -97,8 +97,8 @@
 	int free = 0, total = 0, reserved = 0;
 	int shared = 0, cached = 0, slab = 0, i;
 	struct meminfo * mi = &meminfo;
-	unsigned long kgsl_alloc = kgsl_get_alloc_size(0);
-	int ion_alloc = ion_iommu_heap_dump_size();
+	unsigned long kgsl_alloc = 0;
+	int ion_alloc = 0;
 
 	printk("Mem-info:\n");
 	show_free_areas(filter);
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 4b886f3..73e82f6 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -7,6 +7,35 @@
 
 #include "vmregion.h"
 
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ *  struct vm_struct {
+ *    struct vmregion	region;
+ *    unsigned long	flags;
+ *    struct page	**pages;
+ *    unsigned int	nr_pages;
+ *    unsigned long	phys_addr;
+ *  };
+ *
+ * get_vm_area() would then call vmregion_alloc with an appropriate
+ * struct vmregion head (eg):
+ *
+ *  struct vmregion vmalloc_head = {
+ *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
+ *	.vm_start	= VMALLOC_START,
+ *	.vm_end		= VMALLOC_END,
+ *  };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling vmregion_alloc().
+ */
 
 struct arm_vmregion *
 arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
@@ -40,6 +69,9 @@
 	}
 
  found:
+	/*
+	 * Insert this entry after the one we found.
+	 */
 	list_add(&new->vm_list, &c->vm_list);
 	new->vm_start = addr;
 	new->vm_end = addr + size;
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 162be66..bf312c3 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -17,7 +17,7 @@
 	struct list_head	vm_list;
 	unsigned long		vm_start;
 	unsigned long		vm_end;
-	struct page		*vm_pages;
+	void			*priv;
 	int			vm_active;
 	const void		*caller;
 };
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 43fc838..1b85949 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -1,3 +1,7 @@
+/*
+ * Coherent per-device memory handling.
+ * Borrowed from i386
+ */
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -6,6 +10,7 @@
 struct dma_coherent_mem {
 	void		*virt_base;
 	dma_addr_t	device_base;
+	phys_addr_t	pfn_base;
 	int		size;
 	int		flags;
 	unsigned long	*bitmap;
@@ -25,7 +30,7 @@
 	if (dev->dma_mem)
 		goto out;
 
-	
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
 
 	mem_base = ioremap(bus_addr, size);
 	if (!mem_base)
@@ -40,6 +45,7 @@
 
 	dev->dma_mem->virt_base = mem_base;
 	dev->dma_mem->device_base = device_addr;
+	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
 	dev->dma_mem->size = pages;
 	dev->dma_mem->flags = flags;
 
@@ -89,6 +95,21 @@
 }
 EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
 
+/**
+ * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
+ *
+ * @dev:	device from which we allocate memory
+ * @size:	size of requested memory area
+ * @dma_handle:	This will be filled with the correct dma handle
+ * @ret:	This pointer will be filled with the virtual address
+ *		of the allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 				       dma_addr_t *dma_handle, void **ret)
 {
@@ -111,6 +132,9 @@
 	if (unlikely(pageno < 0))
 		goto err;
 
+	/*
+	 * Memory was found in the per-device area.
+	 */
 	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
 	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
 	memset(*ret, 0, size);
@@ -118,10 +142,28 @@
 	return 1;
 
 err:
+	/*
+	 * In the case where the allocation can not be satisfied from the
+	 * per-device area, try to fall back to generic memory if the
+	 * constraints allow it.
+	 */
 	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
 EXPORT_SYMBOL(dma_alloc_from_coherent);
 
+/**
+ * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
+ * @dev:	device from which the memory was allocated
+ * @order:	the order of pages allocated
+ * @vaddr:	virtual address of allocated pages
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, releases that memory.
+ *
+ * Returns 1 if we correctly released the memory, or 0 if the caller
+ * should proceed with releasing memory from generic pools.
+ */
 int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 {
 	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
@@ -136,3 +178,43 @@
 	return 0;
 }
 EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev:	device from which the memory was allocated
+ * @vma:	vm_area for the userspace memory
+ * @vaddr:	cpu address returned by dma_alloc_from_coherent
+ * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller
+ * should proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr + size <=
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		unsigned long off = vma->vm_pgoff;
+		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+		int count = size >> PAGE_SHIFT;
+
+		*ret = -ENXIO;
+		if (off < count && user_count <= count - off) {
+			unsigned pfn = mem->pfn_base + start + off;
+			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
+					       user_count << PAGE_SHIFT,
+					       vma->vm_page_prot);
+		}
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
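
The intended flow is the one arm_dma_mmap() follows earlier in this
series: a platform declares a per-device region once, and the arch mmap
hook then tries the pool before falling back to remap_pfn_range(). A
sketch of the declaration half (hypothetical probe code; the addresses
are platform-specific):

	static int foo_declare_pool(struct device *dev, dma_addr_t bus_addr,
				    dma_addr_t device_addr)
	{
		/* dma_declare_coherent_memory() returns 0 on failure */
		if (!dma_declare_coherent_memory(dev, bus_addr, device_addr,
						 SZ_1M, DMA_MEMORY_MAP))
			return -ENOMEM;
		return 0;
	}
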
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b114875..9d24d65 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3737,10 +3737,6 @@
 # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE		(1 << 12)
 # define GEN6_RCCUNIT_CLOCK_GATE_DISABLE		(1 << 11)
 
-#define GEN6_UCGCTL2				0x9404
-# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE		(1 << 12)
-# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE		(1 << 11)
-
 #define GEN6_RPNSWREQ				0xA008
 #define   GEN6_TURBO_DISABLE			(1<<31)
 #define   GEN6_FREQUENCY(x)			((x)<<25)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 48dae40..f7eb5d8 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -439,9 +439,6 @@
 	struct radeon_i2c_chan *ddc_bus;
 	/* some systems have an hdmi and vga port with a shared ddc line */
 	bool shared_ddc;
-	/* for some Radeon chip families we apply an additional EDID header
-	   check as part of the DDC probe */
-	bool requires_extended_probe;
 	bool use_digital;
 	/* we need to mind the EDID between detect
 	   and get modes due to analog/digital/tvencoder */
@@ -529,8 +526,7 @@
 				u8 val);
 extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
 extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
-			bool requires_extended_probe);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
 extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
index 5bb254b..39133b5 100644
--- a/drivers/gpu/ion/Kconfig
+++ b/drivers/gpu/ion/Kconfig
@@ -16,3 +16,12 @@
 	depends on ARCH_MSM && ION
 	help
 	  Choose this option if you wish to use ion on an MSM target.
+
+config ION_LEAK_CHECK
+	bool "Check for leaked Ion buffers (debugging)"
+	depends on ION
+	help
+	  Choose this option if you wish to enable checking for leaked
+	  ion buffers at runtime. Choosing this option will also add a
+	  debugfs node under the ion directory that can be used to
+	  enable/disable the leak checking.
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index 51349f6..60a6b81 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o
-obj-$(CONFIG_CMA) += ion_cma_heap.o
+obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o
 obj-$(CONFIG_ION_TEGRA) += tegra/
 obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index d005605..d3434d8 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -34,10 +34,11 @@
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
 #include <linux/msm_ion.h>
+#include <trace/events/kmem.h>
+
 
 #include <mach/iommu_domains.h>
 #include "ion_priv.h"
-#define DEBUG
 
 /**
  * struct ion_device - the metadata of the ion device node
@@ -105,6 +106,12 @@
 	unsigned int iommu_map_cnt;
 };
 
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+	return ((buffer->flags & ION_FLAG_CACHED) &&
+		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+}
+
 static void ion_iommu_release(struct kref *kref);
 
 /* this function should only be called while dev->lock is held */
@@ -188,6 +195,8 @@
 	return NULL;
 }
 
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
+
 /* this function should only be called while dev->lock is held */
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 				     struct ion_device *dev,
@@ -197,13 +206,15 @@
 {
 	struct ion_buffer *buffer;
 	struct sg_table *table;
-	int ret;
+	struct scatterlist *sg;
+	int i, ret;
 
 	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
 	if (!buffer)
 		return ERR_PTR(-ENOMEM);
 
 	buffer->heap = heap;
+	buffer->flags = flags;
 	kref_init(&buffer->ref);
 
 	ret = heap->ops->allocate(heap, buffer, len, align, flags);
@@ -214,19 +225,54 @@
 
 	buffer->dev = dev;
 	buffer->size = len;
-	buffer->flags = flags;
 
-	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
+	table = heap->ops->map_dma(heap, buffer);
 	if (IS_ERR_OR_NULL(table)) {
 		heap->ops->free(buffer);
 		kfree(buffer);
 		return ERR_PTR(PTR_ERR(table));
 	}
 	buffer->sg_table = table;
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
+			    i) {
+			if (sg_dma_len(sg) == PAGE_SIZE)
+				continue;
+			pr_err("%s: cached mappings that will be faulted in "
+			       "must have pagewise sg_lists\n", __func__);
+			ret = -EINVAL;
+			goto err;
+		}
 
+		ret = ion_buffer_alloc_dirty(buffer);
+		if (ret)
+			goto err;
+	}
+
+	buffer->dev = dev;
+	buffer->size = len;
+	INIT_LIST_HEAD(&buffer->vmas);
 	mutex_init(&buffer->lock);
+	/*
+	 * This will set up dma addresses for the sglist -- it is not
+	 * technically correct as per the DMA API -- a specific device
+	 * isn't really taking ownership here.  However, in practice on
+	 * our systems the only dma_address space is physical addresses.
+	 * Additionally, we can't afford the overhead of invalidating every
+	 * allocation via dma_map_sg.  The implicit contract here is that
+	 * memory coming from the heaps is ready for DMA, i.e. if it has a
+	 * cached mapping, that mapping has been invalidated.
+	 */
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+		if (sg_dma_address(sg) == 0)
+			sg_dma_address(sg) = sg_phys(sg);
+	}
 	ion_buffer_add(dev, buffer);
 	return buffer;
+
+err:
+	heap->ops->unmap_dma(heap, buffer);
+	heap->ops->free(buffer);
+	kfree(buffer);
+	return ERR_PTR(ret);
 }
 
 /**
@@ -261,6 +307,12 @@
 	mutex_unlock(&buffer->lock);
 }
 
+static void ion_delayed_unsecure(struct ion_buffer *buffer)
+{
+	if (buffer->heap->ops->unsecure_buffer)
+		buffer->heap->ops->unsecure_buffer(buffer, 1);
+}
+
 static void ion_buffer_destroy(struct kref *kref)
 {
 	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
@@ -268,14 +320,16 @@
 
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-
 	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
 
+	ion_delayed_unsecure(buffer);
 	ion_iommu_delayed_unmap(buffer);
 	buffer->heap->ops->free(buffer);
 	mutex_lock(&dev->lock);
 	rb_erase(&buffer->node, &dev->buffers);
 	mutex_unlock(&dev->lock);
+	if (buffer->flags & ION_FLAG_CACHED)
+		kfree(buffer->dirty);
 	kfree(buffer);
 }
 
@@ -402,7 +456,7 @@
 	struct ion_handle *handle;
 	struct ion_device *dev = client->dev;
 	struct ion_buffer *buffer = NULL;
-	unsigned long secure_allocation = flags & ION_SECURE;
+	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
 	const unsigned int MAX_DBG_STR_LEN = 64;
 	char dbg_str[MAX_DBG_STR_LEN];
 	unsigned int dbg_str_idx = 0;
@@ -410,6 +464,16 @@
 	dbg_str[0] = '\0';
 
 	/*
+	 * For now, we don't want to fault in pages individually since
+	 * clients are already doing manual cache maintenance. In
+	 * other words, the implicit caching infrastructure is in
+	 * place (in code) but should not be used.
+	 */
+	flags |= ION_FLAG_CACHED_NEEDS_SYNC;
+
+	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__, len,
+		 align, heap_mask, flags);
+	/*
 	 * traverse the list of heaps available in this system in priority
 	 * order.  If the heap type is supported by the client, and matches the
 	 * request of the caller allocate from it.  Repeat until allocate has
@@ -431,11 +495,18 @@
 			continue;
 		/* Do not allow un-secure heap if secure is specified */
 		if (secure_allocation &&
-			(heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP))
+		    !ion_heap_allow_secure_allocation(heap->type))
 			continue;
+		trace_ion_alloc_buffer_start(client->name, heap->name, len,
+					     heap_mask, flags);
 		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		trace_ion_alloc_buffer_end(client->name, heap->name, len,
+					   heap_mask, flags);
 		if (!IS_ERR_OR_NULL(buffer))
 			break;
+
+		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
+					    heap_mask, flags, PTR_ERR(buffer));
 		if (dbg_str_idx < MAX_DBG_STR_LEN) {
 			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
 			int ret_value = snprintf(&dbg_str[dbg_str_idx],
@@ -454,10 +525,15 @@
 	}
 	mutex_unlock(&dev->lock);
 
-	if (buffer == NULL)
+	if (buffer == NULL) {
+		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+					    heap_mask, flags, -ENODEV);
 		return ERR_PTR(-ENODEV);
+	}
 
 	if (IS_ERR(buffer)) {
+		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+					    heap_mask, flags, PTR_ERR(buffer));
 		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
 			 "0x%x) from heap(s) %sfor client %s with heap "
 			 "mask 0x%x\n",
@@ -627,6 +703,19 @@
 	struct ion_iommu_map *iommu_map;
 	int ret = 0;
 
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("%s: client pointer is invalid\n", __func__);
+		return -EINVAL;
+	}
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: handle pointer is invalid\n", __func__);
+		return -EINVAL;
+	}
+	if (IS_ERR_OR_NULL(handle->buffer)) {
+		pr_err("%s: buffer pointer is invalid\n", __func__);
+		return -EINVAL;
+	}
+
 	if (ION_IS_CACHED(flags)) {
 		pr_err("%s: Cannot map iommu as cached.\n", __func__);
 		return -EINVAL;
@@ -687,6 +776,8 @@
 
 			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
 				kref_get(&iommu_map->ref);
+		} else {
+			ret = PTR_ERR(iommu_map);
 		}
 	} else {
 		if (iommu_map->flags != iommu_flags) {
@@ -732,6 +823,19 @@
 	struct ion_iommu_map *iommu_map;
 	struct ion_buffer *buffer;
 
+	if (IS_ERR_OR_NULL(client)) {
+		pr_err("%s: client pointer is invalid\n", __func__);
+		return;
+	}
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: handle pointer is invalid\n", __func__);
+		return;
+	}
+	if (IS_ERR_OR_NULL(handle->buffer)) {
+		pr_err("%s: buffer pointer is invalid\n", __func__);
+		return;
+	}
+
 	mutex_lock(&client->lock);
 	buffer = handle->buffer;
 
@@ -865,7 +969,7 @@
 		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
 			type == ION_HEAP_TYPE_CARVEOUT ||
 			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
-			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
+			seq_printf(s, " : %12pa", &handle->buffer->priv_phys);
 		else
 			seq_printf(s, " : %12s", "N/A");
 
@@ -975,10 +1079,91 @@
 	return client;
 }
 
+/**
+ * ion_mark_dangling_buffers_locked() - Mark dangling buffers
+ * @dev:	the ion device whose buffers will be searched
+ *
+ * Sets marked=1 for all known buffers associated with `dev' that no
+ * longer have a handle pointing to them. dev->lock should be held
+ * across a call to this function (and should only be unlocked after
+ * checking for marked buffers).
+ */
+static void ion_mark_dangling_buffers_locked(struct ion_device *dev)
+{
+	struct rb_node *n, *n2;
+	/* mark all buffers as 1 */
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
+						node);
+
+		buf->marked = 1;
+	}
+
+	/* now see which buffers we can access */
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						node);
+
+		mutex_lock(&client->lock);
+		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
+			struct ion_handle *handle
+				= rb_entry(n2, struct ion_handle, node);
+
+			handle->buffer->marked = 0;
+
+		}
+		mutex_unlock(&client->lock);
+
+	}
+}
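
ion_mark_dangling_buffers_locked() is a straightforward mark-and-sweep: mark every buffer, then clear the mark on each buffer still reachable through some client handle; whatever stays marked has no owner. A compressed userspace sketch of the same two-phase walk, with arrays standing in for the driver's rb-trees (the handle table is illustrative data):

#include <stdio.h>

#define NBUF 4

struct buffer { int marked; };

/* handles[c][b] != 0 means client c still holds a handle on buffer b */
static const int handles[2][NBUF] = {
	{ 1, 0, 1, 0 },		/* client 0 */
	{ 0, 0, 1, 0 },		/* client 1 */
};

int main(void)
{
	struct buffer bufs[NBUF];
	int b, c, leaks = 0;

	for (b = 0; b < NBUF; b++)		/* mark phase: assume dangling */
		bufs[b].marked = 1;

	for (c = 0; c < 2; c++)			/* sweep phase: clear reachable */
		for (b = 0; b < NBUF; b++)
			if (handles[c][b])
				bufs[b].marked = 0;

	for (b = 0; b < NBUF; b++)
		if (bufs[b].marked) {		/* nothing points here anymore */
			printf("buffer %d is dangling\n", b);
			leaks++;
		}
	printf("%d leaked buffer%s\n", leaks, leaks == 1 ? "" : "s");
	return 0;
}

In the driver the result is only meaningful while dev->lock stays held between the sweep and whatever inspects the marks, exactly as the kernel-doc above warns.
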
+
+#ifdef CONFIG_ION_LEAK_CHECK
+static u32 ion_debug_check_leaks_on_destroy;
+
+static int ion_check_for_and_print_leaks(struct ion_device *dev)
+{
+	struct rb_node *n;
+	int num_leaks = 0;
+
+	if (!ion_debug_check_leaks_on_destroy)
+		return 0;
+
+	/* check for leaked buffers (those that no longer have a
+	 * handle pointing to them) */
+	ion_mark_dangling_buffers_locked(dev);
+
+	/* Anyone still marked as a 1 means a leaked handle somewhere */
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
+						node);
+
+		if (buf->marked == 1) {
+			pr_info("Leaked ion buffer at %p\n", buf);
+			num_leaks++;
+		}
+	}
+	return num_leaks;
+}
+static void setup_ion_leak_check(struct dentry *debug_root)
+{
+	debugfs_create_bool("check_leaks_on_destroy", 0664, debug_root,
+			&ion_debug_check_leaks_on_destroy);
+}
+#else
+static int ion_check_for_and_print_leaks(struct ion_device *dev)
+{
+	return 0;
+}
+static void setup_ion_leak_check(struct dentry *debug_root)
+{
+}
+#endif
+
 void ion_client_destroy(struct ion_client *client)
 {
 	struct ion_device *dev = client->dev;
 	struct rb_node *n;
+	int num_leaks;
 
 	pr_debug("%s: %d\n", __func__, __LINE__);
 	while ((n = rb_first(&client->handles))) {
@@ -991,8 +1176,21 @@
 		put_task_struct(client->task);
 	rb_erase(&client->node, &dev->clients);
 	debugfs_remove_recursive(client->debug_root);
+
+	num_leaks = ion_check_for_and_print_leaks(dev);
+
 	mutex_unlock(&dev->lock);
 
+	if (num_leaks) {
+		struct task_struct *current_task = current;
+		char current_task_name[TASK_COMM_LEN];
+		get_task_comm(current_task_name, current_task);
+		WARN(1, "%s: Detected %d leaked ion buffer%s.\n",
+			__func__, num_leaks, num_leaks == 1 ? "" : "s");
+		pr_info("task name at time of leak: %s, pid: %d\n",
+			current_task_name, current_task->pid);
+	}
+
 	kfree(client->name);
 	kfree(client);
 }
@@ -1062,12 +1260,47 @@
 }
 EXPORT_SYMBOL(ion_sg_table);
 
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+					size_t chunk_size, size_t total_size)
+{
+	struct sg_table *table;
+	int i, n_chunks, ret;
+	struct scatterlist *sg;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
+	pr_debug("creating sg_table with %d chunks\n", n_chunks);
+
+	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
+	if (ret)
+		goto err0;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		dma_addr_t addr = buffer_base + i * chunk_size;
+		sg_dma_address(sg) = addr;
+		sg_dma_len(sg) = chunk_size;
+	}
+
+	return table;
+err0:
+	kfree(table);
+	return ERR_PTR(ret);
+}
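
ion_create_chunked_sg_table() carves one physically contiguous range into n_chunks fixed-size entries; note that every entry, including the last, keeps the full chunk_size, so callers are expected to pass a chunk-aligned total. A userspace sketch of just the address arithmetic (the base address is an arbitrary example value):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t base = 0x80000000ull;		/* illustrative physical base */
	size_t chunk = 4096;
	size_t total = 3 * 4096 + 100;	/* deliberately unaligned: the last
					 * entry still claims a full chunk */
	int n_chunks = DIV_ROUND_UP(total, chunk);

	for (int i = 0; i < n_chunks; i++)
		printf("entry %d: addr=%#llx len=%zu\n", i,
		       (unsigned long long)(base + (uint64_t)i * chunk), chunk);
	return 0;
}
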
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction direction);
+
 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
 					enum dma_data_direction direction)
 {
 	struct dma_buf *dmabuf = attachment->dmabuf;
 	struct ion_buffer *buffer = dmabuf->priv;
 
+	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
 	return buffer->sg_table;
 }
 
@@ -1077,40 +1310,119 @@
 {
 }
 
-static void ion_vma_open(struct vm_area_struct *vma)
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
 {
-	struct ion_buffer *buffer = vma->vm_private_data;
+	unsigned long pages = buffer->sg_table->nents;
+	unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
 
-	pr_debug("%s: %d\n", __func__, __LINE__);
+	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
+	if (!buffer->dirty)
+		return -ENOMEM;
+	return 0;
+}
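
The dirty bitmap sized above holds one bit per scatter-list entry, rounded up to whole longs; the fault handler further down sets a bit when a page is faulted in, and ion_buffer_sync_for_device() tests and clears it before syncing. A self-contained userspace sketch of that bookkeeping (the helpers mimic, but are not, the kernel's set_bit/test_bit/clear_bit):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void set_bit(unsigned long *map, unsigned n)
{ map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG); }

static int test_bit(const unsigned long *map, unsigned n)
{ return (map[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1; }

static void clear_bit(unsigned long *map, unsigned n)
{ map[n / BITS_PER_LONG] &= ~(1UL << (n % BITS_PER_LONG)); }

int main(void)
{
	unsigned pages = 70;		/* one bit per sg entry */
	size_t nlongs = (pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
	unsigned long *dirty = calloc(nlongs, sizeof(*dirty));

	if (!dirty)
		return 1;
	set_bit(dirty, 3);		/* page 3 faulted in by the CPU */
	if (test_bit(dirty, 3)) {
		/* driver: dma_sync_sg_for_device() on this entry, then... */
		clear_bit(dirty, 3);
	}
	printf("bit 3 now %d\n", test_bit(dirty, 3));
	free(dirty);
	return 0;
}
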
+
+struct ion_vma_list {
+	struct list_head list;
+	struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+	struct ion_vma_list *vma_list;
+
+	pr_debug("%s: syncing for device %s\n", __func__,
+		 dev ? dev_name(dev) : "null");
+
+	if (!ion_buffer_fault_user_mappings(buffer))
+		return;
 
 	mutex_lock(&buffer->lock);
-	buffer->umap_cnt++;
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+		if (!test_bit(i, buffer->dirty))
+			continue;
+		dma_sync_sg_for_device(dev, sg, 1, dir);
+		clear_bit(i, buffer->dirty);
+	}
+	list_for_each_entry(vma_list, &buffer->vmas, list) {
+		struct vm_area_struct *vma = vma_list->vma;
+
+		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+			       NULL);
+	}
 	mutex_unlock(&buffer->lock);
 }
 
-static void ion_vma_close(struct vm_area_struct *vma)
+int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct ion_buffer *buffer = vma->vm_private_data;
-
-	pr_debug("%s: %d\n", __func__, __LINE__);
+	struct scatterlist *sg;
+	int i;
 
 	mutex_lock(&buffer->lock);
-	buffer->umap_cnt--;
+	set_bit(vmf->pgoff, buffer->dirty);
+
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+		if (i != vmf->pgoff)
+			continue;
+		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
+		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+			       sg_page(sg));
+		break;
+	}
+	mutex_unlock(&buffer->lock);
+	return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list;
+
+	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+	if (!vma_list)
+		return;
+	vma_list->vma = vma;
+	mutex_lock(&buffer->lock);
+	list_add(&vma_list->list, &buffer->vmas);
+	mutex_unlock(&buffer->lock);
+	pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list, *tmp;
+
+	pr_debug("%s\n", __func__);
+	mutex_lock(&buffer->lock);
+	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+		if (vma_list->vma != vma)
+			continue;
+		list_del(&vma_list->list);
+		kfree(vma_list);
+		pr_debug("%s: deleting %p\n", __func__, vma);
+		break;
+	}
 	mutex_unlock(&buffer->lock);
 
 	if (buffer->heap->ops->unmap_user)
 		buffer->heap->ops->unmap_user(buffer->heap, buffer);
 }
 
-static struct vm_operations_struct ion_vm_ops = {
-	.open = ion_vma_open,
-	.close = ion_vma_close,
+struct vm_operations_struct ion_vma_ops = {
+	.open = ion_vm_open,
+	.close = ion_vm_close,
+	.fault = ion_vm_fault,
 };
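
The ion_vm_open()/ion_vm_close() pair registered in this table keeps a per-buffer list of live VMAs so that ion_buffer_sync_for_device() can zap_page_range() every user mapping. A userspace sketch of that register/unregister pattern with a hand-rolled singly linked list (the vma pointer is just an opaque cookie here; the driver protects its list with buffer->lock):

#include <stdio.h>
#include <stdlib.h>

struct vma_node {
	void *vma;			/* stand-in for vm_area_struct * */
	struct vma_node *next;
};

static struct vma_node *vmas;		/* per-buffer list head in the driver */

static void vma_open(void *vma)
{
	struct vma_node *n = malloc(sizeof(*n));

	if (!n)
		return;			/* the driver also gives up silently */
	n->vma = vma;
	n->next = vmas;
	vmas = n;
}

static void vma_close(void *vma)
{
	struct vma_node **pp = &vmas;

	while (*pp) {
		if ((*pp)->vma == vma) {
			struct vma_node *dead = *pp;

			*pp = dead->next;	/* unlink and free this entry */
			free(dead);
			return;
		}
		pp = &(*pp)->next;
	}
}

int main(void)
{
	int a, b;

	vma_open(&a);
	vma_open(&b);
	vma_close(&a);
	for (struct vma_node *n = vmas; n; n = n->next)
		printf("live mapping: %p\n", n->vma);
	vma_close(&b);
	return 0;
}
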
 
 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	int ret;
+	int ret = 0;
 
 	if (!buffer->heap->ops->map_user) {
 		pr_err("%s: this heap does not define a method for mapping "
@@ -1118,25 +1430,26 @@
 		return -EINVAL;
 	}
 
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		vma->vm_private_data = buffer;
+		vma->vm_ops = &ion_vma_ops;
+		vma->vm_flags |= VM_MIXEDMAP;
+		ion_vm_open(vma);
+		return 0;
+	}
+
+	if (!(buffer->flags & ION_FLAG_CACHED))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
 	mutex_lock(&buffer->lock);
 	/* now map it to userspace */
 	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+	mutex_unlock(&buffer->lock);
 
-	if (ret) {
-		mutex_unlock(&buffer->lock);
+	if (ret)
 		pr_err("%s: failure mapping buffer to userspace\n",
 		       __func__);
-	} else {
-		buffer->umap_cnt++;
-		mutex_unlock(&buffer->lock);
 
-		vma->vm_ops = &ion_vm_ops;
-		/*
-		 * move the buffer into the vm_private_data so we can access it
-		 * from vma_open/close
-		 */
-		vma->vm_private_data = buffer;
-	}
 	return ret;
 }
 
@@ -1205,33 +1518,6 @@
 	.kunmap = ion_dma_buf_kunmap,
 };
 
-static int ion_share_set_flags(struct ion_client *client,
-				struct ion_handle *handle,
-				unsigned long flags)
-{
-	struct ion_buffer *buffer;
-	bool valid_handle;
-	unsigned long ion_flags = 0;
-	if (flags & O_DSYNC)
-		ion_flags = ION_SET_UNCACHED(ion_flags);
-	else
-		ion_flags = ION_SET_CACHED(ion_flags);
-
-
-	mutex_lock(&client->lock);
-	valid_handle = ion_handle_validate(client, handle);
-	mutex_unlock(&client->lock);
-	if (!valid_handle) {
-		WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
-		return -EINVAL;
-	}
-
-	buffer = handle->buffer;
-
-	return 0;
-}
-
-
 int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
 {
 	struct ion_buffer *buffer;
@@ -1299,6 +1585,30 @@
 }
 EXPORT_SYMBOL(ion_import_dma_buf);
 
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_buffer *buffer;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR_OR_NULL(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	/* only dmabufs exported by ion can be synced through this path */
+	if (dmabuf->ops != &dma_buf_ops) {
+		pr_err("%s: cannot sync a dmabuf from another exporter\n",
+		       __func__);
+		dma_buf_put(dmabuf);
+		return -EINVAL;
+	}
+	buffer = dmabuf->priv;
+
+	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+	dma_buf_put(dmabuf);
+	return 0;
+}
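
From userspace, the new ION_IOC_SYNC command takes the dma-buf fd previously obtained through ION_IOC_SHARE. A hedged sketch of the call sequence — it assumes this tree's uapi linux/ion.h and a /dev/ion node, and elides the allocate/share steps:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ion.h>		/* struct ion_fd_data, ION_IOC_SYNC */

int main(void)
{
	struct ion_fd_data data = { 0 };
	int ion = open("/dev/ion", O_RDONLY);

	if (ion < 0)
		return 1;
	/* ... ION_IOC_ALLOC a handle, then ION_IOC_SHARE it into data.fd ... */
	if (ioctl(ion, ION_IOC_SYNC, &data) < 0)
		perror("ION_IOC_SYNC");
	close(ion);
	return 0;
}
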
+
 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct ion_client *client = filp->private_data;
@@ -1342,14 +1652,9 @@
 	case ION_IOC_SHARE:
 	{
 		struct ion_fd_data data;
-		int ret;
 		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
 			return -EFAULT;
 
-		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
-		if (ret)
-			return ret;
-
 		data.fd = ion_share_dma_buf(client, data.handle);
 		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
 			return -EFAULT;
@@ -1376,6 +1681,15 @@
 			return ret;
 		break;
 	}
+	case ION_IOC_SYNC:
+	{
+		struct ion_fd_data data;
+		if (copy_from_user(&data, (void __user *)arg,
+				   sizeof(struct ion_fd_data)))
+			return -EFAULT;
+		ion_sync_for_device(client, data.fd);
+		break;
+	}
 	case ION_IOC_CUSTOM:
 	{
 		struct ion_device *dev = client->dev;
@@ -1397,9 +1711,6 @@
 	case ION_IOC_CLEAN_INV_CACHES:
 		return client->dev->custom_ioctl(client,
 						ION_IOC_CLEAN_INV_CACHES, arg);
-	case ION_IOC_GET_FLAGS:
-		return client->dev->custom_ioctl(client,
-						ION_IOC_GET_FLAGS, arg);
 	default:
 		return -ENOTTY;
 	}
@@ -1542,6 +1853,10 @@
 {
 	struct ion_device *dev = heap->dev;
 	struct rb_node *n;
+	size_t size;
+
+	if (!heap->ops->phys)
+		return;
 
 	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
 		struct ion_buffer *buffer =
@@ -1554,9 +1869,11 @@
 					   "Part of memory map will not be logged\n");
 				break;
 			}
-			data->addr = buffer->priv_phys;
-			data->addr_end = buffer->priv_phys + buffer->size-1;
-			data->size = buffer->size;
+
+			buffer->heap->ops->phys(buffer->heap, buffer,
+						&(data->addr), &size);
+			data->size = (unsigned long) size;
+			data->addr_end = data->addr + data->size - 1;
 			data->client_name = ion_debug_locate_owner(dev, buffer);
 			ion_debug_mem_map_add(mem_map, data);
 		}
@@ -1674,6 +1991,73 @@
 	mutex_unlock(&dev->lock);
 }
 
+int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
+			int version, void *data, int flags)
+{
+	int ret = -EINVAL;
+	struct ion_heap *heap;
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		WARN(1, "%s: invalid handle passed to secure.\n", __func__);
+		goto out_unlock;
+	}
+
+	buffer = handle->buffer;
+	heap = buffer->heap;
+
+	if (!ion_heap_allow_handle_secure(heap->type)) {
+		pr_err("%s: cannot secure buffer from non-secure heap\n",
+			__func__);
+		goto out_unlock;
+	}
+
+	BUG_ON(!buffer->heap->ops->secure_buffer);
+	/*
+	 * Protect the handle via the client lock to ensure we aren't
+	 * racing with free
+	 */
+	ret = buffer->heap->ops->secure_buffer(buffer, version, data, flags);
+
+out_unlock:
+	mutex_unlock(&client->lock);
+	return ret;
+}
+
+int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle)
+{
+	int ret = -EINVAL;
+	struct ion_heap *heap;
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		WARN(1, "%s: invalid handle passed to unsecure.\n", __func__);
+		goto out_unlock;
+	}
+
+	buffer = handle->buffer;
+	heap = buffer->heap;
+
+	if (!ion_heap_allow_handle_secure(heap->type)) {
+		pr_err("%s: cannot unsecure buffer from non-secure heap\n",
+			__func__);
+		goto out_unlock;
+	}
+
+	BUG_ON(!buffer->heap->ops->unsecure_buffer);
+	/*
+	 * Protect the handle via the client lock to ensure we aren't
+	 * racing with free
+	 */
+	ret = buffer->heap->ops->unsecure_buffer(buffer, 0);
+
+out_unlock:
+	mutex_unlock(&client->lock);
+	return ret;
+}
+
 int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
 			void *data)
 {
@@ -1687,7 +2071,7 @@
 	mutex_lock(&dev->lock);
 	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
 		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
-		if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
+		if (!ion_heap_allow_heap_secure(heap->type))
 			continue;
 		if (ION_HEAP(heap->id) != heap_id)
 			continue;
@@ -1715,7 +2099,7 @@
 	mutex_lock(&dev->lock);
 	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
 		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
-		if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
+		if (!ion_heap_allow_heap_secure(heap->type))
 			continue;
 		if (ION_HEAP(heap->id) != heap_id)
 			continue;
@@ -1734,37 +2118,14 @@
 {
 	struct ion_device *dev = s->private;
 	struct rb_node *n;
-	struct rb_node *n2;
 
-	/* mark all buffers as 1 */
 	seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size",
 		"ref cnt");
+
 	mutex_lock(&dev->lock);
-	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
-		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
-						     node);
+	ion_mark_dangling_buffers_locked(dev);
 
-		buf->marked = 1;
-	}
-
-	/* now see which buffers we can access */
-	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
-		struct ion_client *client = rb_entry(n, struct ion_client,
-						     node);
-
-		mutex_lock(&client->lock);
-		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
-			struct ion_handle *handle = rb_entry(n2,
-						struct ion_handle, node);
-
-			handle->buffer->marked = 0;
-
-		}
-		mutex_unlock(&client->lock);
-
-	}
-
-	/* And anyone still marked as a 1 means a leaked handle somewhere */
+	/* Anyone still marked as a 1 means a leaked handle somewhere */
 	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
 		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
 						     node);
@@ -1825,6 +2186,8 @@
 	idev->clients = RB_ROOT;
 	debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
 			    &debug_leak_fops);
+
+	setup_ion_leak_check(idev->debug_root);
 	return idev;
 }
 
@@ -1845,8 +2208,8 @@
 		ret = memblock_reserve(data->heaps[i].base,
 				       data->heaps[i].size);
 		if (ret)
-			pr_err("memblock reserve of %x@%lx failed\n",
+			pr_err("memblock reserve of %x@%pa failed\n",
 			       data->heaps[i].size,
-			       data->heaps[i].base);
+			       &data->heaps[i].base);
 	}
 }
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index a808cc9..9610dfe 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_carveout_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/iommu.h>
 #include <linux/seq_file.h>
 #include "ion_priv.h"
@@ -111,26 +112,13 @@
 struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
 					      struct ion_buffer *buffer)
 {
-	struct sg_table *table;
-	int ret;
+	size_t chunk_size = buffer->size;
 
-	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!table)
-		return ERR_PTR(-ENOMEM);
+	if (ION_IS_CACHED(buffer->flags))
+		chunk_size = PAGE_SIZE;
 
-	ret = sg_alloc_table(table, 1, GFP_KERNEL);
-	if (ret)
-		goto err0;
-
-	table->sgl->length = buffer->size;
-	table->sgl->offset = 0;
-	table->sgl->dma_address = buffer->priv_phys;
-
-	return table;
-
-err0:
-	kfree(table);
-	return ERR_PTR(ret);
+	return ion_create_chunked_sg_table(buffer->priv_phys, chunk_size,
+					buffer->size);
 }
 
 void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
@@ -240,25 +228,78 @@
 			void *vaddr, unsigned int offset, unsigned int length,
 			unsigned int cmd)
 {
-	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
 	struct ion_carveout_heap *carveout_heap =
 	     container_of(heap, struct  ion_carveout_heap, heap);
+	unsigned int size_to_vmap, total_size;
+	int i, j;
+	void *ptr = NULL;
+	ion_phys_addr_t buff_phys = buffer->priv_phys;
 
-	switch (cmd) {
-	case ION_IOC_CLEAN_CACHES:
-		dmac_clean_range(vaddr, vaddr + length);
-		outer_cache_op = outer_clean_range;
-		break;
-	case ION_IOC_INV_CACHES:
-		dmac_inv_range(vaddr, vaddr + length);
-		outer_cache_op = outer_inv_range;
-		break;
-	case ION_IOC_CLEAN_INV_CACHES:
-		dmac_flush_range(vaddr, vaddr + length);
-		outer_cache_op = outer_flush_range;
-		break;
-	default:
-		return -EINVAL;
+	if (!vaddr) {
+		/*
+		 * Split the vmalloc space into smaller regions in
+		 * order to clean and/or invalidate the cache.
+		 */
+		size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8);
+		total_size = buffer->size;
+
+		for (i = 0; i < total_size; i += size_to_vmap) {
+			size_to_vmap = min(size_to_vmap, total_size - i);
+			for (j = 0; j < 10 && size_to_vmap; ++j) {
+				ptr = ioremap(buff_phys, size_to_vmap);
+				if (ptr) {
+					switch (cmd) {
+					case ION_IOC_CLEAN_CACHES:
+						dmac_clean_range(ptr,
+							ptr + size_to_vmap);
+						outer_cache_op =
+							outer_clean_range;
+						break;
+					case ION_IOC_INV_CACHES:
+						dmac_inv_range(ptr,
+							ptr + size_to_vmap);
+						outer_cache_op =
+							outer_inv_range;
+						break;
+					case ION_IOC_CLEAN_INV_CACHES:
+						dmac_flush_range(ptr,
+							ptr + size_to_vmap);
+						outer_cache_op =
+							outer_flush_range;
+						break;
+					default:
+						return -EINVAL;
+					}
+					buff_phys += size_to_vmap;
+					break;
+				} else {
+					size_to_vmap >>= 1;
+				}
+			}
+			if (!ptr) {
+				pr_err("Couldn't io-remap the memory\n");
+				return -EINVAL;
+			}
+			iounmap(ptr);
+		}
+	} else {
+		switch (cmd) {
+		case ION_IOC_CLEAN_CACHES:
+			dmac_clean_range(vaddr, vaddr + length);
+			outer_cache_op = outer_clean_range;
+			break;
+		case ION_IOC_INV_CACHES:
+			dmac_inv_range(vaddr, vaddr + length);
+			outer_cache_op = outer_inv_range;
+			break;
+		case ION_IOC_CLEAN_INV_CACHES:
+			dmac_flush_range(vaddr, vaddr + length);
+			outer_cache_op = outer_flush_range;
+			break;
+		default:
+			return -EINVAL;
+		}
 	}
 
 	if (carveout_heap->has_outer_cache) {
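
When the caller passes no kernel mapping (vaddr == NULL), the windowed path just added walks the buffer in chunks, halving the window each time ioremap() fails and giving up after ten attempts per window. The retry shape, lifted into a userspace sketch (try_map() is a stand-in for ioremap; its failure threshold is made up purely to exercise the halving):

#include <stdio.h>
#include <stddef.h>

/* Stand-in for ioremap(): pretend mappings above 1 MiB always fail. */
static void *try_map(unsigned long phys, size_t len)
{
	return len > (1u << 20) ? NULL : (void *)phys;
}

int main(void)
{
	unsigned long phys = 0x80000000ul;
	size_t total = 5u << 20, window = 4u << 20;

	for (size_t done = 0; done < total; done += window) {
		void *ptr = NULL;

		if (window > total - done)
			window = total - done;
		for (int tries = 0; tries < 10 && window; tries++) {
			ptr = try_map(phys, window);
			if (ptr)
				break;
			window >>= 1;	/* shrink the window and retry */
		}
		if (!ptr) {
			fprintf(stderr, "could not map memory\n");
			return 1;
		}
		/* driver: dmac_clean/inv/flush_range(ptr, ptr + window) */
		printf("synced %zu bytes at %#lx\n", window, phys);
		phys += window;
	}
	return 0;
}
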
@@ -296,8 +337,11 @@
 			const char *client_name = "(null)";
 
 			if (last_end < data->addr) {
-				seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
-					   "FREE", last_end, data->addr-1,
+				phys_addr_t da;
+
+				da = data->addr-1;
+				seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
+					   "FREE", &last_end, &da,
 					   data->addr-last_end,
 					   data->addr-last_end);
 			}
@@ -305,9 +349,9 @@
 			if (data->client_name)
 				client_name = data->client_name;
 
-			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
-				   client_name, data->addr,
-				   data->addr_end,
+			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
+				   client_name, &data->addr,
+				   &data->addr_end,
 				   data->size, data->size);
 			last_end = data->addr_end+1;
 		}
@@ -357,7 +401,7 @@
 		goto out1;
 	}
 
-	sglist = kmalloc(sizeof(*sglist), GFP_KERNEL);
+	sglist = vmalloc(sizeof(*sglist));
 	if (!sglist)
 		goto out1;
 
@@ -376,18 +420,19 @@
 
 	if (extra) {
 		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
-					  SZ_4K, prot);
+		unsigned long phys_addr = sg_phys(sglist);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+					extra, SZ_4K, prot);
 		if (ret)
 			goto out2;
 	}
-	kfree(sglist);
+	vfree(sglist);
 	return ret;
 
 out2:
 	iommu_unmap_range(domain, data->iova_addr, buffer->size);
 out1:
-	kfree(sglist);
+	vfree(sglist);
 	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
 				data->mapped_size);
 
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
index bef6b6f..4f12e38 100644
--- a/drivers/gpu/ion/ion_cma_heap.c
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -127,8 +127,8 @@
 	struct device *dev = heap->priv;
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 
-	dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
-		info->handle);
+	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+		&info->handle);
 
 	*addr = info->handle;
 	*len = buffer->size;
@@ -228,8 +228,9 @@
 
 	extra_iova_addr = data->iova_addr + buffer->size;
 	if (extra) {
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
-						prot);
+		unsigned long phys_addr = sg_phys(table->sgl);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+					extra, SZ_4K, prot);
 		if (ret)
 			goto out2;
 	}
@@ -280,15 +281,30 @@
 
 	switch (cmd) {
 	case ION_IOC_CLEAN_CACHES:
-		dmac_clean_range(vaddr, vaddr + length);
+		if (!vaddr)
+			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_TO_DEVICE);
+		else
+			dmac_clean_range(vaddr, vaddr + length);
 		outer_cache_op = outer_clean_range;
 		break;
 	case ION_IOC_INV_CACHES:
-		dmac_inv_range(vaddr, vaddr + length);
+		if (!vaddr)
+			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_FROM_DEVICE);
+		else
+			dmac_inv_range(vaddr, vaddr + length);
 		outer_cache_op = outer_inv_range;
 		break;
 	case ION_IOC_CLEAN_INV_CACHES:
-		dmac_flush_range(vaddr, vaddr + length);
+		if (!vaddr) {
+			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_TO_DEVICE);
+			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_FROM_DEVICE);
+		} else {
+			dmac_flush_range(vaddr, vaddr + length);
+		}
 		outer_cache_op = outer_flush_range;
 		break;
 	default:
@@ -304,6 +320,35 @@
 	return 0;
 }
 
+static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
+			const struct rb_root *mem_map)
+{
+	if (mem_map) {
+		struct rb_node *n;
+
+		seq_printf(s, "\nMemory Map\n");
+		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+			   "client", "start address", "end address",
+			   "size (hex)");
+
+		for (n = rb_first(mem_map); n; n = rb_next(n)) {
+			struct mem_map_data *data =
+					rb_entry(n, struct mem_map_data, node);
+			const char *client_name = "(null)";
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
+				   client_name, &data->addr,
+				   &data->addr_end,
+				   data->size, data->size);
+		}
+	}
+	return 0;
+}
+
 static struct ion_heap_ops ion_cma_ops = {
 	.allocate = ion_cma_allocate,
 	.free = ion_cma_free,
@@ -316,6 +361,7 @@
 	.map_iommu = ion_cma_map_iommu,
 	.unmap_iommu = ion_cma_unmap_iommu,
 	.cache_op = ion_cma_cache_ops,
+	.print_debug = ion_cma_print_debug,
 };
 
 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
diff --git a/drivers/gpu/ion/ion_cma_secure_heap.c b/drivers/gpu/ion/ion_cma_secure_heap.c
new file mode 100644
index 0000000..0fbcfbf
--- /dev/null
+++ b/drivers/gpu/ion/ion_cma_secure_heap.c
@@ -0,0 +1,386 @@
+/*
+ * drivers/gpu/ion/ion_cma_secure_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+#include <mach/iommu_domains.h>
+
+#include <asm/cacheflush.h>
+
+/* for ion_heap_ops structure */
+#include "ion_priv.h"
+#include "msm/ion_cp_common.h"
+
+#define ION_CMA_ALLOCATE_FAILED NULL
+
+struct ion_secure_cma_buffer_info {
+	/*
+	 * This needs to come first for compatibility with the secure buffer API
+	 */
+	struct ion_cp_buffer secure;
+	void *cpu_addr;
+	dma_addr_t handle;
+	struct sg_table *table;
+	bool is_cached;
+};
+
+static int cma_heap_has_outer_cache;
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function can be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	sg_dma_address(sgt->sgl) = handle;
+	return 0;
+}
+
+/* ION CMA heap operations functions */
+static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
+			    struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct device *dev = heap->priv;
+	struct ion_secure_cma_buffer_info *info;
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(dev, "Can't allocate buffer info\n");
+		return ION_CMA_ALLOCATE_FAILED;
+	}
+
+	info->cpu_addr = dma_alloc_attrs(dev, len, &(info->handle), 0, &attrs);
+
+	if (!info->cpu_addr) {
+		dev_err(dev, "Failed to allocate buffer\n");
+		goto err;
+	}
+
+	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(dev, "Failed to allocate sg table\n");
+		goto err;
+	}
+
+	ion_secure_cma_get_sgtable(dev,
+			info->table, info->cpu_addr, info->handle, len);
+
+	info->secure.buffer = info->handle;
+
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	dev_dbg(dev, "Allocate buffer %p\n", buffer);
+	return info;
+
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static int ion_secure_cma_allocate(struct ion_heap *heap,
+			    struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
+	struct ion_secure_cma_buffer_info *buf = NULL;
+
+	if (!secure_allocation) {
+		pr_err("%s: non-secure allocation disallowed from heap %s %lx\n",
+			__func__, heap->name, flags);
+		return -ENOMEM;
+	}
+
+	if (ION_IS_CACHED(flags)) {
+		pr_err("%s: cannot allocate cached memory from secure heap %s\n",
+			__func__, heap->name);
+		return -ENOMEM;
+	}
+
+	buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags);
+
+	if (buf) {
+		buf->secure.want_delayed_unsecure = 0;
+		atomic_set(&buf->secure.secure_cnt, 0);
+		mutex_init(&buf->secure.lock);
+		buf->secure.is_secure = 1;
+		return 0;
+	} else {
+		return -ENOMEM;
+	}
+}
+
+static void ion_secure_cma_free(struct ion_buffer *buffer)
+{
+	struct device *dev = buffer->heap->priv;
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+	dev_dbg(dev, "Release buffer %p\n", buffer);
+	/* release memory with the same attrs used at allocation time */
+	dma_free_attrs(dev, buffer->size, info->cpu_addr, info->handle,
+			&attrs);
+	/* release sg table */
+	kfree(info->table);
+	kfree(info);
+}
+
+static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			ion_phys_addr_t *addr, size_t *len)
+{
+	struct device *dev = heap->priv;
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+		&info->handle);
+
+	*addr = info->handle;
+	*len = buffer->size;
+
+	return 0;
+}
+
+struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap,
+					 struct ion_buffer *buffer)
+{
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap,
+			       struct ion_buffer *buffer)
+{
+	return;
+}
+
+static int ion_secure_cma_mmap(struct ion_heap *mapper,
+			struct ion_buffer *buffer,
+			struct vm_area_struct *vma)
+{
+	pr_info("%s: mmapping from secure heap %s disallowed\n",
+		__func__, mapper->name);
+	return -EINVAL;
+}
+
+static void *ion_secure_cma_map_kernel(struct ion_heap *heap,
+				struct ion_buffer *buffer)
+{
+	pr_info("%s: kernel mapping from secure heap %s disallowed\n",
+		__func__, heap->name);
+	return NULL;
+}
+
+static void ion_secure_cma_unmap_kernel(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+	return;
+}
+
+int ion_secure_cma_map_iommu(struct ion_buffer *buffer,
+				struct ion_iommu_map *data,
+				unsigned int domain_num,
+				unsigned int partition_num,
+				unsigned long align,
+				unsigned long iova_length,
+				unsigned long flags)
+{
+	int ret = 0;
+	struct iommu_domain *domain;
+	unsigned long extra;
+	unsigned long extra_iova_addr;
+	struct ion_secure_cma_buffer_info *info = buffer->priv_virt;
+	struct sg_table *table = info->table;
+	int prot = IOMMU_WRITE | IOMMU_READ;
+
+	data->mapped_size = iova_length;
+
+	if (!msm_use_iommu()) {
+		data->iova_addr = info->handle;
+		return 0;
+	}
+
+	extra = iova_length - buffer->size;
+
+	ret = msm_allocate_iova_address(domain_num, partition_num,
+						data->mapped_size, align,
+						&data->iova_addr);
+
+	if (ret)
+		goto out;
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		ret = -EINVAL;
+		goto out1;
+	}
+
+	ret = iommu_map_range(domain, data->iova_addr, table->sgl,
+				buffer->size, prot);
+
+	if (ret) {
+		pr_err("%s: could not map %lx in domain %p\n",
+			__func__, data->iova_addr, domain);
+		goto out1;
+	}
+
+	extra_iova_addr = data->iova_addr + buffer->size;
+	if (extra) {
+		unsigned long phys_addr = sg_phys(table->sgl);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+					extra, SZ_4K, prot);
+		if (ret)
+			goto out2;
+	}
+	return ret;
+
+out2:
+	iommu_unmap_range(domain, data->iova_addr, buffer->size);
+out1:
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+out:
+	return ret;
+}
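
ion_secure_cma_map_iommu() above follows the kernel's usual goto-unwind convention: each acquired resource gains a cleanup label, and every failure jumps to the label that releases exactly what has been acquired so far, in reverse order. A generic userspace sketch of the pattern (the resources and the failing step are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int extra_step_fails;	/* toggle to exercise the unwind path */

static int do_setup(void)
{
	void *iova, *mapping;
	int ret = -1;

	iova = malloc(64);		/* stand-in: msm_allocate_iova_address() */
	if (!iova)
		goto out;
	mapping = malloc(64);		/* stand-in: iommu_map_range() */
	if (!mapping)
		goto out_free_iova;
	if (extra_step_fails)		/* stand-in: msm_iommu_map_extra() */
		goto out_unmap;
	return 0;			/* success: caller now owns both */

out_unmap:				/* unwind in reverse order */
	free(mapping);
out_free_iova:
	free(iova);
out:
	return ret;
}

int main(void)
{
	extra_step_fails = 1;
	printf("setup %s\n",
	       do_setup() ? "failed (cleanly unwound)" : "succeeded");
	return 0;
}
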
+
+void ion_secure_cma_unmap_iommu(struct ion_iommu_map *data)
+{
+	unsigned int domain_num;
+	unsigned int partition_num;
+	struct iommu_domain *domain;
+
+	if (!msm_use_iommu())
+		return;
+
+	domain_num = iommu_map_domain(data);
+	partition_num = iommu_map_partition(data);
+
+	domain = msm_get_iommu_domain(domain_num);
+
+	if (!domain) {
+		WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
+		return;
+	}
+
+	iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
+	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
+				data->mapped_size);
+
+	return;
+}
+
+int ion_secure_cma_cache_ops(struct ion_heap *heap,
+			struct ion_buffer *buffer, void *vaddr,
+			unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	pr_info("%s: cache operations disallowed from secure heap %s\n",
+		__func__, heap->name);
+	return -EINVAL;
+}
+
+static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
+			const struct rb_root *mem_map)
+{
+	if (mem_map) {
+		struct rb_node *n;
+
+		seq_printf(s, "\nMemory Map\n");
+		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+			   "client", "start address", "end address",
+			   "size (hex)");
+
+		for (n = rb_first(mem_map); n; n = rb_next(n)) {
+			struct mem_map_data *data =
+					rb_entry(n, struct mem_map_data, node);
+			const char *client_name = "(null)";
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
+				   client_name, &data->addr,
+				   &data->addr_end,
+				   data->size, data->size);
+		}
+	}
+	return 0;
+}
+
+static struct ion_heap_ops ion_secure_cma_ops = {
+	.allocate = ion_secure_cma_allocate,
+	.free = ion_secure_cma_free,
+	.map_dma = ion_secure_cma_heap_map_dma,
+	.unmap_dma = ion_secure_cma_heap_unmap_dma,
+	.phys = ion_secure_cma_phys,
+	.map_user = ion_secure_cma_mmap,
+	.map_kernel = ion_secure_cma_map_kernel,
+	.unmap_kernel = ion_secure_cma_unmap_kernel,
+	.map_iommu = ion_secure_cma_map_iommu,
+	.unmap_iommu = ion_secure_cma_unmap_iommu,
+	.cache_op = ion_secure_cma_cache_ops,
+	.print_debug = ion_secure_cma_print_debug,
+	.secure_buffer = ion_cp_secure_buffer,
+	.unsecure_buffer = ion_cp_unsecure_buffer,
+};
+
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+
+	heap->ops = &ion_secure_cma_ops;
+	/* set the device as the heap's private data; it is later
+	 * used to link the heap with its reserved CMA memory */
+	heap->priv = data->priv;
+	heap->type = ION_HEAP_TYPE_SECURE_DMA;
+	cma_heap_has_outer_cache = data->has_outer_cache;
+	return heap;
+}
+
+void ion_secure_cma_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 41ff28d..88addab 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -23,11 +23,12 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/memory_alloc.h>
 #include <linux/seq_file.h>
-#include <linux/fmem.h>
 #include <linux/iommu.h>
 #include <linux/dma-mapping.h>
+#include <trace/events/kmem.h>
 
 #include <asm/mach/map.h>
 
@@ -68,8 +69,6 @@
  *		user space.
  * @iommu_iova: saved iova when mapping full heap at once.
  * @iommu_partition: partition used to map full heap.
- * @reusable: indicates if the memory should be reused via fmem.
- * @reserved_vrange: reserved virtual address range for use with fmem
  * @iommu_map_all:	Indicates whether we should map whole heap into IOMMU.
  * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
  * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
@@ -93,7 +92,6 @@
 	unsigned long umap_count;
 	unsigned long iommu_iova[MAX_DOMAINS];
 	unsigned long iommu_partition[MAX_DOMAINS];
-	int reusable;
 	void *reserved_vrange;
 	int iommu_map_all;
 	int iommu_2x_map_domain;
@@ -103,7 +101,7 @@
 	size_t heap_size;
 	dma_addr_t handle;
 	int cma;
-	int disallow_non_secure_allocation;
+	int allow_non_secure_allocation;
 };
 
 enum {
@@ -111,17 +109,8 @@
 	HEAP_PROTECTED = 1,
 };
 
-#define DMA_ALLOC_RETRIES	5
+#define DMA_ALLOC_TRIES	5
 
-static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
-			unsigned int permission_type, int version,
-			void *data);
-
-static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
-				unsigned int permission_type, int version,
-				void *data);
-
-#if 0
 static int allocate_heap_memory(struct ion_heap *heap)
 {
 	struct device *dev = heap->priv;
@@ -136,14 +125,16 @@
 	if (cp_heap->cpu_addr)
 		return 0;
 
-	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_RETRIES)) {
+	while (!cp_heap->cpu_addr && (++tries < DMA_ALLOC_TRIES)) {
 		cp_heap->cpu_addr = dma_alloc_attrs(dev,
 						cp_heap->heap_size,
 						&(cp_heap->handle),
 						0,
 						&attrs);
-		if (!cp_heap->cpu_addr)
+		if (!cp_heap->cpu_addr) {
+			trace_ion_cp_alloc_retry(tries);
 			msleep(20);
+		}
 	}
 
 	if (!cp_heap->cpu_addr)
@@ -170,7 +161,7 @@
 out:
 	return ION_CP_ALLOCATE_FAIL;
 }
-#endif
+
 static void free_heap_memory(struct ion_heap *heap)
 {
 	struct device *dev = heap->priv;
@@ -197,19 +188,12 @@
 	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
 }
 
-#if 0
 static int ion_on_first_alloc(struct ion_heap *heap)
 {
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
 	int ret_value;
 
-	if (cp_heap->reusable) {
-		ret_value = fmem_set_state(FMEM_C_STATE);
-		if (ret_value)
-			return 1;
-	}
-
 	if (cp_heap->cma) {
 		ret_value = allocate_heap_memory(heap);
 		if (ret_value)
@@ -217,18 +201,12 @@
 	}
 	return 0;
 }
-#endif
 
 static void ion_on_last_free(struct ion_heap *heap)
 {
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
 
-	if (cp_heap->reusable)
-		if (fmem_set_state(FMEM_T_STATE) != 0)
-			pr_err("%s: unable to transition heap to T-state\n",
-				__func__);
-
 	if (cp_heap->cma)
 		free_heap_memory(heap);
 }
@@ -246,31 +224,6 @@
 
 	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
 		/* Make sure we are in C state when the heap is protected. */
-		if (cp_heap->reusable && !cp_heap->allocated_bytes) {
-			ret_value = fmem_set_state(FMEM_C_STATE);
-			if (ret_value)
-				goto out;
-		}
-
-		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
-				cp_heap->secure_size, cp_heap->permission_type,
-				version, data);
-		if (ret_value) {
-			pr_err("Failed to protect memory for heap %s - "
-				"error code: %d\n", heap->name, ret_value);
-
-			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
-				if (fmem_set_state(FMEM_T_STATE) != 0)
-					pr_err("%s: unable to transition heap to T-state\n",
-						__func__);
-			}
-			atomic_dec(&cp_heap->protect_cnt);
-		} else {
-			cp_heap->heap_protected = HEAP_PROTECTED;
-			pr_debug("Protected heap %s @ 0x%lx\n",
-				heap->name, cp_heap->base);
-		}
-#if 0
 		if (!cp_heap->allocated_bytes)
 			if (ion_on_first_alloc(heap))
 				goto out;
@@ -288,10 +241,9 @@
 			atomic_dec(&cp_heap->protect_cnt);
 		} else {
 			cp_heap->heap_protected = HEAP_PROTECTED;
-			pr_debug("Protected heap %s @ 0x%lx\n",
-				heap->name, cp_heap->base);
+			pr_debug("Protected heap %s @ 0x%pa\n",
+				heap->name, &cp_heap->base);
 		}
-#endif
 	}
 out:
 	pr_debug("%s: protect count is %d\n", __func__,
@@ -337,8 +289,8 @@
 				      unsigned long flags)
 {
 	unsigned long offset;
-	unsigned long secure_allocation = flags & ION_SECURE;
-	unsigned long force_contig = flags & ION_FORCE_CONTIGUOUS;
+	unsigned long secure_allocation = flags & ION_FLAG_SECURE;
+	unsigned long force_contig = flags & ION_FLAG_FORCE_CONTIGUOUS;
 
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
@@ -352,31 +304,25 @@
 	}
 
 	if (!force_contig && !secure_allocation &&
-	     cp_heap->disallow_non_secure_allocation) {
+	     !cp_heap->allow_non_secure_allocation) {
 		mutex_unlock(&cp_heap->lock);
 		pr_debug("%s: non-secure allocation disallowed from this heap\n",
 			__func__);
 		return ION_CP_ALLOCATE_FAIL;
 	}
 
-	if (secure_allocation &&
-	    (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) {
-		mutex_unlock(&cp_heap->lock);
-		pr_err("ION cannot allocate secure memory from heap with "
-			"outstanding mappings: User space: %lu, kernel space "
-			"(cached): %lu\n", cp_heap->umap_count,
-					   cp_heap->kmap_cached_count);
-		return ION_CP_ALLOCATE_FAIL;
+	/*
+	 * The check above already screened out non-secure allocations when
+	 * the heap is protected, so HEAP_PROTECTED implies this is a secure
+	 * allocation. If the heap is protected and there are userspace or
+	 * cached kernel mappings, something has gone wrong in the security
+	 * model.
+	 */
+	if (cp_heap->heap_protected == HEAP_PROTECTED) {
+		BUG_ON(cp_heap->umap_count != 0);
+		BUG_ON(cp_heap->kmap_cached_count != 0);
 	}
 
-	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
-		if (fmem_set_state(FMEM_C_STATE) != 0) {
-			mutex_unlock(&cp_heap->lock);
-			return ION_RESERVED_ALLOCATE_FAIL;
-		}
-	}
-
-#if 0
 	/*
 	 * if this is the first reusable allocation, transition
 	 * the heap
@@ -387,7 +333,6 @@
 			return ION_RESERVED_ALLOCATE_FAIL;
 		}
 
-#endif
 	cp_heap->allocated_bytes += size;
 	mutex_unlock(&cp_heap->lock);
 
@@ -482,7 +427,9 @@
 				  struct ion_buffer *buffer,
 				  ion_phys_addr_t *addr, size_t *len)
 {
-	*addr = buffer->priv_phys;
+	struct ion_cp_buffer *buf = buffer->priv_virt;
+
+	*addr = buf->buffer;
 	*len = buffer->size;
 	return 0;
 }
@@ -492,39 +439,58 @@
 				      unsigned long size, unsigned long align,
 				      unsigned long flags)
 {
-	buffer->priv_phys = ion_cp_allocate(heap, size, align, flags);
-	return buffer->priv_phys == ION_CP_ALLOCATE_FAIL ? -ENOMEM : 0;
+	struct ion_cp_buffer *buf;
+	phys_addr_t addr;
+
+	/*
+	 * we never want Ion to fault pages in for us with this
+	 * heap. We want to set up the mappings ourselves in .map_user
+	 */
+	flags |= ION_FLAG_CACHED_NEEDS_SYNC;
+
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	addr = ion_cp_allocate(heap, size, align, flags);
+	if (addr == ION_CP_ALLOCATE_FAIL) {
+		kfree(buf);	/* don't leak the metadata on failure */
+		return -ENOMEM;
+	}
+
+	buf->buffer = addr;
+	buf->want_delayed_unsecure = 0;
+	atomic_set(&buf->secure_cnt, 0);
+	mutex_init(&buf->lock);
+	buf->is_secure = flags & ION_FLAG_SECURE ? 1 : 0;
+	buffer->priv_virt = buf;
+
+	return 0;
 }
 
 static void ion_cp_heap_free(struct ion_buffer *buffer)
 {
 	struct ion_heap *heap = buffer->heap;
+	struct ion_cp_buffer *buf = buffer->priv_virt;
 
-	ion_cp_free(heap, buffer->priv_phys, buffer->size);
-	buffer->priv_phys = ION_CP_ALLOCATE_FAIL;
+	ion_cp_free(heap, buf->buffer, buffer->size);
+	WARN_ON(atomic_read(&buf->secure_cnt));
+	WARN_ON(atomic_read(&buf->map_cnt));
+	kfree(buf);
+
+	buffer->priv_virt = NULL;
 }
 
 struct sg_table *ion_cp_heap_create_sg_table(struct ion_buffer *buffer)
 {
-	struct sg_table *table;
-	int ret;
+	size_t chunk_size = buffer->size;
+	struct ion_cp_buffer *buf = buffer->priv_virt;
 
-	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!table)
-		return ERR_PTR(-ENOMEM);
+	if (ION_IS_CACHED(buffer->flags))
+		chunk_size = PAGE_SIZE;
+	else if (buf->is_secure && IS_ALIGNED(buffer->size, SZ_1M))
+		chunk_size = SZ_1M;
 
-	ret = sg_alloc_table(table, 1, GFP_KERNEL);
-	if (ret)
-		goto err0;
-
-	table->sgl->length = buffer->size;
-	table->sgl->offset = 0;
-	table->sgl->dma_address = buffer->priv_phys;
-
-	return table;
-err0:
-	kfree(table);
-	return ERR_PTR(ret);
+	return ion_create_chunked_sg_table(buf->buffer, chunk_size,
+					buffer->size);
 }
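
The CP heap picks its scatter-list granularity from the buffer attributes: cached buffers get PAGE_SIZE chunks (so the fault path can sync page by page), secure buffers whose size is a 1 MiB multiple get SZ_1M chunks (matching the CP v2 protection chunk size), and everything else stays a single contiguous entry. A sketch of just that selection (the constants are illustrative):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096u
#define SZ_1M		(1024u * 1024u)
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

static size_t pick_chunk_size(size_t buf_size, int cached, int is_secure)
{
	if (cached)
		return PAGE_SIZE;	/* per-page fault/sync granularity */
	if (is_secure && IS_ALIGNED(buf_size, SZ_1M))
		return SZ_1M;		/* CP v2 protects memory 1M at a time */
	return buf_size;		/* single contiguous entry */
}

int main(void)
{
	printf("cached:    %zu\n", pick_chunk_size(8 * SZ_1M, 1, 0));
	printf("secure 8M: %zu\n", pick_chunk_size(8 * SZ_1M, 0, 1));
	printf("plain:     %zu\n", pick_chunk_size(8 * SZ_1M, 0, 0));
	return 0;
}
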
 
 struct sg_table *ion_cp_heap_map_dma(struct ion_heap *heap,
@@ -568,33 +534,12 @@
 	return ret_value;
 }
 
-void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
-				void *virt_base, unsigned long flags)
-{
-	int ret;
-	unsigned int offset = buffer->priv_phys - phys_base;
-	unsigned long start = ((unsigned long)virt_base) + offset;
-	const struct mem_type *type = ION_IS_CACHED(flags) ?
-				get_mem_type(MT_DEVICE_CACHED) :
-				get_mem_type(MT_DEVICE);
-
-	if (phys_base > buffer->priv_phys)
-		return NULL;
-
-
-	ret = ioremap_pages(start, buffer->priv_phys, buffer->size, type);
-
-	if (!ret)
-		return (void *)start;
-	else
-		return NULL;
-}
-
 void *ion_cp_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
 {
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
 	void *ret_value = NULL;
+	struct ion_cp_buffer *buf = buffer->priv_virt;
 
 	mutex_lock(&cp_heap->lock);
 	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
@@ -606,33 +551,35 @@
 			return NULL;
 		}
 
-		if (cp_heap->reusable) {
-			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
-				cp_heap->reserved_vrange, buffer->flags);
-		} else if (cp_heap->cma) {
+		if (cp_heap->cma) {
 			int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
 			struct page **pages = vmalloc(
 						sizeof(struct page *) * npages);
 			int i;
 			pgprot_t pgprot;
 
+			if (!pages) {
+				mutex_unlock(&cp_heap->lock);
+				return ERR_PTR(-ENOMEM);
+			}
+
 			if (ION_IS_CACHED(buffer->flags))
 				pgprot = PAGE_KERNEL;
 			else
 				pgprot = pgprot_writecombine(PAGE_KERNEL);
 
 			for (i = 0; i < npages; i++) {
-				pages[i] = phys_to_page(buffer->priv_phys +
+				pages[i] = phys_to_page(buf->buffer +
 						i * PAGE_SIZE);
 			}
 			ret_value = vmap(pages, npages, VM_IOREMAP, pgprot);
 			vfree(pages);
 		} else {
 			if (ION_IS_CACHED(buffer->flags))
-				ret_value = ioremap_cached(buffer->priv_phys,
+				ret_value = ioremap_cached(buf->buffer,
 							   buffer->size);
 			else
-				ret_value = ioremap(buffer->priv_phys,
+				ret_value = ioremap(buf->buffer,
 						    buffer->size);
 		}
 
@@ -643,6 +590,7 @@
 				++cp_heap->kmap_cached_count;
 			else
 				++cp_heap->kmap_uncached_count;
+			atomic_inc(&buf->map_cnt);
 		}
 	}
 	mutex_unlock(&cp_heap->lock);
@@ -654,10 +602,9 @@
 {
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
+	struct ion_cp_buffer *buf = buffer->priv_virt;
 
-	if (cp_heap->reusable)
-		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
-	else if (cp_heap->cma)
+	if (cp_heap->cma)
 		vunmap(buffer->vaddr);
 	else
 		__arm_iounmap(buffer->vaddr);
@@ -669,6 +616,8 @@
 		--cp_heap->kmap_cached_count;
 	else
 		--cp_heap->kmap_uncached_count;
+
+	atomic_dec(&buf->map_cnt);
 	ion_cp_release_region(cp_heap);
 	mutex_unlock(&cp_heap->lock);
 
@@ -681,9 +630,10 @@
 	int ret_value = -EAGAIN;
 	struct ion_cp_heap *cp_heap =
 		container_of(heap, struct ion_cp_heap, heap);
+	struct ion_cp_buffer *buf = buffer->priv_virt;
 
 	mutex_lock(&cp_heap->lock);
-	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
+	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED && !buf->is_secure) {
 		if (ion_cp_request_region(cp_heap)) {
 			mutex_unlock(&cp_heap->lock);
 			return -EINVAL;
@@ -694,14 +644,17 @@
 							vma->vm_page_prot);
 
 		ret_value =  remap_pfn_range(vma, vma->vm_start,
-			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
+			__phys_to_pfn(buf->buffer) + vma->vm_pgoff,
 			vma->vm_end - vma->vm_start,
 			vma->vm_page_prot);
 
-		if (ret_value)
+		if (ret_value) {
 			ion_cp_release_region(cp_heap);
-		else
+		} else {
+			atomic_inc(&buf->map_cnt);
 			++cp_heap->umap_count;
+		}
+
 	}
 	mutex_unlock(&cp_heap->lock);
 	return ret_value;
@@ -712,9 +665,11 @@
 {
 	struct ion_cp_heap *cp_heap =
 			container_of(heap, struct ion_cp_heap, heap);
+	struct ion_cp_buffer *buf = buffer->priv_virt;
 
 	mutex_lock(&cp_heap->lock);
 	--cp_heap->umap_count;
+	atomic_dec(&buf->map_cnt);
 	ion_cp_release_region(cp_heap);
 	mutex_unlock(&cp_heap->lock);
 }
@@ -723,29 +678,82 @@
 			void *vaddr, unsigned int offset, unsigned int length,
 			unsigned int cmd)
 {
-	void (*outer_cache_op)(phys_addr_t, phys_addr_t);
+	void (*outer_cache_op)(phys_addr_t, phys_addr_t) = NULL;
 	struct ion_cp_heap *cp_heap =
-	     container_of(heap, struct  ion_cp_heap, heap);
+		container_of(heap, struct  ion_cp_heap, heap);
+	unsigned int size_to_vmap, total_size;
+	struct ion_cp_buffer *buf = buffer->priv_virt;
+	int i, j;
+	void *ptr = NULL;
+	ion_phys_addr_t buff_phys = buf->buffer;
 
-	switch (cmd) {
-	case ION_IOC_CLEAN_CACHES:
-		dmac_clean_range(vaddr, vaddr + length);
-		outer_cache_op = outer_clean_range;
-		break;
-	case ION_IOC_INV_CACHES:
-		dmac_inv_range(vaddr, vaddr + length);
-		outer_cache_op = outer_inv_range;
-		break;
-	case ION_IOC_CLEAN_INV_CACHES:
-		dmac_flush_range(vaddr, vaddr + length);
-		outer_cache_op = outer_flush_range;
-		break;
-	default:
-		return -EINVAL;
+	if (!vaddr) {
+		/*
+		 * Split the vmalloc space into smaller regions in
+		 * order to clean and/or invalidate the cache.
+		 */
+		size_to_vmap = (VMALLOC_END - VMALLOC_START)/8;
+		total_size = buffer->size;
+		for (i = 0; i < total_size; i += size_to_vmap) {
+			size_to_vmap = min(size_to_vmap, total_size - i);
+			for (j = 0; j < 10 && size_to_vmap; ++j) {
+				ptr = ioremap(buff_phys, size_to_vmap);
+				if (ptr) {
+					switch (cmd) {
+					case ION_IOC_CLEAN_CACHES:
+						dmac_clean_range(ptr,
+							ptr + size_to_vmap);
+						outer_cache_op =
+							outer_clean_range;
+						break;
+					case ION_IOC_INV_CACHES:
+						dmac_inv_range(ptr,
+							ptr + size_to_vmap);
+						outer_cache_op =
+							outer_inv_range;
+						break;
+					case ION_IOC_CLEAN_INV_CACHES:
+						dmac_flush_range(ptr,
+							ptr + size_to_vmap);
+						outer_cache_op =
+							outer_flush_range;
+						break;
+					default:
+						return -EINVAL;
+					}
+					buff_phys += size_to_vmap;
+					break;
+				} else {
+					size_to_vmap >>= 1;
+				}
+			}
+			if (!ptr) {
+				pr_err("Couldn't io-remap the memory\n");
+				return -EINVAL;
+			}
+			iounmap(ptr);
+		}
+	} else {
+		switch (cmd) {
+		case ION_IOC_CLEAN_CACHES:
+			dmac_clean_range(vaddr, vaddr + length);
+			outer_cache_op = outer_clean_range;
+			break;
+		case ION_IOC_INV_CACHES:
+			dmac_inv_range(vaddr, vaddr + length);
+			outer_cache_op = outer_inv_range;
+			break;
+		case ION_IOC_CLEAN_INV_CACHES:
+			dmac_flush_range(vaddr, vaddr + length);
+			outer_cache_op = outer_flush_range;
+			break;
+		default:
+			return -EINVAL;
+		}
 	}
 
 	if (cp_heap->has_outer_cache) {
-		unsigned long pstart = buffer->priv_phys + offset;
+		unsigned long pstart = buf->buffer + offset;
 		outer_cache_op(pstart, pstart + length);
 	}
 	return 0;
@@ -775,7 +783,6 @@
 	seq_printf(s, "umapping count: %lx\n", umap_count);
 	seq_printf(s, "kmapping count: %lx\n", kmap_count);
 	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
-	seq_printf(s, "reusable: %s\n", cp_heap->reusable  ? "Yes" : "No");
 
 	if (mem_map) {
 		unsigned long base = cp_heap->base;
@@ -795,8 +802,11 @@
 			const char *client_name = "(null)";
 
 			if (last_end < data->addr) {
-				seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
-					   "FREE", last_end, data->addr-1,
+				phys_addr_t da;
+
+				da = data->addr-1;
+				seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
+					   "FREE", &last_end, &da,
 					   data->addr-last_end,
 					   data->addr-last_end);
 			}
@@ -804,9 +814,9 @@
 			if (data->client_name)
 				client_name = data->client_name;
 
-			seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
-				   client_name, data->addr,
-				   data->addr_end,
+			seq_printf(s, "%16.s %14pa %14pa %14lu (%lx)\n",
+				   client_name, &data->addr,
+				   &data->addr_end,
 				   data->size, data->size);
 			last_end = data->addr_end+1;
 		}
@@ -902,6 +912,7 @@
 		}
 		if (domain_num == cp_heap->iommu_2x_map_domain)
 			ret_value = msm_iommu_map_extra(domain, temp_iova,
+							cp_heap->base,
 							cp_heap->total_size,
 							SZ_64K, prot);
 		if (ret_value)
@@ -933,25 +944,26 @@
 	struct ion_cp_heap *cp_heap =
 		container_of(buffer->heap, struct ion_cp_heap, heap);
 	int prot = IOMMU_WRITE | IOMMU_READ;
+	struct ion_cp_buffer *buf = buffer->priv_virt;
 	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;
 
 	data->mapped_size = iova_length;
 
 	if (!msm_use_iommu()) {
-		data->iova_addr = buffer->priv_phys;
+		data->iova_addr = buf->buffer;
 		return 0;
 	}
 
 	if (cp_heap->iommu_iova[domain_num]) {
 		/* Already mapped. */
-		unsigned long offset = buffer->priv_phys - cp_heap->base;
+		unsigned long offset = buf->buffer - cp_heap->base;
 		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
 		return 0;
 	} else if (cp_heap->iommu_map_all) {
 		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
 		if (!ret) {
 			unsigned long offset =
-					buffer->priv_phys - cp_heap->base;
+					buf->buffer - cp_heap->base;
 			data->iova_addr =
 				cp_heap->iommu_iova[domain_num] + offset;
 			cp_heap->iommu_partition[domain_num] = partition_num;
@@ -994,8 +1006,9 @@
 
 	if (extra) {
 		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
-					  SZ_4K, prot);
+		unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+					extra, SZ_4K, prot);
 		if (ret)
 			goto out2;
 	}
@@ -1061,6 +1074,8 @@
 	.unsecure_heap = ion_cp_unsecure_heap,
 	.map_iommu = ion_cp_heap_map_iommu,
 	.unmap_iommu = ion_cp_heap_unmap_iommu,
+	.secure_buffer = ion_cp_secure_buffer,
+	.unsecure_buffer = ion_cp_unsecure_buffer,
 };
 
 struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
@@ -1092,8 +1107,6 @@
 	if (heap_data->extra_data) {
 		struct ion_cp_heap_pdata *extra_data =
 				heap_data->extra_data;
-		cp_heap->reusable = extra_data->reusable;
-		cp_heap->reserved_vrange = extra_data->virt_addr;
 		cp_heap->permission_type = extra_data->permission_type;
 		if (extra_data->secure_size) {
 			cp_heap->secure_base = extra_data->secure_base;
@@ -1112,8 +1125,8 @@
 		cp_heap->iommu_2x_map_domain =
 				extra_data->iommu_2x_map_domain;
 		cp_heap->cma = extra_data->is_cma;
-		cp_heap->disallow_non_secure_allocation =
-			extra_data->no_nonsecure_alloc;
+		cp_heap->allow_non_secure_allocation =
+			extra_data->allow_nonsecure_alloc;
 
 	}
 
@@ -1163,108 +1176,4 @@
 	*size = cp_heap->total_size;
 }
 
-/*  SCM related code for locking down memory for content protection */
 
-#define SCM_CP_LOCK_CMD_ID	0x1
-#define SCM_CP_PROTECT		0x1
-#define SCM_CP_UNPROTECT	0x0
-
-struct cp_lock_msg {
-	unsigned int start;
-	unsigned int end;
-	unsigned int permission_type;
-	unsigned char lock;
-} __attribute__ ((__packed__));
-
-static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size,
-			      unsigned int permission_type)
-{
-	struct cp_lock_msg cmd;
-	cmd.start = phy_base;
-	cmd.end = phy_base + size;
-	cmd.permission_type = permission_type;
-	cmd.lock = SCM_CP_PROTECT;
-
-	return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
-			&cmd, sizeof(cmd), NULL, 0);
-}
-
-static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size,
-				unsigned int permission_type)
-{
-	struct cp_lock_msg cmd;
-	cmd.start = phy_base;
-	cmd.end = phy_base + size;
-	cmd.permission_type = permission_type;
-	cmd.lock = SCM_CP_UNPROTECT;
-
-	return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
-			&cmd, sizeof(cmd), NULL, 0);
-}
-
-#define V2_CHUNK_SIZE	SZ_1M
-
-static int ion_cp_change_mem_v2(unsigned int phy_base, unsigned int size,
-			      void *data, int lock)
-{
-	enum cp_mem_usage usage = (enum cp_mem_usage) data;
-	unsigned long *chunk_list;
-	int nchunks;
-	int ret;
-	int i;
-
-	if (usage < 0 || usage >= MAX_USAGE)
-		return -EINVAL;
-
-	if (!IS_ALIGNED(size, V2_CHUNK_SIZE)) {
-		pr_err("%s: heap size is not aligned to %x\n",
-			__func__, V2_CHUNK_SIZE);
-		return -EINVAL;
-	}
-
-	nchunks = size / V2_CHUNK_SIZE;
-
-	chunk_list = allocate_contiguous_ebi(sizeof(unsigned long)*nchunks,
-						SZ_4K, 0);
-	if (!chunk_list)
-		return -ENOMEM;
-
-	for (i = 0; i < nchunks; i++)
-		chunk_list[i] = phy_base + i * V2_CHUNK_SIZE;
-
-	ret = ion_cp_change_chunks_state(memory_pool_node_paddr(chunk_list),
-					nchunks, V2_CHUNK_SIZE, usage, lock);
-
-	free_contiguous_memory(chunk_list);
-	return ret;
-}
-
-static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
-			      unsigned int permission_type, int version,
-			      void *data)
-{
-	switch (version) {
-	case ION_CP_V1:
-		return ion_cp_protect_mem_v1(phy_base, size, permission_type);
-	case ION_CP_V2:
-		return ion_cp_change_mem_v2(phy_base, size, data,
-						SCM_CP_PROTECT);
-	default:
-		return -EINVAL;
-	}
-}
-
-static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
-			      unsigned int permission_type, int version,
-			      void *data)
-{
-	switch (version) {
-	case ION_CP_V1:
-		return ion_cp_unprotect_mem_v1(phy_base, size, permission_type);
-	case ION_CP_V2:
-		return ion_cp_change_mem_v2(phy_base, size, data,
-						SCM_CP_UNPROTECT);
-	default:
-		return -EINVAL;
-	}
-}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index 98c1a8c..510b9ce 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -18,13 +18,12 @@
 #include <linux/err.h>
 #include <linux/ion.h>
 #include "ion_priv.h"
-#include <linux/msm_ion.h>
 
 struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
 {
 	struct ion_heap *heap = NULL;
 
-	switch ((int) heap_data->type) {
+	switch (heap_data->type) {
 	case ION_HEAP_TYPE_SYSTEM_CONTIG:
 		heap = ion_system_contig_heap_create(heap_data);
 		break;
@@ -34,17 +33,6 @@
 	case ION_HEAP_TYPE_CARVEOUT:
 		heap = ion_carveout_heap_create(heap_data);
 		break;
-	case ION_HEAP_TYPE_IOMMU:
-		heap = ion_iommu_heap_create(heap_data);
-		break;
-	case ION_HEAP_TYPE_CP:
-		heap = ion_cp_heap_create(heap_data);
-		break;
-#ifdef CONFIG_CMA
-	case ION_HEAP_TYPE_DMA:
-		heap = ion_cma_heap_create(heap_data);
-		break;
-#endif
 	default:
 		pr_err("%s: Invalid heap type %d\n", __func__,
 		       heap_data->type);
@@ -52,9 +40,9 @@
 	}
 
 	if (IS_ERR_OR_NULL(heap)) {
-		pr_err("%s: error creating heap %s type %d base %lu size %u\n",
+		pr_err("%s: error creating heap %s type %d base %pa size %u\n",
 		       __func__, heap_data->name, heap_data->type,
-		       heap_data->base, heap_data->size);
+		       &heap_data->base, heap_data->size);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -69,7 +57,7 @@
 	if (!heap)
 		return;
 
-	switch ((int) heap->type) {
+	switch (heap->type) {
 	case ION_HEAP_TYPE_SYSTEM_CONTIG:
 		ion_system_contig_heap_destroy(heap);
 		break;
@@ -79,17 +67,6 @@
 	case ION_HEAP_TYPE_CARVEOUT:
 		ion_carveout_heap_destroy(heap);
 		break;
-	case ION_HEAP_TYPE_IOMMU:
-		ion_iommu_heap_destroy(heap);
-		break;
-	case ION_HEAP_TYPE_CP:
-		ion_cp_heap_destroy(heap);
-		break;
-#ifdef CONFIG_CMA
-	case ION_HEAP_TYPE_DMA:
-		ion_cma_heap_destroy(heap);
-		break;
-#endif
 	default:
 		pr_err("%s: Invalid heap type %d\n", __func__,
 		       heap->type);
diff --git a/drivers/gpu/ion/ion_iommu_heap.c b/drivers/gpu/ion/ion_iommu_heap.c
index 3a32390..512ebf3 100644
--- a/drivers/gpu/ion/ion_iommu_heap.c
+++ b/drivers/gpu/ion/ion_iommu_heap.c
@@ -48,8 +48,6 @@
 
 #define MAX_VMAP_RETRIES 10
 
-atomic_t v = ATOMIC_INIT(0);
-
 static const unsigned int orders[] = {8, 4, 0};
 static const int num_orders = ARRAY_SIZE(orders);
 
@@ -72,13 +70,21 @@
 	int i;
 
 	for (i = 0; i < num_orders; i++) {
+		gfp_t gfp;
 		if (size < order_to_size(orders[i]))
 			continue;
 		if (max_order < orders[i])
 			continue;
 
-		page = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | __GFP_COMP,
-				orders[i]);
+		gfp = __GFP_HIGHMEM;
+
+		if (orders[i]) {
+			gfp |= __GFP_COMP | __GFP_NORETRY |
+			       __GFP_NO_KSWAPD | __GFP_NOWARN;
+		} else {
+			gfp |= GFP_KERNEL;
+		}
+		page = alloc_pages(gfp, orders[i]);
 		if (!page)
 			continue;
 
@@ -107,7 +113,7 @@
 		void *ptr = NULL;
 		unsigned int npages_to_vmap, total_pages, num_large_pages = 0;
 		long size_remaining = PAGE_ALIGN(size);
-		unsigned int max_order = orders[0];
+		unsigned int max_order = ION_IS_CACHED(flags) ? 0 : orders[0];
 
 		data = kmalloc(sizeof(*data), GFP_KERNEL);
 		if (!data)
@@ -199,9 +205,6 @@
 						DMA_BIDIRECTIONAL);
 
 		buffer->priv_virt = data;
-
-		atomic_add(data->size, &v);
-
 		return 0;
 
 	} else {
@@ -246,19 +249,10 @@
 	sg_free_table(table);
 	kfree(table);
 	table = 0;
-
-	atomic_sub(data->size, &v);
-
 	kfree(data->pages);
 	kfree(data);
 }
 
-int ion_iommu_heap_dump_size(void)
-{
-	int ret = atomic_read(&v);
-	return ret;
-}
-
 void *ion_iommu_heap_map_kernel(struct ion_heap *heap,
 				struct ion_buffer *buffer)
 {
@@ -269,7 +263,7 @@
 		return NULL;
 
 	if (!ION_IS_CACHED(buffer->flags))
-		page_prot = pgprot_noncached(page_prot);
+		page_prot = pgprot_writecombine(page_prot);
 
 	buffer->vaddr = vmap(data->pages, data->nrpages, VM_IOREMAP, page_prot);
 
@@ -340,6 +334,14 @@
 	data->mapped_size = iova_length;
 	extra = iova_length - buffer->size;
 
+	/* Use the biggest alignment to allow bigger IOMMU mappings.
+	 * Use the first entry since the first entry will always be the
+	 * biggest entry. To take advantage of bigger mapping sizes both the
+	 * VA and PA addresses have to be aligned to the biggest size.
+	 */
+	if (buffer->sg_table->sgl->length > align)
+		align = buffer->sg_table->sgl->length;
+
 	ret = msm_allocate_iova_address(domain_num, partition_num,
 						data->mapped_size, align,
 						&data->iova_addr);
@@ -365,8 +367,9 @@
 
 	if (extra) {
 		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
-					  prot);
+		unsigned long phys_addr = sg_phys(buffer->sg_table->sgl);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+					extra, SZ_4K, prot);
 		if (ret)
 			goto out2;
 	}
@@ -418,15 +421,30 @@
 
 	switch (cmd) {
 	case ION_IOC_CLEAN_CACHES:
-		dmac_clean_range(vaddr, vaddr + length);
+		if (!vaddr)
+			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_TO_DEVICE);
+		else
+			dmac_clean_range(vaddr, vaddr + length);
 		outer_cache_op = outer_clean_range;
 		break;
 	case ION_IOC_INV_CACHES:
-		dmac_inv_range(vaddr, vaddr + length);
+		if (!vaddr)
+			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_FROM_DEVICE);
+		else
+			dmac_inv_range(vaddr, vaddr + length);
 		outer_cache_op = outer_inv_range;
 		break;
 	case ION_IOC_CLEAN_INV_CACHES:
-		dmac_flush_range(vaddr, vaddr + length);
+		if (!vaddr) {
+			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_TO_DEVICE);
+			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_FROM_DEVICE);
+		} else {
+			dmac_flush_range(vaddr, vaddr + length);
+		}
 		outer_cache_op = outer_flush_range;
 		break;
 	default:
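
The gfp selection in the ion_iommu_heap hunk above is an opportunistic large-order allocator: high orders must fail fast and quietly (no retries, no kswapd wakeup, no warnings), and only order 0 is allowed to block and reclaim. A minimal sketch of the pattern, assuming a 3.4-era gfp.h; the function name is illustrative:

#include <linux/gfp.h>
#include <linux/kernel.h>

static struct page *try_alloc_highorder(unsigned long size,
					unsigned int *order_out)
{
	static const unsigned int orders[] = { 8, 4, 0 };
	int i;

	for (i = 0; i < ARRAY_SIZE(orders); i++) {
		gfp_t gfp = __GFP_HIGHMEM;
		struct page *page;

		if (size < (PAGE_SIZE << orders[i]))
			continue;	/* chunk larger than what is left */

		if (orders[i])
			/* large chunks: fail fast, quietly, no kswapd */
			gfp |= __GFP_COMP | __GFP_NORETRY |
			       __GFP_NO_KSWAPD | __GFP_NOWARN;
		else
			/* last resort: single pages may block and reclaim */
			gfp |= GFP_KERNEL;

		page = alloc_pages(gfp, orders[i]);
		if (!page)
			continue;

		*order_out = orders[i];
		return page;
	}
	return NULL;
}
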
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index d494f7a..28ef1a5 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_priv.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -98,7 +98,8 @@
 	void *vaddr;
 	int dmap_cnt;
 	struct sg_table *sg_table;
-	int umap_cnt;
+	unsigned long *dirty;
+	struct list_head vmas;
 	unsigned int iommu_map_cnt;
 	struct rb_root iommu_maps;
 	int marked;
@@ -147,6 +148,9 @@
 			   const struct rb_root *mem_map);
 	int (*secure_heap)(struct ion_heap *heap, int version, void *data);
 	int (*unsecure_heap)(struct ion_heap *heap, int version, void *data);
+	int (*secure_buffer)(struct ion_buffer *buffer, int version,
+				void *data, int flags);
+	int (*unsecure_buffer)(struct ion_buffer *buffer, int force_unsecure);
 };
 
 /**
@@ -177,6 +181,15 @@
 };
 
 /**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer:		buffer
+ *
+ * Indicates whether userspace mappings of this buffer will be faulted
+ * in; this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
  * struct mem_map_data - represents information about the memory map for a heap
  * @node:		rb node used to store in the tree of mem_map_data
  * @addr:		start address of memory region.
@@ -187,8 +200,8 @@
  */
 struct mem_map_data {
 	struct rb_node node;
-	unsigned long addr;
-	unsigned long addr_end;
+	ion_phys_addr_t addr;
+	ion_phys_addr_t addr_end;
 	unsigned long size;
 	const char *client_name;
 };
@@ -259,6 +272,9 @@
 #ifdef CONFIG_CMA
 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
 void ion_cma_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *);
+void ion_secure_cma_heap_destroy(struct ion_heap *);
 #endif
 
 struct ion_heap *msm_get_contiguous_heap(void);
@@ -313,4 +329,28 @@
 
 void ion_mem_map_show(struct ion_heap *heap);
 
+
+
+int ion_secure_handle(struct ion_client *client, struct ion_handle *handle,
+			int version, void *data, int flags);
+
+int ion_unsecure_handle(struct ion_client *client, struct ion_handle *handle);
+
+int ion_heap_allow_secure_allocation(enum ion_heap_type type);
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type);
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type);
+
+/**
+ * ion_create_chunked_sg_table - helper function to create sg table
+ * with specified chunk size
+ * @buffer_base:	The starting address used for the sg dma address
+ * @chunk_size:		The size of each entry in the sg table
+ * @total_size:		The total size of the sg table (i.e. the sum of the
+ *			entries). This will be rounded up to the nearest
+ *			multiple of `chunk_size'
+ */
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+					size_t chunk_size, size_t total_size);
 #endif /* _ION_PRIV_H */
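
ion_create_chunked_sg_table(), declared above, rounds total_size up to a multiple of chunk_size and emits one fixed-size entry per chunk starting at buffer_base. A plausible implementation consistent with that kernel-doc (a sketch, not necessarily the body in this tree):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
					size_t chunk_size, size_t total_size)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int n_chunks, ret, i;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	/* round up: a partial trailing chunk still gets a full entry */
	n_chunks = DIV_ROUND_UP(total_size, chunk_size);

	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}

	for_each_sg(table->sgl, sg, table->nents, i) {
		sg_dma_address(sg) = buffer_base + i * chunk_size;
		sg->length = chunk_size;
	}
	return table;
}
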
diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c
index 980174e..ceb30a4 100644
--- a/drivers/gpu/ion/ion_system_heap.c
+++ b/drivers/gpu/ion/ion_system_heap.c
@@ -2,7 +2,7 @@
  * drivers/gpu/ion/ion_system_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -15,7 +15,10 @@
  *
  */
 
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
+#include <linux/highmem.h>
 #include <linux/ion.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
@@ -28,12 +31,44 @@
 #include <mach/memory.h>
 #include <asm/cacheflush.h>
 #include <linux/msm_ion.h>
+#include <linux/dma-mapping.h>
 
 static atomic_t system_heap_allocated;
 static atomic_t system_contig_heap_allocated;
 static unsigned int system_heap_has_outer_cache;
 static unsigned int system_heap_contig_has_outer_cache;
 
+struct page_info {
+	struct page *page;
+	unsigned long order;
+	struct list_head list;
+};
+
+static struct page_info *alloc_largest_available(unsigned long size,
+						 bool split_pages)
+{
+	static unsigned int orders[] = {8, 4, 0};
+	struct page *page;
+	struct page_info *info;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(orders); i++) {
+		if (size < (1 << orders[i]) * PAGE_SIZE)
+			continue;
+		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
+				   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
+		if (!page)
+			continue;
+		if (split_pages)
+			split_page(page, orders[i]);
+		info = kmap(page);
+		info->page = page;
+		info->order = orders[i];
+		return info;
+	}
+	return NULL;
+}
+
 static int ion_system_heap_allocate(struct ion_heap *heap,
 				     struct ion_buffer *buffer,
 				     unsigned long size, unsigned long align,
@@ -41,31 +76,73 @@
 {
 	struct sg_table *table;
 	struct scatterlist *sg;
-	int i, j;
-	int npages = PAGE_ALIGN(size) / PAGE_SIZE;
+	int ret;
+	struct list_head pages;
+	struct page_info *info, *tmp_info;
+	int i = 0;
+	long size_remaining = PAGE_ALIGN(size);
+	bool split_pages = ion_buffer_fault_user_mappings(buffer);
+
+
+	INIT_LIST_HEAD(&pages);
+	while (size_remaining > 0) {
+		info = alloc_largest_available(size_remaining, split_pages);
+		if (!info)
+			goto err;
+		list_add_tail(&info->list, &pages);
+		size_remaining -= (1 << info->order) * PAGE_SIZE;
+		i++;
+	}
 
 	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table)
-		return -ENOMEM;
-	i = sg_alloc_table(table, npages, GFP_KERNEL);
-	if (i)
-		goto err0;
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		struct page *page;
-		page = alloc_page(GFP_KERNEL|__GFP_ZERO);
-		if (!page)
-			goto err1;
-		sg_set_page(sg, page, PAGE_SIZE, 0);
+		goto err;
+
+	if (split_pages)
+		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
+				     GFP_KERNEL);
+	else
+		ret = sg_alloc_table(table, i, GFP_KERNEL);
+
+	if (ret)
+		goto err1;
+
+	sg = table->sgl;
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+		struct page *page = info->page;
+
+		if (split_pages) {
+			for (i = 0; i < (1 << info->order); i++) {
+				sg_set_page(sg, page + i, PAGE_SIZE, 0);
+				sg = sg_next(sg);
+			}
+		} else {
+			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
+				    0);
+			sg = sg_next(sg);
+		}
+		list_del(&info->list);
+		kunmap(page);
 	}
+
+	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+			       DMA_BIDIRECTIONAL);
+
 	buffer->priv_virt = table;
 	atomic_add(size, &system_heap_allocated);
 	return 0;
 err1:
-	for_each_sg(table->sgl, sg, i, j)
-		__free_page(sg_page(sg));
-	sg_free_table(table);
-err0:
 	kfree(table);
+err:
+	list_for_each_entry(info, &pages, list) {
+		if (split_pages)
+			for (i = 0; i < (1 << info->order); i++)
+				__free_page(info->page + i);
+		else
+			__free_pages(info->page, info->order);
+
+		kunmap(info->page);
+	}
 	return -ENOMEM;
 }
 
@@ -76,7 +153,7 @@
 	struct sg_table *table = buffer->priv_virt;
 
 	for_each_sg(table->sgl, sg, table->nents, i)
-		__free_page(sg_page(sg));
+		__free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
 	if (buffer->sg_table)
 		sg_free_table(buffer->sg_table);
 	kfree(buffer->sg_table);
@@ -98,25 +175,33 @@
 void *ion_system_heap_map_kernel(struct ion_heap *heap,
 				 struct ion_buffer *buffer)
 {
-	if (!ION_IS_CACHED(buffer->flags)) {
-		pr_err("%s: cannot map system heap uncached\n", __func__);
-		return ERR_PTR(-EINVAL);
-	} else {
-		struct scatterlist *sg;
-		int i;
-		void *vaddr;
-		struct sg_table *table = buffer->priv_virt;
-		struct page **pages = kmalloc(
-					sizeof(struct page *) * table->nents,
-					GFP_KERNEL);
+	struct scatterlist *sg;
+	int i, j;
+	void *vaddr;
+	pgprot_t pgprot;
+	struct sg_table *table = buffer->priv_virt;
+	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	struct page **pages = kzalloc(sizeof(struct page *) * npages,
+				     GFP_KERNEL);
+	struct page **tmp = pages;
 
-		for_each_sg(table->sgl, sg, table->nents, i)
-			pages[i] = sg_page(sg);
-		vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
-		kfree(pages);
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
 
-		return vaddr;
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
+		struct page *page = sg_page(sg);
+		BUG_ON(i >= npages);
+		for (j = 0; j < npages_this_entry; j++) {
+			*(tmp++) = page++;
+		}
 	}
+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
+	kfree(pages);
+
+	return vaddr;
 }
 
 void ion_system_heap_unmap_kernel(struct ion_heap *heap,
@@ -154,26 +239,27 @@
 int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 			     struct vm_area_struct *vma)
 {
+	struct sg_table *table = buffer->priv_virt;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff;
+	struct scatterlist *sg;
+	int i;
+
 	if (!ION_IS_CACHED(buffer->flags)) {
 		pr_err("%s: cannot map system heap uncached\n", __func__);
 		return -EINVAL;
-	} else {
-		struct sg_table *table = buffer->priv_virt;
-		unsigned long addr = vma->vm_start;
-		unsigned long offset = vma->vm_pgoff;
-		struct scatterlist *sg;
-		int i;
-
-		for_each_sg(table->sgl, sg, table->nents, i) {
-			if (offset) {
-				offset--;
-				continue;
-			}
-			vm_insert_page(vma, addr, sg_page(sg));
-			addr += PAGE_SIZE;
-		}
-		return 0;
 	}
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		if (offset) {
+			offset--;
+			continue;
+		}
+		remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
+				sg_dma_len(sg), vma->vm_page_prot);
+		addr += sg_dma_len(sg);
+	}
+	return 0;
 }
 
 int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
@@ -184,15 +270,30 @@
 
 	switch (cmd) {
 	case ION_IOC_CLEAN_CACHES:
-		dmac_clean_range(vaddr, vaddr + length);
+		if (!vaddr)
+			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_TO_DEVICE);
+		else
+			dmac_clean_range(vaddr, vaddr + length);
 		outer_cache_op = outer_clean_range;
 		break;
 	case ION_IOC_INV_CACHES:
-		dmac_inv_range(vaddr, vaddr + length);
+		if (!vaddr)
+			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_FROM_DEVICE);
+		else
+			dmac_inv_range(vaddr, vaddr + length);
 		outer_cache_op = outer_inv_range;
 		break;
 	case ION_IOC_CLEAN_INV_CACHES:
-		dmac_flush_range(vaddr, vaddr + length);
+		if (!vaddr) {
+			dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_TO_DEVICE);
+			dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
+				buffer->sg_table->nents, DMA_FROM_DEVICE);
+		} else {
+			dmac_flush_range(vaddr, vaddr + length);
+		}
 		outer_cache_op = outer_flush_range;
 		break;
 	default:
@@ -255,6 +356,14 @@
 	data->mapped_size = iova_length;
 	extra = iova_length - buffer->size;
 
+	/* Use the biggest alignment to allow bigger IOMMU mappings.
+	 * Use the first entry since the first entry will always be the
+	 * biggest entry. To take advantage of bigger mapping sizes both the
+	 * VA and PA addresses have to be aligned to the biggest size.
+	 */
+	if (table->sgl->length > align)
+		align = table->sgl->length;
+
 	ret = msm_allocate_iova_address(domain_num, partition_num,
 						data->mapped_size, align,
 						&data->iova_addr);
@@ -280,8 +389,9 @@
 
 	extra_iova_addr = data->iova_addr + buffer->size;
 	if (extra) {
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
-					  prot);
+		unsigned long phys_addr = sg_phys(table->sgl);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+					extra, SZ_4K, prot);
 		if (ret)
 			goto out2;
 	}
@@ -357,7 +467,7 @@
 }
 
 struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
-						   struct ion_buffer *buffer)
+						struct ion_buffer *buffer)
 {
 	struct sg_table *table;
 	int ret;
@@ -375,6 +485,13 @@
 	return table;
 }
 
+void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+				      struct ion_buffer *buffer)
+{
+	sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
+}
+
 int ion_system_contig_heap_map_user(struct ion_heap *heap,
 				    struct ion_buffer *buffer,
 				    struct vm_area_struct *vma)
@@ -483,7 +600,7 @@
 	}
 	page = virt_to_page(buffer->vaddr);
 
-	sglist = kmalloc(sizeof(*sglist), GFP_KERNEL);
+	sglist = vmalloc(sizeof(*sglist));
 	if (!sglist)
 		goto out1;
 
@@ -500,32 +617,45 @@
 
 	if (extra) {
 		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
-		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
-					  prot);
+		unsigned long phys_addr = sg_phys(sglist);
+		ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+					extra, SZ_4K, prot);
 		if (ret)
 			goto out2;
 	}
-	kfree(sglist);
+	vfree(sglist);
 	return ret;
 out2:
 	iommu_unmap_range(domain, data->iova_addr, buffer->size);
 
 out1:
-	kfree(sglist);
+	vfree(sglist);
 	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
 						data->mapped_size);
 out:
 	return ret;
 }
 
+void *ion_system_contig_heap_map_kernel(struct ion_heap *heap,
+	struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+void ion_system_contig_heap_unmap_kernel(struct ion_heap *heap,
+	struct ion_buffer *buffer)
+{
+	return;
+}
+
 static struct ion_heap_ops kmalloc_ops = {
 	.allocate = ion_system_contig_heap_allocate,
 	.free = ion_system_contig_heap_free,
 	.phys = ion_system_contig_heap_phys,
 	.map_dma = ion_system_contig_heap_map_dma,
-	.unmap_dma = ion_system_heap_unmap_dma,
-	.map_kernel = ion_system_heap_map_kernel,
-	.unmap_kernel = ion_system_heap_unmap_kernel,
+	.unmap_dma = ion_system_contig_heap_unmap_dma,
+	.map_kernel = ion_system_contig_heap_map_kernel,
+	.unmap_kernel = ion_system_contig_heap_unmap_kernel,
 	.map_user = ion_system_contig_heap_map_user,
 	.cache_op = ion_system_contig_heap_cache_ops,
 	.print_debug = ion_system_contig_print_debug,
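
With the cache_op hunks above, cache maintenance no longer requires a kernel mapping: a NULL vaddr routes the operation through dma_sync_sg_*() on the buffer's sg_table instead of dmac_*_range() on a virtual range. A hypothetical caller using the exported wrapper:

#include <linux/msm_ion.h>

/* Clean CPU caches for a buffer that has no kernel mapping;
 * vaddr == NULL selects the sg-list based path above. */
static int clean_buffer(struct ion_client *client,
			struct ion_handle *handle, unsigned long len)
{
	return msm_ion_do_cache_op(client, handle, NULL, len,
				   ION_IOC_CLEAN_CACHES);
}
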
diff --git a/drivers/gpu/ion/msm/ion_cp_common.c b/drivers/gpu/ion/msm/ion_cp_common.c
index 41e0a04..7d54cfa 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.c
+++ b/drivers/gpu/ion/msm/ion_cp_common.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2011 Google, Inc
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,12 +12,15 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/memory_alloc.h>
 #include <linux/types.h>
 #include <mach/scm.h>
 
+#include "../ion_priv.h"
 #include "ion_cp_common.h"
 
 #define MEM_PROTECT_LOCK_ID	0x05
+#define MEM_PROTECT_LOCK_ID2 0x0A
 
 struct cp2_mem_chunks {
 	unsigned int *chunk_list;
@@ -25,28 +28,275 @@
 	unsigned int chunk_size;
 } __attribute__ ((__packed__));
 
-struct cp2_lock_req {
+struct cp2_lock2_req {
 	struct cp2_mem_chunks chunks;
 	unsigned int mem_usage;
 	unsigned int lock;
+	unsigned int flags;
 } __attribute__ ((__packed__));
 
+/*  SCM related code for locking down memory for content protection */
+
+#define SCM_CP_LOCK_CMD_ID	0x1
+#define SCM_CP_PROTECT		0x1
+#define SCM_CP_UNPROTECT	0x0
+
+struct cp_lock_msg {
+	unsigned int start;
+	unsigned int end;
+	unsigned int permission_type;
+	unsigned char lock;
+} __attribute__ ((__packed__));
+
+static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size,
+			      unsigned int permission_type)
+{
+	struct cp_lock_msg cmd;
+	cmd.start = phy_base;
+	cmd.end = phy_base + size;
+	cmd.permission_type = permission_type;
+	cmd.lock = SCM_CP_PROTECT;
+
+	return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
+			&cmd, sizeof(cmd), NULL, 0);
+}
+
+static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size,
+				unsigned int permission_type)
+{
+	struct cp_lock_msg cmd;
+	cmd.start = phy_base;
+	cmd.end = phy_base + size;
+	cmd.permission_type = permission_type;
+	cmd.lock = SCM_CP_UNPROTECT;
+
+	return scm_call(SCM_SVC_MP, SCM_CP_LOCK_CMD_ID,
+			&cmd, sizeof(cmd), NULL, 0);
+}
+
+#define V2_CHUNK_SIZE	SZ_1M
+
+static int ion_cp_change_mem_v2(unsigned int phy_base, unsigned int size,
+			      void *data, int lock)
+{
+	enum cp_mem_usage usage = (enum cp_mem_usage) data;
+	unsigned long *chunk_list;
+	int nchunks;
+	int ret;
+	int i;
+
+	if (usage < 0 || usage >= MAX_USAGE)
+		return -EINVAL;
+
+	if (!IS_ALIGNED(size, V2_CHUNK_SIZE)) {
+		pr_err("%s: heap size is not aligned to %x\n",
+			__func__, V2_CHUNK_SIZE);
+		return -EINVAL;
+	}
+
+	nchunks = size / V2_CHUNK_SIZE;
+
+	chunk_list = allocate_contiguous_ebi(sizeof(unsigned long)*nchunks,
+						SZ_4K, 0);
+	if (!chunk_list)
+		return -ENOMEM;
+
+	for (i = 0; i < nchunks; i++)
+		chunk_list[i] = phy_base + i * V2_CHUNK_SIZE;
+
+	ret = ion_cp_change_chunks_state(memory_pool_node_paddr(chunk_list),
+					nchunks, V2_CHUNK_SIZE, usage, lock);
+
+	free_contiguous_memory(chunk_list);
+	return ret;
+}
+
+int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
+			      unsigned int permission_type, int version,
+			      void *data)
+{
+	switch (version) {
+	case ION_CP_V1:
+		return ion_cp_protect_mem_v1(phy_base, size, permission_type);
+	case ION_CP_V2:
+		return ion_cp_change_mem_v2(phy_base, size, data,
+						SCM_CP_PROTECT);
+	default:
+		return -EINVAL;
+	}
+}
+
+int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
+			      unsigned int permission_type, int version,
+			      void *data)
+{
+	switch (version) {
+	case ION_CP_V1:
+		return ion_cp_unprotect_mem_v1(phy_base, size, permission_type);
+	case ION_CP_V2:
+		return ion_cp_change_mem_v2(phy_base, size, data,
+						SCM_CP_UNPROTECT);
+	default:
+		return -EINVAL;
+	}
+}
+
 int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks,
 				unsigned int chunk_size,
 				enum cp_mem_usage usage,
 				int lock)
 {
-	struct cp2_lock_req request;
+	struct cp2_lock2_req request;
+	u32 resp;
 
 	request.mem_usage = usage;
 	request.lock = lock;
+	request.flags = 0;
 
 	request.chunks.chunk_list = (unsigned int *)chunks;
 	request.chunks.chunk_list_size = nchunks;
 	request.chunks.chunk_size = chunk_size;
 
-	return scm_call(SCM_SVC_CP, MEM_PROTECT_LOCK_ID,
-			&request, sizeof(request), NULL, 0);
+	return scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
+			&request, sizeof(request), &resp, sizeof(resp));
 
 }
 
+/* Must be protected by ion_cp_buffer lock */
+static int __ion_cp_protect_buffer(struct ion_buffer *buffer, int version,
+					void *data, int flags)
+{
+	struct ion_cp_buffer *buf = buffer->priv_virt;
+	int ret_value = 0;
+
+	if (atomic_inc_return(&buf->secure_cnt) == 1) {
+		ret_value = ion_cp_protect_mem(buf->buffer,
+				buffer->size, 0,
+				version, data);
+
+		if (ret_value) {
+			pr_err("Failed to secure buffer %p, error %d\n",
+				buffer, ret_value);
+			atomic_dec(&buf->secure_cnt);
+		} else {
+			pr_debug("Protected buffer %p from %pa (size %x)\n",
+				buffer, &buf->buffer,
+				buffer->size);
+			buf->want_delayed_unsecure |=
+				flags & ION_UNSECURE_DELAYED ? 1 : 0;
+			buf->data = data;
+			buf->version = version;
+		}
+	}
+	pr_debug("buffer %p protect count %d\n", buffer,
+		atomic_read(&buf->secure_cnt));
+	BUG_ON(atomic_read(&buf->secure_cnt) < 0);
+	return ret_value;
+}
+
+/* Must be protected by ion_cp_buffer lock */
+static int __ion_cp_unprotect_buffer(struct ion_buffer *buffer, int version,
+					void *data, int force_unsecure)
+{
+	struct ion_cp_buffer *buf = buffer->priv_virt;
+	int ret_value = 0;
+
+	if (force_unsecure) {
+		if (!buf->is_secure || atomic_read(&buf->secure_cnt) == 0)
+			return 0;
+
+		if (atomic_read(&buf->secure_cnt) != 1) {
+			WARN(1, "Forcing unsecure of buffer with outstanding secure count %d!\n",
+				atomic_read(&buf->secure_cnt));
+			atomic_set(&buf->secure_cnt, 1);
+		}
+	}
+
+	if (atomic_dec_and_test(&buf->secure_cnt)) {
+		ret_value = ion_cp_unprotect_mem(
+			buf->buffer, buffer->size,
+			0, version, data);
+
+		if (ret_value) {
+			pr_err("Failed to unsecure buffer %p, error %d\n",
+				buffer, ret_value);
+			/*
+			 * If the force unsecure is happening, the buffer
+			 * is being destroyed. We failed to unsecure the
+			 * buffer even though the memory is given back.
+			 * Just die now rather than discovering later what
+			 * happens when trying to use the secured memory as
+			 * unsecured...
+			 */
+			BUG_ON(force_unsecure);
+			/* Bump the count back up one to try again later */
+			atomic_inc(&buf->secure_cnt);
+		} else {
+			buf->version = -1;
+			buf->data = NULL;
+		}
+	}
+	pr_debug("buffer %p unprotect count %d\n", buffer,
+		atomic_read(&buf->secure_cnt));
+	BUG_ON(atomic_read(&buf->secure_cnt) < 0);
+	return ret_value;
+}
+
+int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data,
+				int flags)
+{
+	int ret_value;
+	struct ion_cp_buffer *buf = buffer->priv_virt;
+
+	mutex_lock(&buf->lock);
+	if (!buf->is_secure) {
+		pr_err("%s: buffer %p was not allocated as secure\n",
+			__func__, buffer);
+		ret_value = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (ION_IS_CACHED(buffer->flags)) {
+		pr_err("%s: buffer %p was allocated as cached\n",
+			__func__, buffer);
+		ret_value = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (atomic_read(&buf->map_cnt)) {
+		pr_err("%s: cannot secure buffer %p with outstanding mappings. Total count: %d\n",
+			__func__, buffer, atomic_read(&buf->map_cnt));
+		ret_value = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (atomic_read(&buf->secure_cnt)) {
+		if (buf->version != version || buf->data != data) {
+			pr_err("%s: Trying to re-secure buffer with different values\n",
+				__func__);
+			pr_err("Last secured version: %d Current %d\n",
+				buf->version, version);
+			pr_err("Last secured data: %p current %p\n",
+				buf->data, data);
+			ret_value = -EINVAL;
+			goto out_unlock;
+		}
+	}
+	ret_value = __ion_cp_protect_buffer(buffer, version, data, flags);
+
+out_unlock:
+	mutex_unlock(&buf->lock);
+	return ret_value;
+}
+
+int ion_cp_unsecure_buffer(struct ion_buffer *buffer, int force_unsecure)
+{
+	int ret_value = 0;
+	struct ion_cp_buffer *buf = buffer->priv_virt;
+
+	mutex_lock(&buf->lock);
+	ret_value = __ion_cp_unprotect_buffer(buffer, buf->version, buf->data,
+						force_unsecure);
+	mutex_unlock(&buf->lock);
+	return ret_value;
+}
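
The secure_cnt handling above is refcounted protect/unprotect: only the 0->1 and 1->0 transitions reach trustzone, and a failed SCM call rolls the count back so the state stays consistent. Reduced to a skeleton, where scm_protect()/scm_unprotect() are stand-ins for ion_cp_protect_mem()/ion_cp_unprotect_mem():

#include <linux/atomic.h>

static int protect(struct ion_cp_buffer *buf)
{
	int ret = 0;

	if (atomic_inc_return(&buf->secure_cnt) == 1) {
		ret = scm_protect(buf);		/* stand-in helper */
		if (ret)
			atomic_dec(&buf->secure_cnt);	/* roll back */
	}
	return ret;
}

static int unprotect(struct ion_cp_buffer *buf)
{
	int ret = 0;

	if (atomic_dec_and_test(&buf->secure_cnt)) {
		ret = scm_unprotect(buf);	/* stand-in helper */
		if (ret)
			atomic_inc(&buf->secure_cnt);	/* retry later */
	}
	return ret;
}
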
diff --git a/drivers/gpu/ion/msm/ion_cp_common.h b/drivers/gpu/ion/msm/ion_cp_common.h
index eec66e6..8ae19be 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.h
+++ b/drivers/gpu/ion/msm/ion_cp_common.h
@@ -20,6 +20,26 @@
 #define ION_CP_V1	1
 #define ION_CP_V2	2
 
+struct ion_cp_buffer {
+	phys_addr_t buffer;
+	atomic_t secure_cnt;
+	int is_secure;
+	int want_delayed_unsecure;
+	/*
+	 * Currently all user/kernel mapping is protected by the heap lock.
+	 * This is sufficient to protect the map count as well. The lock
+	 * should be used to protect map_cnt if the whole heap lock is
+	 * ever removed.
+	 */
+	atomic_t map_cnt;
+	/*
+	 * protects secure_cnt for securing.
+	 */
+	struct mutex lock;
+	int version;
+	void *data;
+};
+
 #if defined(CONFIG_ION_MSM)
 /*
  * ion_cp2_protect_mem - secures memory via trustzone
@@ -37,6 +57,18 @@
 			unsigned int chunk_size, enum cp_mem_usage usage,
 			int lock);
 
+int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
+			unsigned int permission_type, int version,
+			void *data);
+
+int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
+				unsigned int permission_type, int version,
+				void *data);
+
+int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data,
+				int flags);
+
+int ion_cp_unsecure_buffer(struct ion_buffer *buffer, int force_unsecure);
 #else
 static inline int ion_cp_change_chunks_state(unsigned long chunks,
 			unsigned int nchunks, unsigned int chunk_size,
@@ -44,6 +76,32 @@
 {
 	return -ENODEV;
 }
+
+static inline int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
+			unsigned int permission_type, int version,
+			void *data)
+{
+	return -ENODEV;
+}
+
+static inline int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
+				unsigned int permission_type, int version,
+				void *data)
+{
+	return -ENODEV;
+}
+
+static inline int ion_cp_secure_buffer(struct ion_buffer *buffer, int version,
+				void *data, int flags)
+{
+	return -ENODEV;
+}
+
+static inline int ion_cp_unsecure_buffer(struct ion_buffer *buffer,
+				int force_unsecure)
+{
+	return -ENODEV;
+}
 #endif
 
 #endif
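
A heap handing out struct ion_cp_buffer instances is expected to initialize the mutex and counters before the secure/unsecure paths run. A minimal sketch of such an allocation site; the helper name and its paddr/is_secure_heap parameters are illustrative:

#include <linux/mutex.h>
#include <linux/slab.h>

static int init_cp_buffer(struct ion_buffer *buffer, phys_addr_t paddr,
			  int is_secure_heap)
{
	struct ion_cp_buffer *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	buf->buffer = paddr;		/* physical base of the allocation */
	buf->is_secure = is_secure_heap;
	buf->version = -1;		/* no secure version recorded yet */
	mutex_init(&buf->lock);
	atomic_set(&buf->secure_cnt, 0);
	atomic_set(&buf->map_cnt, 0);

	buffer->priv_virt = buf;
	return 0;
}
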
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index ab5d09b..4b55875 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,11 +19,13 @@
 #include <linux/memory_alloc.h>
 #include <linux/fmem.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/mm.h>
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 #include <linux/rwsem.h>
 #include <linux/uaccess.h>
+#include <linux/memblock.h>
 #include <mach/ion.h>
 #include <mach/msm_memtypes.h>
 #include "../ion_priv.h"
@@ -44,6 +46,7 @@
 };
 
 
+#ifdef CONFIG_OF
 static struct ion_heap_desc ion_heap_meta[] = {
 	{
 		.id	= ION_SYSTEM_HEAP_ID,
@@ -51,8 +54,13 @@
 		.name	= ION_VMALLOC_HEAP_NAME,
 	},
 	{
+		.id	= ION_SYSTEM_CONTIG_HEAP_ID,
+		.type	= ION_HEAP_TYPE_SYSTEM_CONTIG,
+		.name	= ION_KMALLOC_HEAP_NAME,
+	},
+	{
 		.id	= ION_CP_MM_HEAP_ID,
-		.type	= ION_HEAP_TYPE_CP,
+		.type	= ION_HEAP_TYPE_SECURE_DMA,
 		.name	= ION_MM_HEAP_NAME,
 		.permission_type = IPT_TYPE_MM_CARVEOUT,
 	},
@@ -88,6 +96,16 @@
 		.name	= ION_AUDIO_HEAP_NAME,
 	},
 	{
+		.id	= ION_PIL1_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_PIL1_HEAP_NAME,
+	},
+	{
+		.id	= ION_PIL2_HEAP_ID,
+		.type	= ION_HEAP_TYPE_CARVEOUT,
+		.name	= ION_PIL2_HEAP_NAME,
+	},
+	{
 		.id	= ION_CP_WB_HEAP_ID,
 		.type	= ION_HEAP_TYPE_CP,
 		.name	= ION_WB_HEAP_NAME,
@@ -97,7 +115,13 @@
 		.type	= ION_HEAP_TYPE_CARVEOUT,
 		.name	= ION_CAMERA_HEAP_NAME,
 	},
+	{
+		.id	= ION_ADSP_HEAP_ID,
+		.type	= ION_HEAP_TYPE_DMA,
+		.name	= ION_ADSP_HEAP_NAME,
+	}
 };
+#endif
 
 struct ion_client *msm_ion_client_create(unsigned int heap_mask,
 					const char *name)
@@ -130,6 +154,22 @@
 }
 EXPORT_SYMBOL(msm_ion_unsecure_heap_2_0);
 
+int msm_ion_secure_buffer(struct ion_client *client, struct ion_handle *handle,
+				enum cp_mem_usage usage,
+				int flags)
+{
+	return ion_secure_handle(client, handle, ION_CP_V2,
+				(void *)usage, flags);
+}
+EXPORT_SYMBOL(msm_ion_secure_buffer);
+
+int msm_ion_unsecure_buffer(struct ion_client *client,
+				struct ion_handle *handle)
+{
+	return ion_unsecure_handle(client, handle);
+}
+EXPORT_SYMBOL(msm_ion_unsecure_buffer);
+
 int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
 			void *vaddr, unsigned long len, unsigned int cmd)
 {
@@ -137,7 +177,7 @@
 }
 EXPORT_SYMBOL(msm_ion_do_cache_op);
 
-static unsigned long msm_ion_get_base(unsigned long size, int memory_type,
+static ion_phys_addr_t msm_ion_get_base(unsigned long size, int memory_type,
 				    unsigned int align)
 {
 	switch (memory_type) {
@@ -305,10 +345,10 @@
 static int is_heap_overlapping(const struct ion_platform_heap *heap1,
 				const struct ion_platform_heap *heap2)
 {
-	unsigned long heap1_base = heap1->base;
-	unsigned long heap2_base = heap2->base;
-	unsigned long heap1_end = heap1->base + heap1->size - 1;
-	unsigned long heap2_end = heap2->base + heap2->size - 1;
+	ion_phys_addr_t heap1_base = heap1->base;
+	ion_phys_addr_t heap2_base = heap2->base;
+	ion_phys_addr_t heap1_end = heap1->base + heap1->size - 1;
+	ion_phys_addr_t heap2_end = heap2->base + heap2->size - 1;
 
 	if (heap1_base == heap2_base)
 		return 1;
@@ -341,6 +381,7 @@
 	}
 }
 
+#ifdef CONFIG_OF
 static int msm_init_extra_data(struct ion_platform_heap *heap,
 			       const struct ion_heap_desc *heap_desc)
 {
@@ -397,6 +438,7 @@
 	unsigned int i;
 	for (i = 0; i < pdata->nr; ++i)
 		kfree(pdata->heaps[i].extra_data);
+	kfree(pdata->heaps);
 	kfree(pdata);
 }
 
@@ -442,6 +484,7 @@
 {
 	unsigned int val;
 	int ret = 0;
+	u32 out_values[2];
 	const char *memory_name_prop;
 
 	ret = of_property_read_u32(node, "qcom,memory-reservation-size", &val);
@@ -465,12 +508,29 @@
 			ret = -EINVAL;
 		}
 	} else {
-		ret = 0;
+		ret = of_property_read_u32_array(node, "qcom,memory-fixed",
+								out_values, 2);
+		if (!ret)
+			heap->size = out_values[1];
+		else
+			ret = 0;
 	}
 out:
 	return ret;
 }
 
+static void msm_ion_get_heap_base(struct device_node *node,
+				 struct ion_platform_heap *heap)
+{
+	u32 out_values[2];
+	int ret = 0;
+
+	ret = of_property_read_u32_array(node, "qcom,memory-fixed",
+							out_values, 2);
+	if (!ret)
+		heap->base = out_values[0];
+	return;
+}
 
 static void msm_ion_get_heap_adjacent(struct device_node *node,
 				      struct ion_platform_heap *heap)
@@ -504,11 +564,13 @@
 	}
 }
 
-static struct ion_platform_data *msm_ion_parse_dt(
-					const struct device_node *dt_node)
+static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
 {
 	struct ion_platform_data *pdata = 0;
+	struct ion_platform_heap *heaps = NULL;
 	struct device_node *node;
+	struct platform_device *new_dev = NULL;
+	const struct device_node *dt_node = pdev->dev.of_node;
 	uint32_t val = 0;
 	int ret = 0;
 	uint32_t num_heaps = 0;
@@ -520,14 +582,27 @@
 	if (!num_heaps)
 		return ERR_PTR(-EINVAL);
 
-	pdata = kzalloc(sizeof(struct ion_platform_data) +
-			num_heaps*sizeof(struct ion_platform_heap), GFP_KERNEL);
+	pdata = kzalloc(sizeof(struct ion_platform_data), GFP_KERNEL);
 	if (!pdata)
 		return ERR_PTR(-ENOMEM);
 
+	heaps = kzalloc(sizeof(struct ion_platform_heap)*num_heaps, GFP_KERNEL);
+	if (!heaps) {
+		kfree(pdata);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pdata->heaps = heaps;
 	pdata->nr = num_heaps;
 
 	for_each_child_of_node(dt_node, node) {
+		new_dev = of_platform_device_create(node, NULL, &pdev->dev);
+		if (!new_dev) {
+			pr_err("Failed to create device %s\n", node->name);
+			goto free_heaps;
+		}
+
+		pdata->heaps[idx].priv = &new_dev->dev;
 		/**
 		 * TODO: Replace this with of_get_address() when this patch
 		 * gets merged: http://
@@ -544,6 +619,7 @@
 		if (ret)
 			goto free_heaps;
 
+		msm_ion_get_heap_base(node, &pdata->heaps[idx]);
 		msm_ion_get_heap_align(node, &pdata->heaps[idx]);
 
 		ret = msm_ion_get_heap_size(node, &pdata->heaps[idx]);
@@ -560,6 +636,17 @@
 	free_pdata(pdata);
 	return ERR_PTR(ret);
 }
+#else
+static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
+{
+	return NULL;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+
+}
+#endif
 
 static int check_vaddr_bounds(unsigned long start, unsigned long end)
 {
@@ -570,22 +657,36 @@
 	if (end < start)
 		goto out;
 
-	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, start);
 	if (vma && vma->vm_start < end) {
 		if (start < vma->vm_start)
-			goto out_up;
+			goto out;
 		if (end > vma->vm_end)
-			goto out_up;
+			goto out;
 		ret = 0;
 	}
 
-out_up:
-	up_read(&mm->mmap_sem);
 out:
 	return ret;
 }
 
+int ion_heap_allow_secure_allocation(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type) ION_HEAP_TYPE_CP) ||
+		type == ((enum ion_heap_type) ION_HEAP_TYPE_SECURE_DMA);
+}
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type) ION_HEAP_TYPE_CP) ||
+		type == ((enum ion_heap_type) ION_HEAP_TYPE_SECURE_DMA);
+}
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type) ION_HEAP_TYPE_CP);
+}
+
 static long msm_ion_custom_ioctl(struct ion_client *client,
 				unsigned int cmd,
 				unsigned long arg)
@@ -599,20 +700,12 @@
 		unsigned long start, end;
 		struct ion_handle *handle = NULL;
 		int ret;
+		struct mm_struct *mm = current->active_mm;
 
 		if (copy_from_user(&data, (void __user *)arg,
 					sizeof(struct ion_flush_data)))
 			return -EFAULT;
 
-		start = (unsigned long) data.vaddr;
-		end = (unsigned long) data.vaddr + data.length;
-
-		if (check_vaddr_bounds(start, end)) {
-			pr_err("%s: virtual address %p is out of bounds\n",
-				__func__, data.vaddr);
-			return -EINVAL;
-		}
-
 		if (!data.handle) {
 			handle = ion_import_dma_buf(client, data.fd);
 			if (IS_ERR(handle)) {
@@ -622,11 +715,27 @@
 			}
 		}
 
+		down_read(&mm->mmap_sem);
+
+		start = (unsigned long) data.vaddr;
+		end = (unsigned long) data.vaddr + data.length;
+
+		if (start && check_vaddr_bounds(start, end)) {
+			up_read(&mm->mmap_sem);
+			pr_err("%s: virtual address %p is out of bounds\n",
+				__func__, data.vaddr);
+			if (!data.handle)
+				ion_free(client, handle);
+			return -EINVAL;
+		}
+
 		ret = ion_do_cache_op(client,
 				data.handle ? data.handle : handle,
 				data.vaddr, data.offset, data.length,
 				cmd);
 
+		up_read(&mm->mmap_sem);
+
 		if (!data.handle)
 			ion_free(client, handle);
 
@@ -635,28 +744,74 @@
 		break;
 
 	}
-	case ION_IOC_GET_FLAGS:
-	{
-		struct ion_flag_data data;
-		int ret;
-		if (copy_from_user(&data, (void __user *)arg,
-					sizeof(struct ion_flag_data)))
-			return -EFAULT;
-
-		ret = ion_handle_get_flags(client, data.handle, &data.flags);
-		if (ret < 0)
-			return ret;
-		if (copy_to_user((void __user *)arg, &data,
-					sizeof(struct ion_flag_data)))
-			return -EFAULT;
-		break;
-	}
 	default:
 		return -ENOTTY;
 	}
 	return 0;
 }
 
+static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_heap *heap = NULL;
+
+	switch ((int)heap_data->type) {
+	case ION_HEAP_TYPE_IOMMU:
+		heap = ion_iommu_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_CP:
+		heap = ion_cp_heap_create(heap_data);
+		break;
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_DMA:
+		heap = ion_cma_heap_create(heap_data);
+		break;
+
+	case ION_HEAP_TYPE_SECURE_DMA:
+		heap = ion_secure_cma_heap_create(heap_data);
+		break;
+#endif
+	default:
+		heap = ion_heap_create(heap_data);
+	}
+
+	if (IS_ERR_OR_NULL(heap)) {
+		pr_err("%s: error creating heap %s type %d base %pa size %u\n",
+		       __func__, heap_data->name, heap_data->type,
+		       &heap_data->base, heap_data->size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	heap->name = heap_data->name;
+	heap->id = heap_data->id;
+	heap->priv = heap_data->priv;
+	return heap;
+}
+
+static void msm_ion_heap_destroy(struct ion_heap *heap)
+{
+	if (!heap)
+		return;
+
+	switch ((int)heap->type) {
+	case ION_HEAP_TYPE_IOMMU:
+		ion_iommu_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_CP:
+		ion_cp_heap_destroy(heap);
+		break;
+#ifdef CONFIG_CMA
+	case ION_HEAP_TYPE_DMA:
+		ion_cma_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_SECURE_DMA:
+		ion_secure_cma_heap_destroy(heap);
+		break;
+#endif
+	default:
+		ion_heap_destroy(heap);
+	}
+}
+
 static int msm_ion_probe(struct platform_device *pdev)
 {
 	struct ion_platform_data *pdata;
@@ -664,7 +819,7 @@
 	int err = -1;
 	int i;
 	if (pdev->dev.of_node) {
-		pdata = msm_ion_parse_dt(pdev->dev.of_node);
+		pdata = msm_ion_parse_dt(pdev);
 		if (IS_ERR(pdata)) {
 			err = PTR_ERR(pdata);
 			goto out;
@@ -698,15 +853,15 @@
 		msm_ion_allocate(heap_data);
 
 		heap_data->has_outer_cache = pdata->has_outer_cache;
-		heaps[i] = ion_heap_create(heap_data);
+		heaps[i] = msm_ion_heap_create(heap_data);
 		if (IS_ERR_OR_NULL(heaps[i])) {
 			heaps[i] = 0;
 			continue;
 		} else {
 			if (heap_data->size)
-				pr_info("ION heap %s created at %lx "
+				pr_info("ION heap %s created at %pa "
 					"with size %x\n", heap_data->name,
-							  heap_data->base,
+							  &heap_data->base,
 							  heap_data->size);
 			else
 				pr_info("ION heap %s created\n",
@@ -715,10 +870,10 @@
 
 		ion_device_add_heap(idev, heaps[i]);
 	}
+	check_for_heap_overlap(pdata->heaps, num_heaps);
 	if (pdata_needs_to_be_freed)
 		free_pdata(pdata);
 
-	check_for_heap_overlap(pdata->heaps, num_heaps);
 	platform_set_drvdata(pdev, idev);
 	return 0;
 
@@ -736,7 +891,7 @@
 	int i;
 
 	for (i = 0; i < num_heaps; i++)
-		ion_heap_destroy(heaps[i]);
+		msm_ion_heap_destroy(heaps[i]);
 
 	ion_device_destroy(idev);
 	kfree(heaps);
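
The per-buffer variants exported above are driven from a kernel client roughly as follows; a sketch assuming VIDEO_PIXEL is a valid cp_mem_usage value for the platform, with error handling trimmed:

static int use_secure_buffer(struct ion_client *client,
			     struct ion_handle *handle)
{
	int ret;

	/* VIDEO_PIXEL is an assumed usage value; passing
	 * ION_UNSECURE_DELAYED as flags would defer the unprotect. */
	ret = msm_ion_secure_buffer(client, handle, VIDEO_PIXEL, 0);
	if (ret)
		return ret;

	/* ... hand the buffer to the secure consumer ... */

	return msm_ion_unsecure_buffer(client, handle);
}
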
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index fec5363..3441afa 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Iinclude/drm -Idrivers/gpu/msm
+ccflags-y := -Iinclude/uapi/drm -Iinclude/drm -Idrivers/gpu/msm
 
 msm_kgsl_core-y = \
 	kgsl.o \
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
index be9f3ac..a2f0e60 100644
--- a/drivers/gpu/msm/a3xx_reg.h
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -66,15 +66,103 @@
 #define A3XX_RBBM_INT_0_MASK 0x063
 #define A3XX_RBBM_INT_0_STATUS 0x064
 #define A3XX_RBBM_PERFCTR_CTL 0x80
+#define A3XX_RBBM_PERFCTR_LOAD_CMD0 0x81
+#define A3XX_RBBM_PERFCTR_LOAD_CMD1 0x82
+#define A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x84
+#define A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x85
+#define A3XX_RBBM_PERFCOUNTER0_SELECT 0x86
+#define A3XX_RBBM_PERFCOUNTER1_SELECT 0x87
 #define A3XX_RBBM_GPU_BUSY_MASKED 0x88
+#define A3XX_RBBM_PERFCTR_CP_0_LO 0x90
+#define A3XX_RBBM_PERFCTR_CP_0_HI 0x91
+#define A3XX_RBBM_PERFCTR_RBBM_0_LO 0x92
+#define A3XX_RBBM_PERFCTR_RBBM_0_HI 0x93
+#define A3XX_RBBM_PERFCTR_RBBM_1_LO 0x94
+#define A3XX_RBBM_PERFCTR_RBBM_1_HI 0x95
+#define A3XX_RBBM_PERFCTR_PC_0_LO 0x96
+#define A3XX_RBBM_PERFCTR_PC_0_HI 0x97
+#define A3XX_RBBM_PERFCTR_PC_1_LO 0x98
+#define A3XX_RBBM_PERFCTR_PC_1_HI 0x99
+#define A3XX_RBBM_PERFCTR_PC_2_LO 0x9A
+#define A3XX_RBBM_PERFCTR_PC_2_HI 0x9B
+#define A3XX_RBBM_PERFCTR_PC_3_LO 0x9C
+#define A3XX_RBBM_PERFCTR_PC_3_HI 0x9D
+#define A3XX_RBBM_PERFCTR_VFD_0_LO 0x9E
+#define A3XX_RBBM_PERFCTR_VFD_0_HI 0x9F
+#define A3XX_RBBM_PERFCTR_VFD_1_LO 0xA0
+#define A3XX_RBBM_PERFCTR_VFD_1_HI 0xA1
+#define A3XX_RBBM_PERFCTR_HLSQ_0_LO 0xA2
+#define A3XX_RBBM_PERFCTR_HLSQ_0_HI 0xA3
+#define A3XX_RBBM_PERFCTR_HLSQ_1_LO 0xA4
+#define A3XX_RBBM_PERFCTR_HLSQ_1_HI 0xA5
+#define A3XX_RBBM_PERFCTR_HLSQ_2_LO 0xA6
+#define A3XX_RBBM_PERFCTR_HLSQ_2_HI 0xA7
+#define A3XX_RBBM_PERFCTR_HLSQ_3_LO 0xA8
+#define A3XX_RBBM_PERFCTR_HLSQ_3_HI 0xA9
+#define A3XX_RBBM_PERFCTR_HLSQ_4_LO 0xAA
+#define A3XX_RBBM_PERFCTR_HLSQ_4_HI 0xAB
+#define A3XX_RBBM_PERFCTR_HLSQ_5_LO 0xAC
+#define A3XX_RBBM_PERFCTR_HLSQ_5_HI 0xAD
+#define A3XX_RBBM_PERFCTR_VPC_0_LO 0xAE
+#define A3XX_RBBM_PERFCTR_VPC_0_HI 0xAF
+#define A3XX_RBBM_PERFCTR_VPC_1_LO 0xB0
+#define A3XX_RBBM_PERFCTR_VPC_1_HI 0xB1
+#define A3XX_RBBM_PERFCTR_TSE_0_LO 0xB2
+#define A3XX_RBBM_PERFCTR_TSE_0_HI 0xB3
+#define A3XX_RBBM_PERFCTR_TSE_1_LO 0xB4
+#define A3XX_RBBM_PERFCTR_TSE_1_HI 0xB5
+#define A3XX_RBBM_PERFCTR_RAS_0_LO 0xB6
+#define A3XX_RBBM_PERFCTR_RAS_0_HI 0xB7
+#define A3XX_RBBM_PERFCTR_RAS_1_LO 0xB8
+#define A3XX_RBBM_PERFCTR_RAS_1_HI 0xB9
+#define A3XX_RBBM_PERFCTR_UCHE_0_LO 0xBA
+#define A3XX_RBBM_PERFCTR_UCHE_0_HI 0xBB
+#define A3XX_RBBM_PERFCTR_UCHE_1_LO 0xBC
+#define A3XX_RBBM_PERFCTR_UCHE_1_HI 0xBD
+#define A3XX_RBBM_PERFCTR_UCHE_2_LO 0xBE
+#define A3XX_RBBM_PERFCTR_UCHE_2_HI 0xBF
+#define A3XX_RBBM_PERFCTR_UCHE_3_LO 0xC0
+#define A3XX_RBBM_PERFCTR_UCHE_3_HI 0xC1
+#define A3XX_RBBM_PERFCTR_UCHE_4_LO 0xC2
+#define A3XX_RBBM_PERFCTR_UCHE_4_HI 0xC3
+#define A3XX_RBBM_PERFCTR_UCHE_5_LO 0xC4
+#define A3XX_RBBM_PERFCTR_UCHE_5_HI 0xC5
+#define A3XX_RBBM_PERFCTR_TP_0_LO 0xC6
+#define A3XX_RBBM_PERFCTR_TP_0_HI 0xC7
+#define A3XX_RBBM_PERFCTR_TP_1_LO 0xC8
+#define A3XX_RBBM_PERFCTR_TP_1_HI 0xC9
+#define A3XX_RBBM_PERFCTR_TP_2_LO 0xCA
+#define A3XX_RBBM_PERFCTR_TP_2_HI 0xCB
+#define A3XX_RBBM_PERFCTR_TP_3_LO 0xCC
+#define A3XX_RBBM_PERFCTR_TP_3_HI 0xCD
+#define A3XX_RBBM_PERFCTR_TP_4_LO 0xCE
+#define A3XX_RBBM_PERFCTR_TP_4_HI 0xCF
+#define A3XX_RBBM_PERFCTR_TP_5_LO 0xD0
+#define A3XX_RBBM_PERFCTR_TP_5_HI 0xD1
+#define A3XX_RBBM_PERFCTR_SP_0_LO 0xD2
+#define A3XX_RBBM_PERFCTR_SP_0_HI 0xD3
+#define A3XX_RBBM_PERFCTR_SP_1_LO 0xD4
+#define A3XX_RBBM_PERFCTR_SP_1_HI 0xD5
+#define A3XX_RBBM_PERFCTR_SP_2_LO 0xD6
+#define A3XX_RBBM_PERFCTR_SP_2_HI 0xD7
+#define A3XX_RBBM_PERFCTR_SP_3_LO 0xD8
+#define A3XX_RBBM_PERFCTR_SP_3_HI 0xD9
+#define A3XX_RBBM_PERFCTR_SP_4_LO 0xDA
+#define A3XX_RBBM_PERFCTR_SP_4_HI 0xDB
 #define A3XX_RBBM_PERFCTR_SP_5_LO 0xDC
 #define A3XX_RBBM_PERFCTR_SP_5_HI 0xDD
 #define A3XX_RBBM_PERFCTR_SP_6_LO 0xDE
 #define A3XX_RBBM_PERFCTR_SP_6_HI 0xDF
 #define A3XX_RBBM_PERFCTR_SP_7_LO 0xE0
 #define A3XX_RBBM_PERFCTR_SP_7_HI 0xE1
+#define A3XX_RBBM_PERFCTR_RB_0_LO 0xE2
+#define A3XX_RBBM_PERFCTR_RB_0_HI 0xE3
+#define A3XX_RBBM_PERFCTR_RB_1_LO 0xE4
+#define A3XX_RBBM_PERFCTR_RB_1_HI 0xE5
+
 #define A3XX_RBBM_RBBM_CTL 0x100
-#define A3XX_RBBM_RBBM_CTL 0x100
+#define A3XX_RBBM_PERFCTR_PWR_0_LO 0x0EA
+#define A3XX_RBBM_PERFCTR_PWR_0_HI 0x0EB
 #define A3XX_RBBM_PERFCTR_PWR_1_LO 0x0EC
 #define A3XX_RBBM_PERFCTR_PWR_1_HI 0x0ED
 #define A3XX_RBBM_DEBUG_BUS_CTL             0x111
@@ -90,6 +178,7 @@
 #define A3XX_CP_MERCIU_DATA2 0x1D3
 #define A3XX_CP_MEQ_ADDR 0x1DA
 #define A3XX_CP_MEQ_DATA 0x1DB
+#define A3XX_CP_PERFCOUNTER_SELECT 0x445
 #define A3XX_CP_HW_FAULT  0x45C
 #define A3XX_CP_AHB_FAULT 0x54D
 #define A3XX_CP_PROTECT_CTRL 0x45E
@@ -138,6 +227,14 @@
 #define A3XX_VSC_PIPE_CONFIG_7 0xC1B
 #define A3XX_VSC_PIPE_DATA_ADDRESS_7 0xC1C
 #define A3XX_VSC_PIPE_DATA_LENGTH_7 0xC1D
+#define A3XX_PC_PERFCOUNTER0_SELECT 0xC48
+#define A3XX_PC_PERFCOUNTER1_SELECT 0xC49
+#define A3XX_PC_PERFCOUNTER2_SELECT 0xC4A
+#define A3XX_PC_PERFCOUNTER3_SELECT 0xC4B
+#define A3XX_GRAS_PERFCOUNTER0_SELECT 0xC88
+#define A3XX_GRAS_PERFCOUNTER1_SELECT 0xC89
+#define A3XX_GRAS_PERFCOUNTER2_SELECT 0xC8A
+#define A3XX_GRAS_PERFCOUNTER3_SELECT 0xC8B
 #define A3XX_GRAS_CL_USER_PLANE_X0 0xCA0
 #define A3XX_GRAS_CL_USER_PLANE_Y0 0xCA1
 #define A3XX_GRAS_CL_USER_PLANE_Z0 0xCA2
@@ -163,14 +260,42 @@
 #define A3XX_GRAS_CL_USER_PLANE_Z5 0xCB6
 #define A3XX_GRAS_CL_USER_PLANE_W5 0xCB7
 #define A3XX_RB_GMEM_BASE_ADDR 0xCC0
+#define A3XX_RB_PERFCOUNTER0_SELECT   0xCC6
+#define A3XX_RB_PERFCOUNTER1_SELECT   0xCC7
+#define A3XX_HLSQ_PERFCOUNTER0_SELECT 0xE00
+#define A3XX_HLSQ_PERFCOUNTER1_SELECT 0xE01
+#define A3XX_HLSQ_PERFCOUNTER2_SELECT 0xE02
+#define A3XX_HLSQ_PERFCOUNTER3_SELECT 0xE03
+#define A3XX_HLSQ_PERFCOUNTER4_SELECT 0xE04
+#define A3XX_HLSQ_PERFCOUNTER5_SELECT 0xE05
 #define A3XX_VFD_PERFCOUNTER0_SELECT 0xE44
+#define A3XX_VFD_PERFCOUNTER1_SELECT 0xE45
 #define A3XX_VPC_VPC_DEBUG_RAM_SEL 0xE61
 #define A3XX_VPC_VPC_DEBUG_RAM_READ 0xE62
+#define A3XX_VPC_PERFCOUNTER0_SELECT 0xE64
+#define A3XX_VPC_PERFCOUNTER1_SELECT 0xE65
 #define A3XX_UCHE_CACHE_MODE_CONTROL_REG 0xE82
+#define A3XX_UCHE_PERFCOUNTER0_SELECT 0xE84
+#define A3XX_UCHE_PERFCOUNTER1_SELECT 0xE85
+#define A3XX_UCHE_PERFCOUNTER2_SELECT 0xE86
+#define A3XX_UCHE_PERFCOUNTER3_SELECT 0xE87
+#define A3XX_UCHE_PERFCOUNTER4_SELECT 0xE88
+#define A3XX_UCHE_PERFCOUNTER5_SELECT 0xE89
 #define A3XX_UCHE_CACHE_INVALIDATE0_REG 0xEA0
+#define A3XX_SP_PERFCOUNTER0_SELECT 0xEC4
+#define A3XX_SP_PERFCOUNTER1_SELECT 0xEC5
+#define A3XX_SP_PERFCOUNTER2_SELECT 0xEC6
+#define A3XX_SP_PERFCOUNTER3_SELECT 0xEC7
+#define A3XX_SP_PERFCOUNTER4_SELECT 0xEC8
 #define A3XX_SP_PERFCOUNTER5_SELECT 0xEC9
 #define A3XX_SP_PERFCOUNTER6_SELECT 0xECA
 #define A3XX_SP_PERFCOUNTER7_SELECT 0xECB
+#define A3XX_TP_PERFCOUNTER0_SELECT 0xF04
+#define A3XX_TP_PERFCOUNTER1_SELECT 0xF05
+#define A3XX_TP_PERFCOUNTER2_SELECT 0xF06
+#define A3XX_TP_PERFCOUNTER3_SELECT 0xF07
+#define A3XX_TP_PERFCOUNTER4_SELECT 0xF08
+#define A3XX_TP_PERFCOUNTER5_SELECT 0xF09
 #define A3XX_GRAS_CL_CLIP_CNTL 0x2040
 #define A3XX_GRAS_CL_GB_CLIP_ADJ 0x2044
 #define A3XX_GRAS_CL_VPORT_XOFFSET 0x2048
@@ -232,12 +357,14 @@
 #define A3XX_SP_VS_OUT_REG_7 0x22CE
 #define A3XX_SP_VS_VPC_DST_REG_0 0x22D0
 #define A3XX_SP_VS_OBJ_OFFSET_REG 0x22D4
+#define A3XX_SP_VS_OBJ_START_REG 0x22D5
 #define A3XX_SP_VS_PVT_MEM_ADDR_REG 0x22D7
 #define A3XX_SP_VS_PVT_MEM_SIZE_REG 0x22D8
 #define A3XX_SP_VS_LENGTH_REG 0x22DF
 #define A3XX_SP_FS_CTRL_REG0 0x22E0
 #define A3XX_SP_FS_CTRL_REG1 0x22E1
 #define A3XX_SP_FS_OBJ_OFFSET_REG 0x22E2
+#define A3XX_SP_FS_OBJ_START_REG 0x22E3
 #define A3XX_SP_FS_PVT_MEM_ADDR_REG 0x22E5
 #define A3XX_SP_FS_PVT_MEM_SIZE_REG 0x22E6
 #define A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x22E8
@@ -271,8 +398,10 @@
 #define A3XX_VBIF_OUT_AXI_AOOO 0x305F
 
 /* Bit flags for RBBM_CTL */
-#define RBBM_RBBM_CTL_RESET_PWR_CTR1  (1 << 1)
-#define RBBM_RBBM_CTL_ENABLE_PWR_CTR1  (1 << 17)
+#define RBBM_RBBM_CTL_RESET_PWR_CTR0  BIT(0)
+#define RBBM_RBBM_CTL_RESET_PWR_CTR1  BIT(1)
+#define RBBM_RBBM_CTL_ENABLE_PWR_CTR0  BIT(16)
+#define RBBM_RBBM_CTL_ENABLE_PWR_CTR1  BIT(17)
 
 /* Various flags used by the context switch code */
 
@@ -537,7 +666,15 @@
 #define RBBM_BLOCK_ID_MARB_3           0x2b
 
 /* RBBM_CLOCK_CTL default value */
-#define A3XX_RBBM_CLOCK_CTL_DEFAULT 0xBFFFFFFF
+#define A305_RBBM_CLOCK_CTL_DEFAULT   0xAAAAAAAA
+#define A305C_RBBM_CLOCK_CTL_DEFAULT  0xAAAAAAAA
+#define A320_RBBM_CLOCK_CTL_DEFAULT   0xBFFFFFFF
+#define A330_RBBM_CLOCK_CTL_DEFAULT   0xBFFCFFFF
+#define A330v2_RBBM_CLOCK_CTL_DEFAULT 0xBFFCFFFF
+#define A305B_RBBM_CLOCK_CTL_DEFAULT  0xAAAAAAAA
+
+#define A330_RBBM_GPR0_CTL_DEFAULT    0x00000000
+#define A330v2_RBBM_GPR0_CTL_DEFAULT  0x00000000
 
 /* COUNTABLE FOR SP PERFCOUNTER */
 #define SP_FS_FULL_ALU_INSTRUCTIONS    0x0E
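
Each perfcounter above is a _LO/_HI register pair, so a consumer assembles the 64-bit value with the usual hi/lo/hi re-read loop to guard against the low word rolling over between the two reads. A sketch with a stand-in register accessor:

#include <linux/types.h>

/* readreg() stands in for the device's register read helper. */
static u64 read_counter64(unsigned int lo_reg, unsigned int hi_reg)
{
	unsigned int lo, hi, hi2;

	do {
		hi  = readreg(hi_reg);
		lo  = readreg(lo_reg);
		hi2 = readreg(hi_reg);
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}
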
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 1886e04..62b6a71 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -17,7 +17,7 @@
 #include <linux/sched.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/msm_kgsl.h>
+#include <linux/delay.h>
 
 #include <mach/socinfo.h>
 #include <mach/msm_bus_board.h>
@@ -30,6 +30,7 @@
 #include "kgsl_cffdump.h"
 #include "kgsl_sharedmem.h"
 #include "kgsl_iommu.h"
+#include "kgsl_trace.h"
 
 #include "adreno.h"
 #include "adreno_pm4types.h"
@@ -99,6 +100,7 @@
 			.irq_name = KGSL_3D0_IRQ,
 		},
 		.iomemname = KGSL_3D0_REG_MEMORY,
+		.shadermemname = KGSL_3D0_SHADER_MEMORY,
 		.ftbl = &adreno_functable,
 #ifdef CONFIG_HAS_EARLYSUSPEND
 		.display_off = {
@@ -166,10 +168,10 @@
 	/* size of gmem for gpu*/
 	unsigned int gmem_size;
 	/* version of pm4 microcode that supports sync_lock
-	   between CPU and GPU for SMMU-v1 programming */
+	   between CPU and GPU for IOMMU-v0 programming */
 	unsigned int sync_lock_pm4_ver;
 	/* version of pfp microcode that supports sync_lock
-	   between CPU and GPU for SMMU-v1 programming */
+	   between CPU and GPU for IOMMU-v0 programming */
 	unsigned int sync_lock_pfp_ver;
 } adreno_gpulist[] = {
 	{ ADRENO_REV_A200, 0, 2, ANY_ID, ANY_ID,
@@ -198,18 +200,331 @@
 		"a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
 		1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
 	/* A3XX doesn't use the pix_shader_start */
-	{ ADRENO_REV_A305, 3, 0, 5, ANY_ID,
+	{ ADRENO_REV_A305, 3, 0, 5, 0,
 		"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
 		512, 0, 2, SZ_256K, 0x3FF037, 0x3FF016 },
 	/* A3XX doesn't use the pix_shader_start */
 	{ ADRENO_REV_A320, 3, 2, ANY_ID, ANY_ID,
 		"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
 		512, 0, 2, SZ_512K, 0x3FF037, 0x3FF016 },
-	{ ADRENO_REV_A330, 3, 3, 0, 0,
+	{ ADRENO_REV_A330, 3, 3, 0, ANY_ID,
 		"a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
 		512, 0, 2, SZ_1M, NO_VER, NO_VER },
+	{ ADRENO_REV_A305B, 3, 0, 5, 0x10,
+		"a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
+		512, 0, 2, SZ_128K, NO_VER, NO_VER },
+	{ ADRENO_REV_A305C, 3, 0, 5, 0x20,
+		"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
+		512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
 };
 
+/**
+ * adreno_perfcounter_init: Reserve kernel performance counters
+ * @device: device to configure
+ *
+ * The kernel needs/wants a certain group of performance counters for
+ * its own activities.  Reserve these performance counters at init time
+ * to ensure that they are always reserved for the kernel.  The performance
+ * counters used by the kernel can be obtained by the user, but these
+ * performance counters will remain active as long as the device is alive.
+ */
+
+static void adreno_perfcounter_init(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	if (adreno_dev->gpudev->perfcounter_init)
+		adreno_dev->gpudev->perfcounter_init(adreno_dev);
+};
+
+/**
+ * adreno_perfcounter_start: Enable performance counters
+ * @adreno_dev: Adreno device to configure
+ *
+ * Ensure that all allocated performance counters are enabled.  Since
+ * the device was most likely stopped, we can't trust that the counters
+ * are still valid so make it so.
+ */
+
+static void adreno_perfcounter_start(struct adreno_device *adreno_dev)
+{
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct adreno_perfcount_group *group;
+	unsigned int i, j;
+
+	/* group id iter */
+	for (i = 0; i < counters->group_count; i++) {
+		group = &(counters->groups[i]);
+
+		/* countable iter */
+		for (j = 0; j < group->reg_count; j++) {
+			if (group->regs[j].countable ==
+					KGSL_PERFCOUNTER_NOT_USED)
+				continue;
+
+			if (adreno_dev->gpudev->perfcounter_enable)
+				adreno_dev->gpudev->perfcounter_enable(
+					adreno_dev, i, j,
+					group->regs[j].countable);
+		}
+	}
+}
+
+/**
+ * adreno_perfcounter_read_group: Read values for groupid/countable pairs
+ * @adreno_dev: Adreno device to configure
+ * @reads: List of kgsl_perfcounter_read_groups
+ * @count: Length of list
+ *
+ * Read the performance counters for the groupid/countable pairs and return
+ * the 64 bit result for each pair
+ */
+
+int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
+	struct kgsl_perfcounter_read_group *reads, unsigned int count)
+{
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct adreno_perfcount_group *group;
+	struct kgsl_perfcounter_read_group *list = NULL;
+	unsigned int i, j;
+	int ret = 0;
+
+	/* perfcounter get/put/query/read not allowed on a2xx */
+	if (adreno_is_a2xx(adreno_dev))
+		return -EINVAL;
+
+	/* the GPU device must supply a perfcounter read callback */
+	if (!adreno_dev->gpudev->perfcounter_read)
+		return -EINVAL;
+
+	/* sanity check the params; cap a single request at 100 reads */
+	if (reads == NULL || count == 0 || count > 100)
+		return -EINVAL;
+
+	/* verify that all of the requested group ids are valid */
+	for (i = 0; i < count; i++) {
+		if (reads[i].groupid >= counters->group_count)
+			return -EINVAL;
+	}
+
+	list = kmalloc(sizeof(struct kgsl_perfcounter_read_group) * count,
+			GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+
+	if (copy_from_user(list, reads,
+			sizeof(struct kgsl_perfcounter_read_group) * count)) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+	/* list iterator */
+	for (j = 0; j < count; j++) {
+		list[j].value = 0;
+
+		group = &(counters->groups[list[j].groupid]);
+
+		/* group/counter iterator */
+		for (i = 0; i < group->reg_count; i++) {
+			if (group->regs[i].countable == list[j].countable) {
+				list[j].value =
+					adreno_dev->gpudev->perfcounter_read(
+					adreno_dev, list[j].groupid,
+					i, group->regs[i].offset);
+				break;
+			}
+		}
+	}
+
+	/* write the data */
+	if (copy_to_user(reads, list,
+			sizeof(struct kgsl_perfcounter_read_group) *
+			count) != 0)
+		ret = -EFAULT;
+
+done:
+	kfree(list);
+	return ret;
+}
+
+/**
+ * adreno_perfcounter_query_group: Determine which countables are in counters
+ * @adreno_dev: Adreno device to configure
+ * @groupid: Desired performance counter group
+ * @countables: Return list of all countables in the groups counters
+ * @count: Max length of the array
+ * @max_counters: max counters for the groupid
+ *
+ * Query the current state of counters for the group.
+ */
+
+int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
+	unsigned int groupid, unsigned int *countables, unsigned int count,
+	unsigned int *max_counters)
+{
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct adreno_perfcount_group *group;
+	unsigned int i;
+
+	*max_counters = 0;
+
+	/* perfcounter get/put/query not allowed on a2xx */
+	if (adreno_is_a2xx(adreno_dev))
+		return -EINVAL;
+
+	if (groupid >= counters->group_count)
+		return -EINVAL;
+
+	group = &(counters->groups[groupid]);
+	*max_counters = group->reg_count;
+
+	/*
+	 * If the countables list is NULL or count is zero, just return the
+	 * group's reg_count in *max_counters and report success
+	 */
+	if (countables == NULL || count == 0)
+		return 0;
+
+	/*
+	 * Walk all the counters in the group and copy out up to count
+	 * countable values
+	 */
+	for (i = 0; i < group->reg_count && i < count; i++) {
+		if (copy_to_user(&countables[i], &(group->regs[i].countable),
+				sizeof(unsigned int)) != 0)
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
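
The NULL/zero early-out above exists to support the usual two-call sizing idiom: probe once with an empty list to learn max_counters, then call again with a buffer of that size. A standalone sketch of that shape follows; the query() stub and its countable values are illustrative only, not driver code.

#include <stdio.h>
#include <stdlib.h>

#define NOT_USED 0xFFFFFFFFu

/* stub query: reports the group size, then copies out up to count entries */
static int query(unsigned int *list, unsigned int count, unsigned int *max)
{
	static const unsigned int active[4] = { 7, 3, NOT_USED, 12 };
	unsigned int i;

	*max = 4;
	for (i = 0; list != NULL && i < count && i < 4; i++)
		list[i] = active[i];
	return 0;
}

int main(void)
{
	unsigned int i, max;
	unsigned int *list;

	query(NULL, 0, &max);		/* first call: learn the size */
	list = malloc(max * sizeof(*list));
	if (list == NULL)
		return 1;
	query(list, max, &max);		/* second call: fill the buffer */
	for (i = 0; i < max; i++)
		printf("counter %u -> countable %#x\n", i, list[i]);
	free(list);
	return 0;
}
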
+/**
+ * adreno_perfcounter_get: Try to put a countable in an available counter
+ * @adreno_dev: Adreno device to configure
+ * @groupid: Desired performance counter group
+ * @countable: Countable desired to be in a counter
+ * @offset: Return offset of the countable
+ * @flags: Used to setup kernel perf counters
+ *
+ * Try to place a countable in an available counter.  If the countable is
+ * already assigned to a counter, bump the reference count on that
+ * counter/countable pair and return success.
+ */
+
+int adreno_perfcounter_get(struct adreno_device *adreno_dev,
+	unsigned int groupid, unsigned int countable, unsigned int *offset,
+	unsigned int flags)
+{
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct adreno_perfcount_group *group;
+	unsigned int i, empty = -1;
+
+	/* always clear return variables */
+	if (offset)
+		*offset = 0;
+
+	/* perfcounter get/put/query not allowed on a2xx */
+	if (adreno_is_a2xx(adreno_dev))
+		return -EINVAL;
+
+	if (groupid >= counters->group_count)
+		return -EINVAL;
+
+	group = &(counters->groups[groupid]);
+
+	/*
+	 * Check if the countable is already associated with a counter.
+	 * Refcount and return the offset, otherwise, try and find an empty
+	 * counter and assign the countable to it.
+	 */
+	for (i = 0; i < group->reg_count; i++) {
+		if (group->regs[i].countable == countable) {
+			/* Countable already associated with counter */
+			group->regs[i].refcount++;
+			group->regs[i].flags |= flags;
+			if (offset)
+				*offset = group->regs[i].offset;
+			return 0;
+		} else if (group->regs[i].countable ==
+			KGSL_PERFCOUNTER_NOT_USED) {
+			/* keep track of unused counter */
+			empty = i;
+		}
+	}
+
+	/* no available counters, so do nothing else */
+	if (empty == -1)
+		return -EBUSY;
+
+	/* initialize the new counter */
+	group->regs[empty].countable = countable;
+	group->regs[empty].refcount = 1;
+
+	/* enable the new counter */
+	adreno_dev->gpudev->perfcounter_enable(adreno_dev, groupid, empty,
+		countable);
+
+	group->regs[empty].flags = flags;
+
+	if (offset)
+		*offset = group->regs[empty].offset;
+
+	return 0;
+}
+
+
+/**
+ * adreno_perfcounter_put: Release a countable from counter resource
+ * @adreno_dev: Adreno device to configure
+ * @groupid: Desired performance counter group
+ * @countable: Countable to be released from its counter
+ *
+ * Put a performance counter/countable pair that was previously obtained.  If
+ * no one else is using the countable, free up the counter for other users.
+ */
+int adreno_perfcounter_put(struct adreno_device *adreno_dev,
+	unsigned int groupid, unsigned int countable)
+{
+	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+	struct adreno_perfcount_group *group;
+
+	unsigned int i;
+
+	/* perfcounter get/put/query not allowed on a2xx */
+	if (adreno_is_a2xx(adreno_dev))
+		return -EINVAL;
+
+	if (groupid >= counters->group_count)
+		return -EINVAL;
+
+	group = &(counters->groups[groupid]);
+
+	for (i = 0; i < group->reg_count; i++) {
+		if (group->regs[i].countable == countable) {
+			if (group->regs[i].refcount > 0) {
+				group->regs[i].refcount--;
+
+				/*
+				 * bookkeeping to ensure we never free a
+				 * perf counter used by the kernel
+				 */
+				if (group->regs[i].flags &&
+					group->regs[i].refcount == 0)
+					group->regs[i].refcount++;
+
+				/* make available if not used */
+				if (group->regs[i].refcount == 0)
+					group->regs[i].countable =
+						KGSL_PERFCOUNTER_NOT_USED;
+			}
+
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
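
Together, get and put implement a small refcounted reservation scheme, where PERFCOUNTER_FLAG_KERNEL pins a counter so that a userspace put can never release a counter the kernel still depends on. Below is a toy single-slot model of that lifecycle; struct slot and the helper names are invented for illustration.

#include <stdio.h>

#define NOT_USED 0xFFFFFFFFu

/* toy model of one counter slot; mirrors adreno_perfcount_register */
struct slot {
	unsigned int countable;
	unsigned int refcount;
	unsigned int kernel;	/* stands in for PERFCOUNTER_FLAG_KERNEL */
};

static int slot_get(struct slot *s, unsigned int countable, int kernel)
{
	if (s->countable == countable) {	/* already programmed: share */
		s->refcount++;
		s->kernel |= kernel;
		return 0;
	}
	if (s->countable != NOT_USED)
		return -1;			/* slot busy: -EBUSY */
	s->countable = countable;		/* claim the empty slot */
	s->refcount = 1;
	s->kernel = kernel;
	return 0;
}

static void slot_put(struct slot *s, unsigned int countable)
{
	if (s->countable != countable || s->refcount == 0)
		return;
	s->refcount--;
	if (s->refcount == 0 && s->kernel)
		s->refcount = 1;		/* kernel users pin the slot */
	else if (s->refcount == 0)
		s->countable = NOT_USED;	/* make it available again */
}

int main(void)
{
	struct slot s = { NOT_USED, 0, 0 };

	slot_get(&s, 42, 1);	/* kernel reserves countable 42 */
	slot_get(&s, 42, 0);	/* userspace piggybacks on the same slot */
	slot_put(&s, 42);	/* user release: refcount drops to 1 */
	slot_put(&s, 42);	/* kernel-flagged: the slot stays claimed */
	printf("countable=%u refcount=%u\n", s.countable, s.refcount);
	return 0;
}
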
 static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
 {
 	irqreturn_t result;
@@ -254,26 +569,29 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
 
-	result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc,
-				     GSL_PT_PAGE_RV);
+	result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc);
 	if (result)
 		goto error;
 
-	result = kgsl_mmu_map_global(pagetable, &rb->memptrs_desc,
-				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	result = kgsl_mmu_map_global(pagetable, &rb->memptrs_desc);
 	if (result)
 		goto unmap_buffer_desc;
 
-	result = kgsl_mmu_map_global(pagetable, &device->memstore,
-				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	result = kgsl_mmu_map_global(pagetable, &device->memstore);
 	if (result)
 		goto unmap_memptrs_desc;
 
-	result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory,
-				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory);
 	if (result)
 		goto unmap_memstore_desc;
 
+	/*
+	 * Set the mpu end to the last "normal" global memory we use.
+	 * For the IOMMU, this will be used to restrict access to the
+	 * mapped registers.
+	 */
+	device->mh.mpu_range = device->mmu.setstate_memory.gpuaddr +
+				device->mmu.setstate_memory.size;
 	return result;
 
 unmap_memstore_desc:
@@ -294,7 +612,7 @@
 					uint32_t flags)
 {
 	unsigned int pt_val, reg_pt_val;
-	unsigned int link[250];
+	unsigned int link[230];
 	unsigned int *cmds = &link[0];
 	int sizedwords = 0;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -302,8 +620,14 @@
 	struct kgsl_context *context;
 	struct adreno_context *adreno_ctx = NULL;
 
-	if (!adreno_dev->drawctxt_active)
+	/*
+	 * If we're idle and we don't need to use the GPU to save context
+	 * state, use the CPU instead of the GPU to reprogram the
+	 * iommu for simplicity's sake.
+	 */
+	if (!adreno_dev->drawctxt_active || device->ftbl->isidle(device))
 		return kgsl_mmu_device_setstate(&device->mmu, flags);
+
 	num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
 
 	context = idr_find(&device->context_idr, context_id);
@@ -427,7 +751,7 @@
 		adreno_dev->ringbuffer.timestamp[KGSL_MEMSTORE_GLOBAL], true);
 	}
 
-	if (sizedwords > (sizeof(link)/sizeof(unsigned int))) {
+	if (sizedwords > ARRAY_SIZE(link)) {
 		KGSL_DRV_ERR(device, "Temp command buffer overflow\n");
 		BUG();
 	}
@@ -605,7 +929,7 @@
 	/* 8x25 returns 0 for minor id, but it should be 1 */
 	if (cpu_is_qsd8x50())
 		patchid = 1;
-	else if (cpu_is_msm8625() && minorid == 0)
+	else if ((cpu_is_msm8625() || cpu_is_msm8625q()) && minorid == 0)
 		minorid = 1;
 
 	chipid |= (minorid << 8) | patchid;
@@ -669,7 +993,6 @@
 	adreno_dev->instruction_size = adreno_gpulist[i].instruction_size;
 	adreno_dev->gmem_size = adreno_gpulist[i].gmem_size;
 	adreno_dev->gpulist_index = i;
-
 }
 
 static struct platform_device_id adreno_id_table[] = {
@@ -756,6 +1079,10 @@
 		&pdata->init_level))
 		pdata->init_level = 1;
 
+	if (adreno_of_read_property(parent, "qcom,step-pwrlevel",
+		&pdata->step_mul))
+		pdata->step_mul = 1;
+
 	if (pdata->init_level < 0 || pdata->init_level > pdata->num_levels) {
 		KGSL_CORE_ERR("Initial power level out of range\n");
 		pdata->init_level = 1;
@@ -981,9 +1308,17 @@
 			goto err;
 		}
 
-		if (adreno_of_read_property(child, "qcom,iommu-ctx-sids",
-			&ctxs[ctx_index].ctx_id))
+		ret = of_property_read_u32_array(child, "reg", reg_val, 2);
+		if (ret) {
+			KGSL_CORE_ERR("Unable to read KGSL IOMMU 'reg'\n");
 			goto err;
+		}
+		if (msm_soc_version_supports_iommu_v0())
+			ctxs[ctx_index].ctx_id = (reg_val[0] -
+				data->physstart) >> KGSL_IOMMU_CTX_SHIFT;
+		else
+			ctxs[ctx_index].ctx_id = ((reg_val[0] -
+				data->physstart) >> KGSL_IOMMU_CTX_SHIFT) - 8;
 
 		ctx_index++;
 	}
@@ -1038,15 +1373,17 @@
 	if (ret)
 		goto err;
 
-	/* Default value is 83, if not found in DT */
 	if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
 		&pdata->idle_timeout))
-		pdata->idle_timeout = 83;
+		pdata->idle_timeout = HZ/12;
 
 	if (adreno_of_read_property(pdev->dev.of_node, "qcom,nap-allowed",
 		&pdata->nap_allowed))
 		pdata->nap_allowed = 1;
 
+	pdata->strtstp_sleepwake = of_property_read_bool(pdev->dev.of_node,
+						"qcom,strtstp-sleepwake");
+
 	if (adreno_of_read_property(pdev->dev.of_node, "qcom,clk-map",
 		&pdata->clk_map))
 		goto err;
@@ -1098,7 +1435,8 @@
 static int
 adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
 {
-	if (!adreno_is_a330(adreno_dev))
+	if (!(adreno_is_a330(adreno_dev) ||
+		adreno_is_a305b(adreno_dev)))
 		return 0;
 
 	/* OCMEM is only needed once, do not support consecutive allocation */
@@ -1119,7 +1457,8 @@
 static void
 adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
 {
-	if (!adreno_is_a330(adreno_dev))
+	if (!(adreno_is_a330(adreno_dev) ||
+		adreno_is_a305b(adreno_dev)))
 		return;
 
 	if (adreno_dev->ocmem_hdl == NULL)
@@ -1202,10 +1541,10 @@
 	return 0;
 }
 
-static int adreno_start(struct kgsl_device *device, unsigned int init_ram)
+static int adreno_init(struct kgsl_device *device)
 {
-	int status = -EINVAL;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
 
 	if (KGSL_STATE_DUMP_AND_FT != device->state)
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
@@ -1231,10 +1570,9 @@
 	if (adreno_dev->gpurev == ADRENO_REV_UNKNOWN) {
 		KGSL_DRV_ERR(device, "Unknown chip ID %x\n",
 			adreno_dev->chip_id);
-		goto error_clk_off;
+		BUG_ON(1);
 	}
 
-
 	/*
 	 * Check if firmware supports the sync lock PM4 packets needed
 	 * for IOMMUv1
@@ -1246,7 +1584,34 @@
 		adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pfp_ver))
 		device->mmu.flags |= KGSL_MMU_FLAGS_IOMMU_SYNC;
 
-	/* Set up the MMU */
+	rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;
+
+	/* Assign correct RBBM status register to hang detect regs */
+	ft_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
+
+	adreno_perfcounter_init(device);
+
+	/* Power down the device */
+	kgsl_pwrctrl_disable(device);
+
+	return 0;
+}
+
+static int adreno_start(struct kgsl_device *device)
+{
+	int status = -EINVAL;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	kgsl_cffdump_open(device);
+
+	if (KGSL_STATE_DUMP_AND_FT != device->state)
+		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+
+	/* Power up the device */
+	kgsl_pwrctrl_enable(device);
+
+	/* Set up a2xx special case */
 	if (adreno_is_a2xx(adreno_dev)) {
 		/*
 		 * the MH_CLNT_INTF_CTRL_CONFIG registers aren't present
@@ -1260,20 +1625,6 @@
 		kgsl_mh_start(device);
 	}
 
-	/* Assign correct RBBM status register to hang detect regs
-	 */
-	ft_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
-
-	/* Add A3XX specific registers for hang detection */
-	if (adreno_is_a3xx(adreno_dev)) {
-		ft_detect_regs[6] = A3XX_RBBM_PERFCTR_SP_7_LO;
-		ft_detect_regs[7] = A3XX_RBBM_PERFCTR_SP_7_HI;
-		ft_detect_regs[8] = A3XX_RBBM_PERFCTR_SP_6_LO;
-		ft_detect_regs[9] = A3XX_RBBM_PERFCTR_SP_6_HI;
-		ft_detect_regs[10] = A3XX_RBBM_PERFCTR_SP_5_LO;
-		ft_detect_regs[11] = A3XX_RBBM_PERFCTR_SP_5_HI;
-	}
-
 	status = kgsl_mmu_start(device);
 	if (status)
 		goto error_clk_off;
@@ -1290,22 +1641,30 @@
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
 	device->ftbl->irqctrl(device, 1);
 
-	status = adreno_ringbuffer_start(&adreno_dev->ringbuffer, init_ram);
-	if (status == 0) {
-		/* While fault tolerance is on we do not want timer to
-		 * fire and attempt to change any device state */
-		if (KGSL_STATE_DUMP_AND_FT != device->state)
-			mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
-		return 0;
-	}
+	status = adreno_ringbuffer_start(&adreno_dev->ringbuffer);
+	if (status)
+		goto error_irq_off;
 
+	/* While fault tolerance is on we do not want timer to
+	 * fire and attempt to change any device state */
+	if (KGSL_STATE_DUMP_AND_FT != device->state)
+		mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+
+	adreno_perfcounter_start(adreno_dev);
+
+	device->reset_counter++;
+
+	return 0;
+
+error_irq_off:
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 
 error_mmu_off:
 	kgsl_mmu_stop(&device->mmu);
 
 error_clk_off:
-	kgsl_pwrctrl_disable(device);
+	if (KGSL_STATE_DUMP_AND_FT != device->state)
+		kgsl_pwrctrl_disable(device);
 
 	return status;
 }
@@ -1329,6 +1688,8 @@
 	/* Power down the device */
 	kgsl_pwrctrl_disable(device);
 
+	kgsl_cffdump_close(device->id);
+
 	return 0;
 }
 
@@ -1411,6 +1772,8 @@
 			start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
 									size);
 		kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
+		/* Ensure above read is finished before next read */
+		rmb();
 		if (KGSL_CMD_IDENTIFIER == val1) {
 			if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
 				start_ptr = adreno_ringbuffer_dec_wrapped(
@@ -1448,6 +1811,8 @@
 					temp_rb_rptr, size);
 		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
 					temp_rb_rptr);
+		/* Ensure above read is finished before next read */
+		rmb();
 
 		if (check && ((inc && val[i] == global_eop) ||
 			(!inc && (val[i] ==
@@ -1512,6 +1877,8 @@
 
 	while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
 		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+		/* Ensure above read is finished before next read */
+		rmb();
 
 		if (check && val[i] == ib1) {
 			/* decrement i, i.e i = (i - 1 + 2) % 2 */
@@ -1553,7 +1920,7 @@
 	return status;
 }
 
-static int adreno_setup_ft_data(struct kgsl_device *device,
+static void adreno_setup_ft_data(struct kgsl_device *device,
 					struct adreno_ft_data *ft_data)
 {
 	int ret = 0;
@@ -1578,30 +1945,30 @@
 			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
 			eoptimestamp));
 
+	/* Ensure context id and global eop ts read complete */
+	rmb();
+
 	ft_data->rb_buffer = vmalloc(rb->buffer_desc.size);
 	if (!ft_data->rb_buffer) {
 		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
 				rb->buffer_desc.size);
-		return -ENOMEM;
+		return;
 	}
 
 	ft_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
 	if (!ft_data->bad_rb_buffer) {
 		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
 				rb->buffer_desc.size);
-		ret = -ENOMEM;
-		goto done;
+		return;
 	}
 
 	ft_data->good_rb_buffer = vmalloc(rb->buffer_desc.size);
 	if (!ft_data->good_rb_buffer) {
 		KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
 				rb->buffer_desc.size);
-		ret = -ENOMEM;
-		goto done;
+		return;
 	}
-
-	ft_data->status =  0;
+	ft_data->status = 0;
 
 	/* find the start of bad command sequence in rb */
 	context = idr_find(&device->context_idr, ft_data->context_id);
@@ -1612,20 +1979,23 @@
 		 * If there is no context then fault tolerance does not need to
 		 * replay anything, just reset GPU and thats it
 		 */
-		goto done;
+		return;
 	}
-	ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
-					ft_data->global_eop + 1, false);
-	if (ret)
-		goto done;
-
-	ft_data->start_of_replay_cmds = rb_rptr;
-
-	if (!adreno_dev->ft_policy)
-		adreno_dev->ft_policy = KGSL_FT_DEFAULT_POLICY;
 
 	ft_data->ft_policy = adreno_dev->ft_policy;
 
+	if (!ft_data->ft_policy)
+		ft_data->ft_policy = KGSL_FT_DEFAULT_POLICY;
+
+	ret = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
+					ft_data->global_eop + 1, false);
+	if (ret) {
+		ft_data->ft_policy |= KGSL_FT_TEMP_DISABLE;
+		return;
+	}
+
+	ft_data->ft_policy &= ~KGSL_FT_TEMP_DISABLE;
+
+	ft_data->start_of_replay_cmds = rb_rptr;
 
 	adreno_context = context->devctxt;
 	if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
@@ -1636,20 +2006,12 @@
 				KGSL_FT_ERR(device,
 				"Start not found for replay IB sequence\n");
 				ret = 0;
-				goto done;
+				return;
 			}
 			ft_data->start_of_replay_cmds = rb_rptr;
 			ft_data->replay_for_snapshot = rb_rptr;
 		}
 	}
-
-done:
-	if (ret) {
-		vfree(ft_data->rb_buffer);
-		vfree(ft_data->bad_rb_buffer);
-		vfree(ft_data->good_rb_buffer);
-	}
-	return ret;
 }
 
 static int
@@ -1663,6 +2025,8 @@
 			&curr_global_ts,
 			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
 			eoptimestamp));
+	/* Ensure above read is finished before long ib check */
+	rmb();
 
 	/* Mark long ib as handled */
 	adreno_dev->long_ib = 0;
@@ -1680,8 +2044,7 @@
 
 static int
 _adreno_ft_restart_device(struct kgsl_device *device,
-		   struct kgsl_context *context,
-		   struct adreno_ft_data *ft_data)
+			   struct kgsl_context *context)
 {
 
 	struct adreno_context *adreno_context = context->devctxt;
@@ -1692,7 +2055,12 @@
 		return 1;
 	}
 
-	if (adreno_start(device, true)) {
+	if (adreno_init(device)) {
+		KGSL_FT_ERR(device, "Device init failed\n");
+		return 1;
+	}
+
+	if (adreno_start(device)) {
 		KGSL_FT_ERR(device, "Device start failed\n");
 		return 1;
 	}
@@ -1751,11 +2119,30 @@
 			unsigned int *buff, unsigned int size)
 {
 	unsigned int ret = 0;
+	unsigned int retry_num = 0;
 
 	_adreno_debug_ft_info(device, ft_data);
 
-	if (_adreno_ft_restart_device(device, context, ft_data))
-		return 1;
+	do {
+		ret = _adreno_ft_restart_device(device, context);
+		if (ret == 0)
+			break;
+		/*
+		 * If device restart fails sleep for 20ms before
+		 * attempting restart. This allows GPU HW to settle
+		 * and improve the chances of next restart to be
+		 * successful.
+		 */
+		msleep(20);
+		KGSL_FT_ERR(device, "Retry device restart %d\n", retry_num);
+		retry_num++;
+	} while (retry_num < 4);
+
+	if (ret) {
+		KGSL_FT_ERR(device, "Device restart failed\n");
+		BUG_ON(1);
+		goto done;
+	}
 
 	if (size) {
 
@@ -1765,6 +2152,7 @@
 		ret = adreno_idle(device);
 	}
 
+done:
 	return ret;
 }
 
@@ -1779,11 +2167,13 @@
 	struct kgsl_context *context;
 	struct adreno_context *adreno_context = NULL;
 	struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+	unsigned int long_ib = 0;
 
 	context = idr_find(&device->context_idr, ft_data->context_id);
 	if (context == NULL) {
-		KGSL_FT_CRIT(device, "Last context unknown id:%d\n",
+		KGSL_FT_ERR(device, "Last context unknown id:%d\n",
 			ft_data->context_id);
+		goto play_good_cmds;
 	} else {
 		adreno_context = context->devctxt;
 		adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
@@ -1793,11 +2183,17 @@
 		 */
 		context->wait_on_invalid_ts = false;
 
+		if (!(adreno_context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
+			KGSL_FT_ERR(device, "Fault tolerance not supported\n");
+			goto play_good_cmds;
+		}
+
 		/*
 		 *  This flag will be set by userspace for contexts
 		 *  that do not want to be fault tolerant (ex: OPENCL)
 		 */
 		if (adreno_context->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE) {
+			ft_data->status = 1;
 			KGSL_FT_ERR(device,
 			"No FT set for this context play good cmds\n");
 			goto play_good_cmds;
@@ -1805,51 +2201,57 @@
 
 	}
 
+	/*
+	 * If a long running IB was detected, check whether it is truly
+	 * hung; if still progressing, clear the hang flag and return
+	 */
+	if (adreno_dev->long_ib) {
+		long_ib = _adreno_check_long_ib(device);
+		if (!long_ib) {
+			adreno_context->flags &= ~CTXT_FLAGS_GPU_HANG;
+			return 0;
+		}
+	}
+
 	/*
 	 * Extract valid contents from rb which can still be executed after
 	 * hang
 	 */
 	adreno_ringbuffer_extract(rb, ft_data);
 
-	/* Check if we detected a long running IB,
-	 * if true do not attempt replay of bad cmds */
-	if (adreno_dev->long_ib) {
-		if (_adreno_check_long_ib(device)) {
-			ft_data->status = 1;
-			_adreno_debug_ft_info(device, ft_data);
-			goto play_good_cmds;
-		} else {
-			adreno_context->flags &= ~CTXT_FLAGS_GPU_HANG;
-			return 0;
-		}
-	}
-
-	/* Do not try the bad commands if  hang is due to a fault */
-	if (device->mmu.fault) {
-		KGSL_FT_ERR(device, "MMU fault skipping bad cmds\n");
-		device->mmu.fault = 0;
+	/* If long IB detected do not attempt replay of bad cmds */
+	if (long_ib) {
+		_adreno_debug_ft_info(device, ft_data);
 		goto play_good_cmds;
 	}
 
-	if (ft_data->ft_policy & KGSL_FT_DISABLE) {
+	if ((ft_data->ft_policy & KGSL_FT_DISABLE) ||
+		(ft_data->ft_policy & KGSL_FT_TEMP_DISABLE)) {
 		KGSL_FT_ERR(device, "NO FT policy play only good cmds\n");
+		ft_data->status = 1;
 		goto play_good_cmds;
 	}
 
+	/* Do not attempt replay if the hang is due to a pagefault */
+	if (adreno_context->pagefault) {
+		if ((ft_data->context_id == adreno_context->id) &&
+			(ft_data->global_eop == adreno_context->pagefault_ts)) {
+			ft_data->ft_policy &= ~KGSL_FT_REPLAY;
+			KGSL_FT_ERR(device, "MMU fault skipping replay\n");
+		}
+
+		adreno_context->pagefault = 0;
+	}
+
 	if (ft_data->ft_policy & KGSL_FT_REPLAY) {
-
 		ret = _adreno_ft_resubmit_rb(device, rb, context, ft_data,
 				ft_data->bad_rb_buffer, ft_data->bad_rb_size);
 
 		if (ret) {
-			KGSL_FT_ERR(device, "Replay unsuccessful\n");
+			KGSL_FT_ERR(device, "Replay status: 1\n");
 			ft_data->status = 1;
 		} else
 			goto play_good_cmds;
 	}
 
 	if (ft_data->ft_policy & KGSL_FT_SKIPIB) {
-
 		for (i = 0; i < ft_data->bad_rb_size; i++) {
 			if ((ft_data->bad_rb_buffer[i] ==
 					CP_HDR_INDIRECT_BUFFER_PFD) &&
@@ -1874,7 +2276,7 @@
 				ft_data->bad_rb_buffer, ft_data->bad_rb_size);
 
 		if (ret) {
-			KGSL_FT_ERR(device, "NOP faulty IB unsuccessful\n");
+			KGSL_FT_ERR(device, "NOP faulty IB status: 1\n");
 			ft_data->status = 1;
 		} else {
 			ft_data->status = 0;
@@ -1883,7 +2285,6 @@
 	}
 
 	if (ft_data->ft_policy & KGSL_FT_SKIPFRAME) {
-
 		for (i = 0; i < ft_data->bad_rb_size; i++) {
 			if (ft_data->bad_rb_buffer[i] ==
 					KGSL_END_OF_FRAME_IDENTIFIER) {
@@ -1905,7 +2306,7 @@
 				ft_data->bad_rb_buffer, ft_data->bad_rb_size);
 
 		if (ret) {
-			KGSL_FT_ERR(device, "Skip EOF unsuccessful\n");
+			KGSL_FT_ERR(device, "Skip EOF status: 1\n");
 			ft_data->status = 1;
 		} else {
 			ft_data->status = 0;
@@ -1983,9 +2384,7 @@
 			/* setup new fault tolerance parameters and retry, this
 			 * means more than 1 contexts are causing hang */
 			adreno_destroy_ft_data(ft_data);
-			ret = adreno_setup_ft_data(device, ft_data);
-			if (ret)
-				goto done;
+			adreno_setup_ft_data(device, ft_data);
 			KGSL_FT_INFO(device,
 			"Retry. Parameters: "
 			"IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
@@ -2050,7 +2449,12 @@
 		kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
 
 		/* Get the fault tolerance data as soon as hang is detected */
-		result = adreno_setup_ft_data(device, &ft_data);
+		adreno_setup_ft_data(device, &ft_data);
+		/*
+		 * Trigger an automatic dump of the state to
+		 * the console
+		 */
+		kgsl_postmortem_dump(device, 0);
 
 		/*
 		 * If long ib is detected, do not attempt postmortem or
@@ -2072,10 +2476,8 @@
 			kgsl_device_snapshot(device, 1);
 		}
 
-		if (!result) {
-			result = adreno_ft(device, &ft_data);
-			adreno_destroy_ft_data(&ft_data);
-		}
+		result = adreno_ft(device, &ft_data);
+		adreno_destroy_ft_data(&ft_data);
 
 		/* restore power level */
 		kgsl_pwrctrl_pwrlevel_change(device, curr_pwrlevel);
@@ -2228,39 +2630,6 @@
 			status = 0;
 		}
 		break;
-	case KGSL_PROP_FAULT_TOLERANCE: {
-			struct kgsl_ft_config ftd;
-
-			if (adreno_dev->ft_user_control == 0)
-				break;
-
-			if (sizebytes != sizeof(ftd))
-				break;
-
-			if (copy_from_user(&ftd, (void __user *) value,
-							   sizeof(ftd))) {
-				status = -EFAULT;
-				break;
-			}
-
-			if (ftd.ft_policy)
-				adreno_dev->ft_policy = ftd.ft_policy;
-			else
-				adreno_dev->ft_policy = KGSL_FT_DEFAULT_POLICY;
-
-			if (ftd.ft_pf_policy)
-				adreno_dev->ft_pf_policy = ftd.ft_policy;
-			else
-				adreno_dev->ft_pf_policy =
-					KGSL_FT_PAGEFAULT_DEFAULT_POLICY;
-
-			if (ftd.ft_pm_dump)
-				device->pm_dump_enable = 1;
-			else
-				device->pm_dump_enable = 0;
-
-		}
-		break;
 	default:
 		break;
 	}
@@ -2512,12 +2881,23 @@
 	return memdesc ? kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr) : NULL;
 }
 
-void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
-				unsigned int *value)
+/**
+ * adreno_read - General-purpose read of adreno device memory
+ * @device: Pointer to the GPU device struct (for adreno device)
+ * @base: Base address (kernel virtual) where the device memory is mapped
+ * @offsetwords: Offset in words from the base address of the memory that
+ * is to be read
+ * @value: Value read from the device memory
+ * @mem_len: Length of the device memory mapped to the kernel
+ */
+static void adreno_read(struct kgsl_device *device, void *base,
+		unsigned int offsetwords, unsigned int *value,
+		unsigned int mem_len)
 {
+
 	unsigned int *reg;
-	BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
-	reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
+	BUG_ON(offsetwords*sizeof(uint32_t) >= mem_len);
+	reg = (unsigned int *)(base + (offsetwords << 2));
 
 	if (!in_interrupt())
 		kgsl_pre_hwaccess(device);
@@ -2528,6 +2908,31 @@
 	rmb();
 }
 
+/**
+ * adreno_regread - Used to read adreno device registers
+ * @device: GPU device whose register is to be read
+ * @offsetwords: Word (4 byte) offset to the register to be read
+ * @value: Value read from the device register
+ */
+void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+	unsigned int *value)
+{
+	adreno_read(device, device->reg_virt, offsetwords, value,
+						device->reg_len);
+}
+
+/**
+ * adreno_shadermem_regread - Used to read GPU (adreno) shader memory
+ * @device: GPU device whose shader memory is to be read
+ * @offsetwords: Offset in words of the shader memory address to be read
+ * @value: Pointer to where the read shader mem value is to be stored
+ */
+void adreno_shadermem_regread(struct kgsl_device *device,
+	unsigned int offsetwords, unsigned int *value)
+{
+	adreno_read(device, device->shader_mem_virt, offsetwords, value,
+					device->shader_mem_len);
+}
+
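
Both wrappers funnel into adreno_read(), which addresses the mapping in 32-bit words: the byte offset is offsetwords << 2, bounds-checked against the mapped length. A userspace-shaped sketch of the same addressing, with the BUG_ON reduced to an assert and a local array standing in for the register mapping:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* read one 32-bit register from a mapped region, addressed in words */
static uint32_t read_word(const void *base, unsigned int offsetwords,
			  unsigned int mem_len)
{
	const uint32_t *reg;

	assert(offsetwords * sizeof(uint32_t) < mem_len);
	reg = (const uint32_t *)((const char *)base + (offsetwords << 2));
	return *reg;
}

int main(void)
{
	uint32_t fake_regs[8] = { 0 };

	fake_regs[3] = 0xCAFE;
	printf("%#x\n", (unsigned)read_word(fake_regs, 3, sizeof(fake_regs)));
	return 0;
}
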
 void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
 				unsigned int value)
 {
@@ -2538,6 +2943,8 @@
 	if (!in_interrupt())
 		kgsl_pre_hwaccess(device);
 
+	trace_kgsl_regwrite(device, offsetwords, value);
+
 	kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
 	reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
 
@@ -2612,6 +3019,7 @@
 		kgsl_sharedmem_writel(&device->memstore,
 				KGSL_MEMSTORE_OFFSET(context_id,
 					ts_cmp_enable), enableflag);
+
 		/* Make sure the memstore write gets posted */
 		wmb();
 
@@ -2621,9 +3029,10 @@
 		 * get an interrupt
 		 */
 
-		if (context && device->state != KGSL_STATE_SLUMBER)
+		if (context && device->state != KGSL_STATE_SLUMBER) {
 			adreno_ringbuffer_issuecmds(device, context->devctxt,
 					KGSL_CMD_FLAGS_NONE, NULL, 0);
+		}
 	}
 
 	return 0;
@@ -2687,10 +3096,13 @@
 	if (!adreno_dev->long_ib_detect)
 		long_ib_detected = 0;
 
+	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED))
+		return 0;
+
 	if (is_adreno_rbbm_status_idle(device)) {
 
 		/*
-		 * On A20X if the RPTR != WPTR and the device is idle, then
+		 * On A2XX if the RPTR != WPTR and the device is idle, then
 		 * the last write to WPTR probably failed to latch so write it
 		 * again
 		 */
@@ -2731,7 +3143,7 @@
 			&curr_global_ts,
 			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
 			eoptimestamp));
-
+	/* Make sure the memstore read has posted */
 	mb();
 
 	if (curr_global_ts == prev_global_ts) {
@@ -2742,6 +3154,8 @@
 				&curr_context_id,
 				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
 				current_context));
+			/* Make sure the memstore read has posted */
+			mb();
 			context = idr_find(&device->context_idr,
 				curr_context_id);
 			if (context != NULL) {
@@ -2753,8 +3167,6 @@
 			}
 		}
 
-		mb();
-
 		if (curr_context != NULL) {
 
 			curr_context->ib_gpu_time_used += KGSL_TIMEOUT_PART;
@@ -2863,6 +3275,8 @@
 	ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
 
 	adreno_regread(device, REG_CP_RB_RPTR, &rptr);
+
+	/* Make sure timestamp check finished before triggering a hang */
 	mb();
 
 	KGSL_DRV_WARN(device,
@@ -3090,7 +3504,8 @@
 		break;
 	}
 	case KGSL_TIMESTAMP_CONSUMED:
-		adreno_regread(device, REG_CP_TIMESTAMP, &timestamp);
+		kgsl_sharedmem_readl(&device->memstore, &timestamp,
+			KGSL_MEMSTORE_OFFSET(context_id, soptimestamp));
 		break;
 	case KGSL_TIMESTAMP_RETIRED:
 		kgsl_sharedmem_readl(&device->memstore, &timestamp,
@@ -3106,27 +3521,55 @@
 static long adreno_ioctl(struct kgsl_device_private *dev_priv,
 			      unsigned int cmd, void *data)
 {
+	struct kgsl_device *device = dev_priv->device;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	int result = 0;
-	struct kgsl_drawctxt_set_bin_base_offset *binbase;
-	struct kgsl_context *context;
 
 	switch (cmd) {
-	case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET:
+	case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET: {
+		struct kgsl_drawctxt_set_bin_base_offset *binbase = data;
+		struct kgsl_context *context;
+
 		binbase = data;
 
 		context = kgsl_find_context(dev_priv, binbase->drawctxt_id);
 		if (context) {
 			adreno_drawctxt_set_bin_base_offset(
-				dev_priv->device, context, binbase->offset);
+				device, context, binbase->offset);
 		} else {
 			result = -EINVAL;
-			KGSL_DRV_ERR(dev_priv->device,
+			KGSL_DRV_ERR(device,
 				"invalid drawctxt drawctxt_id %d "
 				"device_id=%d\n",
-				binbase->drawctxt_id, dev_priv->device->id);
+				binbase->drawctxt_id, device->id);
 		}
 		break;
-
+	}
+	case IOCTL_KGSL_PERFCOUNTER_GET: {
+		struct kgsl_perfcounter_get *get = data;
+		result = adreno_perfcounter_get(adreno_dev, get->groupid,
+			get->countable, &get->offset, PERFCOUNTER_FLAG_NONE);
+		break;
+	}
+	case IOCTL_KGSL_PERFCOUNTER_PUT: {
+		struct kgsl_perfcounter_put *put = data;
+		result = adreno_perfcounter_put(adreno_dev, put->groupid,
+			put->countable);
+		break;
+	}
+	case IOCTL_KGSL_PERFCOUNTER_QUERY: {
+		struct kgsl_perfcounter_query *query = data;
+		result = adreno_perfcounter_query_group(adreno_dev,
+			query->groupid, query->countables,
+			query->count, &query->max_counters);
+		break;
+	}
+	case IOCTL_KGSL_PERFCOUNTER_READ: {
+		struct kgsl_perfcounter_read *read = data;
+		result = adreno_perfcounter_read_group(adreno_dev,
+			read->reads, read->count);
+		break;
+	}
 	default:
 		KGSL_DRV_INFO(dev_priv->device,
 			"invalid ioctl code %08x\n", cmd);
@@ -3203,6 +3646,7 @@
 	.idle = adreno_idle,
 	.isidle = adreno_isidle,
 	.suspend_context = adreno_suspend_context,
+	.init = adreno_init,
 	.start = adreno_start,
 	.stop = adreno_stop,
 	.getproperty = adreno_getproperty,
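
For reference, this is roughly how the new perfcounter path chains together from userspace. A minimal sketch, assuming the uapi structs and IOCTL_KGSL_PERFCOUNTER_* numbers are exported through <linux/msm_kgsl.h> and the device node is /dev/kgsl-3d0 (neither is shown in this diff); countable 0 is a placeholder value.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>

int main(void)
{
	struct kgsl_perfcounter_get get = { 0 };
	struct kgsl_perfcounter_read_group rg = { 0 };
	struct kgsl_perfcounter_read read = { 0 };
	struct kgsl_perfcounter_put put = { 0 };
	int fd = open("/dev/kgsl-3d0", O_RDWR);

	if (fd < 0)
		return 1;

	get.groupid = KGSL_PERFCOUNTER_GROUP_SP;	/* group id used below */
	get.countable = 0;				/* placeholder countable */
	if (ioctl(fd, IOCTL_KGSL_PERFCOUNTER_GET, &get) == 0) {
		rg.groupid = get.groupid;
		rg.countable = get.countable;
		read.reads = &rg;
		read.count = 1;
		if (ioctl(fd, IOCTL_KGSL_PERFCOUNTER_READ, &read) == 0)
			printf("counter value: %llu\n",
			       (unsigned long long)rg.value);

		put.groupid = get.groupid;
		put.countable = get.countable;
		ioctl(fd, IOCTL_KGSL_PERFCOUNTER_PUT, &put);
	}
	close(fd);
	return 0;
}
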
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index d319c98..90d6027 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,7 @@
 /* Command identifiers */
 #define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
 #define KGSL_CMD_IDENTIFIER		0x2EEDFACE
+#define KGSL_CMD_INTERNAL_IDENTIFIER	0x2EEDD00D
 #define KGSL_START_OF_IB_IDENTIFIER	0x2EADEABE
 #define KGSL_END_OF_IB_IDENTIFIER	0x2ABEDEAD
 #define KGSL_END_OF_FRAME_IDENTIFIER	0x2E0F2E0F
@@ -72,8 +73,10 @@
 	ADRENO_REV_A220 = 220,
 	ADRENO_REV_A225 = 225,
 	ADRENO_REV_A305 = 305,
+	ADRENO_REV_A305C = 306,
 	ADRENO_REV_A320 = 320,
 	ADRENO_REV_A330 = 330,
+	ADRENO_REV_A305B = 335,
 };
 
 struct adreno_gpudev;
@@ -103,7 +106,6 @@
 	unsigned int ib_check_level;
 	unsigned int fast_hang_detect;
 	unsigned int ft_policy;
-	unsigned int ft_user_control;
 	unsigned int long_ib_detect;
 	unsigned int long_ib;
 	unsigned int long_ib_ts;
@@ -111,6 +113,45 @@
 	unsigned int gpulist_index;
 	struct ocmem_buf *ocmem_hdl;
 	unsigned int ocmem_base;
+	unsigned int gpu_cycles;
+};
+
+#define PERFCOUNTER_FLAG_NONE 0x0
+#define PERFCOUNTER_FLAG_KERNEL 0x1
+
+/* Structs to maintain the list of active performance counters */
+
+/**
+ * struct adreno_perfcount_register: register state
+ * @countable: countable the register holds
+ * @refcount: number of users of the register
+ * @offset: register hardware offset
+ * @flags: bookkeeping flags (e.g. PERFCOUNTER_FLAG_KERNEL)
+ */
+struct adreno_perfcount_register {
+	unsigned int countable;
+	unsigned int refcount;
+	unsigned int offset;
+	unsigned int flags;
+};
+
+/**
+ * struct adreno_perfcount_group: registers for a hardware group
+ * @regs: available registers for this group
+ * @reg_count: total registers for this group
+ */
+struct adreno_perfcount_group {
+	struct adreno_perfcount_register *regs;
+	unsigned int reg_count;
+};
+
+/**
+ * struct adreno_perfcounters: all available perfcounter groups
+ * @groups: available groups for this device
+ * @group_count: total groups for this device
+ */
+struct adreno_perfcounters {
+	struct adreno_perfcount_group *groups;
+	unsigned int group_count;
 };
 
 struct adreno_gpudev {
@@ -124,6 +165,8 @@
 	/* keeps track of when we need to execute the draw workaround code */
 	int ctx_switches_since_last_draw;
 
+	struct adreno_perfcounters *perfcounters;
+
 	/* GPU specific function hooks */
 	int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
 	void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
@@ -134,9 +177,15 @@
 	void (*irq_control)(struct adreno_device *, int);
 	unsigned int (*irq_pending)(struct adreno_device *);
 	void * (*snapshot)(struct adreno_device *, void *, int *, int);
-	void (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
+	int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
+	void (*perfcounter_init)(struct adreno_device *);
 	void (*start)(struct adreno_device *);
 	unsigned int (*busy_cycles)(struct adreno_device *);
+	void (*perfcounter_enable)(struct adreno_device *, unsigned int group,
+		unsigned int counter, unsigned int countable);
+	uint64_t (*perfcounter_read)(struct adreno_device *adreno_dev,
+		unsigned int group, unsigned int counter,
+		unsigned int offset);
 };
 
 /*
@@ -179,6 +228,22 @@
 	unsigned int replay_for_snapshot;
 };
 
+/* Fault Tolerance policy flags */
+#define  KGSL_FT_DISABLE                  BIT(0)
+#define  KGSL_FT_REPLAY                   BIT(1)
+#define  KGSL_FT_SKIPIB                   BIT(2)
+#define  KGSL_FT_SKIPFRAME                BIT(3)
+#define  KGSL_FT_TEMP_DISABLE             BIT(4)
+#define  KGSL_FT_DEFAULT_POLICY           (KGSL_FT_REPLAY | KGSL_FT_SKIPIB)
+
+/* Pagefault policy flags */
+#define KGSL_FT_PAGEFAULT_INT_ENABLE         0x00000001
+#define KGSL_FT_PAGEFAULT_GPUHALT_ENABLE     0x00000002
+#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE   0x00000004
+#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT    0x00000008
+#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY     (KGSL_FT_PAGEFAULT_INT_ENABLE | \
+					KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+
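
Since the FT policy is a plain bitmask and adreno_ft() tries the recovery strategies in a fixed order, a policy value decodes mechanically. A standalone sketch (decode() and its messages are illustrative; the bit values mirror the defines above):

#include <stdio.h>

#define KGSL_FT_DISABLE       (1U << 0)
#define KGSL_FT_REPLAY        (1U << 1)
#define KGSL_FT_SKIPIB        (1U << 2)
#define KGSL_FT_SKIPFRAME     (1U << 3)
#define KGSL_FT_TEMP_DISABLE  (1U << 4)

static void decode(unsigned int policy)
{
	if (policy & (KGSL_FT_DISABLE | KGSL_FT_TEMP_DISABLE)) {
		puts("FT off: play back only the known-good commands");
		return;
	}
	/* adreno_ft() attempts the strategies in this order */
	if (policy & KGSL_FT_REPLAY)
		puts("1. replay the faulting commands unmodified");
	if (policy & KGSL_FT_SKIPIB)
		puts("2. NOP out the faulting indirect buffer and retry");
	if (policy & KGSL_FT_SKIPFRAME)
		puts("3. skip ahead to the next end-of-frame marker");
}

int main(void)
{
	decode(KGSL_FT_REPLAY | KGSL_FT_SKIPIB);	/* the default policy */
	return 0;
}
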
 extern struct adreno_gpudev adreno_a2xx_gpudev;
 extern struct adreno_gpudev adreno_a3xx_gpudev;
 
@@ -210,7 +275,13 @@
 void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
 				unsigned int value);
 
+void adreno_shadermem_regread(struct kgsl_device *device,
+						unsigned int offsetwords,
+						unsigned int *value);
+
 int adreno_dump(struct kgsl_device *device, int manual);
+unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device
+							*adreno_dev);
 
 struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
 						unsigned int pt_base,
@@ -234,6 +305,13 @@
 unsigned int adreno_ft_detect(struct kgsl_device *device,
 						unsigned int *prev_reg_val);
 
+int adreno_perfcounter_get(struct adreno_device *adreno_dev,
+	unsigned int groupid, unsigned int countable, unsigned int *offset,
+	unsigned int flags);
+
+int adreno_perfcounter_put(struct adreno_device *adreno_dev,
+	unsigned int groupid, unsigned int countable);
+
 static inline int adreno_is_a200(struct adreno_device *adreno_dev)
 {
 	return (adreno_dev->gpurev == ADRENO_REV_A200);
@@ -285,6 +363,16 @@
 	return (adreno_dev->gpurev == ADRENO_REV_A305);
 }
 
+static inline int adreno_is_a305b(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A305B);
+}
+
+static inline int adreno_is_a305c(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A305C);
+}
+
 static inline int adreno_is_a320(struct adreno_device *adreno_dev)
 {
 	return (adreno_dev->gpurev == ADRENO_REV_A320);
@@ -295,6 +383,12 @@
 	return (adreno_dev->gpurev == ADRENO_REV_A330);
 }
 
+static inline int adreno_is_a330v2(struct adreno_device *adreno_dev)
+{
+	return ((adreno_dev->gpurev == ADRENO_REV_A330) &&
+		(ADRENO_CHIPID_PATCH(adreno_dev->chip_id) > 0));
+}
+
 static inline int adreno_rb_ctxtswitch(unsigned int *cmd)
 {
 	return (cmd[0] == cp_nop_packet(1) &&
@@ -400,12 +494,13 @@
 	unsigned int *start = cmds;
 
 	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
-	*cmds++ = 0x00000000;
+	*cmds++ = 0;
 
 	if ((adreno_dev->gpurev == ADRENO_REV_A305) ||
+		(adreno_dev->gpurev == ADRENO_REV_A305C) ||
 		(adreno_dev->gpurev == ADRENO_REV_A320)) {
 		*cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
-		*cmds++ = 0x00000000;
+		*cmds++ = 0;
 	}
 
 	return cmds - start;
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index ba4e507..dd9bdc3 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -1515,18 +1515,26 @@
 			"Current active context has caused gpu hang\n");
 
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-
+		kgsl_cffdump_syncmem(NULL, &context->gpustate,
+			context->reg_save[1],
+			context->reg_save[2] << 2, true);
 		/* save registers and constants. */
 		adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_NONE,
 			context->reg_save, 3);
 
 		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
+			kgsl_cffdump_syncmem(NULL, &context->gpustate,
+				context->shader_save[1],
+				context->shader_save[2] << 2, true);
 			/* save shader partitioning and instructions. */
 			adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_PMODE,
 				context->shader_save, 3);
 
+			kgsl_cffdump_syncmem(NULL, &context->gpustate,
+				context->shader_fixup[1],
+				context->shader_fixup[2] << 2, true);
 			/*
 			 * fixup shader partitioning parameter for
 			 *  SET_SHADER_BASES.
@@ -1541,6 +1549,9 @@
 
 	if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
 	    (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
+		kgsl_cffdump_syncmem(NULL, &context->gpustate,
+			context->context_gmem_shadow.gmem_save[1],
+			context->context_gmem_shadow.gmem_save[2] << 2, true);
 		/* save gmem.
 		 * (note: changes shader. shader must already be saved.)
 		 */
@@ -1548,6 +1559,10 @@
 			KGSL_CMD_FLAGS_PMODE,
 			context->context_gmem_shadow.gmem_save, 3);
 
+		kgsl_cffdump_syncmem(NULL, &context->gpustate,
+			context->chicken_restore[1],
+			context->chicken_restore[2] << 2, true);
+
 		/* Restore TP0_CHICKEN */
 		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
 			adreno_ringbuffer_issuecmds(device, context,
@@ -1574,8 +1589,6 @@
 		return;
 	}
 
-	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);
-
 	cmds[0] = cp_nop_packet(1);
 	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
 	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
@@ -1586,21 +1599,24 @@
 					cmds, 5);
 	kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
 
-#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
-	kgsl_cffdump_syncmem(NULL, &context->gpustate,
-		context->gpustate.gpuaddr, LCC_SHADOW_SIZE +
-		REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE, false);
-#endif
-
 	/* restore gmem.
 	 *  (note: changes shader. shader must not already be restored.)
 	 */
 	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
+		kgsl_cffdump_syncmem(NULL, &context->gpustate,
+			context->context_gmem_shadow.gmem_restore[1],
+			context->context_gmem_shadow.gmem_restore[2] << 2,
+			true);
+
 		adreno_ringbuffer_issuecmds(device, context,
 			KGSL_CMD_FLAGS_PMODE,
 			context->context_gmem_shadow.gmem_restore, 3);
 
 		if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+			kgsl_cffdump_syncmem(NULL, &context->gpustate,
+				context->chicken_restore[1],
+				context->chicken_restore[2] << 2, true);
+
 			/* Restore TP0_CHICKEN */
 			adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_NONE,
@@ -1611,6 +1627,9 @@
 	}
 
 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+		kgsl_cffdump_syncmem(NULL, &context->gpustate,
+			context->reg_restore[1],
+			context->reg_restore[2] << 2, true);
 
 		/* restore registers and constants. */
 		adreno_ringbuffer_issuecmds(device, context,
@@ -1618,6 +1637,10 @@
 
 		/* restore shader instructions & partitioning. */
 		if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
+			kgsl_cffdump_syncmem(NULL, &context->gpustate,
+				context->shader_restore[1],
+				context->shader_restore[2] << 2, true);
+
 			adreno_ringbuffer_issuecmds(device, context,
 				KGSL_CMD_FLAGS_NONE,
 				context->shader_restore, 3);
@@ -1727,12 +1750,8 @@
 	adreno_regwrite(device, REG_CP_INT_ACK, status);
 
 	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
-		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
 		queue_work(device->work_queue, &device->ts_expired_ws);
 		wake_up_interruptible_all(&device->wait_queue);
-		atomic_notifier_call_chain(&(device->ts_notifier_list),
-					   device->id,
-					   NULL);
 	}
 }
 
@@ -1821,23 +1840,26 @@
 static unsigned int a2xx_irq_pending(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
-	unsigned int rbbm, cp, mh;
+	unsigned int status;
 
-	adreno_regread(device, REG_RBBM_INT_CNTL, &rbbm);
-	adreno_regread(device, REG_CP_INT_CNTL, &cp);
-	adreno_regread(device, MH_INTERRUPT_MASK, &mh);
+	adreno_regread(device, REG_MASTER_INT_SIGNAL, &status);
 
-	return ((rbbm & RBBM_INT_MASK) || (cp & CP_INT_MASK) ||
-		(mh & kgsl_mmu_get_int_mask())) ? 1 : 0;
+	return (status &
+		(MASTER_INT_SIGNAL__MH_INT_STAT |
+		 MASTER_INT_SIGNAL__CP_INT_STAT |
+		 MASTER_INT_SIGNAL__RBBM_INT_STAT)) ? 1 : 0;
 }
 
-static void a2xx_rb_init(struct adreno_device *adreno_dev,
+static int a2xx_rb_init(struct adreno_device *adreno_dev,
 			struct adreno_ringbuffer *rb)
 {
 	unsigned int *cmds, cmds_gpu;
 
 	/* ME_INIT */
 	cmds = adreno_ringbuffer_allocspace(rb, NULL, 19);
+	if (cmds == NULL)
+		return -ENOMEM;
+
 	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
 
 	GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 18));
@@ -1890,6 +1912,8 @@
 	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
 
 	adreno_ringbuffer_submit(rb);
+
+	return 0;
 }
 
 static unsigned int a2xx_busy_cycles(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_a2xx_snapshot.c b/drivers/gpu/msm/adreno_a2xx_snapshot.c
index 75795b1..2c86f82 100644
--- a/drivers/gpu/msm/adreno_a2xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a2xx_snapshot.c
@@ -224,6 +224,31 @@
 	return DEBUG_SECTION_SZ(MIUDEBUG_COUNT);
 }
 
+/* Snapshot the istore memory */
+static int a2xx_snapshot_istore(struct kgsl_device *device, void *snapshot,
+	int remain, void *priv)
+{
+	struct kgsl_snapshot_istore *header = snapshot;
+	unsigned int *data = snapshot + sizeof(*header);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int count, i;
+
+	count = adreno_dev->istore_size * adreno_dev->instruction_size;
+
+	if (remain < (count * 4) + sizeof(*header)) {
+		KGSL_DRV_ERR(device,
+			"snapshot: Not enough memory for the istore section");
+		return 0;
+	}
+
+	header->count = adreno_dev->istore_size;
+
+	for (i = 0; i < count; i++)
+		kgsl_regread(device, ADRENO_ISTORE_START + i, &data[i]);
+
+	return (count * 4) + sizeof(*header);
+}
+
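
Every snapshot section follows the same contract as the istore dump above: check the space remaining, fill in a section header, and return the number of bytes consumed, or 0 to skip the section cleanly. A standalone sketch of that contract with an invented header struct:

#include <stdio.h>
#include <string.h>

struct sect_header {
	unsigned int count;	/* invented header: number of words dumped */
};

/* returns bytes written into buf, or 0 if the section doesn't fit */
static int snapshot_section(void *buf, int remain,
			    const unsigned int *words, unsigned int count)
{
	struct sect_header *header = buf;
	unsigned int *data = (unsigned int *)(header + 1);

	if (remain < (int)(count * 4 + sizeof(*header)))
		return 0;	/* skip the section rather than overflow */

	header->count = count;
	memcpy(data, words, count * 4);
	return count * 4 + sizeof(*header);
}

int main(void)
{
	unsigned char buf[64];
	unsigned int words[3] = { 1, 2, 3 };

	printf("%d bytes used\n",
	       snapshot_section(buf, sizeof(buf), words, 3));
	return 0;
}
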
 /* A2XX GPU snapshot function - this is where all of the A2XX specific
  * bits and pieces are grabbed into the snapshot memory
  */
@@ -338,6 +363,18 @@
 		}
 	}
 
+	/*
+	 * Only dump the istore on a hang - reading it on a running system
+	 * has a nonzero chance of hanging the GPU.
+	 */
+
+	if (adreno_is_a2xx(adreno_dev) && hang) {
+		snapshot = kgsl_snapshot_add_section(device,
+			KGSL_SNAPSHOT_SECTION_ISTORE, snapshot, remain,
+			a2xx_snapshot_istore, NULL);
+	}
+
 	/* Reset the clock gating */
 	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, pmoverride);
 
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 3d9ec6d..13c723a 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -52,8 +52,8 @@
 	0x2240, 0x227e,
 	0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
 	0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
-	0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
-	0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
+	0x22ff, 0x22ff, 0x2340, 0x2343,
+	0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
 	0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
 	0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
 	0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
@@ -61,8 +61,8 @@
 	0x25f0, 0x25f0,
 	0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
 	0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
-	0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
-	0x2750, 0x2756, 0x2760, 0x2760, 0x300C, 0x300E, 0x301C, 0x301D,
+	0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
+	0x300C, 0x300E, 0x301C, 0x301D,
 	0x302A, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031, 0x3034, 0x3036,
 	0x303C, 0x303C, 0x305E, 0x305F,
 };
@@ -445,6 +445,25 @@
 	tmp_ctx.cmd = cmd;
 }
 
+unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device
+							*adreno_dev)
+{
+	if (adreno_is_a305(adreno_dev))
+		return A305_RBBM_CLOCK_CTL_DEFAULT;
+	else if (adreno_is_a305c(adreno_dev))
+		return A305C_RBBM_CLOCK_CTL_DEFAULT;
+	else if (adreno_is_a320(adreno_dev))
+		return A320_RBBM_CLOCK_CTL_DEFAULT;
+	else if (adreno_is_a330v2(adreno_dev))
+		return A330v2_RBBM_CLOCK_CTL_DEFAULT;
+	else if (adreno_is_a330(adreno_dev))
+		return A330_RBBM_CLOCK_CTL_DEFAULT;
+	else if (adreno_is_a305b(adreno_dev))
+		return A305B_RBBM_CLOCK_CTL_DEFAULT;
+
+	BUG_ON(1);
+}
+
 /* Copy GMEM contents to system memory shadow. */
 static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
 					 struct adreno_context *drawctxt,
@@ -454,7 +473,7 @@
 	unsigned int *start = cmds;
 
 	*cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
-	*cmds++ = A3XX_RBBM_CLOCK_CTL_DEFAULT;
+	*cmds++ = adreno_a3xx_rbbm_clock_ctl_default(adreno_dev);
 
 	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
 	*cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
@@ -1250,7 +1269,7 @@
 	unsigned int *start = cmds;
 
 	*cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
-	*cmds++ = A3XX_RBBM_CLOCK_CTL_DEFAULT;
+	*cmds++ = adreno_a3xx_rbbm_clock_ctl_default(adreno_dev);
 
 	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
 	*cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
@@ -2400,6 +2419,11 @@
 		 * already be saved.)
 		 */
 
+		kgsl_cffdump_syncmem(NULL,
+			&context->gpustate,
+			context->context_gmem_shadow.gmem_save[1],
+			context->context_gmem_shadow.gmem_save[2] << 2, true);
+
 		adreno_ringbuffer_issuecmds(device, context,
 					KGSL_CMD_FLAGS_PMODE,
 					    context->context_gmem_shadow.
@@ -2421,8 +2445,6 @@
 		return;
 	}
 
-	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);
-
 	cmds[0] = cp_nop_packet(1);
 	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
 	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
@@ -2439,6 +2461,12 @@
 	 */
 
 	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
+		kgsl_cffdump_syncmem(NULL,
+			&context->gpustate,
+			context->context_gmem_shadow.gmem_restore[1],
+			context->context_gmem_shadow.gmem_restore[2] << 2,
+			true);
+
 		adreno_ringbuffer_issuecmds(device, context,
 					KGSL_CMD_FLAGS_PMODE,
 					    context->context_gmem_shadow.
@@ -2471,11 +2499,14 @@
 	}
 }
 
-static void a3xx_rb_init(struct adreno_device *adreno_dev,
+static int a3xx_rb_init(struct adreno_device *adreno_dev,
 			 struct adreno_ringbuffer *rb)
 {
 	unsigned int *cmds, cmds_gpu;
 	cmds = adreno_ringbuffer_allocspace(rb, NULL, 18);
+	if (cmds == NULL)
+		return -ENOMEM;
+
 	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * (rb->wptr - 18);
 
 	GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 17));
@@ -2499,6 +2530,8 @@
 	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
 
 	adreno_ringbuffer_submit(rb);
+
+	return 0;
 }
 
 static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
@@ -2581,9 +2614,213 @@
 
 	/* Schedule work to free mem and issue ibs */
 	queue_work(device->work_queue, &device->ts_expired_ws);
+}
 
-	atomic_notifier_call_chain(&device->ts_notifier_list,
-				   device->id, NULL);
+/**
+ * struct a3xx_perfcounter_register - Define a performance counter register
+ * @load_bit: the bit to set in RBBM_LOAD_CMD0/RBBM_LOAD_CMD1 to force the RBBM
+ * to load the reset value into the appropriate counter
+ * @select: The dword offset of the register to write the selected
+ * countable into
+ */
+
+struct a3xx_perfcounter_register {
+	unsigned int load_bit;
+	unsigned int select;
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_cp[] = {
+	{ 0, A3XX_CP_PERFCOUNTER_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_rbbm[] = {
+	{ 1, A3XX_RBBM_PERFCOUNTER0_SELECT },
+	{ 2, A3XX_RBBM_PERFCOUNTER1_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_pc[] = {
+	{ 3, A3XX_PC_PERFCOUNTER0_SELECT },
+	{ 4, A3XX_PC_PERFCOUNTER1_SELECT },
+	{ 5, A3XX_PC_PERFCOUNTER2_SELECT },
+	{ 6, A3XX_PC_PERFCOUNTER3_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_vfd[] = {
+	{ 7, A3XX_VFD_PERFCOUNTER0_SELECT },
+	{ 8, A3XX_VFD_PERFCOUNTER1_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_hlsq[] = {
+	{ 9, A3XX_HLSQ_PERFCOUNTER0_SELECT },
+	{ 10, A3XX_HLSQ_PERFCOUNTER1_SELECT },
+	{ 11, A3XX_HLSQ_PERFCOUNTER2_SELECT },
+	{ 12, A3XX_HLSQ_PERFCOUNTER3_SELECT },
+	{ 13, A3XX_HLSQ_PERFCOUNTER4_SELECT },
+	{ 14, A3XX_HLSQ_PERFCOUNTER5_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_vpc[] = {
+	{ 15, A3XX_VPC_PERFCOUNTER0_SELECT },
+	{ 16, A3XX_VPC_PERFCOUNTER1_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_tse[] = {
+	{ 17, A3XX_GRAS_PERFCOUNTER0_SELECT },
+	{ 18, A3XX_GRAS_PERFCOUNTER1_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_ras[] = {
+	{ 19, A3XX_GRAS_PERFCOUNTER2_SELECT },
+	{ 20, A3XX_GRAS_PERFCOUNTER3_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_uche[] = {
+	{ 21, A3XX_UCHE_PERFCOUNTER0_SELECT },
+	{ 22, A3XX_UCHE_PERFCOUNTER1_SELECT },
+	{ 23, A3XX_UCHE_PERFCOUNTER2_SELECT },
+	{ 24, A3XX_UCHE_PERFCOUNTER3_SELECT },
+	{ 25, A3XX_UCHE_PERFCOUNTER4_SELECT },
+	{ 26, A3XX_UCHE_PERFCOUNTER5_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_tp[] = {
+	{ 27, A3XX_TP_PERFCOUNTER0_SELECT },
+	{ 28, A3XX_TP_PERFCOUNTER1_SELECT },
+	{ 29, A3XX_TP_PERFCOUNTER2_SELECT },
+	{ 30, A3XX_TP_PERFCOUNTER3_SELECT },
+	{ 31, A3XX_TP_PERFCOUNTER4_SELECT },
+	{ 32, A3XX_TP_PERFCOUNTER5_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_sp[] = {
+	{ 33, A3XX_SP_PERFCOUNTER0_SELECT },
+	{ 34, A3XX_SP_PERFCOUNTER1_SELECT },
+	{ 35, A3XX_SP_PERFCOUNTER2_SELECT },
+	{ 36, A3XX_SP_PERFCOUNTER3_SELECT },
+	{ 37, A3XX_SP_PERFCOUNTER4_SELECT },
+	{ 38, A3XX_SP_PERFCOUNTER5_SELECT },
+	{ 39, A3XX_SP_PERFCOUNTER6_SELECT },
+	{ 40, A3XX_SP_PERFCOUNTER7_SELECT },
+};
+
+static struct a3xx_perfcounter_register a3xx_perfcounter_reg_rb[] = {
+	{ 41, A3XX_RB_PERFCOUNTER0_SELECT },
+	{ 42, A3XX_RB_PERFCOUNTER1_SELECT },
+};
+
+#define REGCOUNTER_GROUP(_x) { (_x), ARRAY_SIZE((_x)) }
+
+static struct {
+	struct a3xx_perfcounter_register *regs;
+	int count;
+} a3xx_perfcounter_reglist[] = {
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_cp),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_rbbm),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_pc),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_vfd),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_hlsq),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_vpc),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_tse),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_ras),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_uche),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_tp),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_sp),
+	REGCOUNTER_GROUP(a3xx_perfcounter_reg_rb),
+};
+
+static void a3xx_perfcounter_enable_pwr(struct kgsl_device *device,
+	unsigned int countable)
+{
+	unsigned int in, out;
+
+	adreno_regread(device, A3XX_RBBM_RBBM_CTL, &in);
+
+	if (countable == 0)
+		out = in | RBBM_RBBM_CTL_RESET_PWR_CTR0;
+	else
+		out = in | RBBM_RBBM_CTL_RESET_PWR_CTR1;
+
+	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, out);
+
+	if (countable == 0)
+		out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR0;
+	else
+		out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
+
+	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, out);
+}
+
+/*
+ * a3xx_perfcounter_enable - Configure a performance counter for a countable
+ * @adreno_dev: Adreno device to configure
+ * @group: Desired performance counter group
+ * @counter: Desired performance counter in the group
+ * @countable: Desired countable
+ *
+ * Physically set up a counter within a group with the desired countable
+ */
+
+static void a3xx_perfcounter_enable(struct adreno_device *adreno_dev,
+	unsigned int group, unsigned int counter, unsigned int countable)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	unsigned int val = 0;
+	struct a3xx_perfcounter_register *reg;
+
+	if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
+		return;
+
+	if (counter >= a3xx_perfcounter_reglist[group].count)
+		return;
+
+	/* Special case - power */
+	if (group == KGSL_PERFCOUNTER_GROUP_PWR)
+		return a3xx_perfcounter_enable_pwr(device, countable);
+
+	reg = &(a3xx_perfcounter_reglist[group].regs[counter]);
+
+	/* Select the desired perfcounter */
+	adreno_regwrite(device, reg->select, countable);
+
+	if (reg->load_bit < 32) {
+		val = 1 << reg->load_bit;
+		adreno_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, val);
+	} else {
+		val  = 1 << (reg->load_bit - 32);
+		adreno_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, val);
+	}
+}
+
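
The load_bit values in the tables above form one flat index across a pair of 32-bit load-command registers: bits 0-31 select RBBM_PERFCTR_LOAD_CMD0 and bits 32+ select LOAD_CMD1. A standalone sketch of the split; note the real driver writes the single bit as a one-shot command, whereas the shadow variables here only accumulate for display:

#include <stdio.h>

static unsigned int load_cmd0, load_cmd1;	/* shadows of the two registers */

static void load_counter(unsigned int load_bit)
{
	if (load_bit < 32)
		load_cmd0 |= 1u << load_bit;		/* LOAD_CMD0 */
	else
		load_cmd1 |= 1u << (load_bit - 32);	/* LOAD_CMD1 */
}

int main(void)
{
	load_counter(5);	/* PC counter 2 -> CMD0 bit 5 */
	load_counter(41);	/* RB counter 0 -> CMD1 bit 9 */
	printf("CMD0=%08x CMD1=%08x\n", load_cmd0, load_cmd1);
	return 0;
}
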
+static uint64_t a3xx_perfcounter_read(struct adreno_device *adreno_dev,
+	unsigned int group, unsigned int counter,
+	unsigned int offset)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	struct a3xx_perfcounter_register *reg = NULL;
+	unsigned int lo = 0, hi = 0;
+	unsigned int val;
+
+	if (group >= ARRAY_SIZE(a3xx_perfcounter_reglist))
+		return 0;
+
+	reg = &(a3xx_perfcounter_reglist[group].regs[counter]);
+
+	/* Freeze the counter */
+	adreno_regread(device, A3XX_RBBM_PERFCTR_CTL, &val);
+	val &= ~reg->load_bit;
+	adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, val);
+
+	/* Read the values */
+	adreno_regread(device, offset, &lo);
+	adreno_regread(device, offset + 1, &hi);
+
+	/* Re-Enable the counter */
+	val |= reg->load_bit;
+	adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, val);
+
+	return (((uint64_t) hi) << 32) | lo;
 }
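+
+/*
+ * Hypothetical usage sketch (for illustration only; the counter slot index
+ * 0 is an assumption - adreno_perfcounter_get() picks the real slot): a
+ * kernel client reserves a counter, gets back the offset of its _LO
+ * register and samples it as a 64 bit value:
+ *
+ *	unsigned int offset;
+ *	uint64_t cycles;
+ *
+ *	adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ *		SP_ALU_ACTIVE_CYCLES, &offset, PERFCOUNTER_FLAG_KERNEL);
+ *	cycles = a3xx_perfcounter_read(adreno_dev,
+ *		KGSL_PERFCOUNTER_GROUP_SP, 0, offset);
+ */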
 
 #define A3XX_IRQ_CALLBACK(_c) { .func = _c }
@@ -2687,26 +2924,22 @@
 static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
-	unsigned int reg, val;
-
-	/* Freeze the counter */
-	adreno_regread(device, A3XX_RBBM_RBBM_CTL, &reg);
-	reg &= ~RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
-	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+	unsigned int val;
+	unsigned int ret = 0;
 
 	/* Read the value */
 	adreno_regread(device, A3XX_RBBM_PERFCTR_PWR_1_LO, &val);
 
-	/* Reset the counter */
-	reg |= RBBM_RBBM_CTL_RESET_PWR_CTR1;
-	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+	/* Return 0 for the first read */
+	if (adreno_dev->gpu_cycles != 0) {
+		if (val < adreno_dev->gpu_cycles)
+			ret = (0xFFFFFFFF - adreno_dev->gpu_cycles) + val + 1;
+		else
+			ret = val - adreno_dev->gpu_cycles;
+	}
 
-	/* Re-enable the counter */
-	reg &= ~RBBM_RBBM_CTL_RESET_PWR_CTR1;
-	reg |= RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
-	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
-
-	return val;
+	adreno_dev->gpu_cycles = val;
+	return ret;
 }
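+
+/*
+ * Worked example for the wrap handling above (with the +1 correction): if
+ * the previous sample was 0xFFFFFFFE and the counter now reads 1, the
+ * reported delta is (0xFFFFFFFF - 0xFFFFFFFE) + 1 + 1 = 3, matching the
+ * three increments 0xFFFFFFFE -> 0xFFFFFFFF -> 0 -> 1.
+ */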
 
 struct a3xx_vbif_data {
@@ -2734,6 +2967,29 @@
 	{0, 0},
 };
 
+static struct a3xx_vbif_data a305b_vbif[] = {
+	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x00181818 },
+	{ A3XX_VBIF_IN_WR_LIM_CONF0, 0x00181818 },
+	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000018 },
+	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000018 },
+	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000303 },
+	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
+	{0, 0},
+};
+
+static struct a3xx_vbif_data a305c_vbif[] = {
+	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x00101010 },
+	{ A3XX_VBIF_IN_WR_LIM_CONF0, 0x00101010 },
+	{ A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000010 },
+	{ A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000010 },
+	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000101 },
+	{ A3XX_VBIF_ARB_CTL, 0x00000010 },
+	/* Set up AOOO */
+	{ A3XX_VBIF_OUT_AXI_AOOO_EN, 0x00000007 },
+	{ A3XX_VBIF_OUT_AXI_AOOO, 0x00070007 },
+	{0, 0},
+};
+
 static struct a3xx_vbif_data a320_vbif[] = {
 	/* Set up 16 deep read/write request queues */
 	{ A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
@@ -2784,17 +3040,81 @@
 	{0, 0},
 };
 
+/*
+ * Most of the VBIF registers on 8974v2 have the correct values at power on,
+ * so only the ones that need different values are set here
+ */
+static struct a3xx_vbif_data a330v2_vbif[] = {
+	/* Enable 1k sort */
+	{ A3XX_VBIF_ABIT_SORT, 0x0001003F },
+	{ A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
+	/* Enable WR-REQ */
+	{ A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003F },
+	{ A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+	/* Set up VBIF_ROUND_ROBIN_QOS_ARB */
+	{ A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
+	{0, 0},
+};
+
+static struct {
+	int(*devfunc)(struct adreno_device *);
+	struct a3xx_vbif_data *vbif;
+} a3xx_vbif_platforms[] = {
+	{ adreno_is_a305, a305_vbif },
+	{ adreno_is_a305c, a305c_vbif },
+	{ adreno_is_a320, a320_vbif },
+	/* A330v2 needs to be ahead of A330 so the right device matches */
+	{ adreno_is_a330v2, a330v2_vbif },
+	{ adreno_is_a330, a330_vbif },
+	{ adreno_is_a305b, a305b_vbif },
+};
+
+static void a3xx_perfcounter_init(struct adreno_device *adreno_dev)
+{
+	/*
+	 * Set an SP counter to count SP_ALU_ACTIVE_CYCLES, which includes
+	 * all ALU instruction execution regardless of precision or shader ID.
+	 * Set an SP counter to count SP0_ICL1_MISSES, which counts
+	 * USP L1 instruction miss requests.
+	 * Set an SP counter to count SP_FS_CFLOW_INSTRUCTIONS, which counts
+	 * USP flow control instruction execution.
+	 * These values are used to augment our hang detection.
+	 */
+	if (adreno_dev->fast_hang_detect) {
+		adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+			SP_ALU_ACTIVE_CYCLES, &ft_detect_regs[6],
+			PERFCOUNTER_FLAG_KERNEL);
+		ft_detect_regs[7] = ft_detect_regs[6] + 1;
+		adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+			SP0_ICL1_MISSES, &ft_detect_regs[8],
+			PERFCOUNTER_FLAG_KERNEL);
+		ft_detect_regs[9] = ft_detect_regs[8] + 1;
+		adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+			SP_FS_CFLOW_INSTRUCTIONS, &ft_detect_regs[10],
+			PERFCOUNTER_FLAG_KERNEL);
+		ft_detect_regs[11] = ft_detect_regs[10] + 1;
+	}
+
+	adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+		SP_FS_FULL_ALU_INSTRUCTIONS, NULL, PERFCOUNTER_FLAG_KERNEL);
+
+	/* Reserve and start countable 1 in the PWR perfcounter group */
+	adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
+			NULL, PERFCOUNTER_FLAG_KERNEL);
+}
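+
+/*
+ * Note on the pairing above (an assumption about adreno_perfcounter_get():
+ * it is taken to return the offset of the _LO half of a 64 bit counter, so
+ * the matching _HI half is the next register).  That is why, for example,
+ * ft_detect_regs[7] is simply ft_detect_regs[6] + 1.
+ */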
+
 static void a3xx_start(struct adreno_device *adreno_dev)
 {
 	struct kgsl_device *device = &adreno_dev->dev;
 	struct a3xx_vbif_data *vbif = NULL;
+	int i;
 
-	if (adreno_is_a305(adreno_dev))
-		vbif = a305_vbif;
-	else if (adreno_is_a320(adreno_dev))
-		vbif = a320_vbif;
-	else if (adreno_is_a330(adreno_dev))
-		vbif = a330_vbif;
+	for (i = 0; i < ARRAY_SIZE(a3xx_vbif_platforms); i++) {
+		if (a3xx_vbif_platforms[i].devfunc(adreno_dev)) {
+			vbif = a3xx_vbif_platforms[i].vbif;
+			break;
+		}
+	}
 
 	BUG_ON(vbif == NULL);
 
@@ -2832,10 +3152,18 @@
 
 	/* Enable Clock gating */
 	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
-			A3XX_RBBM_CLOCK_CTL_DEFAULT);
+		adreno_a3xx_rbbm_clock_ctl_default(adreno_dev));
+
+	if (adreno_is_a330v2(adreno_dev))
+		adreno_regwrite(device, A3XX_RBBM_GPR0_CTL,
+			A330v2_RBBM_GPR0_CTL_DEFAULT);
+	else if (adreno_is_a330(adreno_dev))
+		adreno_regwrite(device, A3XX_RBBM_GPR0_CTL,
+			A330_RBBM_GPR0_CTL_DEFAULT);
 
 	/* Set the OCMEM base address for A330 */
-	if (adreno_is_a330(adreno_dev)) {
+	if (adreno_is_a330(adreno_dev) ||
+		adreno_is_a305b(adreno_dev)) {
 		adreno_regwrite(device, A3XX_RB_GMEM_BASE_ADDR,
 			(unsigned int)(adreno_dev->ocmem_base >> 14));
 	}
@@ -2843,25 +3171,121 @@
 	/* Turn on performance counters */
 	adreno_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);
 
-	/*
-	 * Set SP perfcounter 5 to count SP_ALU_ACTIVE_CYCLES, it includes
-	 * all ALU instruction execution regardless precision or shader ID.
-	 * Set SP perfcounter 6 to count SP0_ICL1_MISSES, It counts
-	 * USP L1 instruction miss request.
-	 * Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS, it
-	 * counts USP flow control instruction execution.
-	 * we will use this to augment our hang detection
-	 */
-	if (adreno_dev->fast_hang_detect) {
-		adreno_regwrite(device, A3XX_SP_PERFCOUNTER5_SELECT,
-			SP_ALU_ACTIVE_CYCLES);
-		adreno_regwrite(device, A3XX_SP_PERFCOUNTER6_SELECT,
-			SP0_ICL1_MISSES);
-		adreno_regwrite(device, A3XX_SP_PERFCOUNTER7_SELECT,
-			SP_FS_CFLOW_INSTRUCTIONS);
-	}
+	/* Turn on the GPU busy counter and let it run free */
+
+	adreno_dev->gpu_cycles = 0;
 }
 
+/*
+ * Define the available perfcounter groups - these get used by
+ * adreno_perfcounter_get and adreno_perfcounter_put
+ */
+
+static struct adreno_perfcount_register a3xx_perfcounters_cp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_CP_0_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_rbbm[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_pc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_2_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PC_3_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_vfd[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VFD_1_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_hlsq[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_vpc[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_VPC_1_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_tse[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TSE_1_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_ras[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RAS_1_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_uche[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_tp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_2_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_3_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_4_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_TP_5_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_sp[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_1_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_2_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_3_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_4_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_5_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_6_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_SP_7_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_RB_1_LO, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_0_LO, 0 },
+	{ KGSL_PERFCOUNTER_NOT_USED, 0, A3XX_RBBM_PERFCTR_PWR_1_LO, 0 },
+};
+
+static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
+	{ a3xx_perfcounters_cp, ARRAY_SIZE(a3xx_perfcounters_cp) },
+	{ a3xx_perfcounters_rbbm, ARRAY_SIZE(a3xx_perfcounters_rbbm) },
+	{ a3xx_perfcounters_pc, ARRAY_SIZE(a3xx_perfcounters_pc) },
+	{ a3xx_perfcounters_vfd, ARRAY_SIZE(a3xx_perfcounters_vfd) },
+	{ a3xx_perfcounters_hlsq, ARRAY_SIZE(a3xx_perfcounters_hlsq) },
+	{ a3xx_perfcounters_vpc, ARRAY_SIZE(a3xx_perfcounters_vpc) },
+	{ a3xx_perfcounters_tse, ARRAY_SIZE(a3xx_perfcounters_tse) },
+	{ a3xx_perfcounters_ras, ARRAY_SIZE(a3xx_perfcounters_ras) },
+	{ a3xx_perfcounters_uche, ARRAY_SIZE(a3xx_perfcounters_uche) },
+	{ a3xx_perfcounters_tp, ARRAY_SIZE(a3xx_perfcounters_tp) },
+	{ a3xx_perfcounters_sp, ARRAY_SIZE(a3xx_perfcounters_sp) },
+	{ a3xx_perfcounters_rb, ARRAY_SIZE(a3xx_perfcounters_rb) },
+	{ a3xx_perfcounters_pwr, ARRAY_SIZE(a3xx_perfcounters_pwr) },
+};
+
+static struct adreno_perfcounters a3xx_perfcounters = {
+	a3xx_perfcounter_groups,
+	ARRAY_SIZE(a3xx_perfcounter_groups),
+};
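+
+/*
+ * Note (assumption): the order of a3xx_perfcounter_groups, like that of
+ * a3xx_perfcounter_reglist earlier, must line up with the
+ * KGSL_PERFCOUNTER_GROUP_* numbering used by a3xx_perfcounter_enable() and
+ * a3xx_perfcounter_read(); the RB group, for instance, sits at index 11 in
+ * both tables.
+ */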
+
 /* Defined in adreno_a3xx_snapshot.c */
 void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
 	int *remain, int hang);
@@ -2870,16 +3294,20 @@
 	.reg_rbbm_status = A3XX_RBBM_STATUS,
 	.reg_cp_pfp_ucode_addr = A3XX_CP_PFP_UCODE_ADDR,
 	.reg_cp_pfp_ucode_data = A3XX_CP_PFP_UCODE_DATA,
+	.perfcounters = &a3xx_perfcounters,
 
 	.ctxt_create = a3xx_drawctxt_create,
 	.ctxt_save = a3xx_drawctxt_save,
 	.ctxt_restore = a3xx_drawctxt_restore,
 	.ctxt_draw_workaround = NULL,
 	.rb_init = a3xx_rb_init,
+	.perfcounter_init = a3xx_perfcounter_init,
 	.irq_control = a3xx_irq_control,
 	.irq_handler = a3xx_irq_handler,
 	.irq_pending = a3xx_irq_pending,
 	.busy_cycles = a3xx_busy_cycles,
 	.start = a3xx_start,
 	.snapshot = a3xx_snapshot,
+	.perfcounter_enable = a3xx_perfcounter_enable,
+	.perfcounter_read = a3xx_perfcounter_read,
 };
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index d9d5ec8..58e3126 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
  *
  */
 
+#include <linux/io.h>
 #include "kgsl.h"
 #include "adreno.h"
 #include "kgsl_snapshot.h"
@@ -19,14 +20,43 @@
 #define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \
 		+ sizeof(struct kgsl_snapshot_debug))
 
+/* Shader memory size in words */
 #define SHADER_MEMORY_SIZE 0x4000
 
+/**
+ * _rbbm_debug_bus_read - Helper function to read data from the RBBM
+ * debug bus.
+ * @device - GPU device to read/write registers
+ * @block_id - Debug bus block to read from
+ * @index - Index in the debug bus block to read
+ * @val - Pointer to be filled with the value read
+ */
+static void _rbbm_debug_bus_read(struct kgsl_device *device,
+	unsigned int block_id, unsigned int index, unsigned int *val)
+{
+	unsigned int block = (block_id << 8) | (1 << 16);
+	adreno_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL, block | index);
+	adreno_regread(device, A3XX_RBBM_DEBUG_BUS_DATA_STATUS, val);
+}
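+
+/*
+ * Sketch of the CTL encoding used above (bit 16 is presumably an enable
+ * bit, bits 15:8 select the block and bits 7:0 the index): reading index
+ * 49 of the HLSQ block programs
+ *
+ *	A3XX_RBBM_DEBUG_BUS_CTL = (1 << 16) | (RBBM_BLOCK_ID_HLSQ << 8) | 49
+ *
+ * and then fetches the result from A3XX_RBBM_DEBUG_BUS_DATA_STATUS.
+ */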
+
+/**
+ * a3xx_snapshot_shader_memory - Helper function to dump the GPU shader
+ * memory to the snapshot buffer.
+ * @device - GPU device whose shader memory is to be dumped
+ * @snapshot - Pointer to binary snapshot data blob being made
+ * @remain - Number of remaining bytes in the snapshot blob
+ * @priv - Unused parameter
+ */
 static int a3xx_snapshot_shader_memory(struct kgsl_device *device,
 	void *snapshot, int remain, void *priv)
 {
 	struct kgsl_snapshot_debug *header = snapshot;
+	unsigned int i;
 	unsigned int *data = snapshot + sizeof(*header);
-	int i;
+	unsigned int shader_read_len = SHADER_MEMORY_SIZE;
+
+	if (SHADER_MEMORY_SIZE > (device->shader_mem_len >> 2))
+		shader_read_len = (device->shader_mem_len >> 2);
 
 	if (remain < DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE)) {
 		SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
@@ -36,8 +66,22 @@
 	header->type = SNAPSHOT_DEBUG_SHADER_MEMORY;
 	header->size = SHADER_MEMORY_SIZE;
 
-	for (i = 0; i < SHADER_MEMORY_SIZE; i++)
-		adreno_regread(device, 0x4000 + i, &data[i]);
+	/* Map shader memory to kernel, for dumping */
+	if (device->shader_mem_virt == NULL)
+		device->shader_mem_virt = devm_ioremap(device->dev,
+					device->shader_mem_phys,
+					device->shader_mem_len);
+
+	if (device->shader_mem_virt == NULL) {
+		KGSL_DRV_ERR(device,
+			"Unable to map shader memory region\n");
+		return 0;
+	}
+
+	/* Now, dump shader memory to snapshot */
+	for (i = 0; i < shader_read_len; i++)
+		adreno_shadermem_regread(device, i, &data[i]);
 
 	return DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE);
 }
@@ -170,7 +214,8 @@
 	int i, size;
 
 	/* The size of the ROQ buffer is core dependent */
-	size = adreno_is_a330(adreno_dev) ?
+	size = (adreno_is_a330(adreno_dev) ||
+		adreno_is_a305b(adreno_dev)) ?
 		A330_CP_ROQ_SIZE : A320_CP_ROQ_SIZE;
 
 	if (remain < DEBUG_SECTION_SZ(size)) {
@@ -220,66 +265,77 @@
 	return DEBUG_SECTION_SZ(size);
 }
 
-#define DEBUGFS_BLOCK_SIZE 0x40
+struct debugbus_block {
+	unsigned int block_id;
+	unsigned int dwords;
+};
 
 static int a3xx_snapshot_debugbus_block(struct kgsl_device *device,
 	void *snapshot, int remain, void *priv)
 {
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
 	struct kgsl_snapshot_debugbus *header = snapshot;
-	unsigned int id = (unsigned int) priv;
-	unsigned int val;
+	struct debugbus_block *block = priv;
 	int i;
 	unsigned int *data = snapshot + sizeof(*header);
-	int size =
-		(DEBUGFS_BLOCK_SIZE * sizeof(unsigned int)) + sizeof(*header);
+	unsigned int dwords;
+	int size;
+
+	/*
+	 * For A305 and A320 all debug bus regions are the same size (0x40).
+	 * For A330 and A305B they can be different sizes - most are still
+	 * 0x40, but some, like CP, are larger
+	 */
+
+	dwords = (adreno_is_a330(adreno_dev) ||
+		adreno_is_a305b(adreno_dev)) ?
+		block->dwords : 0x40;
+
+	size = (dwords * sizeof(unsigned int)) + sizeof(*header);
 
 	if (remain < size) {
 		SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
 		return 0;
 	}
 
-	val = (id << 8) | (1 << 16);
+	header->id = block->block_id;
+	header->count = dwords;
 
-	header->id = id;
-	header->count = DEBUGFS_BLOCK_SIZE;
-
-	for (i = 0; i < DEBUGFS_BLOCK_SIZE; i++) {
-		adreno_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL, val | i);
-		adreno_regread(device, A3XX_RBBM_DEBUG_BUS_DATA_STATUS,
-			&data[i]);
-	}
+	for (i = 0; i < dwords; i++)
+		_rbbm_debug_bus_read(device, block->block_id, i, &data[i]);
 
 	return size;
 }
 
-static unsigned int debugbus_blocks[] = {
-	RBBM_BLOCK_ID_CP,
-	RBBM_BLOCK_ID_RBBM,
-	RBBM_BLOCK_ID_VBIF,
-	RBBM_BLOCK_ID_HLSQ,
-	RBBM_BLOCK_ID_UCHE,
-	RBBM_BLOCK_ID_PC,
-	RBBM_BLOCK_ID_VFD,
-	RBBM_BLOCK_ID_VPC,
-	RBBM_BLOCK_ID_TSE,
-	RBBM_BLOCK_ID_RAS,
-	RBBM_BLOCK_ID_VSC,
-	RBBM_BLOCK_ID_SP_0,
-	RBBM_BLOCK_ID_SP_1,
-	RBBM_BLOCK_ID_SP_2,
-	RBBM_BLOCK_ID_SP_3,
-	RBBM_BLOCK_ID_TPL1_0,
-	RBBM_BLOCK_ID_TPL1_1,
-	RBBM_BLOCK_ID_TPL1_2,
-	RBBM_BLOCK_ID_TPL1_3,
-	RBBM_BLOCK_ID_RB_0,
-	RBBM_BLOCK_ID_RB_1,
-	RBBM_BLOCK_ID_RB_2,
-	RBBM_BLOCK_ID_RB_3,
-	RBBM_BLOCK_ID_MARB_0,
-	RBBM_BLOCK_ID_MARB_1,
-	RBBM_BLOCK_ID_MARB_2,
-	RBBM_BLOCK_ID_MARB_3,
+static struct debugbus_block debugbus_blocks[] = {
+	{ RBBM_BLOCK_ID_CP, 0x52, },
+	{ RBBM_BLOCK_ID_RBBM, 0x40, },
+	{ RBBM_BLOCK_ID_VBIF, 0x40, },
+	{ RBBM_BLOCK_ID_HLSQ, 0x40, },
+	{ RBBM_BLOCK_ID_UCHE, 0x40, },
+	{ RBBM_BLOCK_ID_PC, 0x40, },
+	{ RBBM_BLOCK_ID_VFD, 0x40, },
+	{ RBBM_BLOCK_ID_VPC, 0x40, },
+	{ RBBM_BLOCK_ID_TSE, 0x40, },
+	{ RBBM_BLOCK_ID_RAS, 0x40, },
+	{ RBBM_BLOCK_ID_VSC, 0x40, },
+	{ RBBM_BLOCK_ID_SP_0, 0x40, },
+	{ RBBM_BLOCK_ID_SP_1, 0x40, },
+	{ RBBM_BLOCK_ID_SP_2, 0x40, },
+	{ RBBM_BLOCK_ID_SP_3, 0x40, },
+	{ RBBM_BLOCK_ID_TPL1_0, 0x40, },
+	{ RBBM_BLOCK_ID_TPL1_1, 0x40, },
+	{ RBBM_BLOCK_ID_TPL1_2, 0x40, },
+	{ RBBM_BLOCK_ID_TPL1_3, 0x40, },
+	{ RBBM_BLOCK_ID_RB_0, 0x40, },
+	{ RBBM_BLOCK_ID_RB_1, 0x40, },
+	{ RBBM_BLOCK_ID_RB_2, 0x40, },
+	{ RBBM_BLOCK_ID_RB_3, 0x40, },
+	{ RBBM_BLOCK_ID_MARB_0, 0x40, },
+	{ RBBM_BLOCK_ID_MARB_1, 0x40, },
+	{ RBBM_BLOCK_ID_MARB_2, 0x40, },
+	{ RBBM_BLOCK_ID_MARB_3, 0x40, },
 };
 
 static void *a3xx_snapshot_debugbus(struct kgsl_device *device,
@@ -291,7 +347,7 @@
 		snapshot = kgsl_snapshot_add_section(device,
 			KGSL_SNAPSHOT_SECTION_DEBUGBUS, snapshot, remain,
 			a3xx_snapshot_debugbus_block,
-			(void *) debugbus_blocks[i]);
+			(void *) &debugbus_blocks[i]);
 	}
 
 	return snapshot;
@@ -309,18 +365,58 @@
 	struct kgsl_snapshot_registers_list *list,
 	struct adreno_device *adreno_dev)
 {
-	/* HLSQ specific registers */
+	struct kgsl_device *device = &adreno_dev->dev;
+
 	/*
-	 * Don't dump any a3xx HLSQ registers just yet.  Reading the HLSQ
-	 * registers can cause the device to hang if the HLSQ block is
-	 * busy.  Add specific checks for each a3xx core as the requirements
-	 * are discovered.  Disable by default for now.
+	 * Trying to read HLSQ registers when the HLSQ block is busy
+	 * will cause the device to hang.  The RBBM_DEBUG_BUS has information
+	 * that will tell us if the HLSQ block is busy or not.  Read values
+	 * from the debug bus to ensure the HLSQ block is not busy (this
+	 * is hardware dependent).  If the HLSQ block is busy do not
+	 * dump the registers, otherwise dump the HLSQ registers.
 	 */
-	if (!adreno_is_a3xx(adreno_dev)) {
-		regs[list->count].regs = (unsigned int *) a3xx_hlsq_registers;
-		regs[list->count].count = a3xx_hlsq_registers_count;
-		list->count++;
+
+	if (adreno_is_a330(adreno_dev)) {
+		/*
+		 * stall_ctxt_full status bit: RBBM_BLOCK_ID_HLSQ index 49 [27]
+		 *
+		 * if (!stall_context_full)
+		 * then dump HLSQ registers
+		 */
+		unsigned int stall_context_full = 0;
+
+		_rbbm_debug_bus_read(device, RBBM_BLOCK_ID_HLSQ, 49,
+				&stall_context_full);
+		stall_context_full &= 0x08000000;
+
+		if (stall_context_full)
+			return;
+	} else {
+		/*
+		 * tpif status bits: RBBM_BLOCK_ID_HLSQ index 4 [4:0]
+		 * spif status bits: RBBM_BLOCK_ID_HLSQ index 7 [5:0]
+		 *
+		 * if ((tpif == 0, 1, 28) && (spif == 0, 1, 10))
+		 * then dump HLSQ registers
+		 */
+		unsigned int next_pif = 0;
+
+		/* check tpif */
+		_rbbm_debug_bus_read(device, RBBM_BLOCK_ID_HLSQ, 4, &next_pif);
+		next_pif &= 0x1f;
+		if (next_pif != 0 && next_pif != 1 && next_pif != 28)
+			return;
+
+		/* check spif */
+		_rbbm_debug_bus_read(device, RBBM_BLOCK_ID_HLSQ, 7, &next_pif);
+		next_pif &= 0x3f;
+		if (next_pif != 0 && next_pif != 1 && next_pif != 10)
+			return;
 	}
+
+	regs[list->count].regs = (unsigned int *) a3xx_hlsq_registers;
+	regs[list->count].count = a3xx_hlsq_registers_count;
+	list->count++;
 }
 
 static void _snapshot_a330_regs(struct kgsl_snapshot_registers *regs,
@@ -342,6 +438,7 @@
 	struct kgsl_device *device = &adreno_dev->dev;
 	struct kgsl_snapshot_registers_list list;
 	struct kgsl_snapshot_registers regs[5];
+	int size;
 
 	list.registers = regs;
 	list.count = 0;
@@ -352,7 +449,7 @@
 	/* Store relevant registers in list to snapshot */
 	_snapshot_a3xx_regs(regs, &list);
 	_snapshot_hlsq_regs(regs, &list, adreno_dev);
-	if (adreno_is_a330(adreno_dev))
+	if (adreno_is_a330(adreno_dev) || adreno_is_a305b(adreno_dev))
 		_snapshot_a330_regs(regs, &list);
 
 	/* Master set of (non debug) registers */
@@ -360,10 +457,15 @@
 		KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
 		kgsl_snapshot_dump_regs, &list);
 
-	/* CP_STATE_DEBUG indexed registers */
+	/*
+	 * CP_STATE_DEBUG indexed registers - 0x14 (20) dwords on A305 and
+	 * A320 and 0x2E (46) dwords on A330 and A305B
+	 */
+	size = (adreno_is_a330(adreno_dev) ||
+		adreno_is_a305b(adreno_dev)) ? 0x2E : 0x14;
+
 	snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
 			remain, REG_CP_STATE_DEBUG_INDEX,
-			REG_CP_STATE_DEBUG_DATA, 0x0, 0x14);
+			REG_CP_STATE_DEBUG_DATA, 0x0, size);
 
 	/* CP_ME indexed registers */
 	snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
@@ -404,7 +506,8 @@
 			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
 			a3xx_snapshot_cp_roq, NULL);
 
-	if (adreno_is_a330(adreno_dev)) {
+	if (adreno_is_a330(adreno_dev) ||
+		adreno_is_a305b(adreno_dev)) {
 		snapshot = kgsl_snapshot_add_section(device,
 			KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
 			a330_snapshot_cp_merciu, NULL);
@@ -414,7 +517,7 @@
 
 	/* Enable Clock gating */
 	adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
-			A3XX_RBBM_CLOCK_CTL_DEFAULT);
+		adreno_a3xx_rbbm_clock_ctl_default(adreno_dev));
 
 	return snapshot;
 }
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 890c8a1..ef599e9 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -64,11 +64,6 @@
 	adreno_dev->fast_hang_detect = 1;
 	debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs,
 			   &adreno_dev->fast_hang_detect);
-
-	/* Top level switch to enable/disable userspace FT control */
-	adreno_dev->ft_user_control = 0;
-	debugfs_create_u32("ft_user_control", 0644, device->d_debugfs,
-			   &adreno_dev->ft_user_control);
 	/*
 	 * FT policy can be set to any of the options below.
 	 * KGSL_FT_DISABLE -> BIT(0) Set to disable FT
@@ -80,7 +75,6 @@
 	adreno_dev->ft_policy = KGSL_FT_DEFAULT_POLICY;
 	debugfs_create_u32("ft_policy", 0644, device->d_debugfs,
 			   &adreno_dev->ft_policy);
-
 	/* By default enable long IB detection */
 	adreno_dev->long_ib_detect = 1;
 	debugfs_create_u32("long_ib_detect", 0644, device->d_debugfs,
@@ -96,7 +90,7 @@
 	 * KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT -> BIT(3) Set to log only one
 	 * pagefault per INT.
 	 */
-	adreno_dev->ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY;
-	debugfs_create_u32("ft_pagefault_policy", 0644, device->d_debugfs,
-			   &adreno_dev->ft_pf_policy);
+	adreno_dev->ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY;
+	debugfs_create_u32("ft_pagefault_policy", 0644, device->d_debugfs,
+			   &adreno_dev->ft_pf_policy);
 }
diff --git a/drivers/gpu/msm/adreno_debugfs.h b/drivers/gpu/msm/adreno_debugfs.h
deleted file mode 100644
index 1c97ebb..0000000
--- a/drivers/gpu/msm/adreno_debugfs.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2002,2008-2012, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-#ifndef __ADRENO_DEBUGFS_H
-#define __ADRENO_DEBUGFS_H
-
-#ifdef CONFIG_DEBUG_FS
-
-int adreno_debugfs_init(struct kgsl_device *device);
-
-extern int adreno_pm_regs_enabled;
-extern int adreno_pm_ib_enabled;
-
-static inline int is_adreno_pm_regs_enabled(void)
-{
-	return adreno_pm_regs_enabled;
-}
-
-static inline int is_adreno_pm_ib_enabled(void)
-{
-	return adreno_pm_ib_enabled;
-}
-
-#else
-static inline int adreno_debugfs_init(struct kgsl_device *device)
-{
-	return 0;
-}
-
-static inline int kgsl_pmregs_enabled(void)
-{
-	
-	return 1;
-}
-#endif
-
-#endif 
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 6fbcdee..176717d 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/msm_kgsl.h>
 
 #include "kgsl.h"
 #include "kgsl_sharedmem.h"
@@ -143,7 +144,7 @@
  */
 int adreno_drawctxt_create(struct kgsl_device *device,
 			struct kgsl_pagetable *pagetable,
-			struct kgsl_context *context, uint32_t flags)
+			struct kgsl_context *context, uint32_t *flags)
 {
 	struct adreno_context *drawctxt;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -162,26 +163,36 @@
 	drawctxt->id = context->id;
 	rb->timestamp[context->id] = 0;
 
-	if (flags & KGSL_CONTEXT_PREAMBLE)
+	*flags &= (KGSL_CONTEXT_PREAMBLE |
+		KGSL_CONTEXT_NO_GMEM_ALLOC |
+		KGSL_CONTEXT_PER_CONTEXT_TS |
+		KGSL_CONTEXT_USER_GENERATED_TS |
+		KGSL_CONTEXT_NO_FAULT_TOLERANCE |
+		KGSL_CONTEXT_TYPE_MASK);
+
+	if (*flags & KGSL_CONTEXT_PREAMBLE)
 		drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
 
-	if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
+	if (*flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
 		drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
 
-	if (flags & KGSL_CONTEXT_PER_CONTEXT_TS)
+	if (*flags & KGSL_CONTEXT_PER_CONTEXT_TS)
 		drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
 
-	if (flags & KGSL_CONTEXT_USER_GENERATED_TS) {
-		if (!(flags & KGSL_CONTEXT_PER_CONTEXT_TS)) {
+	if (*flags & KGSL_CONTEXT_USER_GENERATED_TS) {
+		if (!(*flags & KGSL_CONTEXT_PER_CONTEXT_TS)) {
 			ret = -EINVAL;
 			goto err;
 		}
 		drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
 	}
 
-	if (flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+	if (*flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
 		drawctxt->flags |= CTXT_FLAGS_NO_FAULT_TOLERANCE;
 
+	drawctxt->type =
+		(*flags & KGSL_CONTEXT_TYPE_MASK) >> KGSL_CONTEXT_TYPE_SHIFT;
+
 	ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
 	if (ret)
 		goto err;
@@ -242,10 +253,6 @@
 	if (device->state != KGSL_STATE_HUNG)
 		adreno_idle(device);
 
-	if (adreno_is_a20x(adreno_dev) && adreno_dev->drawctxt_active)
-		kgsl_setstate(&device->mmu, adreno_dev->drawctxt_active->id,
-			KGSL_MMUFLAGS_PTUPDATE);
-
 	kgsl_sharedmem_free(&drawctxt->gpustate);
 	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
 
@@ -306,8 +313,10 @@
 		return;
 	}
 
-	KGSL_CTXT_INFO(device, "from %p to %p flags %d\n",
-			adreno_dev->drawctxt_active, drawctxt, flags);
+	KGSL_CTXT_INFO(device, "from %d to %d flags %d\n",
+		adreno_dev->drawctxt_active ?
+		adreno_dev->drawctxt_active->id : 0,
+		drawctxt ? drawctxt->id : 0, flags);
 
 	/* Save the old context */
 	adreno_dev->gpudev->ctxt_save(adreno_dev, adreno_dev->drawctxt_active);
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index fd60688..f0f3b6b 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,14 @@
 /* Context no fault tolerance */
 #define CTXT_FLAGS_NO_FAULT_TOLERANCE  BIT(16)
 
+/* Symbolic table for the adreno draw context type */
+#define ADRENO_DRAWCTXT_TYPES \
+	{ KGSL_CONTEXT_TYPE_ANY, "any" }, \
+	{ KGSL_CONTEXT_TYPE_GL, "GL" }, \
+	{ KGSL_CONTEXT_TYPE_CL, "CL" }, \
+	{ KGSL_CONTEXT_TYPE_C2D, "C2D" }, \
+	{ KGSL_CONTEXT_TYPE_RS, "RS" }
+
 struct kgsl_device;
 struct adreno_device;
 struct kgsl_device_private;
@@ -93,6 +101,9 @@
 	unsigned int id;
 	unsigned int ib_gpu_time_used;
 	uint32_t flags;
+	uint32_t pagefault;
+	unsigned long pagefault_ts;
+	unsigned int type;
 	struct kgsl_pagetable *pagetable;
 	struct kgsl_memdesc gpustate;
 	unsigned int reg_restore[3];
@@ -125,7 +136,7 @@
 int adreno_drawctxt_create(struct kgsl_device *device,
 			struct kgsl_pagetable *pagetable,
 			struct kgsl_context *context,
-			uint32_t flags);
+			uint32_t *flags);
 
 void adreno_drawctxt_destroy(struct kgsl_device *device,
 			  struct kgsl_context *context);
diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h
index a3fa312..f449870 100644
--- a/drivers/gpu/msm/adreno_pm4types.h
+++ b/drivers/gpu/msm/adreno_pm4types.h
@@ -143,10 +143,10 @@
 #define CP_IM_STORE            0x2c
 
 /* test 2 memory locations to dword values specified */
-#define CP_TEST_TWO_MEMS    0x71
+#define CP_TEST_TWO_MEMS	0x71
 
 /* PFP waits until the FIFO between the PFP and the ME is empty */
-#define CP_WAIT_FOR_ME      0x13
+#define CP_WAIT_FOR_ME		0x13
 
 /*
  * for a20x
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index cf1cf90..5b52fd8 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
  */
 
 #include <linux/vmalloc.h>
+#include <mach/board.h>
 
 #include "kgsl.h"
 #include "kgsl_sharedmem.h"
@@ -50,6 +51,7 @@
 	{CP_DRAW_INDX,			"DRW_NDX_"},
 	{CP_DRAW_INDX_BIN,		"DRW_NDXB"},
 	{CP_EVENT_WRITE,		"EVENT_WT"},
+	{CP_MEM_WRITE,			"MEM_WRIT"},
 	{CP_IM_LOAD,			"IN__LOAD"},
 	{CP_IM_LOAD_IMMEDIATE,		"IM_LOADI"},
 	{CP_IM_STORE,			"IM_STORE"},
@@ -69,6 +71,14 @@
 	{CP_WAIT_FOR_IDLE,		"WAIT4IDL"},
 };
 
+static const struct pm_id_name pm3_nop_values[] = {
+	{KGSL_CONTEXT_TO_MEM_IDENTIFIER,	"CTX_SWCH"},
+	{KGSL_CMD_IDENTIFIER,			"CMD__EXT"},
+	{KGSL_CMD_INTERNAL_IDENTIFIER,		"CMD__INT"},
+	{KGSL_START_OF_IB_IDENTIFIER,		"IB_START"},
+	{KGSL_END_OF_IB_IDENTIFIER,		"IB___END"},
+};
+
 static uint32_t adreno_is_pm4_len(uint32_t word)
 {
 	if (word == INVALID_RB_CMD)
@@ -128,6 +138,28 @@
 	return "????????";
 }
 
+static bool adreno_is_pm3_nop_value(uint32_t word)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pm3_nop_values); ++i) {
+		if (word == pm3_nop_values[i].id)
+			return true;
+	}
+	return false;
+}
+
+static const char *adreno_pm3_nop_name(uint32_t word)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pm3_nop_values); ++i) {
+		if (word == pm3_nop_values[i].id)
+			return pm3_nop_values[i].name;
+	}
+	return "????????";
+}
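+
+/*
+ * Example: a NOP payload equal to KGSL_CMD_IDENTIFIER is rendered in the
+ * ringbuffer dump below as the tag "CMD__EXT" instead of its raw hex value.
+ */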
+
 static void adreno_dump_regs(struct kgsl_device *device,
 			   const int *registers, int size)
 {
@@ -244,8 +276,13 @@
 				"%s", adreno_pm4_name(ptr4[j]));
 			*argp = -(adreno_is_pm4_len(ptr4[j])+1);
 		} else {
-			lx += scnprintf(linebuf + lx, linebuflen - lx,
-				"%8.8X", ptr4[j]);
+			if (adreno_is_pm3_nop_value(ptr4[j]))
+				lx += scnprintf(linebuf + lx, linebuflen - lx,
+					"%s", adreno_pm3_nop_name(ptr4[j]));
+			else
+				lx += scnprintf(linebuf + lx, linebuflen - lx,
+					"%8.8X", ptr4[j]);
+
 			if (*argp > 1)
 				--*argp;
 			else if (*argp == 1) {
@@ -665,7 +702,7 @@
 		" %08X\n", r1, r2, r3);
 
 	KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ",
-		kgsl_mmu_get_ptsize());
+		kgsl_mmu_get_ptsize(&device->mmu));
 
 	kgsl_regread(device, MH_MMU_TRAN_ERROR, &r1);
 	KGSL_LOG_DUMP(device, "        TRAN_ERROR = %08X\n", r1);
@@ -703,6 +740,7 @@
 	mb();
 
 	if (device->pm_dump_enable) {
 		if (adreno_is_a2xx(adreno_dev))
 			adreno_dump_a2xx(device);
 		else if (adreno_is_a3xx(adreno_dev))
@@ -728,7 +766,7 @@
 	if (!device->pm_dump_enable) {
 
 		KGSL_LOG_DUMP(device,
-			"RBBM STATUS %08X | IB1:%08X/%08X | IB2: %08X/%08X"
+			"STATUS %08X | IB1:%08X/%08X | IB2: %08X/%08X"
 			" | RPTR: %04X | WPTR: %04X\n",
 			rbbm_status,  cp_ib1_base, cp_ib1_bufsz, cp_ib2_base,
 			cp_ib2_bufsz, cp_rb_rptr, cp_rb_wptr);
@@ -890,7 +928,8 @@
 			adreno_dump_regs(device, a3xx_registers,
 					a3xx_registers_count);
 
-			if (adreno_is_a330(adreno_dev))
+			if (adreno_is_a330(adreno_dev) ||
+				adreno_is_a305b(adreno_dev))
 				adreno_dump_regs(device, a330_registers,
 					a330_registers_count);
 		}
diff --git a/drivers/gpu/msm/adreno_postmortem.h b/drivers/gpu/msm/adreno_postmortem.h
deleted file mode 100644
index 7706037..0000000
--- a/drivers/gpu/msm/adreno_postmortem.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ADRENO_POSTMORTEM_H
-#define __ADRENO_POSTMORTEM_H
-
-struct kgsl_device;
-
-int adreno_postmortem_dump(struct kgsl_device *device, int manual);
-
-#endif 
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 179027c..a4bb4fa 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -18,6 +18,7 @@
 #include "kgsl.h"
 #include "kgsl_sharedmem.h"
 #include "kgsl_cffdump.h"
+#include "kgsl_trace.h"
 
 #include "adreno.h"
 #include "adreno_pm4types.h"
@@ -319,7 +320,7 @@
 	return 0;
 }
 
-int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb)
 {
 	int status;
 	/*cp_rb_cntl_u cp_rb_cntl; */
@@ -331,9 +332,6 @@
 	if (rb->flags & KGSL_FLAGS_STARTED)
 		return 0;
 
-	if (init_ram)
-		rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;
-
 	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
 			   sizeof(struct kgsl_rbmemptrs));
 
@@ -433,8 +431,11 @@
 		return status;
 
 	/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
-	if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
+	if (adreno_is_a305(adreno_dev) || adreno_is_a305c(adreno_dev) ||
+		adreno_is_a320(adreno_dev))
 		adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000E0602);
+	else if (adreno_is_a330(adreno_dev) || adreno_is_a305b(adreno_dev))
+		adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x003E2008);
 
 	rb->rptr = 0;
 	rb->wptr = 0;
@@ -443,7 +444,9 @@
 	adreno_regwrite(device, REG_CP_ME_CNTL, 0);
 
 	/* ME init is GPU specific, so jump into the sub-function */
-	adreno_dev->gpudev->rb_init(adreno_dev, rb);
+	status = adreno_dev->gpudev->rb_init(adreno_dev, rb);
+	if (status)
+		return status;
 
 	/* idle device to validate ME INIT */
 	status = adreno_idle(device);
@@ -481,6 +484,7 @@
 	 */
 	rb->sizedwords = KGSL_RB_SIZE >> 2;
 
+	rb->buffer_desc.flags = KGSL_MEMFLAGS_GPUREADONLY;
 	/* allocate memory for ringbuffer */
 	status = kgsl_allocate_contiguous(&rb->buffer_desc,
 		(rb->sizedwords << 2));
@@ -564,6 +568,8 @@
 	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
 	/* 2 dwords to store the start of command sequence */
 	total_sizedwords += 2;
+	/* internal ib command identifier for the ringbuffer */
+	total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
 
 	/* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
 	total_sizedwords += context ? 13 : 0;
@@ -571,17 +577,25 @@
 	if (adreno_is_a3xx(adreno_dev))
 		total_sizedwords += 7;
 
+	if (adreno_is_a2xx(adreno_dev))
+		total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
+
 	total_sizedwords += 2; /* scratchpad ts for fault tolerance */
+	total_sizedwords += 3; /* sop timestamp */
+	total_sizedwords += 4; /* eop timestamp */
+
 	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS &&
 			!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
-		total_sizedwords += 3; /* sop timestamp */
-		total_sizedwords += 4; /* eop timestamp */
 		total_sizedwords += 3; /* global timestamp without cache
 					* flush for non-zero context */
-	} else {
-		total_sizedwords += 4; /* global timestamp for fault tolerance*/
 	}
 
+	if (adreno_is_a20x(adreno_dev))
+		total_sizedwords += 2; /* CACHE_FLUSH */
+
+	if (flags & KGSL_CMD_FLAGS_EOF)
+		total_sizedwords += 2;
+
 	ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
 	if (!ringcmds) {
 		/*
@@ -597,23 +611,9 @@
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
 
-	if (flags & KGSL_CMD_FLAGS_PMODE) {
-		/* disable protected mode error checking */
-		GSL_RB_WRITE(ringcmds, rcmd_gpu,
-			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
-	}
-
-	for (i = 0; i < sizedwords; i++) {
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
-		cmds++;
-	}
-
-	if (flags & KGSL_CMD_FLAGS_PMODE) {
-		/* re-enable protected mode error checking */
-		GSL_RB_WRITE(ringcmds, rcmd_gpu,
-			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
+	if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_INTERNAL_IDENTIFIER);
 	}
 
 	/* always increment the global timestamp. once. */
@@ -635,10 +635,45 @@
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
 
+	/* start-of-pipeline timestamp */
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_MEM_WRITE, 2));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+		KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
+
+	if (flags & KGSL_CMD_FLAGS_PMODE) {
+		/* disable protected mode error checking */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
+	}
+
+	for (i = 0; i < sizedwords; i++) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
+		cmds++;
+	}
+
+	if (flags & KGSL_CMD_FLAGS_PMODE) {
+		/* re-enable protected mode error checking */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
+	}
+
+	/*
+	 * HW workaround for MMU page faults caused by memory getting
+	 * freed before the GPU is done with it.
+	 */
+	if (adreno_is_a2xx(adreno_dev)) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
+	}
+
 	if (adreno_is_a3xx(adreno_dev)) {
 		/*
-		 * FLush HLSQ lazy updates to make sure there are no
-		 * rsources pending for indirect loads after the timestamp
+		 * Flush HLSQ lazy updates to make sure there are no
+		 * resources pending for indirect loads after the timestamp
 		 */
 
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
@@ -649,22 +684,19 @@
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
 	}
 
+	/*
+	 * end-of-pipeline timestamp.  If per-context timestamps are not
+	 * enabled, then context_id will be KGSL_MEMSTORE_GLOBAL so all
+	 * eop timestamps will work out.
+	 */
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+		KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
+
 	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS
 			&& !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
-		/* start-of-pipeline timestamp */
-		GSL_RB_WRITE(ringcmds, rcmd_gpu,
-			cp_type3_packet(CP_MEM_WRITE, 2));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
-
-		/* end-of-pipeline timestamp */
-		GSL_RB_WRITE(ringcmds, rcmd_gpu,
-			cp_type3_packet(CP_EVENT_WRITE, 3));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
 
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			cp_type3_packet(CP_MEM_WRITE, 2));
@@ -673,16 +705,14 @@
 				eoptimestamp)));
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
 			rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
-	} else {
-		GSL_RB_WRITE(ringcmds, rcmd_gpu,
-			cp_type3_packet(CP_EVENT_WRITE, 3));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
-		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
-			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
-						eoptimestamp)));
-		GSL_RB_WRITE(ringcmds, rcmd_gpu,
-				rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
 	}
+
+	if (adreno_is_a20x(adreno_dev)) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_EVENT_WRITE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH);
+	}
+
 	if (context) {
 		/* Conditional execution based on memory values */
 		GSL_RB_WRITE(ringcmds, rcmd_gpu,
@@ -960,43 +990,31 @@
 {
 	struct kgsl_device *device = dev_priv->device;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	unsigned int *link;
+	unsigned int *link = NULL;
 	unsigned int *cmds;
 	unsigned int i;
-	struct adreno_context *drawctxt;
+	struct adreno_context *drawctxt = NULL;
 	unsigned int start_index = 0;
+	int ret;
 
-	if (device->state & KGSL_STATE_HUNG)
-		return -EBUSY;
+	if (device->state & KGSL_STATE_HUNG) {
+		ret = -EBUSY;
+		goto done;
+	}
+
 	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
-	      context == NULL || ibdesc == 0 || numibs == 0)
-		return -EINVAL;
-
+	      context == NULL || ibdesc == NULL || numibs == 0) {
+		ret = -EINVAL;
+		goto done;
+	}
 	drawctxt = context->devctxt;
 
 	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
 		KGSL_CTXT_ERR(device, "proc %s failed fault tolerance"
 			" will not accept commands for context %d\n",
 			drawctxt->pid_name, drawctxt->id);
-		return -EDEADLK;
-	}
-
-	if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
-		KGSL_CTXT_ERR(device,
-			"proc %s triggered fault tolerance"
-			" skipping commands for context till EOF %d\n",
-			drawctxt->pid_name, drawctxt->id);
-		if (flags & KGSL_CMD_FLAGS_EOF)
-			drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
-		numibs = 0;
-	}
-
-	cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
-				GFP_KERNEL);
-	if (!link) {
-		KGSL_CORE_ERR("kzalloc(%d) failed\n",
-			sizeof(unsigned int) * (numibs * 3 + 4));
-		return -ENOMEM;
+		ret = -EDEADLK;
+		goto done;
 	}
 
 	/*When preamble is enabled, the preamble buffer with state restoration
@@ -1007,6 +1025,26 @@
 		adreno_dev->drawctxt_active == drawctxt)
 		start_index = 1;
 
+	if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
+		KGSL_CTXT_ERR(device,
+			"proc %s triggered fault tolerance"
+			" skipping commands for context till EOF %d\n",
+			drawctxt->pid_name, drawctxt->id);
+		if (flags & KGSL_CMD_FLAGS_EOF)
+			drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
+		if (start_index)
+			numibs = 1;
+		else
+			numibs = 0;
+	}
+
+	cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
+				GFP_KERNEL);
+	if (!link) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
 	if (!start_index) {
 		*cmds++ = cp_nop_packet(1);
 		*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
@@ -1021,9 +1059,15 @@
 		if (unlikely(adreno_dev->ib_check_level >= 1 &&
 		    !_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
 				ibdesc[i].sizedwords))) {
-			kfree(link);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto done;
 		}
+
+		if (ibdesc[i].sizedwords == 0) {
+			ret = -EINVAL;
+			goto done;
+		}
+
 		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
 		*cmds++ = ibdesc[i].gpuaddr;
 		*cmds++ = ibdesc[i].sizedwords;
@@ -1043,11 +1087,6 @@
 					(flags & KGSL_CMD_FLAGS_EOF),
 					&link[0], (cmds - link), *timestamp);
 
-	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
-		context->id, (unsigned int)ibdesc, numibs, *timestamp);
-
-	kfree(link);
-
 #ifdef CONFIG_MSM_KGSL_CFF_DUMP
 	/*
 	 * insert wait for idle after every IB1
@@ -1063,9 +1102,16 @@
 	 */
 	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
 		drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
-		return -EPROTO;
+		ret = -EPROTO;
 	} else
-		return 0;
+		ret = 0;
+
+done:
+	trace_kgsl_issueibcmds(device, context ? context->id : 0, ibdesc,
+		numibs, *timestamp, flags, ret,
+		drawctxt ? drawctxt->type : 0);
+
+	kfree(link);
+	return ret;
 }
 
 static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index fa03c05..e563ec7 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -97,8 +97,7 @@
 
 int adreno_ringbuffer_init(struct kgsl_device *device);
 
-int adreno_ringbuffer_start(struct adreno_ringbuffer *rb,
-				unsigned int init_ram);
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb);
 
 void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
 
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index f23586e..a76ed87 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -162,6 +162,12 @@
 static unsigned int sp_fs_pvt_mem_addr;
 
 /*
+ * Cached value of SP_VS_OBJ_START_REG and SP_FS_OBJ_START_REG.
+ */
+static unsigned int sp_vs_obj_start_reg;
+static unsigned int sp_fs_obj_start_reg;
+
+/*
  * Each load state block has two possible types.  Each type has a different
  * number of dwords per unit.  Use this handy lookup table to make sure
  * we dump the right amount of data from the indirect buffer
@@ -373,6 +379,26 @@
 		sp_fs_pvt_mem_addr = 0;
 	}
 
+	if (sp_vs_obj_start_reg) {
+		ret = kgsl_snapshot_get_object(device, ptbase,
+			sp_vs_obj_start_reg & 0xFFFFFFE0, 0,
+			SNAPSHOT_GPU_OBJECT_GENERIC);
+		if (ret < 0)
+			return -EINVAL;
+		snapshot_frozen_objsize += ret;
+		sp_vs_obj_start_reg = 0;
+	}
+
+	if (sp_fs_obj_start_reg) {
+		ret = kgsl_snapshot_get_object(device, ptbase,
+			sp_fs_obj_start_reg & 0xFFFFFFE0, 0,
+			SNAPSHOT_GPU_OBJECT_GENERIC);
+		if (ret < 0)
+			return -EINVAL;
+		snapshot_frozen_objsize += ret;
+		sp_fs_obj_start_reg = 0;
+	}
+
 	/* Finally: VBOs */
 
 	/* The number of active VBOs is stored in VFD_CONTROL_O[31:27] */
@@ -444,7 +470,7 @@
 	int offset = type0_pkt_offset(*ptr);
 	int i;
 
-	for (i = 0; i < size; i++, offset++) {
+	for (i = 0; i < size - 1; i++, offset++) {
 
 		/* Visiblity stream buffer */
 
@@ -505,11 +531,20 @@
 			case A3XX_SP_FS_PVT_MEM_ADDR_REG:
 				sp_fs_pvt_mem_addr = ptr[i + 1];
 				break;
+			case A3XX_SP_VS_OBJ_START_REG:
+				sp_vs_obj_start_reg = ptr[i + 1];
+				break;
+			case A3XX_SP_FS_OBJ_START_REG:
+				sp_fs_obj_start_reg = ptr[i + 1];
+				break;
 			}
 		}
 	}
 }
 
+static inline int parse_ib(struct kgsl_device *device, unsigned int ptbase,
+		unsigned int gpuaddr, unsigned int dwords);
+
 /* Add an IB as a GPU object, but first, parse it to find more goodies within */
 
 static int ib_add_gpu_object(struct kgsl_device *device, unsigned int ptbase,
@@ -549,32 +584,12 @@
 			if (adreno_cmd_is_ib(src[i])) {
 				unsigned int gpuaddr = src[i + 1];
 				unsigned int size = src[i + 2];
-				unsigned int ibbase;
 
-				/* Address of the last processed IB2 */
-				kgsl_regread(device, REG_CP_IB2_BASE, &ibbase);
+				ret = parse_ib(device, ptbase, gpuaddr, size);
 
-				/*
-				 * If this is the last IB2 that was executed,
-				 * then push it to make sure it goes into the
-				 * static space
-				 */
-
-				if (ibbase == gpuaddr)
-					push_object(device,
-						SNAPSHOT_OBJ_TYPE_IB, ptbase,
-						gpuaddr, size);
-				else {
-					ret = ib_add_gpu_object(device,
-						ptbase, gpuaddr, size);
-
-					/*
-					 * If adding the IB failed then stop
-					 * parsing
-					 */
-					if (ret < 0)
-						goto done;
-				}
+				/* If adding the IB failed then stop parsing */
+				if (ret < 0)
+					goto done;
 			} else {
 				ret = ib_parse_type3(device, &src[i], ptbase);
 				/*
@@ -604,29 +619,34 @@
 	return ret;
 }
 
-/* Snapshot the istore memory */
-static int snapshot_istore(struct kgsl_device *device, void *snapshot,
-	int remain, void *priv)
+/*
+ * We want to store the last executed IB1 and IB2 in the static region to ensure
+ * that we get at least some information out of the snapshot even if we can't
+ * access the dynamic data from the sysfs file.  Push all other IBs on the
+ * dynamic list
+ */
+static inline int parse_ib(struct kgsl_device *device, unsigned int ptbase,
+		unsigned int gpuaddr, unsigned int dwords)
 {
-	struct kgsl_snapshot_istore *header = snapshot;
-	unsigned int *data = snapshot + sizeof(*header);
-	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	int count, i;
+	unsigned int ib1base, ib2base;
+	int ret = 0;
 
-	count = adreno_dev->istore_size * adreno_dev->instruction_size;
+	/*
+	 * Check the IB address - if it is either the last executed IB1 or the
+	 * last executed IB2 then push it into the static blob otherwise put
+	 * it in the dynamic list
+	 */
 
-	if (remain < (count * 4) + sizeof(*header)) {
-		KGSL_DRV_ERR(device,
-			"snapshot: Not enough memory for the istore section");
-		return 0;
-	}
+	kgsl_regread(device, REG_CP_IB1_BASE, &ib1base);
+	kgsl_regread(device, REG_CP_IB2_BASE, &ib2base);
 
-	header->count = adreno_dev->istore_size;
+	if (gpuaddr == ib1base || gpuaddr == ib2base)
+		push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
+			gpuaddr, dwords);
+	else
+		ret = ib_add_gpu_object(device, ptbase, gpuaddr, dwords);
 
-	for (i = 0; i < count; i++)
-		kgsl_regread(device, ADRENO_ISTORE_START + i, &data[i]);
-
-	return (count * 4) + sizeof(*header);
+	return ret;
 }
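+
+/*
+ * Example of the decision above: if REG_CP_IB1_BASE reads back 0x1000,
+ * an IB at gpuaddr 0x1000 is pushed into the static object list, while an
+ * IB at any other address is added (and recursively parsed) as a dynamic
+ * GPU object.
+ */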
 
 /* Snapshot the ringbuffer memory */
@@ -779,12 +799,11 @@
 			 * others get marked at GPU objects
 			 */
 
-			if (ibaddr == ibbase || memdesc != NULL)
+			if (memdesc != NULL)
 				push_object(device, SNAPSHOT_OBJ_TYPE_IB,
 					ptbase, ibaddr, ibsize);
 			else
-				ib_add_gpu_object(device, ptbase, ibaddr,
-					ibsize);
+				parse_ib(device, ptbase, ibaddr, ibsize);
 		}
 
 		index = index + 1;
@@ -799,6 +818,64 @@
 	return size + sizeof(*header);
 }
 
+static int snapshot_capture_mem_list(struct kgsl_device *device, void *snapshot,
+			int remain, void *priv)
+{
+	struct kgsl_snapshot_replay_mem_list *header = snapshot;
+	struct kgsl_process_private *private = NULL, *p;
+	unsigned int ptbase;
+	struct rb_node *node;
+	struct kgsl_mem_entry *entry = NULL;
+	int num_mem;
+	unsigned int *data = snapshot + sizeof(*header);
+
+	ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
+	mutex_lock(&kgsl_driver.process_mutex);
+	list_for_each_entry(p, &kgsl_driver.process_list, list) {
+		if (kgsl_mmu_pt_equal(&device->mmu, p->pagetable,
+			ptbase)) {
+			private = p;
+			break;
+		}
+	}
+	mutex_unlock(&kgsl_driver.process_mutex);
+	if (!private) {
+		KGSL_DRV_ERR(device,
+			"Failed to get pointer to process private structure\n");
+		return 0;
+	}
+	/* We need to know the number of memory objects that the process has */
+	spin_lock(&private->mem_lock);
+	for (node = rb_first(&private->mem_rb), num_mem = 0; node; ) {
+		entry = rb_entry(node, struct kgsl_mem_entry, node);
+		node = rb_next(&entry->node);
+		num_mem++;
+	}
+
+	if (remain < ((num_mem * 3 * sizeof(unsigned int)) +
+			sizeof(*header))) {
+		KGSL_DRV_ERR(device,
+			"snapshot: Not enough memory for the mem list section");
+		spin_unlock(&private->mem_lock);
+		return 0;
+	}
+	header->num_entries = num_mem;
+	header->ptbase = ptbase;
+	/*
+	 * Walk through the memory list and store the
+	 * tuples(gpuaddr, size, memtype) in snapshot
+	 */
+	for (node = rb_first(&private->mem_rb); node; ) {
+		entry = rb_entry(node, struct kgsl_mem_entry, node);
+		node = rb_next(&entry->node);
+
+		*data++ = entry->memdesc.gpuaddr;
+		*data++ = entry->memdesc.size;
+		*data++ = (entry->memdesc.priv & KGSL_MEMTYPE_MASK) >>
+							KGSL_MEMTYPE_SHIFT;
+	}
+	spin_unlock(&private->mem_lock);
+	return sizeof(*header) + (num_mem * 3 * sizeof(unsigned int));
+}
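+
+/*
+ * The section payload written above is a flat array of three-word tuples;
+ * e.g. an entry at gpuaddr 0x40001000 of size 0x2000 with memtype 2 is
+ * stored as the consecutive words { 0x40001000, 0x2000, 2 } after the
+ * header.
+ */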
+
 /* Snapshot the memory for an indirect buffer */
 static int snapshot_ib(struct kgsl_device *device, void *snapshot,
 	int remain, void *priv)
@@ -829,15 +906,14 @@
 				continue;
 
 			if (adreno_cmd_is_ib(*src))
-				push_object(device, SNAPSHOT_OBJ_TYPE_IB,
-					obj->ptbase, src[1], src[2]);
-			else {
+				ret = parse_ib(device, obj->ptbase, src[1],
+					src[2]);
+			else
 				ret = ib_parse_type3(device, src, obj->ptbase);
 
-				/* Stop parsing if the type3 decode fails */
-				if (ret < 0)
-					break;
-			}
+			/* Stop parsing if the type3 decode fails */
+			if (ret < 0)
+				break;
 		}
 	}
 
@@ -903,6 +979,13 @@
 		snapshot, remain, snapshot_rb, NULL);
 
 	/*
+	 * Add a section that lists (gpuaddr, size, memtype) tuples of the
+	 * hanging process
+	 */
+	snapshot = kgsl_snapshot_add_section(device,
+			KGSL_SNAPSHOT_SECTION_MEMLIST, snapshot, remain,
+			snapshot_capture_mem_list, NULL);
+	/*
 	 * Make sure that the last IB1 that was being executed is dumped.
 	 * Since this was the last IB1 that was processed, we should have
 	 * already added it to the list during the ringbuffer parse but we
@@ -950,17 +1033,6 @@
 	for (i = 0; i < objbufptr; i++)
 		snapshot = dump_object(device, i, snapshot, remain);
 
-	/*
-	 * Only dump the istore on a hang - reading it on a running system
-	 * has a non 0 chance of hanging the GPU
-	 */
-
-	if (hang) {
-		snapshot = kgsl_snapshot_add_section(device,
-			KGSL_SNAPSHOT_SECTION_ISTORE, snapshot, remain,
-			snapshot_istore, NULL);
-	}
-
 	/* Add GPU specific sections - registers mainly, but other stuff too */
 	if (adreno_dev->gpudev->snapshot)
 		snapshot = adreno_dev->gpudev->snapshot(adreno_dev, snapshot,
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 3582a41..53ef392 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,7 +18,7 @@
 #include <linux/uaccess.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
-#include <linux/android_pmem.h>
+
 #include <linux/vmalloc.h>
 #include <linux/pm_runtime.h>
 #include <linux/genlock.h>
@@ -28,6 +28,7 @@
 #include <linux/msm_ion.h>
 #include <linux/io.h>
 #include <mach/socinfo.h>
+#include <linux/mman.h>
 
 #include "kgsl.h"
 #include "kgsl_debugfs.h"
@@ -52,6 +53,52 @@
 
 static struct ion_client *kgsl_ion_client;
 
+int kgsl_memfree_hist_init(void)
+{
+	void *base;
+
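+	/*
+	 * One kzalloc'd ring buffer records recently freed GPU allocations;
+	 * it is dumped through the memfree_history debugfs file.
+	 */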
+	base = kzalloc(KGSL_MEMFREE_HIST_SIZE, GFP_KERNEL);
+	kgsl_driver.memfree_hist.base_hist_rb = base;
+	if (base == NULL)
+		return -ENOMEM;
+	kgsl_driver.memfree_hist.size = KGSL_MEMFREE_HIST_SIZE;
+	kgsl_driver.memfree_hist.wptr = base;
+	return 0;
+}
+
+void kgsl_memfree_hist_exit(void)
+{
+	kfree(kgsl_driver.memfree_hist.base_hist_rb);
+	kgsl_driver.memfree_hist.base_hist_rb = NULL;
+}
+
+void kgsl_memfree_hist_set_event(unsigned int pid, unsigned int gpuaddr,
+			unsigned int size, int flags)
+{
+	struct kgsl_memfree_hist_elem *p;
+
+	void *base = kgsl_driver.memfree_hist.base_hist_rb;
+	int rbsize = kgsl_driver.memfree_hist.size;
+
+	if (base == NULL)
+		return;
+
+	mutex_lock(&kgsl_driver.memfree_hist_mutex);
+	p = kgsl_driver.memfree_hist.wptr;
+	p->pid = pid;
+	p->gpuaddr = gpuaddr;
+	p->size = size;
+	p->flags = flags;
+
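+	/* Advance the write pointer, wrapping to the start of the ring. */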
+	kgsl_driver.memfree_hist.wptr++;
+	if ((void *)kgsl_driver.memfree_hist.wptr >= base + rbsize) {
+		kgsl_driver.memfree_hist.wptr =
+			(struct kgsl_memfree_hist_elem *)base;
+	}
+	mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+}
+
+
 /* kgsl_get_mem_entry - get the mem_entry structure for the specified object
  * @device - Pointer to the device structure
  * @ptbase - the pagetable base of the object
@@ -99,13 +146,6 @@
 	return entry;
 }
 
-unsigned int kgsl_get_alloc_size(int detailed)
-{
-	unsigned int ret = 0;
-
-	return ret;
-}
-
 void
 kgsl_mem_entry_destroy(struct kref *kref)
 {
@@ -143,9 +183,19 @@
 }
 EXPORT_SYMBOL(kgsl_mem_entry_destroy);
 
-static
-void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
-				   struct kgsl_process_private *process)
+/**
+ * kgsl_mem_entry_track_gpuaddr - Insert a mem_entry in the address tree
+ * @process: the process that owns the memory
+ * @entry: the memory entry
+ *
+ * Insert a kgsl_mem_entry into the rb_tree for searching by GPU address.
+ * Not all mem_entries will have gpu addresses when first created, so this
+ * function may be called after creation when the GPU address is finally
+ * assigned.
+ */
+static void
+kgsl_mem_entry_track_gpuaddr(struct kgsl_process_private *process,
+				struct kgsl_mem_entry *entry)
 {
 	struct rb_node **node;
 	struct rb_node *parent = NULL;
@@ -170,8 +220,48 @@
 	rb_insert_color(&entry->node, &process->mem_rb);
 
 	spin_unlock(&process->mem_lock);
+}
 
+/**
+ * kgsl_mem_entry_attach_process - Attach a mem_entry to its owner process
+ * @entry: the memory entry
+ * @process: the owner process
+ *
+ * Attach a newly created mem_entry to its owner process so that
+ * it can be found later. The mem_entry will be added to mem_idr and have
+ * its 'id' field assigned. If the GPU address has been set, the entry
+ * will also be added to the mem_rb tree.
+ *
+ * @returns - 0 on success or error code on failure.
+ */
+static int
+kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
+				   struct kgsl_process_private *process)
+{
+	int ret;
+
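+	/*
+	 * idr_pre_get()/idr_get_new_above() pattern: preload, then
+	 * retry for as long as -EAGAIN is returned.
+	 */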
+	while (1) {
+		if (idr_pre_get(&process->mem_idr, GFP_KERNEL) == 0) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		spin_lock(&process->mem_lock);
+		ret = idr_get_new_above(&process->mem_idr, entry, 1,
+					&entry->id);
+		spin_unlock(&process->mem_lock);
+
+		if (ret == 0)
+			break;
+		else if (ret != -EAGAIN)
+			goto err;
+	}
 	entry->priv = process;
+
+	if (entry->memdesc.gpuaddr != 0)
+		kgsl_mem_entry_track_gpuaddr(process, entry);
+err:
+	return ret;
 }
 
 /* Detach a memory entry from a process and unmap it from the MMU */
@@ -181,6 +271,17 @@
 	if (entry == NULL)
 		return;
 
+	spin_lock(&entry->priv->mem_lock);
+
+	if (entry->id != 0)
+		idr_remove(&entry->priv->mem_idr, entry->id);
+	entry->id = 0;
+
+	if (entry->memdesc.gpuaddr != 0)
+		rb_erase(&entry->node, &entry->priv->mem_rb);
+
+	spin_unlock(&entry->priv->mem_lock);
+
 	entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
 	entry->priv = NULL;
 
@@ -199,14 +300,19 @@
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 
-	if (context == NULL)
-		return NULL;
+	if (context == NULL) {
+		KGSL_DRV_INFO(dev_priv->device, "kzalloc(%zu) failed\n",
+				sizeof(*context));
+		return ERR_PTR(-ENOMEM);
+	}
 
 	while (1) {
 		if (idr_pre_get(&dev_priv->device->context_idr,
 				GFP_KERNEL) == 0) {
-			kfree(context);
-			return NULL;
+			KGSL_DRV_INFO(dev_priv->device,
+					"idr_pre_get: ENOMEM\n");
+			ret = -ENOMEM;
+			goto func_end;
 		}
 
 		ret = idr_get_new_above(&dev_priv->device->context_idr,
@@ -216,26 +322,25 @@
 			break;
 	}
 
-	if (ret) {
-		kfree(context);
-		return NULL;
-	}
+	if (ret)
+		goto func_end;
 
 	/* MAX - 1, there is one memdesc in memstore for device info */
 	if (id >= KGSL_MEMSTORE_MAX) {
-		KGSL_DRV_ERR(dev_priv->device, "cannot have more than %d "
+		KGSL_DRV_INFO(dev_priv->device, "cannot have more than %d "
 				"ctxts due to memstore limitation\n",
 				KGSL_MEMSTORE_MAX);
 		idr_remove(&dev_priv->device->context_idr, id);
-		kfree(context);
-		return NULL;
+		ret = -ENOSPC;
+		goto func_end;
 	}
 
 	kref_init(&context->refcount);
 	context->id = id;
 	context->dev_priv = dev_priv;
 
-	if (kgsl_sync_timeline_create(context)) {
+	ret = kgsl_sync_timeline_create(context);
+	if (ret) {
 		idr_remove(&dev_priv->device->context_idr, id);
 		goto func_end;
 	}
@@ -257,7 +362,7 @@
 func_end:
 	if (ret) {
 		kfree(context);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	return context;
@@ -362,24 +467,6 @@
 	return ret;
 }
 
-int kgsl_register_ts_notifier(struct kgsl_device *device,
-			      struct notifier_block *nb)
-{
-	BUG_ON(device == NULL);
-	return atomic_notifier_chain_register(&device->ts_notifier_list,
-					      nb);
-}
-EXPORT_SYMBOL(kgsl_register_ts_notifier);
-
-int kgsl_unregister_ts_notifier(struct kgsl_device *device,
-				struct notifier_block *nb)
-{
-	BUG_ON(device == NULL);
-	return atomic_notifier_chain_unregister(&device->ts_notifier_list,
-						nb);
-}
-EXPORT_SYMBOL(kgsl_unregister_ts_notifier);
-
 int kgsl_check_timestamp(struct kgsl_device *device,
 	struct kgsl_context *context, unsigned int timestamp)
 {
@@ -430,7 +517,7 @@
 			INIT_COMPLETION(device->hwaccess_gate);
 			device->ftbl->suspend_context(device);
 			device->ftbl->stop(device);
-			pm_qos_update_request(&device->pm_qos_req_dma,
+			pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
 						PM_QOS_DEFAULT_VALUE);
 			kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
 			break;
@@ -579,12 +666,15 @@
 	private->pid = task_tgid_nr(current);
 	private->mem_rb = RB_ROOT;
 
+	idr_init(&private->mem_idr);
+
 	if (kgsl_mmu_enabled())
 	{
 		unsigned long pt_name;
+		struct kgsl_mmu *mmu = &cur_dev_priv->device->mmu;
 
 		pt_name = task_tgid_nr(current);
-		private->pagetable = kgsl_mmu_getpagetable(pt_name);
+		private->pagetable = kgsl_mmu_getpagetable(mmu, pt_name);
 		if (private->pagetable == NULL) {
 			kfree(private);
 			private = NULL;
@@ -607,7 +697,7 @@
 			 struct kgsl_process_private *private)
 {
 	struct kgsl_mem_entry *entry = NULL;
-	struct rb_node *node;
+	int next = 0;
 
 	if (!private)
 		return;
@@ -622,14 +712,22 @@
 
 	list_del(&private->list);
 
-	for (node = rb_first(&private->mem_rb); node; ) {
-		entry = rb_entry(node, struct kgsl_mem_entry, node);
-		node = rb_next(&entry->node);
-
-		rb_erase(&entry->node, &private->mem_rb);
+	while (1) {
+		rcu_read_lock();
+		entry = idr_get_next(&private->mem_idr, &next);
+		rcu_read_unlock();
+		if (entry == NULL)
+			break;
 		kgsl_mem_entry_detach_process(entry);
+		/*
+		 * Always start back at the beginning so that
+		 * all entries are removed, as with
+		 * list_for_each_entry_safe.
+		 */
+		next = 0;
 	}
 	kgsl_mmu_putpagetable(private->pagetable);
+	idr_destroy(&private->mem_idr);
 	kfree(private);
 unlock:
 	mutex_unlock(&kgsl_driver.process_mutex);
@@ -717,13 +815,6 @@
 	dev_priv->device = device;
 	filep->private_data = dev_priv;
 
-	/* Get file (per process) private struct */
-	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
-	if (dev_priv->process_priv ==  NULL) {
-		result = -ENOMEM;
-		goto err_freedevpriv;
-	}
-
 	mutex_lock(&device->mutex);
 	kgsl_check_suspended(device);
 
@@ -731,26 +822,45 @@
 		kgsl_sharedmem_set(&device->memstore, 0, 0,
 				device->memstore.size);
 
-		result = device->ftbl->start(device, true);
+		result = device->ftbl->init(device);
+		if (result)
+			goto err_freedevpriv;
 
-		if (result) {
-			mutex_unlock(&device->mutex);
-			goto err_putprocess;
-		}
+		result = device->ftbl->start(device);
+		if (result)
+			goto err_freedevpriv;
+
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
 	}
 	device->open_count++;
 	mutex_unlock(&device->mutex);
 
+	/*
+	 * Get file (per process) private struct. This must be done
+	 * after the first start so that the global pagetable mappings
+	 * are set up before we create the per-process pagetable.
+	 */
+	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
+	if (dev_priv->process_priv ==  NULL) {
+		result = -ENOMEM;
+		goto err_stop;
+	}
+
 	KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
 		device->name, kgsl_mmu_enabled() ? "on" : "off",
 		kgsl_pagetable_count);
 
 	return result;
 
-err_putprocess:
-	kgsl_put_process_private(device, dev_priv->process_priv);
+err_stop:
+	mutex_lock(&device->mutex);
+	device->open_count--;
+	if (device->open_count == 0) {
+		result = device->ftbl->stop(device);
+		kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+	}
 err_freedevpriv:
+	mutex_unlock(&device->mutex);
 	filep->private_data = NULL;
 	kfree(dev_priv);
 err_pmruntime:
@@ -765,7 +875,7 @@
 {
 	struct rb_node *node = private->mem_rb.rb_node;
 
-	if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
+	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr))
 		return NULL;
 
 	while (node != NULL) {
@@ -798,6 +908,77 @@
 	return kgsl_sharedmem_find_region(private, gpuaddr, 1);
 }
 
+/**
+ * kgsl_sharedmem_region_empty - Check if an address region is empty
+ *
+ * @private: private data for the process to check.
+ * @gpuaddr: start address of the region
+ * @size: length of the region.
+ *
+ * Checks that there are no existing allocations within an address
+ * region. Note that unlike other kgsl_sharedmem* search functions,
+ * this one manages locking on its own.
+ */
+int
+kgsl_sharedmem_region_empty(struct kgsl_process_private *private,
+	unsigned int gpuaddr, size_t size)
+{
+	int result = 1;
+	unsigned int gpuaddr_end = gpuaddr + size;
+
+	struct rb_node *node = private->mem_rb.rb_node;
+
+	if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr))
+		return 0;
+
+	/* don't overflow */
+	if (gpuaddr_end < gpuaddr)
+		return 0;
+
+	spin_lock(&private->mem_lock);
+	node = private->mem_rb.rb_node;
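+	/* Search the rb_tree for any entry overlapping [gpuaddr, gpuaddr_end) */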
+	while (node != NULL) {
+		struct kgsl_mem_entry *entry;
+		unsigned int memdesc_start, memdesc_end;
+
+		entry = rb_entry(node, struct kgsl_mem_entry, node);
+
+		memdesc_start = entry->memdesc.gpuaddr;
+		memdesc_end = memdesc_start
+				+ kgsl_memdesc_mmapsize(&entry->memdesc);
+
+		if (gpuaddr_end <= memdesc_start)
+			node = node->rb_left;
+		else if (memdesc_end <= gpuaddr)
+			node = node->rb_right;
+		else {
+			result = 0;
+			break;
+		}
+	}
+	spin_unlock(&private->mem_lock);
+	return result;
+}
+
+/**
+ * kgsl_sharedmem_find_id - find a memory entry by id
+ * @process: the owning process
+ * @id: id to find
+ *
+ * @returns - the mem_entry or NULL
+ */
+static inline struct kgsl_mem_entry *
+kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id)
+{
+	struct kgsl_mem_entry *entry;
+
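+	/* The idr lookup is RCU-safe, so no spinlock is needed here. */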
+	rcu_read_lock();
+	entry = idr_find(&process->mem_idr, id);
+	rcu_read_unlock();
+
+	return entry;
+}
+
 /*call all ioctl sub functions with driver locked*/
 static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
 					  unsigned int cmd, void *data)
@@ -934,11 +1115,8 @@
 	int result;
 
 	context = kgsl_find_context(dev_priv, param->context_id);
-	if (context == NULL) {
-		KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
-			param->context_id);
+	if (context == NULL)
 		return -EINVAL;
-	}
 	/*
 	 * A reference count is needed here, because waittimestamp may
 	 * block with the device mutex unlocked and userspace could
@@ -955,6 +1133,7 @@
 				      unsigned int cmd, void *data)
 {
 	int result = 0;
+	int i = 0;
 	struct kgsl_ringbuffer_issueibcmds *param = data;
 	struct kgsl_ibdesc *ibdesc;
 	struct kgsl_context *context;
@@ -962,20 +1141,11 @@
 	context = kgsl_find_context(dev_priv, param->drawctxt_id);
 	if (context == NULL) {
 		result = -EINVAL;
-		KGSL_DRV_ERR(dev_priv->device,
-			"invalid context_id %d\n",
-			param->drawctxt_id);
 		goto done;
 	}
 
 	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
-		KGSL_DRV_INFO(dev_priv->device,
-			"Using IB list mode for ib submission, numibs: %d\n",
-			param->numibs);
 		if (!param->numibs) {
-			KGSL_DRV_ERR(dev_priv->device,
-				"Invalid numibs as parameter: %d\n",
-				 param->numibs);
 			result = -EINVAL;
 			goto done;
 		}
@@ -986,9 +1156,6 @@
 		 */
 
 		if (param->numibs > 10000) {
-			KGSL_DRV_ERR(dev_priv->device,
-				"Too many IBs submitted. count: %d max 10000\n",
-				param->numibs);
 			result = -EINVAL;
 			goto done;
 		}
@@ -1028,6 +1195,18 @@
 		param->numibs = 1;
 	}
 
+	for (i = 0; i < param->numibs; i++) {
+		struct kgsl_pagetable *pt = dev_priv->process_priv->pagetable;
+
+		if (!kgsl_mmu_gpuaddr_in_range(pt, ibdesc[i].gpuaddr)) {
+			result = -ERANGE;
+			KGSL_DRV_ERR(dev_priv->device,
+				     "invalid ib base GPU virtual addr %x\n",
+				     ibdesc[i].gpuaddr);
+			goto free_ibdesc;
+		}
+	}
+
 	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
 					     context,
 					     ibdesc,
@@ -1035,8 +1214,6 @@
 					     &param->timestamp,
 					     param->flags);
 
-	trace_kgsl_issueibcmds(dev_priv->device, param, ibdesc, result);
-
 free_ibdesc:
 	kfree(ibdesc);
 done:
@@ -1075,11 +1252,8 @@
 	struct kgsl_context *context;
 
 	context = kgsl_find_context(dev_priv, param->context_id);
-	if (context == NULL) {
-		KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
-			param->context_id);
+	if (context == NULL)
 		return -EINVAL;
-	}
 
 	return _cmdstream_readtimestamp(dev_priv, context,
 			param->type, &param->timestamp);
@@ -1089,9 +1263,6 @@
 	void *priv, u32 id, u32 timestamp)
 {
 	struct kgsl_mem_entry *entry = priv;
-	spin_lock(&entry->priv->mem_lock);
-	rb_erase(&entry->node, &entry->priv->mem_rb);
-	spin_unlock(&entry->priv->mem_lock);
 	trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
 	kgsl_mem_entry_detach_process(entry);
 }
@@ -1144,11 +1315,8 @@
 	struct kgsl_context *context;
 
 	context = kgsl_find_context(dev_priv, param->context_id);
-	if (context == NULL) {
-		KGSL_DRV_ERR(dev_priv->device,
-			"invalid drawctxt context_id %d\n", param->context_id);
+	if (context == NULL)
 		return -EINVAL;
-	}
 
 	return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
 			context, param->timestamp, param->type);
@@ -1163,22 +1331,22 @@
 
 	context = kgsl_create_context(dev_priv);
 
-	if (context == NULL) {
-		result = -ENOMEM;
+	if (IS_ERR(context)) {
+		result = PTR_ERR(context);
 		goto done;
 	}
 
 	if (dev_priv->device->ftbl->drawctxt_create) {
 		result = dev_priv->device->ftbl->drawctxt_create(
 			dev_priv->device, dev_priv->process_priv->pagetable,
-			context, param->flags);
+			context, &param->flags);
 		if (result)
 			goto done;
 	}
 	trace_kgsl_context_create(dev_priv->device, context, param->flags);
 	param->drawctxt_id = context->id;
 done:
-	if (result && context)
+	if (result && !IS_ERR(context))
 		kgsl_context_detach(context);
 
 	return result;
@@ -1206,27 +1374,52 @@
 static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
 					unsigned int cmd, void *data)
 {
-	int result = 0;
 	struct kgsl_sharedmem_free *param = data;
 	struct kgsl_process_private *private = dev_priv->process_priv;
 	struct kgsl_mem_entry *entry = NULL;
 
 	spin_lock(&private->mem_lock);
 	entry = kgsl_sharedmem_find(private, param->gpuaddr);
-	if (entry)
-		rb_erase(&entry->node, &private->mem_rb);
-
 	spin_unlock(&private->mem_lock);
 
-	if (entry) {
-		trace_kgsl_mem_free(entry);
-		kgsl_mem_entry_detach_process(entry);
-	} else {
-		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
-		result = -EINVAL;
+	if (!entry) {
+		KGSL_MEM_INFO(dev_priv->device, "invalid gpuaddr %08x\n",
+				param->gpuaddr);
+		return -EINVAL;
 	}
+	trace_kgsl_mem_free(entry);
 
-	return result;
+	kgsl_memfree_hist_set_event(entry->priv->pid,
+				    entry->memdesc.gpuaddr,
+				    entry->memdesc.size,
+				    entry->memdesc.flags);
+
+	kgsl_mem_entry_detach_process(entry);
+	return 0;
+}
+
+static long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
+					unsigned int cmd, void *data)
+{
+	struct kgsl_gpumem_free_id *param = data;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_mem_entry *entry = NULL;
+
+	entry = kgsl_sharedmem_find_id(private, param->id);
+
+	if (!entry) {
+		KGSL_MEM_INFO(dev_priv->device, "invalid id %d\n", param->id);
+		return -EINVAL;
+	}
+	trace_kgsl_mem_free(entry);
+
+	kgsl_memfree_hist_set_event(entry->priv->pid,
+				    entry->memdesc.gpuaddr,
+				    entry->memdesc.size,
+				    entry->memdesc.flags);
+
+	kgsl_mem_entry_detach_process(entry);
+	return 0;
 }
 
 static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
@@ -1257,11 +1450,10 @@
 	dev_t rdev;
 	struct fb_info *info;
 
+	*start = 0;
+	*vstart = 0;
+	*len = 0;
 	*filep = NULL;
-#ifdef CONFIG_ANDROID_PMEM
-	if (!get_pmem_file(fd, start, vstart, len, filep))
-		return 0;
-#endif
 
 	fbfile = fget(fd);
 	if (fbfile == NULL) {
@@ -1302,10 +1494,8 @@
 
 	ret = -ERANGE;
 
-	if (phys == 0) {
-		KGSL_CORE_ERR("kgsl_get_phys_file returned phys=0\n");
+	if (phys == 0)
 		goto err;
-	}
 
 	/* Make sure the length of the region, the offset and the desired
 	 * size are all page aligned or bail
@@ -1313,19 +1503,13 @@
 	if ((len & ~PAGE_MASK) ||
 		(offset & ~PAGE_MASK) ||
 		(size & ~PAGE_MASK)) {
-		KGSL_CORE_ERR("length %lu, offset %u or size %u "
-				"is not page aligned\n",
-				len, offset, size);
+		KGSL_CORE_ERR("length, offset or size is not page aligned\n");
 		goto err;
 	}
 
 	/* The size or offset can never be greater than the PMEM length */
-	if (offset >= len || size > len) {
-		KGSL_CORE_ERR("offset %u or size %u "
-				"exceeds pmem length %lu\n",
-				offset, size, len);
+	if (offset >= len || size > len)
 		goto err;
-	}
 
 	/* If size is 0, then adjust it to default to the size of the region
 	 * minus the offset.  If size isn't zero, then make sure that it will
@@ -1343,6 +1527,8 @@
 	entry->memdesc.size = size;
 	entry->memdesc.physaddr = phys + offset;
 	entry->memdesc.hostptr = (void *) (virt + offset);
+	/* USE_CPU_MAP is not implemented for PMEM. */
+	entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
 
 	ret = memdesc_sg_phys(&entry->memdesc, phys + offset, size);
 	if (ret)
@@ -1350,18 +1536,14 @@
 
 	return 0;
 err:
-#ifdef CONFIG_ANDROID_PMEM
-	put_pmem_file(filep);
-#endif
 	return ret;
 }
 
 static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
-	void *addr, int size)
+	unsigned long paddr, int size)
 {
 	int i;
 	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
-	unsigned long paddr = (unsigned long) addr;
 
 	memdesc->sg = kgsl_sg_alloc(sglen);
 
@@ -1412,34 +1594,33 @@
 	return -EINVAL;
 }
 
-static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
+static int kgsl_setup_useraddr(struct kgsl_mem_entry *entry,
 			      struct kgsl_pagetable *pagetable,
-			      void *hostptr, unsigned int offset,
+			      unsigned long useraddr, unsigned int offset,
 			      size_t size)
 {
 	struct vm_area_struct *vma;
 	unsigned int len;
 
 	down_read(&current->mm->mmap_sem);
-	vma = find_vma(current->mm, (unsigned int) hostptr);
+	vma = find_vma(current->mm, useraddr);
 	up_read(&current->mm->mmap_sem);
 
 	if (!vma) {
-		KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
+		KGSL_CORE_ERR("find_vma(%lx) failed\n", useraddr);
 		return -EINVAL;
 	}
 
 	/* We don't necessarily start at vma->vm_start */
-	len = vma->vm_end - (unsigned long) hostptr;
+	len = vma->vm_end - useraddr;
 
 	if (offset >= len)
 		return -EINVAL;
 
-	if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
+	if (!KGSL_IS_PAGE_ALIGNED(useraddr) ||
 	    !KGSL_IS_PAGE_ALIGNED(len)) {
-		KGSL_CORE_ERR("user address len(%u)"
-			      "and start(%p) must be page"
-			      "aligned\n", len, hostptr);
+		KGSL_CORE_ERR("bad alignment: start(%lx) len(%u)\n",
+			      useraddr, len);
 		return -EINVAL;
 	}
 
@@ -1460,28 +1641,29 @@
 
 	entry->memdesc.pagetable = pagetable;
 	entry->memdesc.size = size;
-	entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
+	entry->memdesc.useraddr = useraddr + (offset & PAGE_MASK);
+	if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
+		entry->memdesc.gpuaddr = entry->memdesc.useraddr;
 
-	return memdesc_sg_virt(&entry->memdesc,
-		hostptr + (offset & PAGE_MASK), size);
+	return memdesc_sg_virt(&entry->memdesc, entry->memdesc.useraddr,
+				size);
 }
 
 #ifdef CONFIG_ASHMEM
 static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
 			     struct kgsl_pagetable *pagetable,
-			     int fd, void *hostptr, size_t size)
+			     int fd, unsigned long useraddr, size_t size)
 {
 	int ret;
 	struct vm_area_struct *vma;
 	struct file *filep, *vmfile;
 	unsigned long len;
-	unsigned int hostaddr = (unsigned int) hostptr;
 
-	vma = kgsl_get_vma_from_start_addr(hostaddr);
+	vma = kgsl_get_vma_from_start_addr(useraddr);
 	if (vma == NULL)
 		return -EINVAL;
 
-	if (vma->vm_pgoff || vma->vm_start != hostaddr) {
+	if (vma->vm_pgoff || vma->vm_start != useraddr) {
 		KGSL_CORE_ERR("Invalid vma region\n");
 		return -EINVAL;
 	}
@@ -1492,8 +1674,8 @@
 		size = len;
 
 	if (size != len) {
-		KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
-			      size, hostptr);
+		KGSL_CORE_ERR("Invalid size %zu for vma region %lx\n",
+			      size, useraddr);
 		return -EINVAL;
 	}
 
@@ -1513,9 +1695,11 @@
 	entry->priv_data = filep;
 	entry->memdesc.pagetable = pagetable;
 	entry->memdesc.size = ALIGN(size, PAGE_SIZE);
-	entry->memdesc.hostptr = hostptr;
+	entry->memdesc.useraddr = useraddr;
+	if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
+		entry->memdesc.gpuaddr = entry->memdesc.useraddr;
 
-	ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
+	ret = memdesc_sg_virt(&entry->memdesc, useraddr, size);
 	if (ret)
 		goto err;
 
@@ -1528,18 +1712,23 @@
 #else
 static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
 			     struct kgsl_pagetable *pagetable,
-			     int fd, void *hostptr, size_t size)
+			     int fd, unsigned long useraddr, size_t size)
 {
 	return -EINVAL;
 }
 #endif
 
 static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
-		struct kgsl_pagetable *pagetable, int fd)
+		struct kgsl_pagetable *pagetable, void *data)
 {
 	struct ion_handle *handle;
 	struct scatterlist *s;
 	struct sg_table *sg_table;
+	struct kgsl_map_user_mem *param = data;
+	int fd = param->fd;
+
+	if (!param->len)
+		return -EINVAL;
 
 	if (IS_ERR_OR_NULL(kgsl_ion_client))
 		return -ENODEV;
@@ -1554,6 +1743,8 @@
 	entry->priv_data = handle;
 	entry->memdesc.pagetable = pagetable;
 	entry->memdesc.size = 0;
+	/* USE_CPU_MAP is not implemented for ION. */
+	entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
 
 	sg_table = ion_sg_table(kgsl_ion_client, handle);
 
@@ -1571,6 +1762,8 @@
 		entry->memdesc.sglen++;
 	}
 
+	entry->memdesc.size = PAGE_ALIGN(entry->memdesc.size);
+
 	return 0;
 err:
 	ion_free(kgsl_ion_client, handle);
@@ -1596,7 +1789,20 @@
 	else
 		memtype = param->memtype;
 
+	/*
+	 * Mask off unknown flags from userspace. This way the caller can
+	 * check if a flag is supported by looking at the returned flags.
+	 * Note: CACHEMODE is ignored for this call. Caching should be
+	 * determined by type of allocation being mapped.
+	 */
+	param->flags &= KGSL_MEMFLAGS_GPUREADONLY
+			| KGSL_MEMTYPE_MASK
+			| KGSL_MEMALIGN_MASK
+			| KGSL_MEMFLAGS_USE_CPU_MAP;
+
 	entry->memdesc.flags = param->flags;
+	if (!kgsl_mmu_use_cpu_map(private->pagetable->mmu))
+		entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
 
 	switch (memtype) {
 	case KGSL_USER_MEM_TYPE_PMEM:
@@ -1622,8 +1828,8 @@
 		if (param->hostptr == 0)
 			break;
 
-		result = kgsl_setup_hostptr(entry, private->pagetable,
-					    (void *) param->hostptr,
+		result = kgsl_setup_useraddr(entry, private->pagetable,
+					    param->hostptr,
 					    param->offset, param->len);
 		entry->memtype = KGSL_MEM_ENTRY_USER;
 		break;
@@ -1640,14 +1846,13 @@
 			break;
 
 		result = kgsl_setup_ashmem(entry, private->pagetable,
-					   param->fd, (void *) param->hostptr,
+					   param->fd, param->hostptr,
 					   param->len);
 
 		entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
 		break;
 	case KGSL_USER_MEM_TYPE_ION:
-		result = kgsl_setup_ion(entry, private->pagetable,
-			param->fd);
+		result = kgsl_setup_ion(entry, private->pagetable, data);
 		break;
 	default:
 		KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
@@ -1662,27 +1867,31 @@
 	else if (entry->memdesc.size >= SZ_64K)
 		kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64));
 
-	result = kgsl_mmu_map(private->pagetable,
-			      &entry->memdesc,
-			      GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
-
+	result = kgsl_mmu_map(private->pagetable, &entry->memdesc);
 	if (result)
 		goto error_put_file_ptr;
 
 	/* Adjust the returned value for a non 4k aligned offset */
 	param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
+	/* echo back flags */
+	param->flags = entry->memdesc.flags;
+
+	result = kgsl_mem_entry_attach_process(entry, private);
+	if (result)
+		goto error_unmap;
 
 	KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
 		kgsl_driver.stats.mapped_max);
 
 	kgsl_process_add_stats(private, entry->memtype, param->len);
 
-	kgsl_mem_entry_attach_process(entry, private);
 	trace_kgsl_mem_map(entry, param->fd);
 
 	kgsl_check_idle(dev_priv->device);
 	return result;
 
+error_unmap:
+	kgsl_mmu_unmap(private->pagetable, &entry->memdesc);
 error_put_file_ptr:
 	switch (entry->memtype) {
 	case KGSL_MEM_ENTRY_PMEM:
@@ -1702,33 +1911,136 @@
 	return result;
 }
 
-/*This function flushes a graphics memory allocation from CPU cache
- *when caching is enabled with MMU*/
+static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry, int op)
+{
+	int ret = 0;
+	int cacheop;
+	int mode;
+
+	/*
+	 * Flush is defined as (clean | invalidate).  If both bits are set, then
+	 * do a flush, otherwise check for the individual bits and clean or inv
+	 * as requested
+	 */
+
+	if ((op & KGSL_GPUMEM_CACHE_FLUSH) == KGSL_GPUMEM_CACHE_FLUSH)
+		cacheop = KGSL_CACHE_OP_FLUSH;
+	else if (op & KGSL_GPUMEM_CACHE_CLEAN)
+		cacheop = KGSL_CACHE_OP_CLEAN;
+	else if (op & KGSL_GPUMEM_CACHE_INV)
+		cacheop = KGSL_CACHE_OP_INV;
+	else {
+		ret = -EINVAL;
+		goto done;
+	}
+
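+	/* Cache maintenance is skipped for uncached and write-combined buffers */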
+	mode = kgsl_memdesc_get_cachemode(&entry->memdesc);
+	if (mode != KGSL_CACHEMODE_UNCACHED
+		&& mode != KGSL_CACHEMODE_WRITECOMBINE) {
+		trace_kgsl_mem_sync_cache(entry, op);
+		kgsl_cache_range_op(&entry->memdesc, cacheop);
+	}
+
+done:
+	return ret;
+}
+
+/* New cache sync function - supports both directions (clean and invalidate) */
+
+static long
+kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
+	unsigned int cmd, void *data)
+{
+	struct kgsl_gpumem_sync_cache *param = data;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_mem_entry *entry = NULL;
+
+	if (param->id != 0) {
+		entry = kgsl_sharedmem_find_id(private, param->id);
+		if (entry == NULL) {
+			KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n",
+					param->id);
+			return -EINVAL;
+		}
+	} else if (param->gpuaddr != 0) {
+		spin_lock(&private->mem_lock);
+		entry = kgsl_sharedmem_find(private, param->gpuaddr);
+		spin_unlock(&private->mem_lock);
+		if (entry == NULL) {
+			KGSL_MEM_INFO(dev_priv->device,
+					"can't find gpuaddr %x\n",
+					param->gpuaddr);
+			return -EINVAL;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return _kgsl_gpumem_sync_cache(entry, param->op);
+}
+
+/* Legacy cache function, does a flush (clean + invalidate) */
+
 static long
 kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
 				 unsigned int cmd, void *data)
 {
-	int result = 0;
-	struct kgsl_mem_entry *entry;
 	struct kgsl_sharedmem_free *param = data;
 	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_mem_entry *entry = NULL;
 
 	spin_lock(&private->mem_lock);
 	entry = kgsl_sharedmem_find(private, param->gpuaddr);
-	if (!entry) {
-		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
-		result = -EINVAL;
-		goto done;
-	}
-	if (!entry->memdesc.hostptr) {
-		KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
-			param->gpuaddr);
-			goto done;
+	spin_unlock(&private->mem_lock);
+	if (entry == NULL) {
+		KGSL_MEM_INFO(dev_priv->device,
+				"can't find gpuaddr %x\n",
+				param->gpuaddr);
+		return -EINVAL;
 	}
 
-	kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
-done:
-	spin_unlock(&private->mem_lock);
+	return _kgsl_gpumem_sync_cache(entry, KGSL_GPUMEM_CACHE_FLUSH);
+}
+
+/*
+ * The common parts of kgsl_ioctl_gpumem_alloc and kgsl_ioctl_gpumem_alloc_id.
+ */
+int
+_gpumem_alloc(struct kgsl_device_private *dev_priv,
+		struct kgsl_mem_entry **ret_entry,
+		unsigned int size, unsigned int flags)
+{
+	int result;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_mem_entry *entry;
+
+	/*
+	 * Mask off unknown flags from userspace. This way the caller can
+	 * check if a flag is supported by looking at the returned flags.
+	 */
+	flags &= KGSL_MEMFLAGS_GPUREADONLY
+		| KGSL_CACHEMODE_MASK
+		| KGSL_MEMTYPE_MASK
+		| KGSL_MEMALIGN_MASK
+		| KGSL_MEMFLAGS_USE_CPU_MAP;
+
+	entry = kgsl_mem_entry_create();
+	if (entry == NULL)
+		return -ENOMEM;
+
+	result = kgsl_allocate_user(&entry->memdesc, private->pagetable, size,
+				    flags);
+	if (result != 0)
+		goto err;
+
+	entry->memtype = KGSL_MEM_ENTRY_KERNEL;
+
+	kgsl_check_idle(dev_priv->device);
+	*ret_entry = entry;
+	return result;
+err:
+	kfree(entry);
+	*ret_entry = NULL;
 	return result;
 }
 
@@ -1738,29 +2050,115 @@
 {
 	struct kgsl_process_private *private = dev_priv->process_priv;
 	struct kgsl_gpumem_alloc *param = data;
-	struct kgsl_mem_entry *entry;
+	struct kgsl_mem_entry *entry = NULL;
 	int result;
 
-	entry = kgsl_mem_entry_create();
-	if (entry == NULL)
-		return -ENOMEM;
+	param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
+	result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
+	if (result)
+		return result;
 
-	result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
-		param->size, param->flags);
+	result = kgsl_mmu_map(private->pagetable, &entry->memdesc);
+	if (result)
+		goto err;
 
-	if (result == 0) {
-		entry->memtype = KGSL_MEM_ENTRY_KERNEL;
-		kgsl_mem_entry_attach_process(entry, private);
-		param->gpuaddr = entry->memdesc.gpuaddr;
+	result = kgsl_mem_entry_attach_process(entry, private);
+	if (result != 0)
+		goto err;
 
-		kgsl_process_add_stats(private, entry->memtype, param->size);
-		trace_kgsl_mem_alloc(entry);
-	} else
-		kfree(entry);
+	kgsl_process_add_stats(private, entry->memtype, param->size);
+	trace_kgsl_mem_alloc(entry);
 
-	kgsl_check_idle(dev_priv->device);
+	param->gpuaddr = entry->memdesc.gpuaddr;
+	param->size = entry->memdesc.size;
+	param->flags = entry->memdesc.flags;
+	return result;
+err:
+	kgsl_sharedmem_free(&entry->memdesc);
+	kfree(entry);
 	return result;
 }
+
+static long
+kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
+			unsigned int cmd, void *data)
+{
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_gpumem_alloc_id *param = data;
+	struct kgsl_mem_entry *entry = NULL;
+	int result;
+
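+	/* Drop USE_CPU_MAP if the MMU cannot mirror CPU-side mappings. */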
+	if (!kgsl_mmu_use_cpu_map(private->pagetable->mmu))
+		param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
+
+	result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
+	if (result != 0)
+		goto err;
+
+	if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
+		result = kgsl_mmu_map(private->pagetable, &entry->memdesc);
+		if (result)
+			goto err;
+	}
+
+	result = kgsl_mem_entry_attach_process(entry, private);
+	if (result != 0)
+		goto err;
+
+	kgsl_process_add_stats(private, entry->memtype, param->size);
+	trace_kgsl_mem_alloc(entry);
+
+	param->id = entry->id;
+	param->flags = entry->memdesc.flags;
+	param->size = entry->memdesc.size;
+	param->mmapsize = kgsl_memdesc_mmapsize(&entry->memdesc);
+	param->gpuaddr = entry->memdesc.gpuaddr;
+	return result;
+err:
+	if (entry)
+		kgsl_sharedmem_free(&entry->memdesc);
+	kfree(entry);
+	return result;
+}
+
+static long
+kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
+			unsigned int cmd, void *data)
+{
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_gpumem_get_info *param = data;
+	struct kgsl_mem_entry *entry = NULL;
+	int result = 0;
+
+	if (param->id != 0) {
+		entry = kgsl_sharedmem_find_id(private, param->id);
+		if (entry == NULL) {
+			KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n",
+					param->id);
+			return -EINVAL;
+		}
+	} else if (param->gpuaddr != 0) {
+		spin_lock(&private->mem_lock);
+		entry = kgsl_sharedmem_find(private, param->gpuaddr);
+		spin_unlock(&private->mem_lock);
+		if (entry == NULL) {
+			KGSL_MEM_INFO(dev_priv->device,
+					"can't find gpuaddr %lx\n",
+					param->gpuaddr);
+			return -EINVAL;
+		}
+	} else {
+		return -EINVAL;
+	}
+	param->gpuaddr = entry->memdesc.gpuaddr;
+	param->id = entry->id;
+	param->flags = entry->memdesc.flags;
+	param->size = entry->memdesc.size;
+	param->mmapsize = kgsl_memdesc_mmapsize(&entry->memdesc);
+	param->useraddr = entry->memdesc.useraddr;
+	return result;
+}
+
 static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
 					unsigned int cmd, void *data)
 {
@@ -1924,7 +2322,7 @@
 static const struct {
 	unsigned int cmd;
 	kgsl_ioctl_func_t func;
-	int flags;
+	unsigned int flags;
 } kgsl_ioctl_funcs[] = {
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
 			kgsl_ioctl_device_getproperty,
@@ -1975,7 +2373,15 @@
 			KGSL_IOCTL_LOCK),
 	KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
 			kgsl_ioctl_device_setproperty,
-			KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE)
+			KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID,
+			kgsl_ioctl_gpumem_alloc_id, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID,
+			kgsl_ioctl_gpumem_free_id, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_GET_INFO,
+			kgsl_ioctl_gpumem_get_info, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE,
+			kgsl_ioctl_gpumem_sync_cache, 0),
 };
 
 static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
@@ -2067,7 +2473,11 @@
 		mutex_unlock(&dev_priv->device->mutex);
 	}
 
-	if (ret == 0 && (cmd & IOC_OUT)) {
+	/*
+	 * Still copy back on failure, but assume the function took
+	 * all necessary precautions to sanitize the return values.
+	 */
+	if (cmd & IOC_OUT) {
 		if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
 			ret = -EFAULT;
 	}
@@ -2135,6 +2545,8 @@
 kgsl_gpumem_vm_close(struct vm_area_struct *vma)
 {
 	struct kgsl_mem_entry *entry  = vma->vm_private_data;
+
+	entry->memdesc.useraddr = 0;
 	kgsl_mem_entry_put(entry);
 }
 
@@ -2144,8 +2556,145 @@
 	.close = kgsl_gpumem_vm_close,
 };
 
+static int
+get_mmap_entry(struct kgsl_process_private *private,
+		struct kgsl_mem_entry **out_entry, unsigned long pgoff,
+		unsigned long len)
+{
+	int ret = -EINVAL;
+	struct kgsl_mem_entry *entry;
+
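+	/* The offset may be an entry id or a GPU address expressed in pages. */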
+	entry = kgsl_sharedmem_find_id(private, pgoff);
+	if (entry == NULL) {
+		spin_lock(&private->mem_lock);
+		entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
+		spin_unlock(&private->mem_lock);
+	}
+
+	if (!entry)
+		return -EINVAL;
+
+	kgsl_mem_entry_get(entry);
+
+	if (!entry->memdesc.ops ||
+		!entry->memdesc.ops->vmflags ||
+		!entry->memdesc.ops->vmfault) {
+		ret = -EINVAL;
+		goto err_put;
+	}
+
+	if (entry->memdesc.useraddr != 0) {
+		ret = -EBUSY;
+		goto err_put;
+	}
+
+	if (len != kgsl_memdesc_mmapsize(&entry->memdesc)) {
+		ret = -ERANGE;
+		goto err_put;
+	}
+
+	*out_entry = entry;
+	return 0;
+err_put:
+	kgsl_mem_entry_put(entry);
+	return ret;
+}
+
+static unsigned long
+kgsl_get_unmapped_area(struct file *file, unsigned long addr,
+			unsigned long len, unsigned long pgoff,
+			unsigned long flags)
+{
+	unsigned long ret = 0;
+	unsigned long vma_offset = pgoff << PAGE_SHIFT;
+	struct kgsl_device_private *dev_priv = file->private_data;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_device *device = dev_priv->device;
+	struct kgsl_mem_entry *entry = NULL;
+	unsigned int align;
+	unsigned int retry = 0;
+
+	if (vma_offset == device->memstore.gpuaddr)
+		return get_unmapped_area(NULL, addr, len, pgoff, flags);
+
+	ret = get_mmap_entry(private, &entry, pgoff, len);
+	if (ret)
+		return ret;
+
+	if (!kgsl_memdesc_use_cpu_map(&entry->memdesc) || (flags & MAP_FIXED)) {
+		/*
+		 * If we're not going to use the same mapping on the gpu,
+		 * any address is fine.
+		 * For MAP_FIXED, hopefully the caller knows what they're doing,
+		 * but we may fail in mmap() if there is already something
+		 * at the virtual address chosen.
+		 */
+		ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
+		goto put;
+	}
+	if (entry->memdesc.gpuaddr != 0) {
+		KGSL_MEM_INFO(device,
+				"pgoff %lx already mapped to gpuaddr %x\n",
+				pgoff, entry->memdesc.gpuaddr);
+		ret = -EBUSY;
+		goto put;
+	}
+
+	align = kgsl_memdesc_get_align(&entry->memdesc);
+	if (align >= ilog2(SZ_1M))
+		align = ilog2(SZ_1M);
+	else if (align >= ilog2(SZ_64K))
+		align = ilog2(SZ_64K);
+	else if (align <= PAGE_SHIFT)
+		align = 0;
+
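+	/*
+	 * Ask for extra room so the returned address can be rounded up
+	 * to the requested alignment without overrunning the area.
+	 */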
+	if (align)
+		len += 1 << align;
+	do {
+		ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
+		if (IS_ERR_VALUE(ret))
+			break;
+		if (align)
+			ret = ALIGN(ret, (1 << align));
+
+		/* Make sure there isn't a GPU-only mapping at this address */
+		if (kgsl_sharedmem_region_empty(private, ret, len))
+			break;
+
+		trace_kgsl_mem_unmapped_area_collision(entry, addr, len, ret);
+
+		/*
+		 * If we collided, bump the hint address so that
+		 * get_unmapped_area knows to look somewhere else.
+		 */
+		addr = (addr == 0) ? ret + len : addr + len;
+
+		/*
+		 * The addr hint can be set by userspace to be near
+		 * the end of the address space. Make sure we search
+		 * the whole address space at least once by wrapping
+		 * back around once.
+		 */
+		if (!retry && (addr + len >= TASK_SIZE)) {
+			addr = 0;
+			retry = 1;
+		} else {
+			ret = -EBUSY;
+		}
+	} while (addr + len < TASK_SIZE);
+
+	if (IS_ERR_VALUE(ret))
+		KGSL_MEM_INFO(device,
+				"pid %d pgoff %lx len %ld failed error %ld\n",
+				private->pid, pgoff, len, ret);
+put:
+	kgsl_mem_entry_put(entry);
+	return ret;
+}
+
 static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	int ret;
+	unsigned int cache;
 	unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
 	struct kgsl_device_private *dev_priv = file->private_data;
 	struct kgsl_process_private *private = dev_priv->process_priv;
@@ -2157,31 +2706,76 @@
 	if (vma_offset == device->memstore.gpuaddr)
 		return kgsl_mmap_memstore(device, vma);
 
-	/* Find a chunk of GPU memory */
+	ret = get_mmap_entry(private, &entry, vma->vm_pgoff,
+				vma->vm_end - vma->vm_start);
+	if (ret)
+		return ret;
 
-	spin_lock(&private->mem_lock);
-	entry = kgsl_sharedmem_find(private, vma_offset);
+	if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
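+		/*
+		 * With USE_CPU_MAP the GPU address mirrors the CPU address,
+		 * so map the buffer into the pagetable at vma->vm_start.
+		 */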
+		entry->memdesc.gpuaddr = vma->vm_start;
 
-	if (entry)
-		kgsl_mem_entry_get(entry);
-
-	spin_unlock(&private->mem_lock);
-
-	if (entry == NULL)
-		return -EINVAL;
-
-	if (!entry->memdesc.ops ||
-		!entry->memdesc.ops->vmflags ||
-		!entry->memdesc.ops->vmfault)
-		return -EINVAL;
+		ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
+		if (ret) {
+			kgsl_mem_entry_put(entry);
+			return ret;
+		}
+		kgsl_mem_entry_track_gpuaddr(private, entry);
+	}
 
 	vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
 
 	vma->vm_private_data = entry;
-	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	/* Determine user-side caching policy */
+
+	cache = kgsl_memdesc_get_cachemode(&entry->memdesc);
+
+	switch (cache) {
+	case KGSL_CACHEMODE_UNCACHED:
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		break;
+	case KGSL_CACHEMODE_WRITETHROUGH:
+		vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
+		break;
+	case KGSL_CACHEMODE_WRITEBACK:
+		vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
+		break;
+	case KGSL_CACHEMODE_WRITECOMBINE:
+	default:
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+		break;
+	}
+
 	vma->vm_ops = &kgsl_gpumem_vm_ops;
+
+	if (cache == KGSL_CACHEMODE_WRITEBACK
+		|| cache == KGSL_CACHEMODE_WRITETHROUGH) {
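+		/*
+		 * Insert all pages up front for cacheable mappings rather
+		 * than faulting them in on demand.
+		 */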
+		struct scatterlist *s;
+		int i;
+		int sglen = entry->memdesc.sglen;
+		unsigned long addr = vma->vm_start;
+
+		/* don't map in the guard page, it should always fault */
+		if (kgsl_memdesc_has_guard_page(&entry->memdesc))
+			sglen--;
+
+		for_each_sg(entry->memdesc.sg, s, sglen, i) {
+			int j;
+			for (j = 0; j < (sg_dma_len(s) >> PAGE_SHIFT); j++) {
+				struct page *page = sg_page(s);
+				page = nth_page(page, j);
+				vm_insert_page(vma, addr, page);
+				addr += PAGE_SIZE;
+			}
+		}
+	}
+
 	vma->vm_file = file;
 
+	entry->memdesc.useraddr = vma->vm_start;
+
+	trace_kgsl_mem_mmap(entry);
+
 	return 0;
 }
 
@@ -2198,6 +2792,7 @@
 	.release = kgsl_release,
 	.open = kgsl_open,
 	.mmap = kgsl_mmap,
+	.get_unmapped_area = kgsl_get_unmapped_area,
 	.unlocked_ioctl = kgsl_ioctl,
 };
 
@@ -2205,6 +2800,8 @@
 	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
 	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
 	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
+	.memfree_hist_mutex =
+		__MUTEX_INITIALIZER(kgsl_driver.memfree_hist_mutex),
 };
 EXPORT_SYMBOL(kgsl_driver);
 
@@ -2287,6 +2884,7 @@
 
 	kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
 
+	/* Get starting physical address of device registers */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					   device->iomemname);
 	if (res == NULL) {
@@ -2304,6 +2902,33 @@
 	device->reg_phys = res->start;
 	device->reg_len = resource_size(res);
 
+	/*
+	 * Check if a shadermemname is defined, and then get shader memory
+	 * details including shader memory starting physical address
+	 * and shader memory length
+	 */
+	if (device->shadermemname != NULL) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						device->shadermemname);
+
+		if (res == NULL) {
+			KGSL_DRV_ERR(device,
+			"Shader memory: platform_get_resource_byname failed\n");
+		} else {
+			device->shader_mem_phys = res->start;
+			device->shader_mem_len = resource_size(res);
+
+			if (!devm_request_mem_region(device->dev,
+						device->shader_mem_phys,
+						device->shader_mem_len,
+						device->name))
+				KGSL_DRV_ERR(device,
+					"request_mem_region failed\n");
+		}
+	}
+
 	if (!devm_request_mem_region(device->dev, device->reg_phys,
 				device->reg_len, device->name)) {
 		KGSL_DRV_ERR(device, "request_mem_region failed\n");
@@ -2349,7 +2974,6 @@
 	if (result)
 		goto error_pwrctrl_close;
 
-	kgsl_cffdump_open(device->id);
 
 	setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
 	status = kgsl_create_device_workqueue(device);
@@ -2371,7 +2995,8 @@
 		goto error_close_mmu;
 	}
 
-	pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+	pm_qos_add_request(&device->pwrctrl.pm_qos_req_dma,
+				PM_QOS_CPU_DMA_LATENCY,
 				PM_QOS_DEFAULT_VALUE);
 
 	/* Initialize the snapshot engine */
@@ -2421,11 +3046,15 @@
 	if (device->pm_dump_enable) {
 
 		KGSL_LOG_DUMP(device,
-				"POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
-				pwr->power_flags, pwr->active_pwrlevel);
+			"POWER: NAP ALLOWED = %d | START_STOP_SLEEP_WAKE = %d\n",
+			pwr->nap_allowed, pwr->strtstp_sleepwake);
+
+		KGSL_LOG_DUMP(device,
+			"POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
+			pwr->power_flags, pwr->active_pwrlevel);
 
 		KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
-				pwr->interval_timeout);
+			pwr->interval_timeout);
 
 	}
 
@@ -2473,10 +3102,9 @@
 {
 	kgsl_device_snapshot_close(device);
 
-	kgsl_cffdump_close(device->id);
 	kgsl_pwrctrl_uninit_sysfs(device);
 
-	pm_qos_remove_request(&device->pm_qos_req_dma);
+	pm_qos_remove_request(&device->pwrctrl.pm_qos_req_dma);
 
 	idr_destroy(&device->context_idr);
 
@@ -2529,6 +3157,7 @@
 		kgsl_driver.class = NULL;
 	}
 
+	kgsl_memfree_hist_exit();
 	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
 }
 
@@ -2600,6 +3229,9 @@
 			goto err;
 	}
 
+	if (kgsl_memfree_hist_init())
+		KGSL_CORE_ERR("failed to init memfree_hist\n");
+
 	return 0;
 
 err:
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 3935164..c568db5 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -71,6 +71,23 @@
 #define KGSL_STATS_ADD(_size, _stat, _max) \
 	do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
 
+
+#define KGSL_MEMFREE_HIST_SIZE	((int)(PAGE_SIZE * 2))
+
+struct kgsl_memfree_hist_elem {
+	unsigned int pid;
+	unsigned int gpuaddr;
+	unsigned int size;
+	unsigned int flags;
+};
+
+struct kgsl_memfree_hist {
+	void *base_hist_rb;
+	unsigned int size;
+	struct kgsl_memfree_hist_elem *wptr;
+};
+
+
 struct kgsl_device;
 struct kgsl_context;
 
@@ -99,6 +116,9 @@
 
 	void *ptpool;
 
+	struct mutex memfree_hist_mutex;
+	struct kgsl_memfree_hist memfree_hist;
+
 	struct {
 		unsigned int vmalloc;
 		unsigned int vmalloc_max;
@@ -129,13 +149,16 @@
 #define KGSL_MEMDESC_GUARD_PAGE BIT(0)
 /* Set if the memdesc is mapped into all pagetables */
 #define KGSL_MEMDESC_GLOBAL BIT(1)
+/* The memdesc is frozen during a snapshot */
+#define KGSL_MEMDESC_FROZEN BIT(2)
 
 /* shared memory allocation */
 struct kgsl_memdesc {
 	struct kgsl_pagetable *pagetable;
-	void *hostptr;
+	void *hostptr; /* kernel virtual address */
+	unsigned long useraddr; /* userspace address */
 	unsigned int gpuaddr;
-	unsigned int physaddr;
+	phys_addr_t physaddr;
 	unsigned int size;
 	unsigned int priv; /* Internal flags and settings */
 	struct scatterlist *sg;
@@ -154,17 +177,13 @@
 #define KGSL_MEM_ENTRY_ION    4
 #define KGSL_MEM_ENTRY_MAX    5
 
-/* List of flags */
-
-#define KGSL_MEM_ENTRY_FROZEN (1 << 0)
-
 struct kgsl_mem_entry {
 	struct kref refcount;
 	struct kgsl_memdesc memdesc;
 	int memtype;
-	int flags;
 	void *priv_data;
 	struct rb_node node;
+	unsigned int id;
 	unsigned int context_id;
 	/* back pointer to private structure under whose context this
 	* allocation is made */
@@ -224,6 +243,10 @@
 static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
 				unsigned int gpuaddr, unsigned int size)
 {
+	/* don't overflow */
+	if ((gpuaddr + size) < gpuaddr)
+		return 0;
+
 	if (gpuaddr >= memdesc->gpuaddr &&
 	    ((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size))) {
 		return 1;
diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c
index e06c94d..6dc2ccc 100644
--- a/drivers/gpu/msm/kgsl_cffdump.c
+++ b/drivers/gpu/msm/kgsl_cffdump.c
@@ -28,6 +28,7 @@
 #include "kgsl_log.h"
 #include "kgsl_sharedmem.h"
 #include "adreno_pm4types.h"
+#include "adreno.h"
 
 static struct rchan	*chan;
 static struct dentry	*dir;
@@ -334,7 +335,7 @@
 		return;
 	}
 
-	kgsl_cff_dump_enable = 1;
+	kgsl_cff_dump_enable = 0;
 
 	spin_lock_init(&cffdump_lock);
 
@@ -356,10 +357,21 @@
 		debugfs_remove(dir);
 }
 
-void kgsl_cffdump_open(enum kgsl_deviceid device_id)
+void kgsl_cffdump_open(struct kgsl_device *device)
 {
-	kgsl_cffdump_memory_base(device_id, KGSL_PAGETABLE_BASE,
-			kgsl_mmu_get_ptsize(), SZ_256K);
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
+		kgsl_cffdump_memory_base(device->id,
+			kgsl_mmu_get_base_addr(&device->mmu),
+			kgsl_mmu_get_ptsize(&device->mmu) +
+			KGSL_IOMMU_GLOBAL_MEM_SIZE, adreno_dev->gmem_size);
+	} else {
+		kgsl_cffdump_memory_base(device->id,
+			kgsl_mmu_get_base_addr(&device->mmu),
+			kgsl_mmu_get_ptsize(&device->mmu),
+			adreno_dev->gmem_size);
+	}
 }
 
 void kgsl_cffdump_memory_base(enum kgsl_deviceid device_id, unsigned int base,
@@ -387,7 +399,7 @@
 }
 
 void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
-	const struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes,
+	struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes,
 	bool clean_cache)
 {
 	const void *src;
@@ -522,7 +534,7 @@
 }
 
 static struct dentry *create_buf_file_handler(const char *filename,
-	struct dentry *parent, int mode, struct rchan_buf *buf,
+	struct dentry *parent, unsigned short mode, struct rchan_buf *buf,
 	int *is_global)
 {
 	return debugfs_create_file(filename, mode, parent, buf,
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
index 2733cc3..d5656f8 100644
--- a/drivers/gpu/msm/kgsl_cffdump.h
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -22,10 +22,10 @@
 
 void kgsl_cffdump_init(void);
 void kgsl_cffdump_destroy(void);
-void kgsl_cffdump_open(enum kgsl_deviceid device_id);
+void kgsl_cffdump_open(struct kgsl_device *device);
 void kgsl_cffdump_close(enum kgsl_deviceid device_id);
 void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
-	const struct kgsl_memdesc *memdesc, uint physaddr, uint sizebytes,
+	struct kgsl_memdesc *memdesc, uint physaddr, uint sizebytes,
 	bool clean_cache);
 void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes);
 void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
@@ -49,7 +49,7 @@
 
 #define kgsl_cffdump_init()					(void)0
 #define kgsl_cffdump_destroy()					(void)0
-#define kgsl_cffdump_open(device_id)				(void)0
+#define kgsl_cffdump_open(device)				(void)0
 #define kgsl_cffdump_close(device_id)				(void)0
 #define kgsl_cffdump_syncmem(dev_priv, memdesc, addr, sizebytes, clean_cache) \
 	(void) 0
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index b41bd6b..9dfda32 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -125,6 +125,52 @@
 KGSL_DEBUGFS_LOG(pwr_log);
 KGSL_DEBUGFS_LOG(ft_log);
 
+static int memfree_hist_print(struct seq_file *s, void *unused)
+{
+	void *base = kgsl_driver.memfree_hist.base_hist_rb;
+
+	struct kgsl_memfree_hist_elem *wptr = kgsl_driver.memfree_hist.wptr;
+	struct kgsl_memfree_hist_elem *p;
+	char str[16];
+
+	seq_printf(s, "%8s %8s %8s %11s\n",
+			"pid", "gpuaddr", "size", "flags");
+
+	mutex_lock(&kgsl_driver.memfree_hist_mutex);
+	p = wptr;
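+	/* Start at the write pointer (oldest entry) and walk the full ring. */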
+	for (;;) {
+		kgsl_get_memory_usage(str, sizeof(str), p->flags);
+		/*
+		 * If the ring buffer is not filled up yet,
+		 * all of its empty elements have size == 0;
+		 * just skip them.
+		 */
+		if (p->size)
+			seq_printf(s, "%8d %08x %8d %11s\n",
+				p->pid, p->gpuaddr, p->size, str);
+		p++;
+		if ((void *)p >= base + kgsl_driver.memfree_hist.size)
+			p = (struct kgsl_memfree_hist_elem *) base;
+
+		if (p == kgsl_driver.memfree_hist.wptr)
+			break;
+	}
+	mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+	return 0;
+}
+
+static int memfree_hist_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, memfree_hist_print, inode->i_private);
+}
+
+static const struct file_operations memfree_hist_fops = {
+	.open = memfree_hist_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 void kgsl_device_debugfs_init(struct kgsl_device *device)
 {
 	if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
@@ -151,6 +197,8 @@
 				&mem_log_fops);
 	debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
 				&pwr_log_fops);
+	debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
+				&memfree_hist_fops);
 	debugfs_create_file("log_level_ft", 0644, device->d_debugfs, device,
 				&ft_log_fops);
 
@@ -198,35 +246,71 @@
 	return '-';
 }
 
+static char get_cacheflag(const struct kgsl_memdesc *m)
+{
+	static const char table[] = {
+		[KGSL_CACHEMODE_WRITECOMBINE] = '-',
+		[KGSL_CACHEMODE_UNCACHED] = 'u',
+		[KGSL_CACHEMODE_WRITEBACK] = 'b',
+		[KGSL_CACHEMODE_WRITETHROUGH] = 't',
+	};
+	return table[kgsl_memdesc_get_cachemode(m)];
+}
+
+static void print_mem_entry(struct seq_file *s, struct kgsl_mem_entry *entry)
+{
+	char flags[6];
+	char usage[16];
+	struct kgsl_memdesc *m = &entry->memdesc;
+
+	flags[0] = kgsl_memdesc_is_global(m) ?  'g' : '-';
+	flags[1] = m->flags & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
+	flags[2] = get_alignflag(m);
+	flags[3] = get_cacheflag(m);
+	flags[4] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-';
+	flags[5] = '\0';
+
+	kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
+
+	seq_printf(s, "%08x %08lx %8d %5d %5s %10s %16s %5d\n",
+			m->gpuaddr, m->useraddr, m->size, entry->id, flags,
+			memtype_str(entry->memtype), usage, m->sglen);
+}
+
 static int process_mem_print(struct seq_file *s, void *unused)
 {
 	struct kgsl_mem_entry *entry;
 	struct rb_node *node;
 	struct kgsl_process_private *private = s->private;
-	char flags[4];
-	char usage[16];
+	int next = 0;
 
+	seq_printf(s, "%8s %8s %8s %5s %5s %10s %16s %5s\n",
+		   "gpuaddr", "useraddr", "size", "id", "flags", "type",
+		   "usage", "sglen");
+
+	/* print all entries with a GPU address */
 	spin_lock(&private->mem_lock);
-	seq_printf(s, "%8s %8s %5s %10s %16s %5s\n",
-		   "gpuaddr", "size", "flags", "type", "usage", "sglen");
+
 	for (node = rb_first(&private->mem_rb); node; node = rb_next(node)) {
-		struct kgsl_memdesc *m;
-
 		entry = rb_entry(node, struct kgsl_mem_entry, node);
-		m = &entry->memdesc;
-
-		flags[0] = m->priv & KGSL_MEMDESC_GLOBAL ?  'g' : '-';
-		flags[1] = m->flags & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
-		flags[2] = get_alignflag(m);
-		flags[3] = '\0';
-
-		kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
-
-		seq_printf(s, "%08x %8d %5s %10s %16s %5d\n",
-			   m->gpuaddr, m->size, flags,
-			   memtype_str(entry->memtype), usage, m->sglen);
+		print_mem_entry(s, entry);
 	}
+
 	spin_unlock(&private->mem_lock);
+
+	/* now print all the unbound entries */
+	while (1) {
+		rcu_read_lock();
+		entry = idr_get_next(&private->mem_idr, &next);
+		rcu_read_unlock();
+
+		if (entry == NULL)
+			break;
+		if (entry->memdesc.gpuaddr == 0)
+			print_mem_entry(s, entry);
+		next++;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index b215d8c..0d11660 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -73,7 +73,8 @@
 	int (*idle) (struct kgsl_device *device);
 	unsigned int (*isidle) (struct kgsl_device *device);
 	int (*suspend_context) (struct kgsl_device *device);
-	int (*start) (struct kgsl_device *device, unsigned int init_ram);
+	int (*init) (struct kgsl_device *device);
+	int (*start) (struct kgsl_device *device);
 	int (*stop) (struct kgsl_device *device);
 	int (*getproperty) (struct kgsl_device *device,
 		enum kgsl_property_type type, void *value,
@@ -105,7 +106,7 @@
 			uint32_t flags);
 	int (*drawctxt_create) (struct kgsl_device *device,
 		struct kgsl_pagetable *pagetable, struct kgsl_context *context,
-		uint32_t flags);
+		uint32_t *flags);
 	void (*drawctxt_destroy) (struct kgsl_device *device,
 		struct kgsl_context *context);
 	long (*ioctl) (struct kgsl_device_private *dev_priv,
@@ -145,11 +146,27 @@
 	unsigned int ver_minor;
 	uint32_t flags;
 	enum kgsl_deviceid id;
+
+	/* Starting physical address for GPU registers */
 	unsigned long reg_phys;
+
+	/* Starting Kernel virtual address for GPU registers */
 	void *reg_virt;
+
+	/* Total memory size for all GPU registers */
 	unsigned int reg_len;
+
+	/* Kernel virtual address for GPU shader memory */
+	void *shader_mem_virt;
+
+	/* Starting physical address for GPU shader memory */
+	unsigned long shader_mem_phys;
+
+	/* GPU shader memory size */
+	unsigned int shader_mem_len;
 	struct kgsl_memdesc memstore;
 	const char *iomemname;
+	const char *shadermemname;
 
 	struct kgsl_mh mh;
 	struct kgsl_mmu mmu;
@@ -160,7 +177,6 @@
 	struct kgsl_pwrctrl pwrctrl;
 	int open_count;
 
-	struct atomic_notifier_head ts_notifier_list;
 	struct mutex mutex;
 	uint32_t state;
 	uint32_t requested_state;
@@ -201,7 +217,6 @@
 	int pm_dump_enable;
 	struct kgsl_pwrscale pwrscale;
 	struct kobject pwrscale_kobj;
-	struct pm_qos_request pm_qos_req_dma;
 	struct work_struct ts_expired_ws;
 	struct list_head events;
 	struct list_head events_pending_list;
@@ -210,16 +225,16 @@
 	/* Postmortem Control switches */
 	int pm_regs_enabled;
 	int pm_ib_enabled;
+
+	int reset_counter; /* Track how many GPU core resets have occurred */
 };
 
 void kgsl_process_events(struct work_struct *work);
-void kgsl_check_fences(struct work_struct *work);
 
 #define KGSL_DEVICE_COMMON_INIT(_dev) \
 	.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
 	.suspend_gate = COMPLETION_INITIALIZER((_dev).suspend_gate),\
 	.ft_gate = COMPLETION_INITIALIZER((_dev).ft_gate),\
-	.ts_notifier_list = ATOMIC_NOTIFIER_INIT((_dev).ts_notifier_list),\
 	.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
 			kgsl_idle_check),\
 	.ts_expired_ws  = __WORK_INITIALIZER((_dev).ts_expired_ws,\
@@ -266,6 +281,7 @@
 	pid_t pid;
 	spinlock_t mem_lock;
 	struct rb_root mem_rb;
+	struct idr mem_idr;
 	struct kgsl_pagetable *pagetable;
 	struct list_head list;
 	struct kobject kobj;
@@ -391,12 +407,6 @@
 int kgsl_check_timestamp(struct kgsl_device *device,
 		struct kgsl_context *context, unsigned int timestamp);
 
-int kgsl_register_ts_notifier(struct kgsl_device *device,
-			      struct notifier_block *nb);
-
-int kgsl_unregister_ts_notifier(struct kgsl_device *device,
-				struct notifier_block *nb);
-
 int kgsl_device_platform_probe(struct kgsl_device *device);
 
 void kgsl_device_platform_remove(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_drm.c b/drivers/gpu/msm/kgsl_drm.c
index 2a5a5fa..11d6ffa 100644
--- a/drivers/gpu/msm/kgsl_drm.c
+++ b/drivers/gpu/msm/kgsl_drm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,8 @@
  */
 #include "drmP.h"
 #include "drm.h"
-#include <linux/android_pmem.h>
+
+#include <linux/msm_ion.h>
 
 #include "kgsl.h"
 #include "kgsl_device.h"
@@ -27,7 +28,7 @@
 #define DRIVER_AUTHOR           "Qualcomm"
 #define DRIVER_NAME             "kgsl"
 #define DRIVER_DESC             "KGSL DRM"
-#define DRIVER_DATE             "20100127"
+#define DRIVER_DATE             "20121107"
 
 #define DRIVER_MAJOR            2
 #define DRIVER_MINOR            1
@@ -106,6 +107,7 @@
 	uint32_t type;
 	struct kgsl_memdesc memdesc;
 	struct kgsl_pagetable *pagetable;
+	struct ion_handle *ion_handle;
 	uint64_t mmap_offset;
 	int bufcount;
 	int flags;
@@ -129,86 +131,18 @@
 	struct list_head wait_list;
 };
 
+static struct ion_client *kgsl_drm_ion_client;
+
 static int kgsl_drm_inited = DRM_KGSL_NOT_INITED;
 
 /* This is a global list of all the memory currently mapped in the MMU */
 static struct list_head kgsl_mem_list;
 
-static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
-{
-	int cacheop = 0;
-
-	switch (op) {
-	case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
-		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
-			    DRM_KGSL_GEM_CACHE_WBACKWA))
-			cacheop = KGSL_CACHE_OP_CLEAN;
-
-		break;
-
-	case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
-		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
-			    DRM_KGSL_GEM_CACHE_WBACKWA |
-			    DRM_KGSL_GEM_CACHE_WTHROUGH))
-			cacheop = KGSL_CACHE_OP_INV;
-	}
-
-	kgsl_cache_range_op(memdesc, cacheop);
-}
-
-/* TODO:
- * Add vsync wait */
-
-static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
-{
-	return 0;
-}
-
-static int kgsl_drm_unload(struct drm_device *dev)
-{
-	return 0;
-}
-
 struct kgsl_drm_device_priv {
 	struct kgsl_device *device[KGSL_DEVICE_MAX];
 	struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
 };
 
-void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
-}
-
-static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
-{
-	return 0;
-}
-
-static int kgsl_drm_resume(struct drm_device *dev)
-{
-	return 0;
-}
-
-static void
-kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_kgsl_gem_object *priv = obj->driver_private;
-	struct drm_map_list *list;
-
-	list = &obj->map_list;
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-	if (list->file_offset_node) {
-		drm_mm_put_block(list->file_offset_node);
-		list->file_offset_node = NULL;
-	}
-
-	kfree(list->map);
-	list->map = NULL;
-
-	priv->mmap_offset = 0;
-}
-
 static int
 kgsl_gem_memory_allocated(struct drm_gem_object *obj)
 {
@@ -220,6 +154,8 @@
 kgsl_gem_alloc_memory(struct drm_gem_object *obj)
 {
 	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	struct sg_table *sg_table;
+	struct scatterlist *s;
 	int index;
 	int result = 0;
 
@@ -237,21 +173,52 @@
 		}
 	}
 
-	/* Set the flags for the memdesc (probably 0, unless it is cached) */
-	priv->memdesc.priv = 0;
-
 	if (TYPE_IS_PMEM(priv->type)) {
 		if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
 		    priv->type & DRM_KGSL_GEM_PMEM_EBI) {
-				result = kgsl_sharedmem_ebimem_user(
-						&priv->memdesc,
-						priv->pagetable,
-						obj->size * priv->bufcount);
-				if (result) {
-					DRM_ERROR(
-					"Unable to allocate PMEM memory\n");
-					return result;
-				}
+			priv->ion_handle = ion_alloc(kgsl_drm_ion_client,
+				obj->size * priv->bufcount, PAGE_SIZE,
+				ION_HEAP(ION_SF_HEAP_ID), 0);
+			if (IS_ERR_OR_NULL(priv->ion_handle)) {
+				DRM_ERROR(
+				"Unable to allocate ION Phys memory handle\n");
+				return -ENOMEM;
+			}
+
+			priv->memdesc.pagetable = priv->pagetable;
+
+			result = ion_phys(kgsl_drm_ion_client,
+				priv->ion_handle, (ion_phys_addr_t *)
+				&priv->memdesc.physaddr, &priv->memdesc.size);
+			if (result) {
+				DRM_ERROR(
+				"Unable to get ION Physical memory address\n");
+				ion_free(kgsl_drm_ion_client,
+					priv->ion_handle);
+				priv->ion_handle = NULL;
+				return result;
+			}
+
+			result = memdesc_sg_phys(&priv->memdesc,
+				priv->memdesc.physaddr, priv->memdesc.size);
+			if (result) {
+				DRM_ERROR(
+				"Unable to get sg list\n");
+				ion_free(kgsl_drm_ion_client,
+					priv->ion_handle);
+				priv->ion_handle = NULL;
+				return result;
+			}
+
+			result = kgsl_mmu_map(priv->pagetable, &priv->memdesc);
+			if (result) {
+				DRM_ERROR(
+				"kgsl_mmu_map failed.  result = %d\n", result);
+				ion_free(kgsl_drm_ion_client,
+					priv->ion_handle);
+				priv->ion_handle = NULL;
+				return result;
+			}
 		}
 		else
 			return -EINVAL;
@@ -262,15 +229,44 @@
 			priv->type & DRM_KGSL_GEM_CACHE_MASK)
 				list_add(&priv->list, &kgsl_mem_list);
 
-		result = kgsl_sharedmem_page_alloc_user(&priv->memdesc,
-					priv->pagetable,
-					obj->size * priv->bufcount);
+		priv->memdesc.pagetable = priv->pagetable;
 
-		if (result != 0) {
-				DRM_ERROR(
-				"Unable to allocate Vmalloc user memory\n");
-				return result;
+		priv->ion_handle = ion_alloc(kgsl_drm_ion_client,
+				obj->size * priv->bufcount, PAGE_SIZE,
+				ION_HEAP(ION_IOMMU_HEAP_ID), 0);
+		if (IS_ERR_OR_NULL(priv->ion_handle)) {
+			DRM_ERROR(
+				"Unable to allocate ION IOMMU memory handle\n");
+			return -ENOMEM;
 		}
+
+		sg_table = ion_sg_table(kgsl_drm_ion_client,
+				priv->ion_handle);
+		if (IS_ERR_OR_NULL(sg_table)) {
+			DRM_ERROR(
+			"Unable to get ION sg table\n");
+			goto memerr;
+		}
+
+		priv->memdesc.sg = sg_table->sgl;
+
+		/* Calculate the size of the memdesc from the sglist */
+
+		priv->memdesc.sglen = 0;
+
+		for (s = priv->memdesc.sg; s != NULL; s = sg_next(s)) {
+			priv->memdesc.size += s->length;
+			priv->memdesc.sglen++;
+		}
+
+		result = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
+				GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+		if (result) {
+			DRM_ERROR(
+			"kgsl_mmu_map failed.  result = %d\n", result);
+			goto memerr;
+		}
+
 	} else
 		return -EINVAL;
 
@@ -282,7 +278,15 @@
 	}
 	priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
 
 	return 0;
+
+memerr:
+	ion_free(kgsl_drm_ion_client,
+		priv->ion_handle);
+	priv->ion_handle = NULL;
+	return -ENOMEM;
 }
 
 static void
@@ -293,10 +297,19 @@
 	if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
 		return;
 
-	kgsl_gem_mem_flush(&priv->memdesc,  priv->type,
-			   DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
+	if (priv->memdesc.gpuaddr)
+		kgsl_mmu_unmap(priv->memdesc.pagetable, &priv->memdesc);
 
-	kgsl_sharedmem_free(&priv->memdesc);
+	/* ION will take care of freeing the sg table. */
+	priv->memdesc.sg = NULL;
+	priv->memdesc.sglen = 0;
+
+	if (priv->ion_handle)
+		ion_free(kgsl_drm_ion_client, priv->ion_handle);
+
+	priv->ion_handle = NULL;
+
+	memset(&priv->memdesc, 0, sizeof(priv->memdesc));
 
 	kgsl_mmu_putpagetable(priv->pagetable);
 	priv->pagetable = NULL;
@@ -329,66 +342,10 @@
 kgsl_gem_free_object(struct drm_gem_object *obj)
 {
 	kgsl_gem_free_memory(obj);
-	kgsl_gem_free_mmap_offset(obj);
 	drm_gem_object_release(obj);
 	kfree(obj->driver_private);
 }
 
-static int
-kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_kgsl_gem_object *priv = obj->driver_private;
-	struct drm_map_list *list;
-	int msize;
-
-	list = &obj->map_list;
-	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-	if (list->map == NULL) {
-		DRM_ERROR("Unable to allocate drm_map_list\n");
-		return -ENOMEM;
-	}
-
-	msize = obj->size * priv->bufcount;
-
-	list->map->type = _DRM_GEM;
-	list->map->size = msize;
-	list->map->handle = obj;
-
-	/* Allocate a mmap offset */
-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-						    msize / PAGE_SIZE,
-						    0, 0);
-
-	if (!list->file_offset_node) {
-		DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
-		kfree(list->map);
-		return -ENOMEM;
-	}
-
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-						  msize / PAGE_SIZE, 0);
-
-	if (!list->file_offset_node) {
-		DRM_ERROR("Unable to create the file_offset_node\n");
-		kfree(list->map);
-		return -ENOMEM;
-	}
-
-	list->hash.key = list->file_offset_node->start;
-	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
-		DRM_ERROR("Failed to add to map hash\n");
-		drm_mm_put_block(list->file_offset_node);
-		kfree(list->map);
-		return -ENOMEM;
-	}
-
-	priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-
-	return 0;
-}
-
 int
 kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
 			unsigned long *len)
@@ -435,9 +392,6 @@
 			priv->bufs[priv->active].offset;
 
 		*len = priv->memdesc.size;
-
-		kgsl_gem_mem_flush(&priv->memdesc,
-				   priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
 	} else {
 		*start = 0;
 		*len = 0;
@@ -468,10 +422,7 @@
 	priv->active = 0;
 	priv->bound = 0;
 
-	/* To preserve backwards compatability, the default memory source
-	   is EBI */
-
-	priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;
+	priv->type = DRM_KGSL_GEM_TYPE_KMEM;
 
 	ret = drm_gem_handle_create(file_priv, obj, handle);
 
@@ -513,8 +464,11 @@
 	}
 
 	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
-	if (ret)
+	if (ret) {
+		drm_gem_object_release(obj);
+		DRM_ERROR("Unable to initialize GEM object ret = %d\n", ret);
 		return ret;
+	}
 
 	create->handle = handle;
 	return 0;
@@ -587,6 +541,149 @@
 }
 
 int
+kgsl_gem_create_from_ion_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_create_from_ion *args = data;
+	struct drm_gem_object *obj;
+	struct ion_handle *ion_handle;
+	struct drm_kgsl_gem_object *priv;
+	struct sg_table *sg_table;
+	struct scatterlist *s;
+	int ret, handle;
+	unsigned long size;
+
+	ion_handle = ion_import_dma_buf(kgsl_drm_ion_client, args->ion_fd);
+	if (IS_ERR_OR_NULL(ion_handle)) {
+		DRM_ERROR("Unable to import dmabuf.  Error number = %d\n",
+			(int)PTR_ERR(ion_handle));
+		return -EINVAL;
+	}
+
+	ion_handle_get_size(kgsl_drm_ion_client, ion_handle, &size);
+
+	if (size == 0) {
+		ion_free(kgsl_drm_ion_client, ion_handle);
+		DRM_ERROR(
+		"cannot create GEM object from zero size ION buffer\n");
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_alloc(dev, size);
+
+	if (obj == NULL) {
+		ion_free(kgsl_drm_ion_client, ion_handle);
+		DRM_ERROR("Unable to allocate the GEM object\n");
+		return -ENOMEM;
+	}
+
+	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
+	if (ret) {
+		ion_free(kgsl_drm_ion_client, ion_handle);
+		drm_gem_object_release(obj);
+		DRM_ERROR("Unable to initialize GEM object ret = %d\n", ret);
+		return ret;
+	}
+
+	priv = obj->driver_private;
+	priv->ion_handle = ion_handle;
+
+	priv->type = DRM_KGSL_GEM_TYPE_KMEM;
+	list_add(&priv->list, &kgsl_mem_list);
+
+	priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+
+	priv->memdesc.pagetable = priv->pagetable;
+
+	sg_table = ion_sg_table(kgsl_drm_ion_client,
+		priv->ion_handle);
+	if (IS_ERR_OR_NULL(sg_table)) {
+		DRM_ERROR("Unable to get ION sg table\n");
+		ion_free(kgsl_drm_ion_client,
+			priv->ion_handle);
+		priv->ion_handle = NULL;
+		kgsl_mmu_putpagetable(priv->pagetable);
+		drm_gem_object_release(obj);
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	priv->memdesc.sg = sg_table->sgl;
+
+	/* Calculate the size of the memdesc from the sglist */
+
+	priv->memdesc.sglen = 0;
+
+	for (s = priv->memdesc.sg; s != NULL; s = sg_next(s)) {
+		priv->memdesc.size += s->length;
+		priv->memdesc.sglen++;
+	}
+
+	ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
+		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	if (ret) {
+		DRM_ERROR("kgsl_mmu_map failed.  ret = %d\n", ret);
+		ion_free(kgsl_drm_ion_client,
+			priv->ion_handle);
+		priv->ion_handle = NULL;
+		kgsl_mmu_putpagetable(priv->pagetable);
+		drm_gem_object_release(obj);
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	priv->bufs[0].offset = 0;
+	priv->bufs[0].gpuaddr = priv->memdesc.gpuaddr;
+	priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
+
+	args->handle = handle;
+	return 0;
+}
+
+int
+kgsl_gem_get_ion_fd_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_get_ion_fd *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = 0;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (TYPE_IS_FD(priv->type))
+		ret = -EINVAL;
+	else if (TYPE_IS_PMEM(priv->type) || TYPE_IS_MEM(priv->type)) {
+		if (priv->ion_handle) {
+			args->ion_fd = ion_share_dma_buf(
+				kgsl_drm_ion_client, priv->ion_handle);
+			if (args->ion_fd < 0) {
+				DRM_ERROR(
+				"Could not share ion buffer. Error = %d\n",
+					args->ion_fd);
+				ret = -EINVAL;
+			}
+		} else {
+			DRM_ERROR("GEM object has no ion memory allocated.\n");
+			ret = -EINVAL;
+		}
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
 kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
@@ -685,13 +782,9 @@
 
 	if (ret) {
 		DRM_ERROR("Unable to allocate object memory\n");
-	} else if (!priv->mmap_offset) {
-		ret = kgsl_gem_create_mmap_offset(obj);
-		if (ret)
-			DRM_ERROR("Unable to create a mmap offset\n");
 	}
 
-	args->offset = priv->mmap_offset;
+	args->offset = 0;
 
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -703,33 +796,7 @@
 kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-	struct drm_kgsl_gem_mmap *args = data;
-	struct drm_gem_object *obj;
-	unsigned long addr;
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-
-	if (obj == NULL) {
-		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
-		return -EBADF;
-	}
-
-	down_write(&current->mm->mmap_sem);
-
-	addr = do_mmap(obj->filp, 0, args->size,
-		       PROT_READ | PROT_WRITE, MAP_SHARED,
-		       args->offset);
-
-	up_write(&current->mm->mmap_sem);
-
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
-
-	if (IS_ERR((void *) addr))
-		return addr;
-
-	args->hostptr = (uint32_t) addr;
+	/* Ion is used for mmap at this time */
 	return 0;
 }
 
@@ -762,18 +829,6 @@
 		return ret;
 	}
 
-	if (priv->mmap_offset == 0) {
-		ret = kgsl_gem_create_mmap_offset(obj);
-		if (ret) {
-			drm_gem_object_unreference(obj);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-	}
-
-	args->offset = priv->mmap_offset;
-	args->phys = priv->memdesc.physaddr;
-
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -957,122 +1012,6 @@
 	}
 }
 
-static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
-	.fault = kgsl_gem_kmem_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
-static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
-	.fault = kgsl_gem_phys_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
-/* This is a clone of the standard drm_gem_mmap function modified to allow
-   us to properly map KMEM regions as well as the PMEM regions */
-
-int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_local_map *map = NULL;
-	struct drm_gem_object *obj;
-	struct drm_hash_item *hash;
-	struct drm_kgsl_gem_object *gpriv;
-	int ret = 0;
-
-	mutex_lock(&dev->struct_mutex);
-
-	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
-		mutex_unlock(&dev->struct_mutex);
-		return drm_mmap(filp, vma);
-	}
-
-	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
-	if (!map ||
-	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
-		ret =  -EPERM;
-		goto out_unlock;
-	}
-
-	/* Check for valid size. */
-	if (map->size < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	obj = map->handle;
-
-	gpriv = obj->driver_private;
-
-	/* VM_PFNMAP is only for memory that doesn't use struct page
-	 * in other words, not "normal" memory.  If you try to use it
-	 * with "normal" memory then the mappings don't get flushed. */
-
-	if (TYPE_IS_MEM(gpriv->type)) {
-		vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
-		vma->vm_ops = &kgsl_gem_kmem_vm_ops;
-	} else {
-		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
-			VM_DONTEXPAND;
-		vma->vm_ops = &kgsl_gem_phys_vm_ops;
-	}
-
-	vma->vm_private_data = map->handle;
-
-
-	/* Take care of requested caching policy */
-	if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
-	    gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
-		if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
-			vma->vm_page_prot =
-			pgprot_writebackwacache(vma->vm_page_prot);
-		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
-				vma->vm_page_prot =
-				pgprot_writebackcache(vma->vm_page_prot);
-		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
-				vma->vm_page_prot =
-				pgprot_writethroughcache(vma->vm_page_prot);
-		else
-			vma->vm_page_prot =
-			pgprot_writecombine(vma->vm_page_prot);
-	} else {
-		if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
-			vma->vm_page_prot =
-			pgprot_noncached(vma->vm_page_prot);
-		else
-			/* default pmem is WC */
-			vma->vm_page_prot =
-			pgprot_writecombine(vma->vm_page_prot);
-	}
-
-	/* flush out existing KMEM cached mappings if new ones are
-	 * of uncached type */
-	if (IS_MEM_UNCACHED(gpriv->type))
-		kgsl_cache_range_op(&gpriv->memdesc,
-				    KGSL_CACHE_OP_FLUSH);
-
-	/* Add the other memory types here */
-
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	drm_gem_object_reference(obj);
-
-	vma->vm_file = filp;	/* Needed for drm_vm_open() */
-	drm_vm_open_locked(vma);
-
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
 void
 cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
 {
@@ -1434,6 +1373,9 @@
 	DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
 	DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
 	DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_ION_FD, kgsl_gem_get_ion_fd_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FROM_ION,
+		kgsl_gem_create_from_ion_ioctl, 0),
 	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
 		      kgsl_gem_set_bufcount_ioctl, 0),
 	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
@@ -1447,28 +1389,22 @@
 		      DRM_MASTER),
 };
 
+static const struct file_operations kgsl_drm_driver_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_open,
+	.release = drm_release,
+	.unlocked_ioctl = drm_ioctl,
+	.mmap = drm_gem_mmap,
+	.poll = drm_poll,
+	.fasync = drm_fasync,
+};
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_GEM,
-	.load = kgsl_drm_load,
-	.unload = kgsl_drm_unload,
-	.preclose = kgsl_drm_preclose,
-	.suspend = kgsl_drm_suspend,
-	.resume = kgsl_drm_resume,
-	.reclaim_buffers = drm_core_reclaim_buffers,
 	.gem_init_object = kgsl_gem_init_object,
 	.gem_free_object = kgsl_gem_free_object,
 	.ioctls = kgsl_drm_ioctls,
-
-	.fops = {
-		 .owner = THIS_MODULE,
-		 .open = drm_open,
-		 .release = drm_release,
-		 .unlocked_ioctl = drm_ioctl,
-		 .mmap = msm_drm_gem_mmap,
-		 .poll = drm_poll,
-		 .fasync = drm_fasync,
-		 },
-
+	.fops = &kgsl_drm_driver_fops,
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 	.date = DRIVER_DATE,
@@ -1497,11 +1433,24 @@
 		gem_buf_fence[i].fence_id = ENTRY_EMPTY;
 	}
 
+	/* Create ION Client */
+	kgsl_drm_ion_client = msm_ion_client_create(
+			0xffffffff, "kgsl_drm");
+	if (!kgsl_drm_ion_client) {
+		DRM_ERROR("Unable to create ION client\n");
+		return -ENOMEM;
+	}
+
 	return drm_platform_init(&driver, dev);
 }
 
 void kgsl_drm_exit(void)
 {
 	kgsl_drm_inited = DRM_KGSL_NOT_INITED;
+
+	if (kgsl_drm_ion_client)
+		ion_client_destroy(kgsl_drm_ion_client);
+	kgsl_drm_ion_client = NULL;
+
 	drm_platform_exit(&driver, driver.kdriver.platform_device);
 }
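
Taken together, the two new ioctls round-trip buffers between ION and GEM: KGSL_GEM_CREATE_FROM_ION wraps an existing dma-buf fd in a GEM handle and maps it into the global pagetable, while KGSL_GEM_GET_ION_FD exports a handle's backing ION allocation as a shareable fd. A rough userspace sketch; the DRM_IOCTL_KGSL_GEM_* macro names are assumed from the DRM_IOCTL_DEF_DRV() table above rather than quoted from a header, and only the fields the handlers touch are shown:

	struct drm_kgsl_gem_create_from_ion imp = { .ion_fd = ion_fd };
	if (ioctl(drm_fd, DRM_IOCTL_KGSL_GEM_CREATE_FROM_ION, &imp) == 0)
		handle = imp.handle;	/* GEM handle wrapping the buffer */

	struct drm_kgsl_gem_get_ion_fd exp = { .handle = handle };
	if (ioctl(drm_fd, DRM_IOCTL_KGSL_GEM_GET_ION_FD, &exp) == 0)
		shared_fd = exp.ion_fd;	/* dma-buf fd for other consumers */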
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index 6798eed..9e9c0da 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -149,6 +149,7 @@
 		 * Send the current timestamp so the event knows how far the
 		 * system got before the event was canceled
 		 */
+		list_del(&event->list);
 
 		trace_kgsl_fire_event(id, cur, jiffies - event->created);
 
@@ -156,7 +157,6 @@
 			event->func(device, event->priv, id, cur);
 
 		kgsl_context_put(context);
-		list_del(&event->list);
 		kfree(event);
 
 		kgsl_active_count_put(device);
@@ -192,6 +192,7 @@
 		 * the callback knows how far the GPU made it before things
 		 * exploded
 		 */
+		list_del(&event->list);
 
 		trace_kgsl_fire_event(KGSL_MEMSTORE_GLOBAL, cur,
 			jiffies - event->created);
@@ -202,8 +203,6 @@
 
 		if (event->context)
 			kgsl_context_put(event->context);
-
-		list_del(&event->list);
 		kfree(event);
 
 		kgsl_active_count_put(device);
@@ -229,6 +228,7 @@
 		 * confused if they don't bother comparing the current timestamp
 		 * to the timestamp they wanted
 		 */
+		list_del(&event->list);
 
 		trace_kgsl_fire_event(id, event->timestamp,
 			jiffies - event->created);
@@ -238,8 +238,6 @@
 
 		if (event->context)
 			kgsl_context_put(event->context);
-
-		list_del(&event->list);
 		kfree(event);
 
 		kgsl_active_count_put(device);
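
All three hunks in this file make the same ordering change: an event is now unlinked from the list before its callback runs rather than after. With the old order, a callback that touched the pending list could observe a node that was about to be freed; unlinking first keeps the list consistent while foreign code runs and lets callbacks safely add or cancel events. The resulting pattern, sketched with illustrative names:

	list_for_each_entry_safe(event, tmp, &events, list) {
		list_del(&event->list);		/* unlink before calling out */
		if (event->func)
			event->func(device, event->priv, id, ts);
		kfree(event);			/* node is already off the list */
	}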
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 8f28505..5cc0dff 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,9 +19,11 @@
 
 #include "kgsl.h"
 #include "kgsl_mmu.h"
+#include "kgsl_gpummu.h"
 #include "kgsl_device.h"
 #include "kgsl_sharedmem.h"
 #include "kgsl_trace.h"
+#include "adreno.h"
 
 #define KGSL_PAGETABLE_SIZE \
 	ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
@@ -161,7 +163,7 @@
 }
 
 static void *
-_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
+_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, phys_addr_t *physaddr)
 {
 	struct kgsl_ptpool_chunk *chunk;
 
@@ -227,7 +229,7 @@
  */
 
 static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
-				unsigned int *physaddr)
+				phys_addr_t *physaddr)
 {
 	void *addr = NULL;
 	int ret;
@@ -363,10 +365,9 @@
 	return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
 }
 
-void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
+void kgsl_gpummu_destroy_pagetable(struct kgsl_pagetable *pt)
 {
-	struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
-						mmu_specific_pt;
+	struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
 	kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
 				gpummu_pt->base.hostptr);
 
@@ -403,11 +404,22 @@
 {
 	unsigned int reg;
 	unsigned int ptbase;
+	struct kgsl_device *device;
+	struct adreno_device *adreno_dev;
+	unsigned int no_page_fault_log = 0;
 
-	kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, &reg);
-	kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
+	device = mmu->device;
+	adreno_dev = ADRENO_DEVICE(device);
 
-	KGSL_MEM_CRIT(mmu->device,
+	kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
+	kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
+
+	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
+		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, reg);
+
+	if (!no_page_fault_log)
+		KGSL_MEM_CRIT(mmu->device,
 			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
 			reg & ~(PAGE_SIZE - 1),
 			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
@@ -516,6 +528,11 @@
 	 */
 	int status = 0;
 
+	mmu->pt_base = KGSL_PAGETABLE_BASE;
+	mmu->pt_size = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
+	mmu->pt_per_process = KGSL_MMU_USE_PER_PROCESS_PT;
+	mmu->use_cpu_map = false;
+
 	/* sub-client MMU lookups require address translation */
 	if ((mmu->config & ~0x1) > 0) {
 		/*make sure virtual address range is a multiple of 64Kb */
@@ -572,7 +589,7 @@
 
 	if (mmu->defaultpagetable == NULL)
 		mmu->defaultpagetable =
-			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+			kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);
 
 	/* Return error if the default pagetable doesn't exist */
 	if (mmu->defaultpagetable == NULL)
@@ -592,14 +609,14 @@
 }
 
 static int
-kgsl_gpummu_unmap(void *mmu_specific_pt,
+kgsl_gpummu_unmap(struct kgsl_pagetable *pt,
 		struct kgsl_memdesc *memdesc,
 		unsigned int *tlb_flags)
 {
 	unsigned int numpages;
 	unsigned int pte, ptefirst, ptelast, superpte;
 	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
-	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
+	struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
 
 	/* All GPU addresses as assigned are page aligned, but some
 	   functions perturb the gpuaddr with an offset, so apply the
@@ -641,13 +658,13 @@
 GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
 
 static int
-kgsl_gpummu_map(void *mmu_specific_pt,
+kgsl_gpummu_map(struct kgsl_pagetable *pt,
 		struct kgsl_memdesc *memdesc,
 		unsigned int protflags,
 		unsigned int *tlb_flags)
 {
 	unsigned int pte;
-	struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
+	struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
 	struct scatterlist *s;
 	int flushtlb = 0;
 	int i;
diff --git a/drivers/gpu/msm/kgsl_gpummu.h b/drivers/gpu/msm/kgsl_gpummu.h
index 99e7d5f..1753aff 100644
--- a/drivers/gpu/msm/kgsl_gpummu.h
+++ b/drivers/gpu/msm/kgsl_gpummu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -57,7 +57,7 @@
 	int dynamic;
 
 	void *data;
-	unsigned int phys;
+	phys_addr_t phys;
 
 	unsigned long *bitmap;
 	struct list_head list;
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index f2393e4..739fcff 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
  *
  */
 #include <linux/types.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
 #include <linux/genalloc.h>
@@ -20,6 +21,7 @@
 #include <mach/socinfo.h>
 #include <mach/msm_iomap.h>
 #include <mach/board.h>
+#include <mach/iommu_domains.h>
 #include <stddef.h>
 
 #include "kgsl.h"
@@ -31,24 +33,33 @@
 #include "adreno.h"
 #include "kgsl_trace.h"
 #include "z180.h"
+#include "kgsl_cffdump.h"
 
 
-static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = {
+static struct kgsl_iommu_register_list kgsl_iommuv0_reg[KGSL_IOMMU_REG_MAX] = {
 	{ 0, 0, 0 },				/* GLOBAL_BASE */
 	{ 0x10, 0x0003FFFF, 14 },		/* TTBR0 */
 	{ 0x14, 0x0003FFFF, 14 },		/* TTBR1 */
 	{ 0x20, 0, 0 },				/* FSR */
 	{ 0x800, 0, 0 },			/* TLBIALL */
 	{ 0x820, 0, 0 },			/* RESUME */
+	{ 0x03C, 0, 0 },			/* TLBLKCR */
+	{ 0x818, 0, 0 },			/* V2PUR */
+	{ 0x2C, 0, 0 },				/* FSYNR0 */
+	{ 0x2C, 0, 0 },				/* FSYNR1 */
 };
 
-static struct kgsl_iommu_register_list kgsl_iommuv2_reg[KGSL_IOMMU_REG_MAX] = {
+static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = {
 	{ 0, 0, 0 },				/* GLOBAL_BASE */
 	{ 0x20, 0x00FFFFFF, 14 },		/* TTBR0 */
 	{ 0x28, 0x00FFFFFF, 14 },		/* TTBR1 */
 	{ 0x58, 0, 0 },				/* FSR */
 	{ 0x618, 0, 0 },			/* TLBIALL */
-	{ 0x008, 0, 0 }				/* RESUME */
+	{ 0x008, 0, 0 },			/* RESUME */
+	{ 0, 0, 0 },				/* TLBLKCR */
+	{ 0, 0, 0 },				/* V2PUR */
+	{ 0x68, 0, 0 },				/* FSYNR0 */
+	{ 0x6C, 0, 0 }				/* FSYNR1 */
 };
 
 struct remote_iommu_petersons_spinlock kgsl_iommu_sync_lock_vars;
@@ -100,8 +111,172 @@
 	return NULL;
 }
 
+/*
+ * These functions find the nearest allocated memory entries on either side
+ * of a faulting address. Knowing the nearby allocations gives a better
+ * idea of what should have been located in the faulting region.
+ */
+
+/*
+ * A local structure to make it easy to store the interesting bits for the
+ * memory entries on either side of the faulting address
+ */
+
+struct _mem_entry {
+	unsigned int gpuaddr;
+	unsigned int size;
+	unsigned int flags;
+	unsigned int priv;
+	pid_t pid;
+};
+
+/*
+ * Find the closest allocated memory block with a smaller GPU address than
+ * the given address
+ */
+
+static void _prev_entry(struct kgsl_process_private *priv,
+	unsigned int faultaddr, struct _mem_entry *ret)
+{
+	struct rb_node *node;
+	struct kgsl_mem_entry *entry;
+
+	for (node = rb_first(&priv->mem_rb); node; ) {
+		entry = rb_entry(node, struct kgsl_mem_entry, node);
+
+		if (entry->memdesc.gpuaddr > faultaddr)
+			break;
+
+		/*
+		 * If this is closer to the faulting address, then copy
+		 * the entry
+		 */
+
+		if (entry->memdesc.gpuaddr > ret->gpuaddr) {
+			ret->gpuaddr = entry->memdesc.gpuaddr;
+			ret->size = entry->memdesc.size;
+			ret->flags = entry->memdesc.flags;
+			ret->priv = entry->memdesc.priv;
+			ret->pid = priv->pid;
+		}
+
+		node = rb_next(&entry->node);
+	}
+}
+
+/*
+ * Find the closest allocated memory block with a greater starting GPU address
+ * than the given address
+ */
+
+static void _next_entry(struct kgsl_process_private *priv,
+	unsigned int faultaddr, struct _mem_entry *ret)
+{
+	struct rb_node *node;
+	struct kgsl_mem_entry *entry;
+
+	for (node = rb_last(&priv->mem_rb); node; ) {
+		entry = rb_entry(node, struct kgsl_mem_entry, node);
+
+		if (entry->memdesc.gpuaddr < faultaddr)
+			break;
+
+		/*
+		 * If this is closer to the faulting address, then copy
+		 * the entry
+		 */
+
+		if (entry->memdesc.gpuaddr < ret->gpuaddr) {
+			ret->gpuaddr = entry->memdesc.gpuaddr;
+			ret->size = entry->memdesc.size;
+			ret->flags = entry->memdesc.flags;
+			ret->priv = entry->memdesc.priv;
+			ret->pid = priv->pid;
+		}
+
+		node = rb_prev(&entry->node);
+	}
+}
+
+static void _find_mem_entries(struct kgsl_mmu *mmu, unsigned int faultaddr,
+	unsigned int ptbase, struct _mem_entry *preventry,
+	struct _mem_entry *nextentry)
+{
+	struct kgsl_process_private *private;
+	int id = kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase);
+
+	memset(preventry, 0, sizeof(*preventry));
+	memset(nextentry, 0, sizeof(*nextentry));
+
+	/* Set the maximum possible size as an initial value */
+	nextentry->gpuaddr = 0xFFFFFFFF;
+
+	mutex_lock(&kgsl_driver.process_mutex);
+
+	list_for_each_entry(private, &kgsl_driver.process_list, list) {
+
+		if (private->pagetable->name != id)
+			continue;
+
+		spin_lock(&private->mem_lock);
+		_prev_entry(private, faultaddr, preventry);
+		_next_entry(private, faultaddr, nextentry);
+		spin_unlock(&private->mem_lock);
+	}
+
+	mutex_unlock(&kgsl_driver.process_mutex);
+}
+
+static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry)
+{
+	char name[32];
+	memset(name, 0, sizeof(name));
+
+	kgsl_get_memory_usage(name, sizeof(name) - 1, entry->flags);
+
+	KGSL_LOG_DUMP(device,
+		"[%8.8X - %8.8X] %s (pid = %d) (%s)\n",
+		entry->gpuaddr,
+		entry->gpuaddr + entry->size,
+		entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "",
+		entry->pid, name);
+}
+
+static void _check_if_freed(struct kgsl_iommu_device *iommu_dev,
+	unsigned long addr, unsigned int pid)
+{
+	void *base = kgsl_driver.memfree_hist.base_hist_rb;
+	struct kgsl_memfree_hist_elem *wptr;
+	struct kgsl_memfree_hist_elem *p;
+
+	mutex_lock(&kgsl_driver.memfree_hist_mutex);
+	wptr = kgsl_driver.memfree_hist.wptr;
+	p = wptr;
+	for (;;) {
+		if (p->size && p->pid == pid)
+			if (addr >= p->gpuaddr &&
+				addr < (p->gpuaddr + p->size)) {
+
+				KGSL_LOG_DUMP(iommu_dev->kgsldev,
+					"---- premature free ----\n");
+				KGSL_LOG_DUMP(iommu_dev->kgsldev,
+					"[%8.8X-%8.8X] was already freed by pid %d\n",
+					p->gpuaddr,
+					p->gpuaddr + p->size,
+					p->pid);
+			}
+		p++;
+		if ((void *)p >= base + kgsl_driver.memfree_hist.size)
+			p = (struct kgsl_memfree_hist_elem *) base;
+
+		if (p == kgsl_driver.memfree_hist.wptr)
+			break;
+	}
+	mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+}
+
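
_check_if_freed() walks the memfree history ring starting at the write pointer, which is the next slot to be overwritten and therefore (once the ring has wrapped) the oldest record, so every slot is visited exactly once, oldest first. The same traversal as a self-contained sketch, with illustrative types and an inspect() placeholder:

	struct rec { unsigned int gpuaddr, size, pid; };

	static void walk_ring(struct rec *base, int nrecs, struct rec *wptr)
	{
		struct rec *p = wptr;

		do {
			if (p->size)		/* slot has been written */
				inspect(p);	/* placeholder consumer */
			if (++p == base + nrecs)
				p = base;	/* wrap around */
		} while (p != wptr);
	}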
 static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
-	struct device *dev, unsigned long addr, int flags)
+	struct device *dev, unsigned long addr, int flags, void *token)
 {
 	int ret = 0;
 	struct kgsl_mmu *mmu;
@@ -109,9 +284,17 @@
 	struct kgsl_iommu_unit *iommu_unit;
 	struct kgsl_iommu_device *iommu_dev;
 	unsigned int ptbase, fsr;
+	unsigned int pid;
+	struct _mem_entry prev, next;
+	unsigned int fsynr0, fsynr1;
+	int write;
 	struct kgsl_device *device;
 	struct adreno_device *adreno_dev;
 	unsigned int no_page_fault_log = 0;
+	unsigned int curr_context_id = 0;
+	unsigned int curr_global_ts = 0;
+	struct adreno_context *curr_context = NULL;
+	struct kgsl_context *context;
 
 	ret = get_iommu_unit(dev, &mmu, &iommu_unit);
 	if (ret)
@@ -131,6 +314,25 @@
 
 	fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
 		iommu_dev->ctx_id, FSR);
+	fsynr0 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+		iommu_dev->ctx_id, FSYNR0);
+	fsynr1 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+		iommu_dev->ctx_id, FSYNR1);
+
+	if (msm_soc_version_supports_iommu_v0())
+		write = ((fsynr1 & (KGSL_IOMMU_FSYNR1_AWRITE_MASK <<
+			KGSL_IOMMU_FSYNR1_AWRITE_SHIFT)) ? 1 : 0);
+	else
+		write = ((fsynr0 & (KGSL_IOMMU_V1_FSYNR0_WNR_MASK <<
+			KGSL_IOMMU_V1_FSYNR0_WNR_SHIFT)) ? 1 : 0);
+
+	pid = kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase);
+	KGSL_MEM_CRIT(iommu_dev->kgsldev,
+		"GPU PAGE FAULT: addr = %lX pid = %d\n", addr, pid);
+	KGSL_MEM_CRIT(iommu_dev->kgsldev,
+		"context = %d FSR = %X FSYNR0 = %X FSYNR1 = %X (%s fault)\n",
+		iommu_dev->ctx_id, fsr, fsynr0, fsynr1,
+		write ? "write" : "read");
 
 	if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
 		no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
@@ -141,13 +343,49 @@
 			addr, kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase));
 		KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
 			iommu_dev->ctx_id, fsr);
+
+		_check_if_freed(iommu_dev, addr, pid);
+
+		KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- nearby memory ----\n");
+
+		_find_mem_entries(mmu, addr, ptbase, &prev, &next);
+
+		if (prev.gpuaddr)
+			_print_entry(iommu_dev->kgsldev, &prev);
+		else
+			KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+
+		KGSL_LOG_DUMP(iommu_dev->kgsldev, " <- fault @ %8.8lX\n", addr);
+
+		if (next.gpuaddr != 0xFFFFFFFF)
+			_print_entry(iommu_dev->kgsldev, &next);
+		else
+			KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+
 	}
 
 	mmu->fault = 1;
 	iommu_dev->fault = 1;
 
+	kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
+		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
+	context = idr_find(&device->context_idr, curr_context_id);
+	if (context != NULL)
+		curr_context = context->devctxt;
+
+	kgsl_sharedmem_readl(&device->memstore, &curr_global_ts,
+		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, eoptimestamp));
+
+	/*
+	 * Store pagefault's timestamp in adreno context,
+	 * this information will be used in GFT
+	 */
+	if (curr_context) {
+		curr_context->pagefault = 1;
+		curr_context->pagefault_ts = curr_global_ts;
+	}
+
 	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
-			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase), 0);
+			kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
+			write ? "write" : "read");
 
 	/*
 	 * We do not want the h/w to resume fetching data from an iommu unit
@@ -367,9 +605,9 @@
  *
  * Return - void
  */
-static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
+static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
 {
-	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 	if (iommu_pt->domain)
 		iommu_domain_free(iommu_pt->domain);
 	kfree(iommu_pt);
@@ -384,28 +622,39 @@
  */
 void *kgsl_iommu_create_pagetable(void)
 {
+	int domain_num;
 	struct kgsl_iommu_pt *iommu_pt;
 
+	struct msm_iova_partition kgsl_partition = {
+		.start = 0,
+		.size = 0xFFFFFFFF,
+	};
+	struct msm_iova_layout kgsl_layout = {
+		.partitions = &kgsl_partition,
+		.npartitions = 1,
+		.client_name = "kgsl",
+		.domain_flags = 0,
+	};
+
 	iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
 	if (!iommu_pt) {
 		KGSL_CORE_ERR("kzalloc(%d) failed\n",
 				sizeof(struct kgsl_iommu_pt));
 		return NULL;
 	}
-	/* L2 redirect is not stable on IOMMU v2 */
-	if (msm_soc_version_supports_iommu_v1())
-		iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
-					MSM_IOMMU_DOMAIN_PT_CACHEABLE);
-	else
-		iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
-					0);
-	if (!iommu_pt->domain) {
+	/* L2 redirect is not stable on IOMMU v1 */
+	if (msm_soc_version_supports_iommu_v0())
+		kgsl_layout.domain_flags = MSM_IOMMU_DOMAIN_PT_CACHEABLE;
+
+	domain_num = msm_register_domain(&kgsl_layout);
+	if (domain_num >= 0) {
+		iommu_pt->domain = msm_get_iommu_domain(domain_num);
+		iommu_set_fault_handler(iommu_pt->domain,
+			kgsl_iommu_fault_handler, NULL);
+	} else {
 		KGSL_CORE_ERR("Failed to create iommu domain\n");
 		kfree(iommu_pt);
 		return NULL;
-	} else {
-		iommu_set_fault_handler(iommu_pt->domain,
-			kgsl_iommu_fault_handler);
 	}
 
 	return iommu_pt;
@@ -523,17 +772,23 @@
 {
 	struct kgsl_iommu *iommu = mmu->priv;
 	struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
-	int i;
+	int i, j;
+	int found_ctx;
 
-	if (data->iommu_ctx_count > KGSL_IOMMU_MAX_DEVS_PER_UNIT) {
-		KGSL_CORE_ERR("Too many iommu devices defined for an "
-				"IOMMU unit\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < data->iommu_ctx_count; i++) {
-		if (!data->iommu_ctxs[i].iommu_ctx_name)
-			continue;
+	for (j = 0; j < KGSL_IOMMU_MAX_DEVS_PER_UNIT; j++) {
+		found_ctx = 0;
+		for (i = 0; i < data->iommu_ctx_count; i++) {
+			if (j == data->iommu_ctxs[i].ctx_id) {
+				found_ctx = 1;
+				break;
+			}
+		}
+		if (!found_ctx)
+			break;
+		if (!data->iommu_ctxs[i].iommu_ctx_name) {
+			KGSL_CORE_ERR("Context name invalid\n");
+			return -EINVAL;
+		}
 
 		iommu_unit->dev[iommu_unit->dev_count].dev =
 			msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
@@ -542,12 +797,6 @@
 			"device %s\n", data->iommu_ctxs[i].iommu_ctx_name);
 			return -EINVAL;
 		}
-		if (KGSL_IOMMU_CONTEXT_USER != data->iommu_ctxs[i].ctx_id &&
-			KGSL_IOMMU_CONTEXT_PRIV != data->iommu_ctxs[i].ctx_id) {
-			KGSL_CORE_ERR("Invalid context ID defined: %d\n",
-					data->iommu_ctxs[i].ctx_id);
-			return -EINVAL;
-		}
 		iommu_unit->dev[iommu_unit->dev_count].ctx_id =
 						data->iommu_ctxs[i].ctx_id;
 		iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;
@@ -559,6 +808,51 @@
 
 		iommu_unit->dev_count++;
 	}
+	if (!j) {
+		KGSL_CORE_ERR("No contexts initialized, user context absent\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * kgsl_iommu_start_sync_lock - Initialize some variables during MMU start up
+ * for GPU CPU synchronization
+ * @mmu - Pointer to mmu device
+ *
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_start_sync_lock(struct kgsl_mmu *mmu)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	uint32_t lock_gpu_addr = 0;
+
+	if (KGSL_DEVICE_3D0 != mmu->device->id ||
+		!msm_soc_version_supports_iommu_v0() ||
+		!kgsl_mmu_is_perprocess(mmu) ||
+		iommu->sync_lock_vars)
+		return 0;
+
+	if (!(mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC)) {
+		KGSL_DRV_ERR(mmu->device,
+		"The GPU microcode does not support IOMMUv0 sync opcodes\n");
+		return -ENXIO;
+	}
+	/* Store Lock variables GPU address  */
+	lock_gpu_addr = (iommu->sync_lock_desc.gpuaddr +
+			iommu->sync_lock_offset);
+
+	kgsl_iommu_sync_lock_vars.flag[PROC_APPS] = (lock_gpu_addr +
+		(offsetof(struct remote_iommu_petersons_spinlock,
+			flag[PROC_APPS])));
+	kgsl_iommu_sync_lock_vars.flag[PROC_GPU] = (lock_gpu_addr +
+		(offsetof(struct remote_iommu_petersons_spinlock,
+			flag[PROC_GPU])));
+	kgsl_iommu_sync_lock_vars.turn = (lock_gpu_addr +
+		(offsetof(struct remote_iommu_petersons_spinlock, turn)));
+
+	iommu->sync_lock_vars = &kgsl_iommu_sync_lock_vars;
 
 	return 0;
 }
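
The flag[PROC_APPS]/flag[PROC_GPU]/turn variables whose GPU addresses are computed above implement classic Peterson's mutual exclusion between the apps CPU and the GPU. For reference, the protocol in plain C; this is a sketch of the textbook algorithm, not the driver's CPU- or GPU-side code:

	volatile int flag[2], turn;	/* shared memory both sides can see */

	void lock(int me)		/* me: PROC_APPS or PROC_GPU */
	{
		int other = 1 - me;

		flag[me] = 1;		/* announce intent */
		turn = other;		/* offer the peer first go */
		while (flag[other] && turn == other)
			;		/* spin while the peer contends */
	}

	void unlock(int me)
	{
		flag[me] = 0;
	}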
@@ -571,21 +865,30 @@
  */
 static int kgsl_iommu_init_sync_lock(struct kgsl_mmu *mmu)
 {
-	struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+	struct kgsl_iommu *iommu = mmu->priv;
 	int status = 0;
-	struct kgsl_pagetable *pagetable = NULL;
-	uint32_t lock_gpu_addr = 0;
 	uint32_t lock_phy_addr = 0;
 	uint32_t page_offset = 0;
 
-	iommu->sync_lock_initialized = 0;
+	if (!msm_soc_version_supports_iommu_v0() ||
+		!kgsl_mmu_is_perprocess(mmu))
+		return status;
 
-	if (!(mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC)) {
-		KGSL_DRV_ERR(mmu->device,
-		"The GPU microcode does not support IOMMUv1 sync opcodes\n");
-		return -ENXIO;
+	/*
+	 * For 2D devices the CPU-side sync lock is required. For the 3D
+	 * device, since we only have a single 3D core and we always ensure
+	 * that the core is idle while writing IOMMU registers from the CPU,
+	 * this lock is not required.
+	 */
+	if (KGSL_DEVICE_2D0 == mmu->device->id ||
+		KGSL_DEVICE_2D1 == mmu->device->id) {
+		return status;
 	}
 
+	/* Return if already initialized */
+	if (iommu->sync_lock_initialized)
+		return status;
+
 	/* Get the physical address of the Lock variables */
 	lock_phy_addr = (msm_iommu_lock_initialize()
 			- MSM_SHARED_RAM_BASE + msm_shared_ram_phys);
@@ -600,6 +903,7 @@
 	page_offset = (lock_phy_addr & (PAGE_SIZE - 1));
 	lock_phy_addr = (lock_phy_addr & ~(PAGE_SIZE - 1));
 	iommu->sync_lock_desc.physaddr = (unsigned int)lock_phy_addr;
+	iommu->sync_lock_offset = page_offset;
 
 	iommu->sync_lock_desc.size =
 				PAGE_ALIGN(sizeof(kgsl_iommu_sync_lock_vars));
@@ -610,35 +914,6 @@
 	if (status)
 		return status;
 
-	/* Map Lock variables to GPU pagetable */
-	iommu->sync_lock_desc.priv |= KGSL_MEMDESC_GLOBAL;
-
-	pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
-				mmu->defaultpagetable;
-
-	status = kgsl_mmu_map(pagetable, &iommu->sync_lock_desc,
-				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
-
-	if (status) {
-		kgsl_mmu_unmap(pagetable, &iommu->sync_lock_desc);
-		iommu->sync_lock_desc.priv &= ~KGSL_MEMDESC_GLOBAL;
-		return status;
-	}
-
-	/* Store Lock variables GPU address  */
-	lock_gpu_addr = (iommu->sync_lock_desc.gpuaddr + page_offset);
-
-	kgsl_iommu_sync_lock_vars.flag[PROC_APPS] = (lock_gpu_addr +
-		(offsetof(struct remote_iommu_petersons_spinlock,
-			flag[PROC_APPS])));
-	kgsl_iommu_sync_lock_vars.flag[PROC_GPU] = (lock_gpu_addr +
-		(offsetof(struct remote_iommu_petersons_spinlock,
-			flag[PROC_GPU])));
-	kgsl_iommu_sync_lock_vars.turn = (lock_gpu_addr +
-		(offsetof(struct remote_iommu_petersons_spinlock, turn)));
-
-	iommu->sync_lock_vars = &kgsl_iommu_sync_lock_vars;
-
 	/* Flag Sync Lock is Initialized  */
 	iommu->sync_lock_initialized = 1;
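
Note how the physical address of the lock variables is split: the low bits become sync_lock_offset and the page-aligned remainder becomes the mapped base, with kgsl_iommu_start_sync_lock() later re-adding the offset to form the GPU-visible addresses. Worked through with 4KB pages and a made-up address:

	/*
	 *   addr        = 0x0FE01A40              (example value only)
	 *   page_offset = addr & (PAGE_SIZE - 1)  = 0xA40
	 *   base        = addr & ~(PAGE_SIZE - 1) = 0x0FE01000
	 *   gpu address = gpuaddr_of(base) + page_offset
	 */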
 
@@ -897,6 +1172,75 @@
 	}
 }
 
+/*
+ * kgsl_iommu_setup_regs - map iommu registers into a pagetable
+ * @mmu: Pointer to mmu structure
+ * @pt: the pagetable
+ *
+ * To do pagetable switches from the GPU command stream, the IOMMU
+ * registers need to be mapped into the GPU's pagetable. This function
+ * is used differently on different targets. On 8960, the registers
+ * are mapped into every pagetable during kgsl_setup_pt(). On
+ * all other targets, the registers are mapped only into the second
+ * context bank.
+ *
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_setup_regs(struct kgsl_mmu *mmu,
+				    struct kgsl_pagetable *pt)
+{
+	int status;
+	int i = 0;
+	struct kgsl_iommu *iommu = mmu->priv;
+
+	if (!msm_soc_version_supports_iommu_v0())
+		return 0;
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		status = kgsl_mmu_map_global(pt,
+				&(iommu->iommu_units[i].reg_map));
+		if (status)
+			goto err;
+	}
+
+	/* Map Lock variables to GPU pagetable */
+	if (iommu->sync_lock_initialized) {
+		status = kgsl_mmu_map_global(pt, &iommu->sync_lock_desc);
+		if (status)
+			goto err;
+	}
+
+	return 0;
+err:
+	for (i--; i >= 0; i--)
+		kgsl_mmu_unmap(pt,
+				&(iommu->iommu_units[i].reg_map));
+
+	return status;
+}
+
+/*
+ * kgsl_iommu_cleanup_regs - unmap iommu registers from a pagetable
+ * @mmu: Pointer to mmu structure
+ * @pt: the pagetable
+ *
+ * Removes mappings created by kgsl_iommu_setup_regs().
+ *
+ * Return - 0 on success else error code
+ */
+static void kgsl_iommu_cleanup_regs(struct kgsl_mmu *mmu,
+					struct kgsl_pagetable *pt)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	int i;
+	for (i = 0; i < iommu->unit_count; i++)
+		kgsl_mmu_unmap(pt, &(iommu->iommu_units[i].reg_map));
+
+	if (iommu->sync_lock_desc.gpuaddr)
+		kgsl_mmu_unmap(pt, &iommu->sync_lock_desc);
+}
+
+
 static int kgsl_iommu_init(struct kgsl_mmu *mmu)
 {
 	/*
@@ -921,16 +1265,45 @@
 	status = kgsl_set_register_map(mmu);
 	if (status)
 		goto done;
+	status = kgsl_iommu_init_sync_lock(mmu);
+	if (status)
+		goto done;
 
-	iommu->iommu_reg_list = kgsl_iommuv1_reg;
-	iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V1;
+	/* We presently do not support per-process for IOMMU-v1 */
+	mmu->pt_per_process = KGSL_MMU_USE_PER_PROCESS_PT &&
+				msm_soc_version_supports_iommu_v0();
 
-	if (msm_soc_version_supports_iommu_v1()) {
+	/*
+	 * For IOMMU per-process pagetables, the allocatable range
+	 * and the kernel global range must both be outside
+	 * the userspace address range. There is a 1MB gap
+	 * between these address ranges to make overrun
+	 * detection easier.
+	 * For the shared pagetable case use 2GB; mirroring the
+	 * CPU address space is not possible there, so we're
+	 * better off with the extra room.
+	 */
+	if (mmu->pt_per_process) {
+		mmu->pt_base = PAGE_OFFSET;
+		mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE
+				- kgsl_mmu_get_base_addr(mmu) - SZ_1M;
+		mmu->use_cpu_map = true;
+	} else {
+		mmu->pt_base = KGSL_PAGETABLE_BASE;
+		mmu->pt_size = SZ_2G;
+		mmu->use_cpu_map = false;
+	}
+
+	if (msm_soc_version_supports_iommu_v0()) {
+		iommu->iommu_reg_list = kgsl_iommuv0_reg;
+		iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V0;
+	} else {
 		iommu->iommu_reg_list = kgsl_iommuv1_reg;
 		iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V1;
-	} else {
-		iommu->iommu_reg_list = kgsl_iommuv2_reg;
-		iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V2;
 	}
 
 	/* A nop is required in an indirect buffer when switching
@@ -939,6 +1312,15 @@
 				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
 				cp_nop_packet(1));
 
+	if (cpu_is_msm8960()) {
+		/*
+		 * 8960 doesn't have a second context bank, so the IOMMU
+		 * registers must be mapped into every pagetable.
+		 */
+		iommu_ops.mmu_setup_pt = kgsl_iommu_setup_regs;
+		iommu_ops.mmu_cleanup_pt = kgsl_iommu_cleanup_regs;
+	}
+
 	dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
 			__func__);
 done:
@@ -961,51 +1343,31 @@
 static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
 {
 	int status = 0;
-	int i = 0;
-	struct kgsl_iommu *iommu = mmu->priv;
-	struct kgsl_pagetable *pagetable = NULL;
 
 	/* If chip is not 8960 then we use the 2nd context bank for pagetable
 	 * switching on the 3D side for which a separate table is allocated */
-	if (!cpu_is_msm8960() && msm_soc_version_supports_iommu_v1()) {
+	if (!cpu_is_msm8960() && msm_soc_version_supports_iommu_v0()) {
 		mmu->priv_bank_table =
-			kgsl_mmu_getpagetable(KGSL_MMU_PRIV_BANK_TABLE_NAME);
+			kgsl_mmu_getpagetable(mmu,
+					KGSL_MMU_PRIV_BANK_TABLE_NAME);
 		if (mmu->priv_bank_table == NULL) {
 			status = -ENOMEM;
 			goto err;
 		}
+		status = kgsl_iommu_setup_regs(mmu, mmu->priv_bank_table);
+		if (status)
+			goto err;
 	}
-	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+	mmu->defaultpagetable = kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);
 	/* Return error if the default pagetable doesn't exist */
 	if (mmu->defaultpagetable == NULL) {
 		status = -ENOMEM;
 		goto err;
 	}
-	pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
-				mmu->defaultpagetable;
-	/* Map the IOMMU regsiters to only defaultpagetable */
-	if (msm_soc_version_supports_iommu_v1()) {
-		for (i = 0; i < iommu->unit_count; i++) {
-			iommu->iommu_units[i].reg_map.priv |=
-						KGSL_MEMDESC_GLOBAL;
-			status = kgsl_mmu_map(pagetable,
-				&(iommu->iommu_units[i].reg_map),
-				GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
-			if (status) {
-				iommu->iommu_units[i].reg_map.priv &=
-							~KGSL_MEMDESC_GLOBAL;
-				goto err;
-			}
-		}
-	}
 	return status;
 err:
-	for (i--; i >= 0; i--) {
-		kgsl_mmu_unmap(pagetable,
-				&(iommu->iommu_units[i].reg_map));
-		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMDESC_GLOBAL;
-	}
 	if (mmu->priv_bank_table) {
+		kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
 		kgsl_mmu_putpagetable(mmu->priv_bank_table);
 		mmu->priv_bank_table = NULL;
 	}
@@ -1016,9 +1378,113 @@
 	return status;
 }
 
-static int kgsl_iommu_start(struct kgsl_mmu *mmu)
+/*
+ * kgsl_iommu_lock_rb_in_tlb - Allocates tlb entries and locks the
+ * virtual to physical address translation of ringbuffer for 3D
+ * device into tlb.
+ * @mmu - Pointer to mmu structure
+ *
+ * Return - void
+ */
+static void kgsl_iommu_lock_rb_in_tlb(struct kgsl_mmu *mmu)
 {
 	struct kgsl_device *device = mmu->device;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb;
+	struct kgsl_iommu *iommu = mmu->priv;
+	unsigned int num_tlb_entries;
+	unsigned int tlblkcr = 0;
+	unsigned int v2pxx = 0;
+	unsigned int vaddr = 0;
+	int i, j, k, l;
+
+	if (!iommu->sync_lock_initialized)
+		return;
+
+	rb = &adreno_dev->ringbuffer;
+	num_tlb_entries = rb->buffer_desc.size / PAGE_SIZE;
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		for (j = 0; j < iommu_unit->dev_count; j++) {
+			tlblkcr = 0;
+			if (cpu_is_msm8960())
+				tlblkcr |= ((num_tlb_entries &
+					KGSL_IOMMU_TLBLKCR_FLOOR_MASK) <<
+					KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT);
+			else
+				tlblkcr |= (((num_tlb_entries *
+					iommu_unit->dev_count) &
+					KGSL_IOMMU_TLBLKCR_FLOOR_MASK) <<
+					KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT);
+			/* Do not invalidate locked entries on tlbiall flush */
+			tlblkcr	|= ((1 & KGSL_IOMMU_TLBLKCR_TLBIALLCFG_MASK)
+				<< KGSL_IOMMU_TLBLKCR_TLBIALLCFG_SHIFT);
+			tlblkcr	|= ((1 & KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_MASK)
+				<< KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_SHIFT);
+			tlblkcr	|= ((1 & KGSL_IOMMU_TLBLKCR_TLBIVAACFG_MASK)
+				<< KGSL_IOMMU_TLBLKCR_TLBIVAACFG_SHIFT);
+			/* Enable tlb locking */
+			tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_LKE_MASK)
+				<< KGSL_IOMMU_TLBLKCR_LKE_SHIFT);
+			KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit,
+					iommu_unit->dev[j].ctx_id,
+					TLBLKCR, tlblkcr);
+		}
+		for (j = 0; j < iommu_unit->dev_count; j++) {
+			/* skip locking entries for private bank on 8960 */
+			if (cpu_is_msm8960() &&  KGSL_IOMMU_CONTEXT_PRIV == j)
+				continue;
+			/* Lock the ringbuffer virtual address into tlb */
+			vaddr = rb->buffer_desc.gpuaddr;
+			for (k = 0; k < num_tlb_entries; k++) {
+				v2pxx = 0;
+				v2pxx |= (((k + j * num_tlb_entries) &
+					KGSL_IOMMU_V2PXX_INDEX_MASK)
+					<< KGSL_IOMMU_V2PXX_INDEX_SHIFT);
+				v2pxx |= vaddr & (KGSL_IOMMU_V2PXX_VA_MASK <<
+						KGSL_IOMMU_V2PXX_VA_SHIFT);
+
+				KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit,
+						iommu_unit->dev[j].ctx_id,
+						V2PUR, v2pxx);
+				vaddr += PAGE_SIZE;
+				for (l = 0; l < iommu_unit->dev_count; l++) {
+					tlblkcr = KGSL_IOMMU_GET_CTX_REG(iommu,
+						iommu_unit,
+						iommu_unit->dev[l].ctx_id,
+						TLBLKCR);
+					mb();
+					tlblkcr &=
+					~(KGSL_IOMMU_TLBLKCR_VICTIM_MASK
+					<< KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT);
+					tlblkcr |= (((k + 1 +
+					(j * num_tlb_entries)) &
+					KGSL_IOMMU_TLBLKCR_VICTIM_MASK) <<
+					KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT);
+					KGSL_IOMMU_SET_CTX_REG(iommu,
+						iommu_unit,
+						iommu_unit->dev[l].ctx_id,
+						TLBLKCR, tlblkcr);
+				}
+			}
+		}
+		for (j = 0; j < iommu_unit->dev_count; j++) {
+			tlblkcr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+						iommu_unit->dev[j].ctx_id,
+						TLBLKCR);
+			mb();
+			/* Disable tlb locking */
+			tlblkcr &= ~(KGSL_IOMMU_TLBLKCR_LKE_MASK
+				<< KGSL_IOMMU_TLBLKCR_LKE_SHIFT);
+			KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit,
+				iommu_unit->dev[j].ctx_id, TLBLKCR, tlblkcr);
+		}
+	}
+}
+
+static int kgsl_iommu_start(struct kgsl_mmu *mmu)
+{
 	int status;
 	struct kgsl_iommu *iommu = mmu->priv;
 	int i, j;
@@ -1030,23 +1496,21 @@
 		status = kgsl_iommu_setup_defaultpagetable(mmu);
 		if (status)
 			return -ENOMEM;
-
-		/* Initialize the sync lock between GPU and CPU */
-		if (msm_soc_version_supports_iommu_v1() &&
-			(device->id == KGSL_DEVICE_3D0))
-				kgsl_iommu_init_sync_lock(mmu);
 	}
+	status = kgsl_iommu_start_sync_lock(mmu);
+	if (status)
+		return status;
 
 	/* We use the GPU MMU to control access to IOMMU registers on 8960 with
 	 * a225, hence we still keep the MMU active on 8960 */
-	if (cpu_is_msm8960()) {
+	if (cpu_is_msm8960() && KGSL_DEVICE_3D0 == mmu->device->id) {
 		struct kgsl_mh *mh = &(mmu->device->mh);
+		BUG_ON(iommu->iommu_units[0].reg_map.gpuaddr != 0 &&
+			mh->mpu_base > iommu->iommu_units[0].reg_map.gpuaddr);
 		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
+
 		kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
-			mh->mpu_base +
-			iommu->iommu_units[0].reg_map.gpuaddr);
-	} else {
-		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
+			mh->mpu_base + mh->mpu_range);
 	}
 
 	mmu->hwpagetable = mmu->defaultpagetable;
@@ -1071,6 +1535,7 @@
 	 * changing pagetables we can use this lsb value of the pagetable w/o
 	 * having to read it again
 	 */
+	msm_iommu_lock();
 	for (i = 0; i < iommu->unit_count; i++) {
 		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
 		for (j = 0; j < iommu_unit->dev_count; j++) {
@@ -1081,6 +1546,13 @@
 						TTBR0));
 		}
 	}
+	kgsl_iommu_lock_rb_in_tlb(mmu);
+	msm_iommu_unlock();
+
+	/* For complete CFF */
+	kgsl_cffdump_setmem(mmu->setstate_memory.gpuaddr +
+				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
+				cp_nop_packet(1), sizeof(unsigned int));
 
 	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
 	mmu->flags |= KGSL_FLAGS_STARTED;
@@ -1094,13 +1566,13 @@
 }
 
 static int
-kgsl_iommu_unmap(void *mmu_specific_pt,
+kgsl_iommu_unmap(struct kgsl_pagetable *pt,
 		struct kgsl_memdesc *memdesc,
 		unsigned int *tlb_flags)
 {
 	int ret;
 	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
-	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 
 	/* All GPU addresses as assigned are page aligned, but some
 	   functions perturb the gpuaddr with an offset, so apply the
@@ -1117,43 +1589,58 @@
 			"with err: %d\n", iommu_pt->domain, gpuaddr,
 			range, ret);
 
-#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
 	/*
 	 * Flushing is only required if per process pagetables are used. In
 	 * the global case, flushing happens inside the iommu_map function
 	 */
-	if (!ret && msm_soc_version_supports_iommu_v1())
+	if (!ret && kgsl_mmu_is_perprocess(pt->mmu))
 		*tlb_flags = UINT_MAX;
-#endif
 	return 0;
 }
 
 static int
-kgsl_iommu_map(void *mmu_specific_pt,
+kgsl_iommu_map(struct kgsl_pagetable *pt,
 			struct kgsl_memdesc *memdesc,
 			unsigned int protflags,
 			unsigned int *tlb_flags)
 {
 	int ret;
 	unsigned int iommu_virt_addr;
-	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+	struct kgsl_iommu_pt *iommu_pt = pt->priv;
 	int size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
 
 	BUG_ON(NULL == iommu_pt);
 
+	/* if there's a guard page, we'll map it read only below */
+	if ((protflags & IOMMU_WRITE) && kgsl_memdesc_has_guard_page(memdesc))
+		size -= PAGE_SIZE;
 
 	iommu_virt_addr = memdesc->gpuaddr;
 
 	ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg,
-				size, (IOMMU_READ | IOMMU_WRITE));
+				size, protflags);
 	if (ret) {
-		KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
-				"failed with err: %d\n", iommu_pt->domain,
-				iommu_virt_addr, memdesc->sg, size,
-				(IOMMU_READ | IOMMU_WRITE), ret);
+		KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %x) err: %d\n",
+			iommu_pt->domain, iommu_virt_addr, memdesc->sg, size,
+			protflags, ret);
 		return ret;
 	}
+	if ((protflags & IOMMU_WRITE) && kgsl_memdesc_has_guard_page(memdesc)) {
+		struct scatterlist *sg = &memdesc->sg[memdesc->sglen - 1];
 
+		ret = iommu_map(iommu_pt->domain, iommu_virt_addr + size,
+				kgsl_get_sg_pa(sg), PAGE_SIZE,
+				protflags & ~IOMMU_WRITE);
+		if (ret) {
+			KGSL_CORE_ERR("iommu_map(%p, %x, %x, %x) err: %d\n",
+				iommu_pt->domain, iommu_virt_addr + size,
+				kgsl_get_sg_pa(sg), protflags & ~IOMMU_WRITE,
+				ret);
+			/* cleanup the partial mapping */
+			iommu_unmap_range(iommu_pt->domain, iommu_virt_addr,
+					  size);
+		}
+	}
 	return ret;
 }
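The hunk above implements guard pages: when a writable memdesc carries KGSL_MEMDESC_GUARD_PAGE, the last page is excluded from the writable range and remapped with IOMMU_WRITE cleared, so a GPU overrun faults in the IOMMU instead of corrupting the neighbouring allocation. A standalone sketch of the size split (writable_size and the 4 KB PAGE_SIZE are illustrative assumptions):

	#include <stdbool.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096	/* assumption: 4 KB pages on these targets */

	/* Bytes of 'size' that stay writable when the buffer ends in a
	 * read-only guard page (mirrors the checks in kgsl_iommu_map). */
	static size_t writable_size(size_t size, bool writable, bool has_guard)
	{
		if (writable && has_guard)
			size -= PAGE_SIZE;	/* last page mapped read-only */
		return size;
	}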
 
@@ -1166,7 +1653,6 @@
 	 *
 	 *  call this with the global lock held
 	 */
-
 	if (mmu->flags & KGSL_FLAGS_STARTED) {
 		/* detach iommu attachment */
 		kgsl_detach_pagetable_iommu_domain(mmu);
@@ -1181,10 +1667,12 @@
 				for (j = 0; j < iommu_unit->dev_count; j++) {
 					if (iommu_unit->dev[j].fault) {
 						kgsl_iommu_enable_clk(mmu, j);
+						msm_iommu_lock();
 						KGSL_IOMMU_SET_CTX_REG(iommu,
 						iommu_unit,
 						iommu_unit->dev[j].ctx_id,
 						RESUME, 1);
+						msm_iommu_unlock();
 						iommu_unit->dev[j].fault = 0;
 					}
 				}
@@ -1202,22 +1690,28 @@
 {
 	struct kgsl_iommu *iommu = mmu->priv;
 	int i;
-	for (i = 0; i < iommu->unit_count; i++) {
-		struct kgsl_pagetable *pagetable = (mmu->priv_bank_table ?
-			mmu->priv_bank_table : mmu->defaultpagetable);
-		if (iommu->iommu_units[i].reg_map.gpuaddr)
-			kgsl_mmu_unmap(pagetable,
-			&(iommu->iommu_units[i].reg_map));
-		if (iommu->iommu_units[i].reg_map.hostptr)
-			iounmap(iommu->iommu_units[i].reg_map.hostptr);
-		kgsl_sg_free(iommu->iommu_units[i].reg_map.sg,
-				iommu->iommu_units[i].reg_map.sglen);
+
+	if (mmu->priv_bank_table != NULL) {
+		kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
+		kgsl_mmu_putpagetable(mmu->priv_bank_table);
 	}
 
-	if (mmu->priv_bank_table)
-		kgsl_mmu_putpagetable(mmu->priv_bank_table);
-	if (mmu->defaultpagetable)
+	if (mmu->defaultpagetable != NULL)
 		kgsl_mmu_putpagetable(mmu->defaultpagetable);
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_memdesc *reg_map = &iommu->iommu_units[i].reg_map;
+
+		if (reg_map->hostptr)
+			iounmap(reg_map->hostptr);
+		kgsl_sg_free(reg_map->sg, reg_map->sglen);
+		reg_map->priv &= ~KGSL_MEMDESC_GLOBAL;
+	}
+	/* clear IOMMU GPU CPU sync structures */
+	kgsl_sg_free(iommu->sync_lock_desc.sg, iommu->sync_lock_desc.sglen);
+	memset(&iommu->sync_lock_desc, 0, sizeof(iommu->sync_lock_desc));
+	iommu->sync_lock_vars = NULL;
+
 	kfree(iommu);
 
 	return 0;
@@ -1273,12 +1767,15 @@
 	pt_base &= (iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_mask <<
 			iommu->iommu_reg_list[KGSL_IOMMU_CTX_TTBR0].reg_shift);
 
-	//if (msm_soc_version_supports_iommu_v1())
+	/* For v0 SMMU the GPU needs to be idle for tlb invalidate as well */
+	if (msm_soc_version_supports_iommu_v0())
+		kgsl_idle(mmu->device);
+
 	/* Acquire GPU-CPU sync Lock here */
 	msm_iommu_lock();
 
 	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
-		if (!msm_soc_version_supports_iommu_v1())
+		if (!msm_soc_version_supports_iommu_v0())
 			kgsl_idle(mmu->device);
 		for (i = 0; i < iommu->unit_count; i++) {
 			/* get the lsb value which should not change when
@@ -1357,6 +1854,9 @@
 	.mmu_get_num_iommu_units = kgsl_iommu_get_num_iommu_units,
 	.mmu_pt_equal = kgsl_iommu_pt_equal,
 	.mmu_get_pt_base_addr = kgsl_iommu_get_pt_base_addr,
+	/* These callbacks will be set on some chipsets */
+	.mmu_setup_pt = NULL,
+	.mmu_cleanup_pt = NULL,
 	.mmu_sync_lock = kgsl_iommu_sync_lock,
 	.mmu_sync_unlock = kgsl_iommu_sync_unlock,
 };
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 4507700..c09bc4b 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,10 +15,37 @@
 
 #include <mach/iommu.h>
 
-#define KGSL_IOMMU_CTX_OFFSET_V1	0
-#define KGSL_IOMMU_CTX_OFFSET_V2	0x8000
+#define KGSL_IOMMU_CTX_OFFSET_V0	0
+#define KGSL_IOMMU_CTX_OFFSET_V1	0x8000
 #define KGSL_IOMMU_CTX_SHIFT		12
 
+/* TLBLKCR fields */
+#define KGSL_IOMMU_TLBLKCR_LKE_MASK		0x00000001
+#define KGSL_IOMMU_TLBLKCR_LKE_SHIFT		0
+#define KGSL_IOMMU_TLBLKCR_TLBIALLCFG_MASK	0x00000001
+#define KGSL_IOMMU_TLBLKCR_TLBIALLCFG_SHIFT	1
+#define KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_MASK	0x00000001
+#define KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_SHIFT	2
+#define KGSL_IOMMU_TLBLKCR_TLBIVAACFG_MASK	0x00000001
+#define KGSL_IOMMU_TLBLKCR_TLBIVAACFG_SHIFT	3
+#define KGSL_IOMMU_TLBLKCR_FLOOR_MASK		0x000000FF
+#define KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT		8
+#define KGSL_IOMMU_TLBLKCR_VICTIM_MASK		0x000000FF
+#define KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT		16
+
+/* V2PXX fields */
+#define KGSL_IOMMU_V2PXX_INDEX_MASK		0x000000FF
+#define KGSL_IOMMU_V2PXX_INDEX_SHIFT		0
+#define KGSL_IOMMU_V2PXX_VA_MASK		0x000FFFFF
+#define KGSL_IOMMU_V2PXX_VA_SHIFT		12
+
+/* FSYNR1 V0 fields */
+#define KGSL_IOMMU_FSYNR1_AWRITE_MASK		0x00000001
+#define KGSL_IOMMU_FSYNR1_AWRITE_SHIFT		8
+/* FSYNR0 V1 fields */
+#define KGSL_IOMMU_V1_FSYNR0_WNR_MASK		0x00000001
+#define KGSL_IOMMU_V1_FSYNR0_WNR_SHIFT		4
+
 enum kgsl_iommu_reg_map {
 	KGSL_IOMMU_GLOBAL_BASE = 0,
 	KGSL_IOMMU_CTX_TTBR0,
@@ -26,6 +53,10 @@
 	KGSL_IOMMU_CTX_FSR,
 	KGSL_IOMMU_CTX_TLBIALL,
 	KGSL_IOMMU_CTX_RESUME,
+	KGSL_IOMMU_CTX_TLBLKCR,
+	KGSL_IOMMU_CTX_V2PUR,
+	KGSL_IOMMU_CTX_FSYNR0,
+	KGSL_IOMMU_CTX_FSYNR1,
 	KGSL_IOMMU_REG_MAX
 };
 
@@ -124,6 +155,8 @@
  * IOMMU registers
  * @sync_lock_desc: GPU Memory descriptor for the memory containing the
  * spinlocks
+ * @sync_lock_offset: The offset within the page at which the sync
+ * variables are located
  * @sync_lock_initialized: True if the sync_lock feature is enabled
  */
 struct kgsl_iommu {
@@ -136,6 +169,7 @@
 	struct kgsl_iommu_register_list *iommu_reg_list;
 	struct remote_iommu_petersons_spinlock *sync_lock_vars;
 	struct kgsl_memdesc sync_lock_desc;
+	unsigned int sync_lock_offset;
 	bool sync_lock_initialized;
 };
 
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
index 83d14f7..a7832e4 100644
--- a/drivers/gpu/msm/kgsl_log.h
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2011,2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index d1f58c4..4e95373 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,13 +23,11 @@
 
 #include "kgsl.h"
 #include "kgsl_mmu.h"
+#include "kgsl_gpummu.h"
 #include "kgsl_device.h"
 #include "kgsl_sharedmem.h"
 #include "adreno.h"
 
-#define KGSL_MMU_ALIGN_SHIFT    13
-#define KGSL_MMU_ALIGN_MASK     (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
-
 static enum kgsl_mmutype kgsl_mmu_type;
 
 static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
@@ -37,17 +35,18 @@
 static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
 {
 	int i;
-	/* For IOMMU only unmap the global structures to global pt */
-	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
-		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
-		(KGSL_MMU_GLOBAL_PT !=  pt->name) &&
-		(KGSL_MMU_PRIV_BANK_TABLE_NAME !=  pt->name))
-		return 0;
+	struct kgsl_device *device;
+
 	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
-		struct kgsl_device *device = kgsl_driver.devp[i];
+		device = kgsl_driver.devp[i];
 		if (device)
 			device->ftbl->cleanup_pt(device, pt);
 	}
+	/* Only the 3d device needs mmu specific pt entries */
+	device = kgsl_driver.devp[KGSL_DEVICE_3D0];
+	if (device->mmu.mmu_ops->mmu_cleanup_pt != NULL)
+		device->mmu.mmu_ops->mmu_cleanup_pt(&device->mmu, pt);
+
 	return 0;
 }
 
@@ -56,21 +55,25 @@
 {
 	int i = 0;
 	int status = 0;
+	struct kgsl_device *device;
 
-	/* For IOMMU only map the global structures to global pt */
-	if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
-		(KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
-		(KGSL_MMU_GLOBAL_PT !=  pt->name) &&
-		(KGSL_MMU_PRIV_BANK_TABLE_NAME !=  pt->name))
-		return 0;
 	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
-		struct kgsl_device *device = kgsl_driver.devp[i];
+		device = kgsl_driver.devp[i];
 		if (device) {
 			status = device->ftbl->setup_pt(device, pt);
 			if (status)
 				goto error_pt;
 		}
 	}
+	/* Only the 3d device needs mmu specific pt entries */
+	device = kgsl_driver.devp[KGSL_DEVICE_3D0];
+	if (device->mmu.mmu_ops->mmu_setup_pt != NULL) {
+		status = device->mmu.mmu_ops->mmu_setup_pt(&device->mmu, pt);
+		if (status) {
+			i = KGSL_DEVICE_MAX - 1;
+			goto error_pt;
+		}
+	}
 	return status;
 error_pt:
 	while (i >= 0) {
@@ -101,7 +104,7 @@
 	if (pagetable->pool)
 		gen_pool_destroy(pagetable->pool);
 
-	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
+	pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
 
 	kfree(pagetable);
 }
@@ -191,7 +194,7 @@
 
 	if (pt) {
 		ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
-			kgsl_mmu_get_ptsize());
+			kgsl_mmu_get_ptsize(pt->mmu));
 	}
 
 	kgsl_put_pagetable(pt);
@@ -310,22 +313,6 @@
 	return ret;
 }
 
-unsigned int kgsl_mmu_get_ptsize(void)
-{
-	/*
-	 * For IOMMU, we could do up to 4G virtual range if we wanted to, but
-	 * it makes more sense to return a smaller range and leave the rest of
-	 * the virtual range for future improvements
-	 */
-
-	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
-		return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
-	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
-		return SZ_2G - KGSL_PAGETABLE_BASE;
-	else
-		return 0;
-}
-
 int
 kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, unsigned int pt_base)
 {
@@ -355,15 +342,15 @@
 	unsigned int ret = 0;
 
 	if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
-		return KGSL_MMU_GLOBAL_PT;
+		return 0;
 	spin_lock(&kgsl_driver.ptlock);
 	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
 		if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
-			if ((addr & (PAGE_SIZE-1)) == pt->fault_addr) {
+			if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
 				ret = 1;
 				break;
 			} else {
-				pt->fault_addr = (addr & (PAGE_SIZE-1));
+				pt->fault_addr = (addr & ~(PAGE_SIZE-1));
 				ret = 0;
 				break;
 			}
@@ -458,7 +445,8 @@
 }
 EXPORT_SYMBOL(kgsl_mh_intrcallback);
 
-static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
+static struct kgsl_pagetable *
+kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
 				unsigned int name)
 {
 	int status = 0;
@@ -477,8 +465,8 @@
 
 	spin_lock_init(&pagetable->lock);
 
-	ptsize = kgsl_mmu_get_ptsize();
-
+	ptsize = kgsl_mmu_get_ptsize(mmu);
+	pagetable->mmu = mmu;
 	pagetable->name = name;
 	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
 	pagetable->fault_addr = 0xFFFFFFFF;
@@ -490,10 +478,10 @@
 	if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
 		((KGSL_MMU_GLOBAL_PT == name) ||
 		(KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
-		pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
+		pagetable->kgsl_pool = gen_pool_create(ilog2(SZ_8K), -1);
 		if (pagetable->kgsl_pool == NULL) {
 			KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
-					KGSL_MMU_ALIGN_SHIFT);
+					ilog2(SZ_8K));
 			goto err_alloc;
 		}
 		if (gen_pool_add(pagetable->kgsl_pool,
@@ -504,14 +492,14 @@
 		}
 	}
 
-	pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
+	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
 	if (pagetable->pool == NULL) {
 		KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
-			      KGSL_MMU_ALIGN_SHIFT);
+			      PAGE_SHIFT);
 		goto err_kgsl_pool;
 	}
 
-	if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
+	if (gen_pool_add(pagetable->pool, kgsl_mmu_get_base_addr(mmu),
 				ptsize, -1)) {
 		KGSL_CORE_ERR("gen_pool_add failed\n");
 		goto err_pool;
@@ -540,7 +528,7 @@
 	return pagetable;
 
 err_mmu_create:
-	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
+	pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
 err_pool:
 	gen_pool_destroy(pagetable->pool);
 err_kgsl_pool:
@@ -552,24 +540,21 @@
 	return NULL;
 }
 
-struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
+struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *mmu,
+						unsigned long name)
 {
 	struct kgsl_pagetable *pt;
 
 	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
 		return (void *)(-1);
 
-#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
-	name = KGSL_MMU_GLOBAL_PT;
-#endif
-	/* We presently do not support per-process for IOMMU-v2 */
-	if (!msm_soc_version_supports_iommu_v1())
+	if (!kgsl_mmu_is_perprocess(mmu))
 		name = KGSL_MMU_GLOBAL_PT;
 
 	pt = kgsl_get_pagetable(name);
 
 	if (pt == NULL)
-		pt = kgsl_mmu_createpagetableobject(name);
+		pt = kgsl_mmu_createpagetableobject(mmu, name);
 
 	return pt;
 }
@@ -626,24 +611,15 @@
 	 */
 }
 
-static inline struct gen_pool *
-_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
-{
-	if (pagetable->kgsl_pool &&
-		(KGSL_MEMDESC_GLOBAL & flags))
-		return pagetable->kgsl_pool;
-	return pagetable->pool;
-}
-
 int
 kgsl_mmu_map(struct kgsl_pagetable *pagetable,
-				struct kgsl_memdesc *memdesc,
-				unsigned int protflags)
+				struct kgsl_memdesc *memdesc)
 {
 	int ret;
-	struct gen_pool *pool;
+	struct gen_pool *pool = NULL;
 	int size;
 	int page_align = ilog2(PAGE_SIZE);
+	unsigned int protflags = kgsl_memdesc_protflags(memdesc);
 
 	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
 		if (memdesc->sglen == 1) {
@@ -665,33 +641,57 @@
 
 	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
 
-	/* Allocate from kgsl pool if it exists for global mappings */
-	pool = _get_pool(pagetable, memdesc->priv);
+	pool = pagetable->pool;
 
-	/* Allocate aligned virtual addresses for iommu. This allows
-	 * more efficient pagetable entries if the physical memory
-	 * is also aligned. Don't do this for GPUMMU, because
-	 * the address space is so small.
-	 */
-	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype() &&
-	    kgsl_memdesc_get_align(memdesc) > 0)
-		page_align = kgsl_memdesc_get_align(memdesc);
-
-	memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size, page_align);
-	if (memdesc->gpuaddr == 0) {
-		KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
-			size,
-			(pool == pagetable->kgsl_pool) ?
-			"kgsl_pool" : "general_pool");
-		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
-				pagetable->name, pagetable->stats.mapped,
-				pagetable->stats.entries);
-		return -ENOMEM;
+	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+		/* Allocate aligned virtual addresses for iommu. This allows
+		 * more efficient pagetable entries if the physical memory
+		 * is also aligned. Don't do this for GPUMMU, because
+		 * the address space is so small.
+		 */
+		if (kgsl_memdesc_get_align(memdesc) > 0)
+			page_align = kgsl_memdesc_get_align(memdesc);
+		if (kgsl_memdesc_is_global(memdesc)) {
+			/*
+			 * Only the default pagetable has a kgsl_pool, and
+			 * it is responsible for creating the mapping for
+			 * each global buffer. The mapping will be reused
+			 * in all other pagetables and it must already exist
+			 * when we're creating other pagetables which do not
+			 * have a kgsl_pool.
+			 */
+			pool = pagetable->kgsl_pool;
+			if (pool == NULL && memdesc->gpuaddr == 0) {
+				KGSL_CORE_ERR(
+				  "No address for global mapping into pt %d\n",
+				  pagetable->name);
+				return -EINVAL;
+			}
+		} else if (kgsl_memdesc_use_cpu_map(memdesc)) {
+			if (memdesc->gpuaddr == 0)
+				return -EINVAL;
+			pool = NULL;
+		}
+	}
+	if (pool) {
+		memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size,
+							  page_align);
+		if (memdesc->gpuaddr == 0) {
+			KGSL_CORE_ERR("gen_pool_alloc(%d) failed, pool: %s\n",
+					size,
+					(pool == pagetable->kgsl_pool) ?
+					"kgsl_pool" : "general_pool");
+			KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
+					pagetable->name,
+					pagetable->stats.mapped,
+					pagetable->stats.entries);
+			return -ENOMEM;
+		}
 	}
 
 	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
 		spin_lock(&pagetable->lock);
-	ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags,
+	ret = pagetable->pt_ops->mmu_map(pagetable, memdesc, protflags,
 						&pagetable->tlb_flags);
 	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
 		spin_lock(&pagetable->lock);
@@ -713,7 +713,8 @@
 
 err_free_gpuaddr:
 	spin_unlock(&pagetable->lock);
-	gen_pool_free(pool, memdesc->gpuaddr, size);
+	if (pool)
+		gen_pool_free(pool, memdesc->gpuaddr, size);
 	memdesc->gpuaddr = 0;
 	return ret;
 }
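With this rewrite kgsl_mmu_map() draws GPU addresses from one of three sources in the IOMMU case: the kgsl_pool for global buffers (only the default pagetable has one; other pagetables must reuse the gpuaddr it assigned), no pool at all for KGSL_MEMFLAGS_USE_CPU_MAP buffers whose gpuaddr was fixed by the CPU mmap, and the pagetable's general pool otherwise. A condensed sketch of that decision (pick_pool is a hypothetical helper, not driver code):

	static struct gen_pool *pick_pool(struct kgsl_pagetable *pt,
					  const struct kgsl_memdesc *memdesc)
	{
		if (kgsl_memdesc_is_global(memdesc))
			return pt->kgsl_pool;	/* NULL off the default
						 * pagetable: the existing
						 * gpuaddr is reused */
		if (kgsl_memdesc_use_cpu_map(memdesc))
			return NULL;	/* gpuaddr fixed by the CPU mmap */
		return pt->pool;	/* ordinary per-pagetable allocation */
	}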
@@ -743,7 +744,7 @@
 
 	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
 		spin_lock(&pagetable->lock);
-	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
+	pagetable->pt_ops->mmu_unmap(pagetable, memdesc,
 					&pagetable->tlb_flags);
 
 	/* If buffer is unmapped 0 fault addr */
@@ -759,21 +760,29 @@
 
 	spin_unlock(&pagetable->lock);
 
-	pool = _get_pool(pagetable, memdesc->priv);
-	gen_pool_free(pool, memdesc->gpuaddr, size);
+	pool = pagetable->pool;
+
+	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+		if (kgsl_memdesc_is_global(memdesc))
+			pool = pagetable->kgsl_pool;
+		else if (kgsl_memdesc_use_cpu_map(memdesc))
+			pool = NULL;
+	}
+	if (pool)
+		gen_pool_free(pool, memdesc->gpuaddr, size);
 
 	/*
 	 * Don't clear the gpuaddr on global mappings because they
 	 * may be in use by other pagetables
 	 */
-	if (!(memdesc->priv & KGSL_MEMDESC_GLOBAL))
+	if (!kgsl_memdesc_is_global(memdesc))
 		memdesc->gpuaddr = 0;
 	return 0;
 }
 EXPORT_SYMBOL(kgsl_mmu_unmap);
 
 int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
-			struct kgsl_memdesc *memdesc, unsigned int protflags)
+			struct kgsl_memdesc *memdesc)
 {
 	int result = -EINVAL;
 	unsigned int gpuaddr = 0;
@@ -785,19 +794,17 @@
 	/* Not all global mappings are needed for all MMU types */
 	if (!memdesc->size)
 		return 0;
-
 	gpuaddr = memdesc->gpuaddr;
 	memdesc->priv |= KGSL_MEMDESC_GLOBAL;
 
-	result = kgsl_mmu_map(pagetable, memdesc, protflags);
+	result = kgsl_mmu_map(pagetable, memdesc);
 	if (result)
 		goto error;
 
 	/*global mappings must have the same gpu address in all pagetables*/
 	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
-		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
-			"gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
-			gpuaddr, memdesc->gpuaddr);
+		KGSL_CORE_ERR("pt %p addr mismatch phys %pa gpu 0x%0x 0x%08x",
+		     pagetable, &memdesc->physaddr, gpuaddr, memdesc->gpuaddr);
 		goto error_unmap;
 	}
 	return result;
@@ -890,12 +897,18 @@
 }
 EXPORT_SYMBOL(kgsl_mmu_set_mmutype);
 
-int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr)
+int kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, unsigned int gpuaddr)
 {
 	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
 		return 1;
-	return ((gpuaddr >= KGSL_PAGETABLE_BASE) &&
-		(gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
+	if (gpuaddr >= kgsl_mmu_get_base_addr(pt->mmu) &&
+		gpuaddr < kgsl_mmu_get_base_addr(pt->mmu) +
+		kgsl_mmu_get_ptsize(pt->mmu))
+		return 1;
+	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU
+		&& kgsl_mmu_is_perprocess(pt->mmu))
+		return (gpuaddr > 0 && gpuaddr < TASK_SIZE);
+	return 0;
 }
 EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);
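kgsl_mmu_gpuaddr_in_range() now accepts two windows: the runtime pool range [pt_base, pt_base + pt_size) on any pagetable, plus anything below TASK_SIZE on per-process IOMMU pagetables, where CPU-mirrored mappings live outside the pool. A standalone model (addr_in_range and the TASK_SIZE value are illustrative assumptions):

	#define TASK_SIZE 0xbf000000u	/* assumption: usual 32-bit ARM split */

	static int addr_in_range(unsigned int addr, unsigned int base,
				 unsigned int size, int iommu_per_process)
	{
		if (addr >= base && addr < base + size)
			return 1;	/* pool-allocated window */
		if (iommu_per_process)
			return addr > 0 && addr < TASK_SIZE; /* CPU-mirrored */
		return 0;
	}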
 
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 377f342..d7d9516 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,16 +13,23 @@
 #ifndef __KGSL_MMU_H
 #define __KGSL_MMU_H
 
-/*
- * These defines control the split between ttbr1 and ttbr0 pagetables of IOMMU
- * and what ranges of memory we map to them
- */
-#define KGSL_IOMMU_GLOBAL_MEM_BASE	0xC0000000
-#define KGSL_IOMMU_GLOBAL_MEM_SIZE	SZ_4M
-#define KGSL_IOMMU_TTBR1_SPLIT		2
+#include <mach/iommu.h>
 
-#define KGSL_MMU_ALIGN_SHIFT    13
-#define KGSL_MMU_ALIGN_MASK     (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
+/*
+ * These defines control the address range for allocations that
+ * are mapped into all pagetables.
+ */
+#define KGSL_IOMMU_GLOBAL_MEM_BASE	0xf8000000
+#define KGSL_IOMMU_GLOBAL_MEM_SIZE	SZ_4M
+
+#define KGSL_MMU_ALIGN_MASK     (~((1 << PAGE_SHIFT) - 1))
+
+/* defconfig option for disabling per process pagetables */
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+#define KGSL_MMU_USE_PER_PROCESS_PT true
+#else
+#define KGSL_MMU_USE_PER_PROCESS_PT false
+#endif
 
 /* Identifier for the global page table */
 /* Per process page tables will probably pass in the thread group
@@ -116,6 +123,7 @@
 	unsigned int tlb_flags;
 	unsigned int fault_addr;
 	void *priv;
+	struct kgsl_mmu *mmu;
 };
 
 struct kgsl_mmu;
@@ -149,24 +157,26 @@
 	unsigned int (*mmu_get_pt_base_addr)
 			(struct kgsl_mmu *mmu,
 			struct kgsl_pagetable *pt);
+	int (*mmu_setup_pt) (struct kgsl_mmu *mmu,
+			struct kgsl_pagetable *pt);
+	void (*mmu_cleanup_pt) (struct kgsl_mmu *mmu,
+			struct kgsl_pagetable *pt);
 	unsigned int (*mmu_sync_lock)
-			(struct kgsl_mmu *mmu,
-			unsigned int *cmds);
+			(struct kgsl_mmu *mmu, unsigned int *cmds);
 	unsigned int (*mmu_sync_unlock)
-			(struct kgsl_mmu *mmu,
-			unsigned int *cmds);
+			(struct kgsl_mmu *mmu, unsigned int *cmds);
 };
 
 struct kgsl_mmu_pt_ops {
-	int (*mmu_map) (void *mmu_pt,
+	int (*mmu_map) (struct kgsl_pagetable *pt,
 			struct kgsl_memdesc *memdesc,
 			unsigned int protflags,
 			unsigned int *tlb_flags);
-	int (*mmu_unmap) (void *mmu_pt,
+	int (*mmu_unmap) (struct kgsl_pagetable *pt,
 			struct kgsl_memdesc *memdesc,
 			unsigned int *tlb_flags);
 	void *(*mmu_create_pagetable) (void);
-	void (*mmu_destroy_pagetable) (void *pt);
+	void (*mmu_destroy_pagetable) (struct kgsl_pagetable *);
 };
 
 #define KGSL_MMU_FLAGS_IOMMU_SYNC BIT(31)
@@ -185,14 +195,19 @@
 	const struct kgsl_mmu_ops *mmu_ops;
 	void *priv;
 	int fault;
+	unsigned long pt_base;
+	unsigned long pt_size;
+	bool pt_per_process;
+	bool use_cpu_map;
 };
 
-#include "kgsl_gpummu.h"
-
 extern struct kgsl_mmu_ops iommu_ops;
 extern struct kgsl_mmu_pt_ops iommu_pt_ops;
+extern struct kgsl_mmu_ops gpummu_ops;
+extern struct kgsl_mmu_pt_ops gpummu_pt_ops;
 
-struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);
+struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *,
+						unsigned long name);
 void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
 void kgsl_mh_start(struct kgsl_device *device);
 void kgsl_mh_intrcallback(struct kgsl_device *device);
@@ -200,10 +215,9 @@
 int kgsl_mmu_start(struct kgsl_device *device);
 int kgsl_mmu_close(struct kgsl_device *device);
 int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
-		 struct kgsl_memdesc *memdesc,
-		 unsigned int protflags);
+		 struct kgsl_memdesc *memdesc);
 int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
-			struct kgsl_memdesc *memdesc, unsigned int protflags);
+			struct kgsl_memdesc *memdesc);
 int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
 		    struct kgsl_memdesc *memdesc);
 unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
@@ -220,8 +234,7 @@
 int kgsl_mmu_enabled(void);
 void kgsl_mmu_set_mmutype(char *mmutype);
 enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
-unsigned int kgsl_mmu_get_ptsize(void);
-int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr);
+int kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, unsigned int gpuaddr);
 
 /*
  * Static inline functions of MMU that simply call the SMMU specific
@@ -332,6 +345,56 @@
 		return 0;
 }
 
+/*
+ * kgsl_mmu_is_perprocess() - Runtime check for per-process
+ * pagetables.
+ * @mmu: the mmu
+ *
+ * Returns true if per-process pagetables are enabled,
+ * false if not.
+ */
+static inline int kgsl_mmu_is_perprocess(struct kgsl_mmu *mmu)
+{
+	return mmu->pt_per_process;
+}
+
+/*
+ * kgsl_mmu_use_cpu_map() - Runtime check for matching the CPU
+ * address space on the GPU.
+ * @mmu: the mmu
+ *
+ * Returns true if supported, false if not.
+ */
+static inline int kgsl_mmu_use_cpu_map(struct kgsl_mmu *mmu)
+{
+	return mmu->pt_per_process;
+}
+
+/*
+ * kgsl_mmu_get_base_addr() - Get gpu virtual address base.
+ * @mmu: the mmu
+ *
+ * Returns the start address of the allocatable gpu
+ * virtual address space. Other mappings that mirror
+ * the CPU address space are possible outside this range.
+ */
+static inline unsigned int kgsl_mmu_get_base_addr(struct kgsl_mmu *mmu)
+{
+	return mmu->pt_base;
+}
+
+/*
+ * kgsl_mmu_get_ptsize() - Get gpu pagetable size
+ * @mmu: the mmu
+ *
+ * Returns the usable size of the gpu allocatable
+ * address space.
+ */
+static inline unsigned int kgsl_mmu_get_ptsize(struct kgsl_mmu *mmu)
+{
+	return mmu->pt_size;
+}
+
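These accessors replace the old compile-time KGSL_PAGETABLE_BASE and pagetable-size constants with per-mmu runtime fields, which is what lets GPUMMU and both IOMMU generations coexist in one binary. Typical use, mirroring the gen_pool_add() call in kgsl_mmu_createpagetableobject() earlier in this patch (init_pt_pool is a hypothetical wrapper):

	static int init_pt_pool(struct kgsl_pagetable *pt)
	{
		/* size the allocator from runtime fields, not macros */
		return gen_pool_add(pt->pool,
				    kgsl_mmu_get_base_addr(pt->mmu),
				    kgsl_mmu_get_ptsize(pt->mmu), -1);
	}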
 static inline int kgsl_mmu_sync_lock(struct kgsl_mmu *mmu,
 				unsigned int *cmds)
 {
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index d489119..2f8d93e 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,13 +23,13 @@
 #include "kgsl_pwrscale.h"
 #include "kgsl_device.h"
 #include "kgsl_trace.h"
+#include "kgsl_sharedmem.h"
 
 #define KGSL_PWRFLAGS_POWER_ON 0
 #define KGSL_PWRFLAGS_CLK_ON   1
 #define KGSL_PWRFLAGS_AXI_ON   2
 #define KGSL_PWRFLAGS_IRQ_ON   3
 
-#define GPU_SWFI_LATENCY	3
 #define UPDATE_BUSY_VAL		1000000
 #define UPDATE_BUSY		50
 
@@ -130,6 +130,16 @@
 	 */
 
 	pwr->active_pwrlevel = new_level;
+	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
+
+	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
+
+		if (pwr->pcl)
+			msm_bus_scale_client_update_request(pwr->pcl,
+				pwrlevel->bus_freq);
+		else if (pwr->ebi1_clk)
+			clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
+	}
 
 	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
 		(device->state == KGSL_STATE_NAP)) {
@@ -156,16 +166,6 @@
 		}
 	}
 
-	pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
-
-	if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
-
-		if (pwr->pcl)
-			msm_bus_scale_client_update_request(pwr->pcl,
-				pwrlevel->bus_freq);
-		else if (pwr->ebi1_clk)
-			clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
-	}
 
 	trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
 }
@@ -357,13 +357,13 @@
 	return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
 }
 
-/* Given a GPU clock value, return the nearest powerlevel */
+/* Given a GPU clock value, return the lowest matching powerlevel */
 
 static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
 {
 	int i;
 
-	for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
+	for (i = pwr->num_pwrlevels - 1; i >= 0; i--) {
 		if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
 			return i;
 	}
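Reversing the scan direction matters when two power levels fall within the 5 MHz window of the requested clock: the loop now returns the highest index, i.e. the lowest-frequency match. A standalone worked example with a made-up four-level table:

	#include <stdio.h>
	#include <stdlib.h>

	/* hypothetical table: indices 0..3 = 400/300/200/200 MHz */
	static const unsigned int freq[4] =
		{ 400000000, 300000000, 200000000, 200000000 };

	static int nearest_pwrlevel(unsigned int clock)
	{
		int i;
		for (i = 3; i >= 0; i--)	/* lowest frequency first */
			if (abs((int)(freq[i] - clock)) < 5000000)
				return i;
		return -1;
	}

	int main(void)
	{
		/* the old ascending loop returned 2; this returns 3 */
		printf("%d\n", nearest_pwrlevel(200000000));
		return 0;
	}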
@@ -515,7 +515,6 @@
 	struct kgsl_device *device = kgsl_device_from_dev(dev);
 	struct kgsl_pwrctrl *pwr;
 	const long div = 1000/HZ;
-	static unsigned int org_interval_timeout = 1;
 	int rc;
 
 	if (device == NULL)
@@ -528,15 +527,11 @@
 	if (rc)
 		return rc;
 
-	if (org_interval_timeout == 1)
-		org_interval_timeout = pwr->interval_timeout;
-
 	mutex_lock(&device->mutex);
 
 	/* Let the timeout be requested in ms, but convert to jiffies. */
 	val /= div;
-	if (val >= org_interval_timeout)
-		pwr->interval_timeout = val;
+	pwr->interval_timeout = val;
 
 	mutex_unlock(&device->mutex);
 
@@ -548,10 +543,48 @@
 					char *buf)
 {
 	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	int mul = 1000/HZ;
+	if (device == NULL)
+		return 0;
+	/* Show the idle_timeout converted to msec */
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		device->pwrctrl.interval_timeout * mul);
+}
+
+static int kgsl_pwrctrl_pmqos_latency_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	char temp[20];
+	unsigned long val;
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	int rc;
+
+	if (device == NULL)
+		return 0;
+
+	snprintf(temp, sizeof(temp), "%.*s",
+			(int)min(count, sizeof(temp) - 1), buf);
+	rc = kstrtoul(temp, 0, &val);
+	if (rc)
+		return rc;
+
+	mutex_lock(&device->mutex);
+	device->pwrctrl.pm_qos_latency = val;
+	mutex_unlock(&device->mutex);
+
+	return count;
+}
+
+static int kgsl_pwrctrl_pmqos_latency_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
 	if (device == NULL)
 		return 0;
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-		device->pwrctrl.interval_timeout);
+		device->pwrctrl.pm_qos_latency);
 }
 
 static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
@@ -615,6 +648,14 @@
 	return num_chars;
 }
 
+static int kgsl_pwrctrl_reset_count_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n", device->reset_counter);
+}
+
 DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
 DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
 	kgsl_pwrctrl_max_gpuclk_store);
@@ -640,6 +681,12 @@
 DEVICE_ATTR(num_pwrlevels, 0444,
 	kgsl_pwrctrl_num_pwrlevels_show,
 	NULL);
+DEVICE_ATTR(pmqos_latency, 0644,
+	kgsl_pwrctrl_pmqos_latency_show,
+	kgsl_pwrctrl_pmqos_latency_store);
+DEVICE_ATTR(reset_count, 0444,
+	kgsl_pwrctrl_reset_count_show,
+	NULL);
 
 static const struct device_attribute *pwrctrl_attr_list[] = {
 	&dev_attr_gpuclk,
@@ -653,6 +700,8 @@
 	&dev_attr_min_pwrlevel,
 	&dev_attr_thermal_pwrlevel,
 	&dev_attr_num_pwrlevels,
+	&dev_attr_pmqos_latency,
+	&dev_attr_reset_count,
 	NULL
 };
 
@@ -684,6 +733,9 @@
 	clkstats->on_time_old = on_time;
 	clkstats->elapsed_old = clkstats->elapsed;
 	clkstats->elapsed = 0;
+
+	trace_kgsl_gpubusy(device, clkstats->on_time_old,
+		clkstats->elapsed_old);
 }
 
 /* Track the amount of time the gpu is on vs the total system time. *
@@ -714,23 +766,23 @@
 			/* High latency clock maintenance. */
 			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
 				(requested_state != KGSL_STATE_NAP)) {
-				clk_set_rate(pwr->grp_clks[0],
-					pwr->pwrlevels[pwr->num_pwrlevels - 1].
-					gpu_freq);
 				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
 					if (pwr->grp_clks[i])
 						clk_unprepare(pwr->grp_clks[i]);
+				clk_set_rate(pwr->grp_clks[0],
+					pwr->pwrlevels[pwr->num_pwrlevels - 1].
+					gpu_freq);
 			}
 			kgsl_pwrctrl_busy_time(device, true);
 		} else if (requested_state == KGSL_STATE_SLEEP) {
 			/* High latency clock maintenance. */
+			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+				if (pwr->grp_clks[i])
+					clk_unprepare(pwr->grp_clks[i]);
 			if ((pwr->pwrlevels[0].gpu_freq > 0))
 				clk_set_rate(pwr->grp_clks[0],
 					pwr->pwrlevels[pwr->num_pwrlevels - 1].
 					gpu_freq);
-			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-				if (pwr->grp_clks[i])
-					clk_unprepare(pwr->grp_clks[i]);
 		}
 	} else if (state == KGSL_PWRFLAGS_ON) {
 		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
@@ -738,15 +790,14 @@
 			trace_kgsl_clk(device, state);
 			/* High latency clock maintenance. */
 			if (device->state != KGSL_STATE_NAP) {
-				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
-					if (pwr->grp_clks[i])
-						clk_prepare(pwr->grp_clks[i]);
-
 				if (pwr->pwrlevels[0].gpu_freq > 0)
 					clk_set_rate(pwr->grp_clks[0],
 						pwr->pwrlevels
 						[pwr->active_pwrlevel].
 						gpu_freq);
+				for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+					if (pwr->grp_clks[i])
+						clk_prepare(pwr->grp_clks[i]);
 			}
 			/* as last step, enable grp_clk;
 			   this is to let the GPU interrupt come */
@@ -878,7 +929,8 @@
 	if (pdata->set_grp_async != NULL)
 		pdata->set_grp_async();
 
-	if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
+	if (pdata->num_levels > KGSL_MAX_PWRLEVELS ||
+	    pdata->num_levels < 1) {
 		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
 					 pdata->num_levels);
 		result = -EINVAL;
@@ -947,6 +999,11 @@
 		}
 	}
 
+	/* Set the power level step multiplier with 1 as the default */
+	pwr->step_mul = pdata->step_mul ? pdata->step_mul : 1;
+
+	/* Set the CPU latency to 501usec to allow low latency PC modes */
+	pwr->pm_qos_latency = 501;
 
 	pm_runtime_enable(device->parentdev);
 	register_early_suspend(&device->display_off);
@@ -1143,7 +1200,7 @@
 		_sleep_accounting(device);
 		kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
-		pm_qos_update_request(&device->pm_qos_req_dma,
+		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
 					PM_QOS_DEFAULT_VALUE);
 		break;
 	case KGSL_STATE_SLEEP:
@@ -1177,7 +1234,7 @@
 		device->ftbl->stop(device);
 		_sleep_accounting(device);
 		kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
-		pm_qos_update_request(&device->pm_qos_req_dma,
+		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
 						PM_QOS_DEFAULT_VALUE);
 		break;
 	case KGSL_STATE_SLUMBER:
@@ -1224,10 +1281,15 @@
 void kgsl_pwrctrl_wake(struct kgsl_device *device)
 {
 	int status;
+	unsigned int context_id;
+	unsigned int state = device->state;
+	unsigned int ts_processed = 0xdeaddead;
+	struct kgsl_context *context;
+
 	kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
 	switch (device->state) {
 	case KGSL_STATE_SLUMBER:
-		status = device->ftbl->start(device, 0);
+		status = device->ftbl->start(device);
 		if (status) {
 			kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
 			KGSL_DRV_ERR(device, "start failed %d\n", status);
@@ -1237,6 +1299,17 @@
 	case KGSL_STATE_SLEEP:
 		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
 		kgsl_pwrscale_wake(device);
+		kgsl_sharedmem_readl(&device->memstore,
+			(unsigned int *) &context_id,
+			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+				current_context));
+		context = idr_find(&device->context_idr, context_id);
+		if (context)
+			ts_processed = kgsl_readtimestamp(device, context,
+				KGSL_TIMESTAMP_RETIRED);
+		KGSL_PWR_INFO(device, "Wake from %s state. CTXT: %d RTRD TS: %08X\n",
+			kgsl_pwrstate_to_str(state),
+			context ? context->id : -1, ts_processed);
 		/* fall through */
 	case KGSL_STATE_NAP:
 		/* Turn on the core clocks */
@@ -1247,8 +1320,8 @@
 		/* Re-enable HW access */
 		mod_timer(&device->idle_timer,
 				jiffies + device->pwrctrl.interval_timeout);
-		pm_qos_update_request(&device->pm_qos_req_dma,
-					GPU_SWFI_LATENCY);
+		pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
+				device->pwrctrl.pm_qos_latency);
 	case KGSL_STATE_ACTIVE:
 		kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
 		break;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 8d66505..ced52e1 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,9 @@
  * @irq_name - resource name for the IRQ
  * @restore_slumber - Flag to indicate that we are in a suspend/restore sequence
  * @clk_stats - structure of clock statistics
+ * @pm_qos_req_dma - the power management quality of service structure
+ * @pm_qos_latency - allowed CPU latency in microseconds
+ * @step_mul - multiplier for moving between power levels
  */
 
 struct kgsl_pwrctrl {
@@ -85,6 +88,9 @@
 	s64 time;
 	unsigned int restore_slumber;
 	struct kgsl_clk_stats clk_stats;
+	struct pm_qos_request pm_qos_req_dma;
+	unsigned int pm_qos_latency;
+	unsigned int step_mul;
 };
 
 void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index dffae70..02ada38 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -237,16 +237,14 @@
 void kgsl_pwrscale_busy(struct kgsl_device *device)
 {
 	if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->busy)
-		if (device->requested_state != KGSL_STATE_SLUMBER)
-			device->pwrscale.policy->busy(device,
-					&device->pwrscale);
+		device->pwrscale.policy->busy(device,
+				&device->pwrscale);
 }
 
 void kgsl_pwrscale_idle(struct kgsl_device *device)
 {
 	if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->idle)
-		if (device->requested_state != KGSL_STATE_SLUMBER &&
-			device->requested_state != KGSL_STATE_SLEEP)
+		if (device->state == KGSL_STATE_ACTIVE)
 			device->pwrscale.policy->idle(device,
 					&device->pwrscale);
 }
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
index aa6861e..9b2ac70 100644
--- a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,10 @@
  * per frame for 60fps content.
  */
 #define FLOOR			5000
+/* CEILING is 50msec, larger than any standard
+ * frame length, but less than the idle timer.
+ */
+#define CEILING			50000
 #define SWITCH_OFF		200
 #define SWITCH_OFF_RESET_TH	40
 #define SKIP_COUNTER		500
@@ -163,11 +167,24 @@
 		priv->no_switch_cnt = 0;
 	}
 
-	idle = priv->bin.total_time - priv->bin.busy_time;
+	/* If there is an extended block of busy processing,
+	 * increase frequency.  Otherwise run the normal algorithm.
+	 */
+	if (priv->bin.busy_time > CEILING) {
+		val = -1;
+	} else {
+		idle = priv->bin.total_time - priv->bin.busy_time;
+		idle = (idle > 0) ? idle : 0;
+		val = __secure_tz_entry(TZ_UPDATE_ID, idle, device->id);
+	}
 	priv->bin.total_time = 0;
 	priv->bin.busy_time = 0;
-	idle = (idle > 0) ? idle : 0;
-	val = __secure_tz_entry(TZ_UPDATE_ID, idle, device->id);
+
+	/* If the decision is to move to a lower level, make sure the GPU
+	 * frequency drops.
+	 */
+	if (val > 0)
+		val *= pwr->step_mul;
 	if (val)
 		kgsl_pwrctrl_pwrlevel_change(device,
 					     pwr->active_pwrlevel + val);
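Two behaviours are added here: a busy period longer than CEILING forces an immediate step up (val = -1 moves to a lower index, i.e. a higher frequency), and a positive trustzone verdict (step down) is multiplied by pwr->step_mul so targets whose adjacent levels share a frequency still see a real drop. A standalone model of the combined decision (tz_level_delta is illustrative, not driver code):

	#define CEILING 50000	/* usec, as defined above */

	/* negative result = raise GPU frequency, positive = lower it */
	static int tz_level_delta(unsigned int busy_us, int tz_val,
				  int step_mul)
	{
		if (busy_us > CEILING)
			return -1;	/* extended busy: clock up now */
		if (tz_val > 0)
			return tz_val * step_mul; /* force a real freq drop */
		return tz_val;
	}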
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index a345e58..595f78f 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -154,9 +154,7 @@
 
 static struct mem_entry_stats mem_stats[] = {
 	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
-#ifdef CONFIG_ANDROID_PMEM
 	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
-#endif
 #ifdef CONFIG_ASHMEM
 	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
 #endif
@@ -511,21 +509,29 @@
 
 void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
 {
-	void *addr = memdesc->hostptr;
+	/*
+	 * If the buffer is mapped in the kernel, operate on that address;
+	 * otherwise use the user address
+	 */
+
+	void *addr = (memdesc->hostptr) ?
+		memdesc->hostptr : (void *) memdesc->useraddr;
+
 	int size = memdesc->size;
 
-	switch (op) {
-	case KGSL_CACHE_OP_FLUSH:
-		dmac_flush_range(addr, addr + size);
-		break;
-	case KGSL_CACHE_OP_CLEAN:
-		dmac_clean_range(addr, addr + size);
-		break;
-	case KGSL_CACHE_OP_INV:
-		dmac_inv_range(addr, addr + size);
-		break;
+	if (addr != NULL) {
+		switch (op) {
+		case KGSL_CACHE_OP_FLUSH:
+			dmac_flush_range(addr, addr + size);
+			break;
+		case KGSL_CACHE_OP_CLEAN:
+			dmac_clean_range(addr, addr + size);
+			break;
+		case KGSL_CACHE_OP_INV:
+			dmac_inv_range(addr, addr + size);
+			break;
+		}
 	}
-
 	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
 }
 EXPORT_SYMBOL(kgsl_cache_range_op);
@@ -533,7 +539,7 @@
 static int
 _kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
 			struct kgsl_pagetable *pagetable,
-			size_t size, unsigned int protflags)
+			size_t size)
 {
 	int pcount = 0, order, ret = 0;
 	int j, len, page_size, sglen_alloc, sglen = 0;
@@ -541,13 +547,15 @@
 	pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
 	void *ptr;
 	unsigned int align;
+	int step = ((VMALLOC_END - VMALLOC_START)/8) >> PAGE_SHIFT;
 
 	align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
 
 	page_size = (align >= ilog2(SZ_64K) && size >= SZ_64K)
 			? SZ_64K : PAGE_SIZE;
 	/* update align flags for what we actually use */
-	kgsl_memdesc_set_align(memdesc, ilog2(page_size));
+	if (page_size != PAGE_SIZE)
+		kgsl_memdesc_set_align(memdesc, ilog2(page_size));
 
 	/*
 	 * There needs to be enough room in the sg structure to be able to
@@ -568,11 +576,10 @@
 	memdesc->pagetable = pagetable;
 	memdesc->ops = &kgsl_page_alloc_ops;
 
-	memdesc->sg = kgsl_sg_alloc(sglen_alloc);
+	memdesc->sglen_alloc = sglen_alloc;
+	memdesc->sg = kgsl_sg_alloc(memdesc->sglen_alloc);
 
 	if (memdesc->sg == NULL) {
-		KGSL_CORE_ERR("vmalloc(%d) failed\n",
-			sglen_alloc * sizeof(struct scatterlist));
 		ret = -ENOMEM;
 		goto done;
 	}
@@ -584,26 +591,24 @@
 	 * two pages; well within the acceptable limits for using kmalloc.
 	 */
 
-	pages = kmalloc(sglen_alloc * sizeof(struct page *), GFP_KERNEL);
+	pages = kmalloc(memdesc->sglen_alloc * sizeof(struct page *),
+		GFP_KERNEL);
 
 	if (pages == NULL) {
-		KGSL_CORE_ERR("kmalloc (%d) failed\n",
-			sglen_alloc * sizeof(struct page *));
 		ret = -ENOMEM;
 		goto done;
 	}
 
 	kmemleak_not_leak(memdesc->sg);
 
-	memdesc->sglen_alloc = sglen_alloc;
-	sg_init_table(memdesc->sg, sglen_alloc);
+	sg_init_table(memdesc->sg, memdesc->sglen_alloc);
 
 	len = size;
 
 	while (len > 0) {
 		struct page *page;
 		unsigned int gfp_mask = GFP_KERNEL | __GFP_HIGHMEM |
-			__GFP_NOWARN;
+			__GFP_NOWARN | __GFP_NORETRY;
 		int j;
 
 		/* don't waste space at the end of the allocation */
@@ -667,41 +672,42 @@
 	 * zeroed and unmaped each individual page, and then we had to turn
 	 * around and call flush_dcache_page() on that page to clear the caches.
 	 * This was killing us for performance. Instead, we found it is much
-	 * faster to allocate the pages without GFP_ZERO, map the entire range,
-	 * memset it, flush the range and then unmap - this results in a factor
-	 * of 4 improvement for speed for large buffers.  There is a small
-	 * increase in speed for small buffers, but only on the order of a few
-	 * microseconds at best.  The only downside is that there needs to be
-	 * enough temporary space in vmalloc to accomodate the map. This
-	 * shouldn't be a problem, but if it happens, fall back to a much slower
-	 * path
+	 * faster to allocate the pages without GFP_ZERO, map a chunk of the
+	 * range ('step' pages), memset it, flush it and then unmap
+	 * - this results in a factor of 4 speed improvement for large
+	 * buffers. There is a small decrease in speed for small buffers,
+	 * but only on the order of a few microseconds at best. The 'step'
+	 * size is based on a guess at the amount of free vmalloc space, but
+	 * will scale down if there's not enough free space.
 	 */
+	for (j = 0; j < pcount; j += step) {
+		step = min(step, pcount - j);
 
-	ptr = vmap(pages, pcount, VM_IOREMAP, page_prot);
+		ptr = vmap(&pages[j], step, VM_IOREMAP, page_prot);
 
-	if (ptr != NULL) {
-		memset(ptr, 0, memdesc->size);
-		dmac_flush_range(ptr, ptr + memdesc->size);
-		vunmap(ptr);
-	} else {
-		/* Very, very, very slow path */
+		if (ptr != NULL) {
+			memset(ptr, 0, step * PAGE_SIZE);
+			dmac_flush_range(ptr, ptr + step * PAGE_SIZE);
+			vunmap(ptr);
+		} else {
+			int k;
+			/* Very, very, very slow path */
 
-		for (j = 0; j < pcount; j++) {
-			ptr = kmap_atomic(pages[j]);
-			memset(ptr, 0, PAGE_SIZE);
-			dmac_flush_range(ptr, ptr + PAGE_SIZE);
-			kunmap_atomic(ptr);
+			for (k = j; k < j + step; k++) {
+				ptr = kmap_atomic(pages[k]);
+				memset(ptr, 0, PAGE_SIZE);
+				dmac_flush_range(ptr, ptr + PAGE_SIZE);
+				kunmap_atomic(ptr);
+			}
+			/* scale down the step size to avoid this path */
+			if (step > 1)
+				step >>= 1;
 		}
 	}
 
 	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
 				KGSL_CACHE_OP_FLUSH);
 
-	ret = kgsl_mmu_map(pagetable, memdesc, protflags);
-
-	if (ret)
-		goto done;
-
 	KGSL_STATS_ADD(size, kgsl_driver.stats.page_alloc,
 		kgsl_driver.stats.page_alloc_max);
 
@@ -728,8 +734,7 @@
 
 	size = ALIGN(size, PAGE_SIZE * 2);
 
-	ret =  _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
-		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	ret =  _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
 	if (!ret)
 		ret = kgsl_page_alloc_map_kernel(memdesc);
 	if (ret)
@@ -743,17 +748,7 @@
 			    struct kgsl_pagetable *pagetable,
 			    size_t size)
 {
-	unsigned int protflags;
-
-	if (size == 0)
-		return -EINVAL;
-
-	protflags = GSL_PT_PAGE_RV;
-	if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
-		protflags |= GSL_PT_PAGE_WV;
-
-	return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
-		protflags);
+	return _kgsl_sharedmem_page_alloc(memdesc, pagetable, PAGE_ALIGN(size));
 }
 EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);
 
@@ -831,12 +826,6 @@
 	if (result)
 		goto err;
 
-	result = kgsl_mmu_map(pagetable, memdesc,
-		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
-
-	if (result)
-		goto err;
-
 	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
 		kgsl_driver.stats.coherent_max);
 
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 3109ef2..279490f 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
 #include "kgsl_mmu.h"
 #include <linux/slab.h>
 #include <linux/kmemleak.h>
+#include <linux/iommu.h>
 
 #include "kgsl_log.h"
 
@@ -83,6 +84,18 @@
 }
 
 /*
+ * kgsl_memdesc_get_cachemode - Get cache mode of a memdesc
+ * @memdesc: the memdesc
+ *
+ * Returns a KGSL_CACHEMODE* value.
+ */
+static inline int
+kgsl_memdesc_get_cachemode(const struct kgsl_memdesc *memdesc)
+{
+	return (memdesc->flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT;
+}
+
+/*
  * kgsl_memdesc_set_align - Set alignment flags of a memdesc
  * @memdesc - the memdesc
  * @align - alignment requested, as a power of 2 exponent.
@@ -144,7 +157,7 @@
 		unsigned int physaddr, unsigned int size)
 {
 	memdesc->sg = kgsl_sg_alloc(1);
-	if (!memdesc->sg)
+	if (memdesc->sg == NULL)
 		return -ENOMEM;
 
 	kmemleak_not_leak(memdesc->sg);
@@ -157,14 +170,98 @@
 	return 0;
 }
 
+/*
+ * kgsl_memdesc_is_global - is this a globally mapped buffer?
+ * @memdesc: the memdesc
+ *
+ * Returns nonzero if this is a global mapping, 0 otherwise
+ */
+static inline int kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
+{
+	return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
+}
+
+/*
+ * kgsl_memdesc_has_guard_page - is the last page a guard page?
+ * @memdesc - the memdesc
+ *
+ * Returns nonzero if there is a guard page, 0 otherwise
+ */
+static inline int
+kgsl_memdesc_has_guard_page(const struct kgsl_memdesc *memdesc)
+{
+	return (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE) != 0;
+}
+
+/*
+ * kgsl_memdesc_protflags - get mmu protection flags
+ * @memdesc - the memdesc
+ * Returns a mask of GSL_PT_PAGE* or IOMMU* values based
+ * on the memdesc flags.
+ */
+static inline unsigned int
+kgsl_memdesc_protflags(const struct kgsl_memdesc *memdesc)
+{
+	unsigned int protflags = 0;
+	enum kgsl_mmutype mmutype = kgsl_mmu_get_mmutype();
+
+	if (mmutype == KGSL_MMU_TYPE_GPU) {
+		protflags = GSL_PT_PAGE_RV;
+		if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
+			protflags |= GSL_PT_PAGE_WV;
+	} else if (mmutype == KGSL_MMU_TYPE_IOMMU) {
+		protflags = IOMMU_READ;
+		if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
+			protflags |= IOMMU_WRITE;
+	}
+	return protflags;
+}
+
+/*
+ * kgsl_memdesc_use_cpu_map - use the same virtual mapping on CPU and GPU?
+ * @memdesc - the memdesc
+ */
+static inline int
+kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
+{
+	return (memdesc->flags & KGSL_MEMFLAGS_USE_CPU_MAP) != 0;
+}
+
+/*
+ * kgsl_memdesc_mmapsize - get the size of the mmap region
+ * @memdesc - the memdesc
+ *
+ * The entire memdesc must be mapped. Additionally if the
+ * CPU mapping is going to be mirrored, there must be room
+ * for the guard page to be mapped so that the address spaces
+ * match up.
+ */
+static inline unsigned int
+kgsl_memdesc_mmapsize(const struct kgsl_memdesc *memdesc)
+{
+	unsigned int size = memdesc->size;
+	if (kgsl_memdesc_use_cpu_map(memdesc) &&
+		kgsl_memdesc_has_guard_page(memdesc))
+		size += SZ_4K;
+	return size;
+}
+
 static inline int
 kgsl_allocate(struct kgsl_memdesc *memdesc,
 		struct kgsl_pagetable *pagetable, size_t size)
 {
+	int ret;
+	memdesc->priv |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
 	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
 		return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
-	memdesc->flags |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
-	return kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
+
+	ret = kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
+	if (ret)
+		return ret;
+	ret = kgsl_mmu_map(pagetable, memdesc);
+	if (ret)
+		kgsl_sharedmem_free(memdesc);
+	return ret;
 }
 
 static inline int
@@ -174,6 +271,9 @@
 {
 	int ret;
 
+	if (size == 0)
+		return -EINVAL;
+
 	memdesc->flags = flags;
 
 	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index a5aa42f..4c9c744 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -106,7 +106,12 @@
 {
 	struct kgsl_snapshot_linux_context *header = _ctxtptr;
 	struct kgsl_context *context = ptr;
-	struct kgsl_device *device = context->dev_priv->device;
+	struct kgsl_device *device;
+
+	if (context)
+		device = context->dev_priv->device;
+	else
+		device = (struct kgsl_device *)data;
 
 	header->id = id;
 
@@ -141,6 +146,9 @@
 
 	idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount);
 
+	/* Increment ctxtcount for the global memstore */
+	ctxtcount++;
+
 	size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context);
 
 	/* Make sure there is enough room for the data */
@@ -169,8 +177,9 @@
 	header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]);
 	header->busclk = kgsl_get_clkrate(pwr->ebi1_clk);
 
-	/* Future proof for per-context timestamps */
-	header->current_context = -1;
+	/* Save the last active context */
+	kgsl_sharedmem_readl(&device->memstore, &header->current_context,
+		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
 
 	/* Get the current PT base */
 	header->ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
@@ -185,8 +194,10 @@
 
 	header->ctxtcount = ctxtcount;
 
-	/* append information for each context */
 	_ctxtptr = snapshot + sizeof(*header);
+	/* append information for the global context */
+	snapshot_context_info(KGSL_MEMSTORE_GLOBAL, NULL, device);
+	/* append information for each context */
 	idr_for_each(&device->context_idr, snapshot_context_info, NULL);
 
 	/* Return the size of the data segment */
@@ -283,7 +294,7 @@
 {
 	list_del(&obj->node);
 
-	obj->entry->flags &= ~KGSL_MEM_ENTRY_FROZEN;
+	obj->entry->memdesc.priv &= ~KGSL_MEMDESC_FROZEN;
 	kgsl_mem_entry_put(obj->entry);
 
 	kfree(obj);
@@ -375,8 +386,8 @@
 	/* If the buffer is already on the list, skip it */
 	list_for_each_entry(obj, &device->snapshot_obj_list, node) {
 		if (obj->gpuaddr == gpuaddr && obj->ptbase == ptbase) {
-			/* If the size is different, use the new size */
-			if (obj->size != size)
+			/* If the size is different, use the bigger size */
+			if (obj->size < size)
 				obj->size = size;
 
 			return 0;
@@ -416,10 +427,10 @@
 	 * 0 so it doesn't get counted twice
 	 */
 
-	if (entry->flags & KGSL_MEM_ENTRY_FROZEN)
+	if (entry->memdesc.priv & KGSL_MEMDESC_FROZEN)
 		return 0;
 
-	entry->flags |= KGSL_MEM_ENTRY_FROZEN;
+	entry->memdesc.priv |= KGSL_MEMDESC_FROZEN;
 
 	return entry->memdesc.size;
 }
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
index 327d18a..4db2815 100644
--- a/drivers/gpu/msm/kgsl_snapshot.h
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -52,6 +52,7 @@
 #define KGSL_SNAPSHOT_SECTION_DEBUG        0x0901
 #define KGSL_SNAPSHOT_SECTION_DEBUGBUS     0x0A01
 #define KGSL_SNAPSHOT_SECTION_GPU_OBJECT   0x0B01
+#define KGSL_SNAPSHOT_SECTION_MEMLIST      0x0E01
 
 #define KGSL_SNAPSHOT_SECTION_END          0xFFFF
 
@@ -103,6 +104,17 @@
 	int count;  /* Number of dwords in the dump */
 } __packed;
 
+/* Replay or Memory list section; both sections have the same header */
+struct kgsl_snapshot_replay_mem_list {
+	/*
+	 * Number of IBs to replay for replay section or
+	 * number of memory list entries for mem list section
+	 */
+	int num_entries;
+	/* Pagetable base to which the replay IBs or memory entries belong */
+	__u32 ptbase;
+} __packed;
+
 /* Indirect buffer sub-section header */
 struct kgsl_snapshot_ib {
 	__u32 gpuaddr; /* GPU address of the IB */
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index d9ab081..0e3e046 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -69,7 +69,6 @@
 
 struct kgsl_fence_event_priv {
 	struct kgsl_context *context;
-	unsigned int timestamp;
 };
 
 /**
@@ -86,7 +85,7 @@
 	void *priv, u32 context_id, u32 timestamp)
 {
 	struct kgsl_fence_event_priv *ev = priv;
-	kgsl_sync_timeline_signal(ev->context->timeline, ev->timestamp);
+	kgsl_sync_timeline_signal(ev->context->timeline, timestamp);
 	kgsl_context_put(ev->context);
 	kfree(ev);
 }
@@ -126,7 +125,6 @@
 	if (event == NULL)
 		return -ENOMEM;
 	event->context = context;
-	event->timestamp = timestamp;
 	kgsl_context_get(context);
 
 	pt = kgsl_sync_pt_create(context->timeline, timestamp);
@@ -181,6 +179,7 @@
 }
 
 static const struct sync_timeline_ops kgsl_sync_timeline_ops = {
+	.driver_name = "kgsl-timeline",
 	.dup = kgsl_sync_pt_dup,
 	.has_signaled = kgsl_sync_pt_has_signaled,
 	.compare = kgsl_sync_pt_compare,
@@ -207,7 +206,7 @@
 	struct kgsl_sync_timeline *ktimeline =
 		(struct kgsl_sync_timeline *) timeline;
 
-	if (timestamp_cmp(timestamp, ktimeline->last_timestamp > 0))
+	if (timestamp_cmp(timestamp, ktimeline->last_timestamp) > 0)
 		ktimeline->last_timestamp = timestamp;
 	sync_timeline_signal(timeline);
 }
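The one-character fix above moves a misplaced parenthesis: the old code passed the boolean ktimeline->last_timestamp > 0 as the second argument, so timestamps were compared against 0 or 1. For reference, a wraparound-tolerant comparator in the spirit of kgsl's timestamp_cmp() could look like this (a sketch only; the in-tree helper may use an explicit comparison window instead of a plain signed difference):

    /* Positive result means "a is newer than b", even across a u32 wrap. */
    static inline int timestamp_cmp_sketch(unsigned int a, unsigned int b)
    {
            return (int)(a - b);
    }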
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index c54445c..5f7ee3c 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -24,6 +24,8 @@
 #include <linux/tracepoint.h>
 #include "kgsl_device.h"
 
+#include "adreno_drawctxt.h"
+
 struct kgsl_device;
 struct kgsl_ringbuffer_issueibcmds;
 struct kgsl_device_waittimestamp;
@@ -34,11 +36,16 @@
 TRACE_EVENT(kgsl_issueibcmds,
 
 	TP_PROTO(struct kgsl_device *device,
-			struct kgsl_ringbuffer_issueibcmds *cmd,
+			int drawctxt_id,
 			struct kgsl_ibdesc *ibdesc,
-			int result),
+			int numibs,
+			int timestamp,
+			int flags,
+			int result,
+			unsigned int type),
 
-	TP_ARGS(device, cmd, ibdesc, result),
+	TP_ARGS(device, drawctxt_id, ibdesc, numibs, timestamp, flags,
+		result, type),
 
 	TP_STRUCT__entry(
 		__string(device_name, device->name)
@@ -48,21 +55,23 @@
 		__field(unsigned int, timestamp)
 		__field(unsigned int, flags)
 		__field(int, result)
+		__field(unsigned int, drawctxt_type)
 	),
 
 	TP_fast_assign(
 		__assign_str(device_name, device->name);
-		__entry->drawctxt_id = cmd->drawctxt_id;
+		__entry->drawctxt_id = drawctxt_id;
 		__entry->ibdesc_addr = ibdesc[0].gpuaddr;
-		__entry->numibs = cmd->numibs;
-		__entry->timestamp = cmd->timestamp;
-		__entry->flags = cmd->flags;
+		__entry->numibs = numibs;
+		__entry->timestamp = timestamp;
+		__entry->flags = flags;
 		__entry->result = result;
+		__entry->drawctxt_type = type;
 	),
 
 	TP_printk(
 		"d_name=%s ctx=%u ib=0x%u numibs=%u timestamp=0x%x "
-		"flags=0x%x(%s) result=%d",
+		"flags=0x%x(%s) result=%d type=%s",
 		__get_str(device_name),
 		__entry->drawctxt_id,
 		__entry->ibdesc_addr,
@@ -74,7 +83,9 @@
 			{ KGSL_CONTEXT_SUBMIT_IB_LIST, "IB_LIST" },
 			{ KGSL_CONTEXT_CTX_SWITCH, "CTX_SWITCH" })
 			: "None",
-		__entry->result
+		__entry->result,
+		__print_symbolic(__entry->drawctxt_type,
+			ADRENO_DRAWCTXT_TYPES)
 	)
 );
 
@@ -274,6 +285,32 @@
 	)
 );
 
+TRACE_EVENT(kgsl_gpubusy,
+	TP_PROTO(struct kgsl_device *device, unsigned int busy,
+		unsigned int elapsed),
+
+	TP_ARGS(device, busy, elapsed),
+
+	TP_STRUCT__entry(
+		__string(device_name, device->name)
+		__field(unsigned int, busy)
+		__field(unsigned int, elapsed)
+	),
+
+	TP_fast_assign(
+		__assign_str(device_name, device->name);
+		__entry->busy = busy;
+		__entry->elapsed = elapsed;
+	),
+
+	TP_printk(
+		"d_name=%s busy=%d elapsed=%d",
+		__get_str(device_name),
+		__entry->busy,
+		__entry->elapsed
+	)
+);
+
 DECLARE_EVENT_CLASS(kgsl_pwrstate_template,
 	TP_PROTO(struct kgsl_device *device, unsigned int state),
 
@@ -317,6 +354,8 @@
 		__field(unsigned int, size)
 		__field(unsigned int, tgid)
 		__array(char, usage, 16)
+		__field(unsigned int, id)
+		__field(unsigned int, flags)
 	),
 
 	TP_fast_assign(
@@ -325,12 +364,76 @@
 		__entry->tgid = mem_entry->priv->pid;
 		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
 				     mem_entry->memdesc.flags);
+		__entry->id = mem_entry->id;
+		__entry->flags = mem_entry->memdesc.flags;
 	),
 
 	TP_printk(
-		"gpuaddr=0x%08x size=%d tgid=%d usage=%s",
+		"gpuaddr=0x%08x size=%d tgid=%d usage=%s id=%d flags=0x%08x",
 		__entry->gpuaddr, __entry->size, __entry->tgid,
-		__entry->usage
+		__entry->usage, __entry->id, __entry->flags
+	)
+);
+
+TRACE_EVENT(kgsl_mem_mmap,
+
+	TP_PROTO(struct kgsl_mem_entry *mem_entry),
+
+	TP_ARGS(mem_entry),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, useraddr)
+		__field(unsigned int, gpuaddr)
+		__field(unsigned int, size)
+		__array(char, usage, 16)
+		__field(unsigned int, id)
+		__field(unsigned int, flags)
+	),
+
+	TP_fast_assign(
+		__entry->useraddr = mem_entry->memdesc.useraddr;
+		__entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+		__entry->size = mem_entry->memdesc.size;
+		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+				     mem_entry->memdesc.flags);
+		__entry->id = mem_entry->id;
+		__entry->flags = mem_entry->memdesc.flags;
+	),
+
+	TP_printk(
+		"useraddr=%lx gpuaddr=0x%08x size=%d usage=%s id=%d"
+		" flags=0x%08x",
+		__entry->useraddr, __entry->gpuaddr, __entry->size,
+		__entry->usage, __entry->id, __entry->flags
+	)
+);
+
+TRACE_EVENT(kgsl_mem_unmapped_area_collision,
+
+	TP_PROTO(struct kgsl_mem_entry *mem_entry,
+		 unsigned long hint,
+		 unsigned long len,
+		 unsigned long addr),
+
+	TP_ARGS(mem_entry, hint, len, addr),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, id)
+		__field(unsigned long, hint)
+		__field(unsigned long, len)
+		__field(unsigned long, addr)
+	),
+
+	TP_fast_assign(
+		__entry->id = mem_entry->id;
+		__entry->hint  = hint;
+		__entry->len = len;
+		__entry->addr = addr;
+	),
+
+	TP_printk(
+		"id=%d hint=0x%lx len=%ld addr=0x%lx",
+		__entry->id, __entry->hint, __entry->len, __entry->addr
 	)
 );
 
@@ -347,6 +450,7 @@
 		__field(int, type)
 		__field(unsigned int, tgid)
 		__array(char, usage, 16)
+		__field(unsigned int, id)
 	),
 
 	TP_fast_assign(
@@ -357,13 +461,14 @@
 		__entry->tgid = mem_entry->priv->pid;
 		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
 				     mem_entry->memdesc.flags);
+		__entry->id = mem_entry->id;
 	),
 
 	TP_printk(
-		"gpuaddr=0x%08x size=%d type=%d fd=%d tgid=%d usage %s",
+		"gpuaddr=0x%08x size=%d type=%d fd=%d tgid=%d usage=%s id=%d",
 		__entry->gpuaddr, __entry->size,
 		__entry->type, __entry->fd, __entry->tgid,
-		__entry->usage
+		__entry->usage, __entry->id
 	)
 );
 
@@ -380,6 +485,7 @@
 		__field(int, fd)
 		__field(unsigned int, tgid)
 		__array(char, usage, 16)
+		__field(unsigned int, id)
 	),
 
 	TP_fast_assign(
@@ -389,12 +495,47 @@
 		__entry->tgid = mem_entry->priv->pid;
 		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
 				     mem_entry->memdesc.flags);
+		__entry->id = mem_entry->id;
 	),
 
 	TP_printk(
-		"gpuaddr=0x%08x size=%d type=%d tgid=%d usage=%s",
+		"gpuaddr=0x%08x size=%d type=%d tgid=%d usage=%s id=%d",
 		__entry->gpuaddr, __entry->size, __entry->type,
-		__entry->tgid, __entry->usage
+		__entry->tgid, __entry->usage, __entry->id
+	)
+);
+
+TRACE_EVENT(kgsl_mem_sync_cache,
+
+	TP_PROTO(struct kgsl_mem_entry *mem_entry, unsigned int op),
+
+	TP_ARGS(mem_entry, op),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, gpuaddr)
+		__field(unsigned int, size)
+		__array(char, usage, 16)
+		__field(unsigned int, tgid)
+		__field(unsigned int, id)
+		__field(unsigned int, op)
+	),
+
+	TP_fast_assign(
+		__entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+		__entry->size = mem_entry->memdesc.size;
+		__entry->tgid = mem_entry->priv->pid;
+		__entry->id = mem_entry->id;
+		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+				     mem_entry->memdesc.flags);
+		__entry->op = op;
+	),
+
+	TP_printk(
+		"gpuaddr=0x%08x size=%d tgid=%d usage=%s id=%d op=%c%c",
+		__entry->gpuaddr, __entry->size, __entry->tgid, __entry->usage,
+		__entry->id,
+		(__entry->op & KGSL_GPUMEM_CACHE_CLEAN) ? 'c' : '.',
+		(__entry->op & KGSL_GPUMEM_CACHE_INV) ? 'i' : '.'
 	)
 );
 
@@ -411,6 +552,7 @@
 		__field(unsigned int, size)
 		__field(int, type)
 		__array(char, usage, 16)
+		__field(unsigned int, id)
 		__field(unsigned int, drawctxt_id)
 		__field(unsigned int, curr_ts)
 		__field(unsigned int, free_ts)
@@ -422,6 +564,7 @@
 		__entry->size = mem_entry->memdesc.size;
 		kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
 				     mem_entry->memdesc.flags);
+		__entry->id = mem_entry->id;
 		__entry->drawctxt_id = id;
 		__entry->type = mem_entry->memtype;
 		__entry->curr_ts = curr_ts;
@@ -429,13 +572,14 @@
 	),
 
 	TP_printk(
-		"d_name=%s gpuaddr=0x%08x size=%d type=%d usage=%s ctx=%u"
+		"d_name=%s gpuaddr=0x%08x size=%d type=%d usage=%s id=%d ctx=%u"
 		" curr_ts=0x%x free_ts=0x%x",
 		__get_str(device_name),
 		__entry->gpuaddr,
 		__entry->size,
 		__entry->type,
 		__entry->usage,
+		__entry->id,
 		__entry->drawctxt_id,
 		__entry->curr_ts,
 		__entry->free_ts
@@ -535,6 +679,31 @@
 	)
 );
 
+TRACE_EVENT(kgsl_regwrite,
+
+	TP_PROTO(struct kgsl_device *device, unsigned int offset,
+		unsigned int value),
+
+	TP_ARGS(device, offset, value),
+
+	TP_STRUCT__entry(
+		__string(device_name, device->name)
+		__field(unsigned int, offset)
+		__field(unsigned int, value)
+	),
+
+	TP_fast_assign(
+		__assign_str(device_name, device->name);
+		__entry->offset = offset;
+		__entry->value = value;
+	),
+
+	TP_printk(
+		"d_name=%s reg=%x value=%x",
+		__get_str(device_name), __entry->offset, __entry->value
+	)
+);
+
 TRACE_EVENT(kgsl_register_event,
 		TP_PROTO(unsigned int id, unsigned int timestamp),
 		TP_ARGS(id, timestamp),
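Each TRACE_EVENT() above generates a trace_<name>() stub that drivers call at the instrumented spot. A hypothetical call site for the new kgsl_regwrite event (the helper and its reg_virt field are assumptions; only the tracepoint arguments come from this patch):

    static void kgsl_regwrite_sketch(struct kgsl_device *device,
                                     unsigned int offsetwords,
                                     unsigned int value)
    {
            trace_kgsl_regwrite(device, offsetwords, value);
            writel_relaxed(value, device->reg_virt + (offsetwords << 2));
    }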
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index 484630d..a07959b 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
 #include "kgsl.h"
 #include "kgsl_cffdump.h"
 #include "kgsl_sharedmem.h"
+#include "kgsl_trace.h"
 
 #include "z180.h"
 #include "z180_reg.h"
@@ -93,7 +94,8 @@
 #define Z180_CMDWINDOW_TARGET_SHIFT		0
 #define Z180_CMDWINDOW_ADDR_SHIFT		8
 
-static int z180_start(struct kgsl_device *device, unsigned int init_ram);
+static int z180_init(struct kgsl_device *device);
+static int z180_start(struct kgsl_device *device);
 static int z180_stop(struct kgsl_device *device);
 static int z180_wait(struct kgsl_device *device,
 				struct kgsl_context *context,
@@ -212,10 +214,6 @@
 
 			queue_work(device->work_queue, &device->ts_expired_ws);
 			wake_up_interruptible(&device->wait_queue);
-
-			atomic_notifier_call_chain(
-				&(device->ts_notifier_list),
-				device->id, NULL);
 		}
 	}
 
@@ -248,22 +246,26 @@
 	int result = 0;
 	struct z180_device *z180_dev = Z180_DEVICE(device);
 
-	result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory,
-				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory);
 
 	if (result)
 		goto error;
 
-	result = kgsl_mmu_map_global(pagetable, &device->memstore,
-				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	result = kgsl_mmu_map_global(pagetable, &device->memstore);
 	if (result)
 		goto error_unmap_dummy;
 
 	result = kgsl_mmu_map_global(pagetable,
-				     &z180_dev->ringbuffer.cmdbufdesc,
-				     GSL_PT_PAGE_RV);
+				     &z180_dev->ringbuffer.cmdbufdesc);
 	if (result)
 		goto error_unmap_memstore;
+	/*
+	 * Set the mpu end to the last "normal" global memory we use.
+	 * For the IOMMU, this will be used to restrict access to the
+	 * mapped registers.
+	 */
+	device->mh.mpu_range = z180_dev->ringbuffer.cmdbufdesc.gpuaddr +
+				z180_dev->ringbuffer.cmdbufdesc.size;
 	return result;
 
 error_unmap_dummy:
@@ -319,16 +321,11 @@
 	*p++ = ADDR_VGV3_LAST << 24;
 }
 
-static void z180_cmdstream_start(struct kgsl_device *device, int init_ram)
+static void z180_cmdstream_start(struct kgsl_device *device)
 {
 	struct z180_device *z180_dev = Z180_DEVICE(device);
 	unsigned int cmd = VGV3_NEXTCMD_JUMP << VGV3_NEXTCMD_NEXTCMD_FSHIFT;
 
-	if (init_ram) {
-		z180_dev->timestamp = 0;
-		z180_dev->current_timestamp = 0;
-	}
-
 	addmarker(&z180_dev->ringbuffer, 0);
 
 	z180_cmdwindow_write(device, ADDR_VGV3_MODE, 4);
@@ -487,6 +484,10 @@
 	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
 	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
 error:
+
+	trace_kgsl_issueibcmds(device, context->id, ibdesc, numibs,
+		*timestamp, ctrl, result, 0);
+
 	return (int)result;
 }
 
@@ -495,6 +496,7 @@
 	struct z180_device *z180_dev = Z180_DEVICE(device);
 	memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
 	z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+	z180_dev->ringbuffer.cmdbufdesc.flags = KGSL_MEMFLAGS_GPUREADONLY;
 	return kgsl_allocate_contiguous(&z180_dev->ringbuffer.cmdbufdesc,
 		Z180_RB_SIZE);
 }
@@ -551,7 +553,17 @@
 	return 0;
 }
 
-static int z180_start(struct kgsl_device *device, unsigned int init_ram)
+static int z180_init(struct kgsl_device *device)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	z180_dev->timestamp = 0;
+	z180_dev->current_timestamp = 0;
+
+	return 0;
+}
+
+static int z180_start(struct kgsl_device *device)
 {
 	int status = 0;
 
@@ -568,11 +580,14 @@
 	if (status)
 		goto error_clk_off;
 
-	z180_cmdstream_start(device, init_ram);
+	z180_cmdstream_start(device);
 
 	mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
 	device->ftbl->irqctrl(device, 1);
+
+	device->reset_counter++;
+
 	return 0;
 
 error_clk_off:
@@ -811,9 +826,9 @@
 {
 	int status = -EINVAL;
 
-	/* Don't wait forever, set a max (10 sec) value for now */
+	/* Don't wait forever, set a max of Z180_IDLE_TIMEOUT */
 	if (msecs == -1)
-		msecs = 10 * MSEC_PER_SEC;
+		msecs = Z180_IDLE_TIMEOUT;
 
 	mutex_unlock(&device->mutex);
 	status = z180_wait(device, context, timestamp, msecs);
@@ -915,6 +930,7 @@
 	.idle = z180_idle,
 	.isidle = z180_isidle,
 	.suspend_context = z180_suspend_context,
+	.init = z180_init,
 	.start = z180_start,
 	.stop = z180_stop,
 	.getproperty = z180_getproperty,
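The init/start split above moves one-time state (the timestamp counters) out of the per-power-up path, and the new reset_counter survives restarts. The core-side calling order implied by the new ftbl hook would be roughly (a sketch; everything but the ftbl members shown above is assumed):

    /* First open: one-time setup, then every power-up goes through start. */
    status = device->ftbl->init(device);
    if (!status)
            status = device->ftbl->start(device);
    /* A later stop/start cycle no longer zeroes z180_dev->timestamp. */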
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
index 268aac3..1be0870 100644
--- a/drivers/gpu/msm/z180.h
+++ b/drivers/gpu/msm/z180.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -29,7 +29,7 @@
 #define Z180_DEFAULT_PWRSCALE_POLICY  NULL
 
-/* Wait a maximum of 10 seconds when trying to idle the core */
-#define Z180_IDLE_TIMEOUT (10 * 1000)
+/* Wait a maximum of 20 seconds when trying to idle the core */
+#define Z180_IDLE_TIMEOUT (20 * 1000)
 
 struct z180_ringbuffer {
 	unsigned int prevctx;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b1befa0..330c850 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -16,7 +16,7 @@
 # MSM IOMMU support
 config MSM_IOMMU
 	bool "MSM IOMMU Support"
-	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSM8974
+	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_APQ8064 || ARCH_MSM8974 || ARCH_MPQ8092 || ARCH_MSM8610 || ARCH_MSM8226 || ARCH_MSMZINC
 	select IOMMU_API
 	help
 	  Support for the IOMMUs found on certain Qualcomm SOCs.
@@ -36,6 +36,16 @@
 
 	  If unsure, say N here.
 
+config MSM_IOMMU_PMON
+	bool "MSM IOMMU Perfomance Monitoring Support"
+	depends on (ARCH_MSM8974 || ARCH_MSM8610 || ARCH_MSM8226) && MSM_IOMMU
+	help
+	  Support for monitoring the performance of IOMMUs on certain
+	  Qualcomm SOCs. It captures TLB statistics per context bank of
+	  the IOMMU as an indication of its performance.
+
+	  If unsure, say N here.
+
 config IOMMU_PGTABLES_L2
 	bool "Allow SMMU page tables in the L2 cache (Experimental)"
 	depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 066bc3e..096b53e 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,8 +1,9 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
-obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu-v0.o msm_iommu_dev-v0.o
 ifdef CONFIG_OF
-obj-$(CONFIG_MSM_IOMMU) += msm_iommu-v2.o msm_iommu_dev-v2.o msm_iommu_pagetable.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu-v1.o msm_iommu_dev-v1.o msm_iommu_pagetable.o msm_iommu_sec.o
 endif
+obj-$(CONFIG_MSM_IOMMU_PMON) += msm_iommu_perfmon.o msm_iommu_perfmon-v0.o msm_iommu_perfmon-v1.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a5bee8e..32c00cd 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3193,26 +3193,6 @@
 	return 0;
 }
 
-static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
-{
-	struct iommu_dev_data *dev_data = dev->archdata.iommu;
-	struct pci_dev *pdev = to_pci_dev(dev);
-	u16 devid;
-
-	if (!dev_data)
-		return -ENODEV;
-
-	if (pdev->is_virtfn || !iommu_group_mf)
-		devid = dev_data->devid;
-	else
-		devid = calc_devid(pdev->bus->number,
-				   PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
-
-	*groupid = amd_iommu_alias_table[devid];
-
-	return 0;
-}
-
 static struct iommu_ops amd_iommu_ops = {
 	.domain_init = amd_iommu_domain_init,
 	.domain_destroy = amd_iommu_domain_destroy,
@@ -3222,7 +3202,6 @@
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
-	.device_group = amd_iommu_device_group,
 	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f93d5ac..d4a0ff7 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4087,54 +4087,6 @@
 	return 0;
 }
 
-/*
- * Group numbers are arbitrary.  Device with the same group number
- * indicate the iommu cannot differentiate between them.  To avoid
- * tracking used groups we just use the seg|bus|devfn of the lowest
- * level we're able to differentiate devices
- */
-static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct pci_dev *bridge;
-	union {
-		struct {
-			u8 devfn;
-			u8 bus;
-			u16 segment;
-		} pci;
-		u32 group;
-	} id;
-
-	if (iommu_no_mapping(dev))
-		return -ENODEV;
-
-	id.pci.segment = pci_domain_nr(pdev->bus);
-	id.pci.bus = pdev->bus->number;
-	id.pci.devfn = pdev->devfn;
-
-	if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
-		return -ENODEV;
-
-	bridge = pci_find_upstream_pcie_bridge(pdev);
-	if (bridge) {
-		if (pci_is_pcie(bridge)) {
-			id.pci.bus = bridge->subordinate->number;
-			id.pci.devfn = 0;
-		} else {
-			id.pci.bus = bridge->bus->number;
-			id.pci.devfn = bridge->devfn;
-		}
-	}
-
-	if (!pdev->is_virtfn && iommu_group_mf)
-		id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
-
-	*groupid = id.group;
-
-	return 0;
-}
-
 static struct iommu_ops intel_iommu_ops = {
 	.domain_init	= intel_iommu_domain_init,
 	.domain_destroy = intel_iommu_domain_destroy,
@@ -4144,7 +4096,6 @@
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap = intel_iommu_domain_has_cap,
-	.device_group	= intel_iommu_device_group,
 	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index ef69d91..a6eae7e 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -27,60 +27,571 @@
 #include <linux/errno.h>
 #include <linux/iommu.h>
 #include <linux/scatterlist.h>
+#include <linux/idr.h>
+#include <linux/notifier.h>
+#include <linux/err.h>
 
-static ssize_t show_iommu_group(struct device *dev,
-				struct device_attribute *attr, char *buf)
+static struct kset *iommu_group_kset;
+static struct idr iommu_group_idr;
+static struct mutex iommu_group_mutex;
+
+struct iommu_group {
+	struct kobject kobj;
+	struct kobject *devices_kobj;
+	struct list_head devices;
+	struct mutex mutex;
+	struct blocking_notifier_head notifier;
+	void *iommu_data;
+	void (*iommu_data_release)(void *iommu_data);
+	char *name;
+	int id;
+};
+
+struct iommu_device {
+	struct list_head list;
+	struct device *dev;
+	char *name;
+};
+
+struct iommu_group_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct iommu_group *group, char *buf);
+	ssize_t (*store)(struct iommu_group *group,
+			 const char *buf, size_t count);
+};
+
+#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
+struct iommu_group_attribute iommu_group_attr_##_name =		\
+	__ATTR(_name, _mode, _show, _store)
+
+#define to_iommu_group_attr(_attr)	\
+	container_of(_attr, struct iommu_group_attribute, attr)
+#define to_iommu_group(_kobj)		\
+	container_of(_kobj, struct iommu_group, kobj)
+
+static ssize_t iommu_group_attr_show(struct kobject *kobj,
+				     struct attribute *__attr, char *buf)
 {
-	unsigned int groupid;
+	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+	struct iommu_group *group = to_iommu_group(kobj);
+	ssize_t ret = -EIO;
 
-	if (iommu_device_group(dev, &groupid))
-		return 0;
-
-	return sprintf(buf, "%u", groupid);
+	if (attr->show)
+		ret = attr->show(group, buf);
+	return ret;
 }
-static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+
+static ssize_t iommu_group_attr_store(struct kobject *kobj,
+				      struct attribute *__attr,
+				      const char *buf, size_t count)
+{
+	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+	struct iommu_group *group = to_iommu_group(kobj);
+	ssize_t ret = -EIO;
+
+	if (attr->store)
+		ret = attr->store(group, buf, count);
+	return ret;
+}
+
+static const struct sysfs_ops iommu_group_sysfs_ops = {
+	.show = iommu_group_attr_show,
+	.store = iommu_group_attr_store,
+};
+
+static int iommu_group_create_file(struct iommu_group *group,
+				   struct iommu_group_attribute *attr)
+{
+	return sysfs_create_file(&group->kobj, &attr->attr);
+}
+
+static void iommu_group_remove_file(struct iommu_group *group,
+				    struct iommu_group_attribute *attr)
+{
+	sysfs_remove_file(&group->kobj, &attr->attr);
+}
+
+static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
+{
+	return sprintf(buf, "%s\n", group->name);
+}
+
+static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+
+static void iommu_group_release(struct kobject *kobj)
+{
+	struct iommu_group *group = to_iommu_group(kobj);
+
+	if (group->iommu_data_release)
+		group->iommu_data_release(group->iommu_data);
+
+	mutex_lock(&iommu_group_mutex);
+	idr_remove(&iommu_group_idr, group->id);
+	mutex_unlock(&iommu_group_mutex);
+
+	kfree(group->name);
+	kfree(group);
+}
+
+static struct kobj_type iommu_group_ktype = {
+	.sysfs_ops = &iommu_group_sysfs_ops,
+	.release = iommu_group_release,
+};
+
+/**
+ * iommu_group_alloc - Allocate a new group
+ *
+ * This function is called by an iommu driver to allocate a new iommu
+ * group.  The iommu group represents the minimum granularity of the iommu.
+ * Upon successful return, the caller holds a reference to the supplied
+ * group in order to hold the group until devices are added.  Use
+ * iommu_group_put() to release this extra reference count, allowing the
+ * group to be automatically reclaimed once it has no devices or external
+ * references.
+ */
+struct iommu_group *iommu_group_alloc(void)
+{
+	struct iommu_group *group;
+	int ret;
+
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
+	if (!group)
+		return ERR_PTR(-ENOMEM);
+
+	group->kobj.kset = iommu_group_kset;
+	mutex_init(&group->mutex);
+	INIT_LIST_HEAD(&group->devices);
+	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
+
+	mutex_lock(&iommu_group_mutex);
+
+again:
+	if (unlikely(0 == idr_pre_get(&iommu_group_idr, GFP_KERNEL))) {
+		kfree(group);
+		mutex_unlock(&iommu_group_mutex);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = idr_get_new_above(&iommu_group_idr, group, 1, &group->id);
+	if (ret == -EAGAIN)
+		goto again;
+	mutex_unlock(&iommu_group_mutex);
+
+	if (ret == -ENOSPC) {
+		kfree(group);
+		return ERR_PTR(ret);
+	}
+
+	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
+				   NULL, "%d", group->id);
+	if (ret) {
+		mutex_lock(&iommu_group_mutex);
+		idr_remove(&iommu_group_idr, group->id);
+		mutex_unlock(&iommu_group_mutex);
+		kfree(group);
+		return ERR_PTR(ret);
+	}
+
+	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
+	if (!group->devices_kobj) {
+		kobject_put(&group->kobj); /* triggers .release & free */
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * The devices_kobj holds a reference on the group kobject, so
+	 * as long as that exists so will the group.  We can therefore
+	 * use the devices_kobj for reference counting.
+	 */
+	kobject_put(&group->kobj);
+
+	return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_alloc);
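A usage sketch for an iommu driver, following the reference-count contract documented above (the group name and error handling are illustrative):

    struct iommu_group *grp = iommu_group_alloc();

    if (IS_ERR(grp))
            return PTR_ERR(grp);
    iommu_group_set_name(grp, "example-group"); /* optional sysfs name */
    ret = iommu_group_add_device(grp, dev);     /* device takes a reference */
    iommu_group_put(grp);                       /* drop the alloc reference */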
+
+/**
+ * iommu_group_get_iommudata - retrieve iommu_data registered for a group
+ * @group: the group
+ *
+ * iommu drivers can store data in the group for use when doing iommu
+ * operations.  This function provides a way to retrieve it.  Caller
+ * should hold a group reference.
+ */
+void *iommu_group_get_iommudata(struct iommu_group *group)
+{
+	return group->iommu_data;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
+
+/**
+ * iommu_group_set_iommudata - set iommu_data for a group
+ * @group: the group
+ * @iommu_data: new data
+ * @release: release function for iommu_data
+ *
+ * iommu drivers can store data in the group for use when doing iommu
+ * operations.  This function provides a way to set the data after
+ * the group has been allocated.  Caller should hold a group reference.
+ */
+void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
+			       void (*release)(void *iommu_data))
+{
+	group->iommu_data = iommu_data;
+	group->iommu_data_release = release;
+}
+EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
+
+/**
+ * iommu_group_set_name - set name for a group
+ * @group: the group
+ * @name: name
+ *
+ * Allow iommu driver to set a name for a group.  When set it will
+ * appear in a name attribute file under the group in sysfs.
+ */
+int iommu_group_set_name(struct iommu_group *group, const char *name)
+{
+	int ret;
+
+	if (group->name) {
+		iommu_group_remove_file(group, &iommu_group_attr_name);
+		kfree(group->name);
+		group->name = NULL;
+		if (!name)
+			return 0;
+	}
+
+	group->name = kstrdup(name, GFP_KERNEL);
+	if (!group->name)
+		return -ENOMEM;
+
+	ret = iommu_group_create_file(group, &iommu_group_attr_name);
+	if (ret) {
+		kfree(group->name);
+		group->name = NULL;
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_group_set_name);
+
+/**
+ * iommu_group_add_device - add a device to an iommu group
+ * @group: the group into which to add the device (reference should be held)
+ * @dev: the device
+ *
+ * This function is called by an iommu driver to add a device into a
+ * group.  Adding a device increments the group reference count.
+ */
+int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+{
+	int ret, i = 0;
+	struct iommu_device *device;
+
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device)
+		return -ENOMEM;
+
+	device->dev = dev;
+
+	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
+	if (ret) {
+		kfree(device);
+		return ret;
+	}
+
+	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
+rename:
+	if (!device->name) {
+		sysfs_remove_link(&dev->kobj, "iommu_group");
+		kfree(device);
+		return -ENOMEM;
+	}
+
+	ret = sysfs_create_link_nowarn(group->devices_kobj,
+				       &dev->kobj, device->name);
+	if (ret) {
+		kfree(device->name);
+		if (ret == -EEXIST && i >= 0) {
+			/*
+			 * Account for the slim chance of collision
+			 * and append an instance to the name.
+			 */
+			device->name = kasprintf(GFP_KERNEL, "%s.%d",
+						 kobject_name(&dev->kobj), i++);
+			goto rename;
+		}
+
+		sysfs_remove_link(&dev->kobj, "iommu_group");
+		kfree(device);
+		return ret;
+	}
+
+	kobject_get(group->devices_kobj);
+
+	dev->iommu_group = group;
+
+	mutex_lock(&group->mutex);
+	list_add_tail(&device->list, &group->devices);
+	mutex_unlock(&group->mutex);
+
+	/* Notify any listeners about change to group. */
+	blocking_notifier_call_chain(&group->notifier,
+				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_group_add_device);
+
+/**
+ * iommu_group_remove_device - remove a device from its current group
+ * @dev: device to be removed
+ *
+ * This function is called by an iommu driver to remove the device from
+ * it's current group.  This decrements the iommu group reference count.
+ */
+void iommu_group_remove_device(struct device *dev)
+{
+	struct iommu_group *group = dev->iommu_group;
+	struct iommu_device *tmp_device, *device = NULL;
+
+	/* Pre-notify listeners that a device is being removed. */
+	blocking_notifier_call_chain(&group->notifier,
+				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
+
+	mutex_lock(&group->mutex);
+	list_for_each_entry(tmp_device, &group->devices, list) {
+		if (tmp_device->dev == dev) {
+			device = tmp_device;
+			list_del(&device->list);
+			break;
+		}
+	}
+	mutex_unlock(&group->mutex);
+
+	if (!device)
+		return;
+
+	sysfs_remove_link(group->devices_kobj, device->name);
+	sysfs_remove_link(&dev->kobj, "iommu_group");
+
+	kfree(device->name);
+	kfree(device);
+	dev->iommu_group = NULL;
+	kobject_put(group->devices_kobj);
+}
+EXPORT_SYMBOL_GPL(iommu_group_remove_device);
+
+/**
+ * iommu_group_for_each_dev - iterate over each device in the group
+ * @group: the group
+ * @data: caller opaque data to be passed to callback function
+ * @fn: caller supplied callback function
+ *
+ * This function is called by group users to iterate over group devices.
+ * Callers should hold a reference count to the group during callback.
+ * The group->mutex is held across callbacks, which will block calls to
+ * iommu_group_add/remove_device.
+ */
+int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+			     int (*fn)(struct device *, void *))
+{
+	struct iommu_device *device;
+	int ret = 0;
+
+	mutex_lock(&group->mutex);
+	list_for_each_entry(device, &group->devices, list) {
+		ret = fn(device->dev, data);
+		if (ret)
+			break;
+	}
+	mutex_unlock(&group->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
+
+/**
+ * iommu_group_get - Return the group for a device and increment reference
+ * @dev: get the group that this device belongs to
+ *
+ * This function is called by iommu drivers and users to get the group
+ * for the specified device.  If found, the group is returned and the group
+ * reference is incremented, else NULL is returned.
+ */
+struct iommu_group *iommu_group_get(struct device *dev)
+{
+	struct iommu_group *group = dev->iommu_group;
+
+	if (group)
+		kobject_get(group->devices_kobj);
+
+	return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get);
+
+/**
+ * iommu_group_find - Find and return the group based on the group name.
+ * Also increment the reference count.
+ * @name: the name of the group
+ *
+ * This function is called by iommu drivers and clients to get the group
+ * by the specified name.  If found, the group is returned and the group
+ * reference is incremented, else NULL is returned.
+ */
+struct iommu_group *iommu_group_find(const char *name)
+{
+	struct iommu_group *group;
+	int next = 0;
+
+	mutex_lock(&iommu_group_mutex);
+	while ((group = idr_get_next(&iommu_group_idr, &next))) {
+		if (group->name) {
+			if (strcmp(group->name, name) == 0)
+				break;
+		}
+		++next;
+	}
+	mutex_unlock(&iommu_group_mutex);
+
+	if (group)
+		kobject_get(group->devices_kobj);
+
+	return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_find);
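A sketch of looking up a named group and balancing the reference taken by the lookup (the group name is illustrative):

    struct iommu_group *grp = iommu_group_find("example-group");

    if (grp) {
            /* ... attach a domain, query the id, etc. ... */
            iommu_group_put(grp);
    }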
+
+/**
+ * iommu_group_put - Decrement group reference
+ * @group: the group to use
+ *
+ * This function is called by iommu drivers and users to release the
+ * iommu group.  Once the reference count is zero, the group is released.
+ */
+void iommu_group_put(struct iommu_group *group)
+{
+	if (group)
+		kobject_put(group->devices_kobj);
+}
+EXPORT_SYMBOL_GPL(iommu_group_put);
+
+/**
+ * iommu_group_register_notifier - Register a notifier for group changes
+ * @group: the group to watch
+ * @nb: notifier block to signal
+ *
+ * This function allows iommu group users to track changes in a group.
+ * See include/linux/iommu.h for actions sent via this notifier.  Caller
+ * should hold a reference to the group throughout notifier registration.
+ */
+int iommu_group_register_notifier(struct iommu_group *group,
+				  struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&group->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
+
+/**
+ * iommu_group_unregister_notifier - Unregister a notifier
+ * @group: the group to watch
+ * @nb: notifier block to signal
+ *
+ * Unregister a previously registered group notifier block.
+ */
+int iommu_group_unregister_notifier(struct iommu_group *group,
+				    struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&group->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
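A sketch of a group watcher built on the notifier pair above; IOMMU_GROUP_NOTIFY_ADD_DEVICE is the action posted by iommu_group_add_device() earlier in this file, the rest is illustrative:

    static int example_group_notify(struct notifier_block *nb,
                                    unsigned long action, void *data)
    {
            struct device *dev = data;

            if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
                    dev_info(dev, "joined iommu group\n");
            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_group_notify,
    };

    /* caller holds a group reference across registration */
    iommu_group_register_notifier(group, &example_nb);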
+
+/**
+ * iommu_group_id - Return ID for a group
+ * @group: the group to ID
+ *
+ * Return the unique ID for the group matching the sysfs group number.
+ */
+int iommu_group_id(struct iommu_group *group)
+{
+	return group->id;
+}
+EXPORT_SYMBOL_GPL(iommu_group_id);
 
 static int add_iommu_group(struct device *dev, void *data)
 {
-	unsigned int groupid;
+	struct iommu_ops *ops = data;
 
-	if (iommu_device_group(dev, &groupid) == 0)
-		return device_create_file(dev, &dev_attr_iommu_group);
+	if (!ops->add_device)
+		return -ENODEV;
+
+	WARN_ON(dev->iommu_group);
+
+	ops->add_device(dev);
 
 	return 0;
 }
 
-static int remove_iommu_group(struct device *dev)
-{
-	unsigned int groupid;
-
-	if (iommu_device_group(dev, &groupid) == 0)
-		device_remove_file(dev, &dev_attr_iommu_group);
-
-	return 0;
-}
-
-static int iommu_device_notifier(struct notifier_block *nb,
-				 unsigned long action, void *data)
+static int iommu_bus_notifier(struct notifier_block *nb,
+			      unsigned long action, void *data)
 {
 	struct device *dev = data;
+	struct iommu_ops *ops = dev->bus->iommu_ops;
+	struct iommu_group *group;
+	unsigned long group_action = 0;
 
-	if (action == BUS_NOTIFY_ADD_DEVICE)
-		return add_iommu_group(dev, NULL);
-	else if (action == BUS_NOTIFY_DEL_DEVICE)
-		return remove_iommu_group(dev);
+	/*
+	 * ADD/DEL call into iommu driver ops if provided, which may
+	 * result in ADD/DEL notifiers to group->notifier
+	 */
+	if (action == BUS_NOTIFY_ADD_DEVICE) {
+		if (ops->add_device)
+			return ops->add_device(dev);
+	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
+		if (ops->remove_device && dev->iommu_group) {
+			ops->remove_device(dev);
+			return 0;
+		}
+	}
 
+	/*
+	 * Remaining BUS_NOTIFYs get filtered and republished to the
+	 * group, if anyone is listening
+	 */
+	group = iommu_group_get(dev);
+	if (!group)
+		return 0;
+
+	switch (action) {
+	case BUS_NOTIFY_BIND_DRIVER:
+		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
+		break;
+	case BUS_NOTIFY_BOUND_DRIVER:
+		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
+		break;
+	case BUS_NOTIFY_UNBIND_DRIVER:
+		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
+		break;
+	case BUS_NOTIFY_UNBOUND_DRIVER:
+		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
+		break;
+	}
+
+	if (group_action)
+		blocking_notifier_call_chain(&group->notifier,
+					     group_action, dev);
+
+	iommu_group_put(group);
 	return 0;
 }
 
-static struct notifier_block iommu_device_nb = {
-	.notifier_call = iommu_device_notifier,
+static struct notifier_block iommu_bus_nb = {
+	.notifier_call = iommu_bus_notifier,
 };
 
 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
 {
-	bus_register_notifier(bus, &iommu_device_nb);
-	bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
+	bus_register_notifier(bus, &iommu_bus_nb);
+	bus_for_each_dev(bus, NULL, ops, add_iommu_group);
 }
 
 /**
@@ -120,6 +631,7 @@
  * iommu_set_fault_handler() - set a fault handler for an iommu domain
  * @domain: iommu domain
  * @handler: fault handler
+ * @token: user data, will be passed back to the fault handler
  *
  * This function should be used by IOMMU users which want to be notified
  * whenever an IOMMU fault happens.
@@ -128,11 +640,13 @@
  * error code otherwise.
  */
 void iommu_set_fault_handler(struct iommu_domain *domain,
-					iommu_fault_handler_t handler)
+					iommu_fault_handler_t handler,
+					void *token)
 {
 	BUG_ON(!domain);
 
 	domain->handler = handler;
+	domain->handler_token = token;
 }
 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
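With the new token argument a handler can recover its private state without globals. A sketch (struct my_ctx and the body are assumptions; the trailing void *token is what this change adds to the handler signature):

    static int example_fault(struct iommu_domain *domain, struct device *dev,
                             unsigned long iova, int flags, void *token)
    {
            struct my_ctx *ctx = token;   /* whatever was passed at setup */

            dev_err(dev, "unhandled iommu fault at %#lx (ctx %p)\n", iova, ctx);
            return -ENOSYS;               /* let the driver take its default action */
    }

    iommu_set_fault_handler(domain, example_fault, my_ctx_ptr);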
 
@@ -190,6 +704,45 @@
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
+/*
+ * IOMMU groups are really the natural working unit of the IOMMU, but
+ * the IOMMU API works on domains and devices.  Bridge that gap by
+ * iterating over the devices in a group.  Ideally we'd have a single
+ * device which represents the requestor ID of the group, but we also
+ * allow IOMMU drivers to create policy defined minimum sets, where
+ * the physical hardware may be able to distiguish members, but we
+ * wish to group them at a higher level (ex. untrusted multi-function
+ * PCI devices).  Thus we attach each device.
+ */
+static int iommu_group_do_attach_device(struct device *dev, void *data)
+{
+	struct iommu_domain *domain = data;
+
+	return iommu_attach_device(domain, dev);
+}
+
+int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
+{
+	return iommu_group_for_each_dev(group, domain,
+					iommu_group_do_attach_device);
+}
+EXPORT_SYMBOL_GPL(iommu_attach_group);
+
+static int iommu_group_do_detach_device(struct device *dev, void *data)
+{
+	struct iommu_domain *domain = data;
+
+	iommu_detach_device(domain, dev);
+
+	return 0;
+}
+
+void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
+{
+	iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
+}
+EXPORT_SYMBOL_GPL(iommu_detach_group);
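Since group attach just fans out to per-device attach, clients can now work at group granularity. A sketch (iommu_domain_alloc() is the existing domain constructor; the bus pointer and the usage in the middle are assumptions):

    struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
    struct iommu_group *group = iommu_group_get(dev);

    if (domain && group && !iommu_attach_group(domain, group)) {
            /* ... iommu_map() and use the domain ... */
            iommu_detach_group(domain, group);
    }
    iommu_group_put(group);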
+
 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 			       unsigned long iova)
 {
@@ -367,11 +920,15 @@
 }
 EXPORT_SYMBOL_GPL(iommu_get_pt_base_addr);
 
-int iommu_device_group(struct device *dev, unsigned int *groupid)
+static int __init iommu_init(void)
 {
-	if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
-		return dev->bus->iommu_ops->device_group(dev, groupid);
+	iommu_group_kset = kset_create_and_add("iommu_groups",
+					       NULL, kernel_kobj);
+	idr_init(&iommu_group_idr);
+	mutex_init(&iommu_group_mutex);
 
-	return -ENODEV;
+	BUG_ON(!iommu_group_kset);
+
+	return 0;
 }
-EXPORT_SYMBOL_GPL(iommu_device_group);
+subsys_initcall(iommu_init);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu-v0.c
similarity index 82%
rename from drivers/iommu/msm_iommu.c
rename to drivers/iommu/msm_iommu-v0.c
index 05bb4a9..c0a4720 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu-v0.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -27,7 +27,9 @@
 #include <asm/cacheflush.h>
 #include <asm/sizes.h>
 
-#include <mach/iommu_hw-8xxx.h>
+#include <mach/iommu_perfmon.h>
+#include <mach/iommu_hw-v0.h>
+#include <mach/msm_iommu_priv.h>
 #include <mach/iommu.h>
 #include <mach/msm_smsm.h>
 
@@ -49,6 +51,9 @@
 #define MSM_IOMMU_ATTR_CACHED_WB_NWA	0x2
 #define MSM_IOMMU_ATTR_CACHED_WT	0x3
 
+struct bus_type msm_iommu_sec_bus_type = {
+	.name = "msm_iommu_sec_bus",
+};
 
 static inline void clean_pte(unsigned long *start, unsigned long *end,
 			     int redirect)
@@ -127,12 +132,6 @@
 	return msm_iommu_remote_lock.lock;
 }
 
-struct msm_priv {
-	unsigned long *pgtable;
-	int redirect;
-	struct list_head list_attached;
-};
-
 static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
 {
 	int ret;
@@ -157,9 +156,40 @@
 	clk_disable_unprepare(drvdata->pclk);
 }
 
+static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+	/* No need to do anything. IOMMUv0 is always on. */
+	return 0;
+}
+
+static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+	/* No need to do anything. IOMMUv0 is always on. */
+}
+
+static void _iommu_lock_acquire(void)
+{
+	msm_iommu_lock();
+}
+
+static void _iommu_lock_release(void)
+{
+	msm_iommu_unlock();
+}
+
+struct iommu_access_ops iommu_access_ops_v0 = {
+	.iommu_power_on = __enable_regulators,
+	.iommu_power_off = __disable_regulators,
+	.iommu_clk_on = __enable_clocks,
+	.iommu_clk_off = __disable_clocks,
+	.iommu_lock_acquire = _iommu_lock_acquire,
+	.iommu_lock_release = _iommu_lock_release,
+};
+EXPORT_SYMBOL(iommu_access_ops_v0);
+
 static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
 {
-	struct msm_priv *priv = domain->priv;
+	struct msm_iommu_priv *priv = domain->priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret = 0;
@@ -196,7 +226,7 @@
 
 static int __flush_iotlb(struct iommu_domain *domain)
 {
-	struct msm_priv *priv = domain->priv;
+	struct msm_iommu_priv *priv = domain->priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret = 0;
@@ -230,13 +260,13 @@
 	return ret;
 }
 
-static void __reset_context(void __iomem *base, int ctx)
+static void __reset_context(void __iomem *base, void __iomem *glb_base, int ctx)
 {
-	SET_BPRCOSH(base, ctx, 0);
-	SET_BPRCISH(base, ctx, 0);
-	SET_BPRCNSH(base, ctx, 0);
-	SET_BPSHCFG(base, ctx, 0);
-	SET_BPMTCFG(base, ctx, 0);
+	SET_BPRCOSH(glb_base, ctx, 0);
+	SET_BPRCISH(glb_base, ctx, 0);
+	SET_BPRCNSH(glb_base, ctx, 0);
+	SET_BPSHCFG(glb_base, ctx, 0);
+	SET_BPMTCFG(glb_base, ctx, 0);
 	SET_ACTLR(base, ctx, 0);
 	SET_SCTLR(base, ctx, 0);
 	SET_FSRRESTORE(base, ctx, 0);
@@ -254,16 +284,15 @@
 	mb();
 }
 
-static void __program_context(void __iomem *base, int ctx, int ncb,
-			      phys_addr_t pgtable, int redirect,
-			      int ttbr_split)
+static void __program_context(void __iomem *base, void __iomem *glb_base,
+			      int ctx, int ncb, phys_addr_t pgtable,
+			      int redirect, int ttbr_split)
 {
 	unsigned int prrr, nmrr;
 	int i, j, found;
-
 	msm_iommu_remote_spin_lock();
 
-	__reset_context(base, ctx);
+	__reset_context(base, glb_base, ctx);
 
 	/* Set up HTW mode */
 	/* TLB miss configuration: perform HTW on miss */
@@ -358,26 +387,27 @@
 
 static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
 {
-	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	struct msm_iommu_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 
 	if (!priv)
 		goto fail_nomem;
 
 	INIT_LIST_HEAD(&priv->list_attached);
-	priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL,
+	priv->pt.fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
 							  get_order(SZ_16K));
 
-	if (!priv->pgtable)
+	if (!priv->pt.fl_table)
 		goto fail_nomem;
 
 #ifdef CONFIG_IOMMU_PGTABLES_L2
-	priv->redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
+	priv->pt.redirect = flags & MSM_IOMMU_DOMAIN_PT_CACHEABLE;
 #endif
 
-	memset(priv->pgtable, 0, SZ_16K);
+	memset(priv->pt.fl_table, 0, SZ_16K);
 	domain->priv = priv;
 
-	clean_pte(priv->pgtable, priv->pgtable + NUM_FL_PTE, priv->redirect);
+	clean_pte(priv->pt.fl_table, priv->pt.fl_table + NUM_FL_PTE,
+		  priv->pt.redirect);
 
 	return 0;
 
@@ -388,7 +418,7 @@
 
 static void msm_iommu_domain_destroy(struct iommu_domain *domain)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	unsigned long *fl_table;
 	int i;
 
@@ -397,15 +427,15 @@
 	domain->priv = NULL;
 
 	if (priv) {
-		fl_table = priv->pgtable;
+		fl_table = priv->pt.fl_table;
 
 		for (i = 0; i < NUM_FL_PTE; i++)
 			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
 				free_page((unsigned long) __va(((fl_table[i]) &
 								FL_BASE_MASK)));
 
-		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
-		priv->pgtable = NULL;
+		free_pages((unsigned long)priv->pt.fl_table, get_order(SZ_16K));
+		priv->pt.fl_table = NULL;
 	}
 
 	kfree(priv);
@@ -414,8 +444,7 @@
 
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	struct msm_priv *priv;
-	struct msm_iommu_ctx_dev *ctx_dev;
+	struct msm_iommu_priv *priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	struct msm_iommu_ctx_drvdata *tmp_drvdata;
@@ -427,42 +456,52 @@
 
 	if (!priv || !dev) {
 		ret = -EINVAL;
-		goto fail;
+		goto unlock;
 	}
 
 	iommu_drvdata = dev_get_drvdata(dev->parent);
 	ctx_drvdata = dev_get_drvdata(dev);
-	ctx_dev = dev->platform_data;
 
-	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
+	if (!iommu_drvdata || !ctx_drvdata) {
 		ret = -EINVAL;
-		goto fail;
+		goto unlock;
 	}
 
+	++ctx_drvdata->attach_count;
+
+	if (ctx_drvdata->attach_count > 1)
+		goto unlock;
+
 	if (!list_empty(&ctx_drvdata->attached_elm)) {
 		ret = -EBUSY;
-		goto fail;
+		goto unlock;
 	}
 
 	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
 		if (tmp_drvdata == ctx_drvdata) {
 			ret = -EBUSY;
-			goto fail;
+			goto unlock;
 		}
 
 	ret = __enable_clocks(iommu_drvdata);
 	if (ret)
-		goto fail;
+		goto unlock;
 
-	__program_context(iommu_drvdata->base, ctx_dev->num, iommu_drvdata->ncb,
-			  __pa(priv->pgtable), priv->redirect,
+	__program_context(iommu_drvdata->base, iommu_drvdata->glb_base,
+			  ctx_drvdata->num, iommu_drvdata->ncb,
+			  __pa(priv->pt.fl_table), priv->pt.redirect,
 			  iommu_drvdata->ttbr_split);
 
 	__disable_clocks(iommu_drvdata);
 	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
 
 	ctx_drvdata->attached_domain = domain;
-fail:
+
+	mutex_unlock(&msm_iommu_lock);
+
+	msm_iommu_attached(dev->parent);
+	return ret;
+unlock:
 	mutex_unlock(&msm_iommu_lock);
 	return ret;
 }
@@ -470,42 +509,49 @@
 static void msm_iommu_detach_dev(struct iommu_domain *domain,
 				 struct device *dev)
 {
-	struct msm_priv *priv;
-	struct msm_iommu_ctx_dev *ctx_dev;
+	struct msm_iommu_priv *priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret;
 
+	msm_iommu_detached(dev->parent);
+
 	mutex_lock(&msm_iommu_lock);
 	priv = domain->priv;
 
 	if (!priv || !dev)
-		goto fail;
+		goto unlock;
 
 	iommu_drvdata = dev_get_drvdata(dev->parent);
 	ctx_drvdata = dev_get_drvdata(dev);
-	ctx_dev = dev->platform_data;
 
-	if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
-		goto fail;
+	if (!iommu_drvdata || !ctx_drvdata)
+		goto unlock;
+
+	--ctx_drvdata->attach_count;
+	BUG_ON(ctx_drvdata->attach_count < 0);
+
+	if (ctx_drvdata->attach_count > 0)
+		goto unlock;
 
 	ret = __enable_clocks(iommu_drvdata);
 	if (ret)
-		goto fail;
+		goto unlock;
 
 	msm_iommu_remote_spin_lock();
 
-	SET_TLBIASID(iommu_drvdata->base, ctx_dev->num,
-		     GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_dev->num));
+	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
+		    GET_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));
 
-	__reset_context(iommu_drvdata->base, ctx_dev->num);
+	__reset_context(iommu_drvdata->base, iommu_drvdata->glb_base,
+			ctx_drvdata->num);
 
 	msm_iommu_remote_spin_unlock();
 
 	__disable_clocks(iommu_drvdata);
 	list_del_init(&ctx_drvdata->attached_elm);
 	ctx_drvdata->attached_domain = NULL;
-fail:
+unlock:
 	mutex_unlock(&msm_iommu_lock);
 }
 
@@ -551,7 +597,7 @@
 	return pgprot;
 }
 
-static unsigned long *make_second_level(struct msm_priv *priv,
+static unsigned long *make_second_level(struct msm_iommu_priv *priv,
 					unsigned long *fl_pte)
 {
 	unsigned long *sl;
@@ -563,12 +609,12 @@
 		goto fail;
 	}
 	memset(sl, 0, SZ_4K);
-	clean_pte(sl, sl + NUM_SL_PTE, priv->redirect);
+	clean_pte(sl, sl + NUM_SL_PTE, priv->pt.redirect);
 
 	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
 			FL_TYPE_TABLE);
 
-	clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+	clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
 fail:
 	return sl;
 }
@@ -640,7 +686,7 @@
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 			 phys_addr_t pa, size_t len, int prot)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	unsigned long *fl_table;
 	unsigned long *fl_pte;
 	unsigned long fl_offset;
@@ -658,7 +704,7 @@
 		goto fail;
 	}
 
-	fl_table = priv->pgtable;
+	fl_table = priv->pt.fl_table;
 
 	if (len != SZ_16M && len != SZ_1M &&
 	    len != SZ_64K && len != SZ_4K) {
@@ -687,14 +733,14 @@
 		ret = fl_16m(fl_pte, pa, pgprot);
 		if (ret)
 			goto fail;
-		clean_pte(fl_pte, fl_pte + 16, priv->redirect);
+		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
 	}
 
 	if (len == SZ_1M) {
 		ret = fl_1m(fl_pte, pa, pgprot);
 		if (ret)
 			goto fail;
-		clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
 	}
 
 	/* Need a 2nd level table */
@@ -722,14 +768,14 @@
 		if (ret)
 			goto fail;
 
-		clean_pte(sl_pte, sl_pte + 1, priv->redirect);
+		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
 	}
 
 	if (len == SZ_64K) {
 		ret = sl_64k(sl_pte, pa, pgprot);
 		if (ret)
 			goto fail;
-		clean_pte(sl_pte, sl_pte + 16, priv->redirect);
+		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
 	}
 
 	ret = __flush_iotlb_va(domain, va);
@@ -741,7 +787,7 @@
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 			    size_t len)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	unsigned long *fl_table;
 	unsigned long *fl_pte;
 	unsigned long fl_offset;
@@ -757,7 +803,7 @@
 	if (!priv)
 		goto fail;
 
-	fl_table = priv->pgtable;
+	fl_table = priv->pt.fl_table;
 
 	if (len != SZ_16M && len != SZ_1M &&
 	    len != SZ_64K && len != SZ_4K) {
@@ -783,13 +829,13 @@
 		for (i = 0; i < 16; i++)
 			*(fl_pte+i) = 0;
 
-		clean_pte(fl_pte, fl_pte + 16, priv->redirect);
+		clean_pte(fl_pte, fl_pte + 16, priv->pt.redirect);
 	}
 
 	if (len == SZ_1M) {
 		*fl_pte = 0;
 
-		clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+		clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
 	}
 
 	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
@@ -800,13 +846,13 @@
 		for (i = 0; i < 16; i++)
 			*(sl_pte+i) = 0;
 
-		clean_pte(sl_pte, sl_pte + 16, priv->redirect);
+		clean_pte(sl_pte, sl_pte + 16, priv->pt.redirect);
 	}
 
 	if (len == SZ_4K) {
 		*sl_pte = 0;
 
-		clean_pte(sl_pte, sl_pte + 1, priv->redirect);
+		clean_pte(sl_pte, sl_pte + 1, priv->pt.redirect);
 	}
 
 	if (len == SZ_4K || len == SZ_64K) {
@@ -819,7 +865,7 @@
 			free_page((unsigned long)sl_table);
 			*fl_pte = 0;
 
-			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
 		}
 	}
 
@@ -853,6 +899,55 @@
 		&& (len >= align);
 }
 
+static int check_range(unsigned long *fl_table, unsigned int va,
+				 unsigned int len)
+{
+	unsigned int offset = 0;
+	unsigned long *fl_pte;
+	unsigned long fl_offset;
+	unsigned long *sl_table;
+	unsigned long sl_start, sl_end;
+	int i;
+
+	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
+	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
+
+	while (offset < len) {
+		if (*fl_pte & FL_TYPE_TABLE) {
+			sl_start = SL_OFFSET(va);
+			sl_table =  __va(((*fl_pte) & FL_BASE_MASK));
+			sl_end = ((len - offset) / SZ_4K) + sl_start;
+
+			if (sl_end > NUM_SL_PTE)
+				sl_end = NUM_SL_PTE;
+
+			for (i = sl_start; i < sl_end; i++) {
+				if (sl_table[i] != 0) {
+					pr_err("%08x - %08x already mapped\n",
+						va, va + SZ_4K);
+					return -EBUSY;
+				}
+				offset += SZ_4K;
+				va += SZ_4K;
+			}
+
+			sl_start = 0;
+		} else {
+			if (*fl_pte != 0) {
+				pr_err("%08x - %08x already mapped\n",
+				       va, va + SZ_1M);
+				return -EBUSY;
+			}
+			va += SZ_1M;
+			offset += SZ_1M;
+			sl_start = 0;
+		}
+		fl_pte++;
+	}
+	return 0;
+}
+
 static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
 			       struct scatterlist *sg, unsigned int len,
 			       int prot)
@@ -866,7 +961,7 @@
 	unsigned long sl_offset, sl_start;
 	unsigned int chunk_size, chunk_offset = 0;
 	int ret = 0;
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
 
 	mutex_lock(&msm_iommu_lock);
@@ -874,7 +969,7 @@
 	BUG_ON(len & (SZ_4K - 1));
 
 	priv = domain->priv;
-	fl_table = priv->pgtable;
+	fl_table = priv->pt.fl_table;
 
 	pgprot4k = __get_pgprot(prot, SZ_4K);
 	pgprot64k = __get_pgprot(prot, SZ_64K);
@@ -885,6 +980,9 @@
 		ret = -EINVAL;
 		goto fail;
 	}
+	ret = check_range(fl_table, va, len);
+	if (ret)
+		goto fail;
 
 	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
 	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
@@ -907,13 +1005,15 @@
 				ret = fl_16m(fl_pte, pa, pgprot16m);
 				if (ret)
 					goto fail;
-				clean_pte(fl_pte, fl_pte + 16, priv->redirect);
+				clean_pte(fl_pte, fl_pte + 16,
+					  priv->pt.redirect);
 				fl_pte += 16;
 			} else if (chunk_size == SZ_1M) {
 				ret = fl_1m(fl_pte, pa, pgprot1m);
 				if (ret)
 					goto fail;
-				clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+				clean_pte(fl_pte, fl_pte + 1,
+					  priv->pt.redirect);
 				fl_pte++;
 			}
 
@@ -995,7 +1095,7 @@
 		}
 
 		clean_pte(sl_table + sl_start, sl_table + sl_offset,
-				priv->redirect);
+				priv->pt.redirect);
 
 		fl_pte++;
 		sl_offset = 0;
@@ -1017,14 +1117,14 @@
 	unsigned long *sl_table;
 	unsigned long sl_start, sl_end;
 	int used, i;
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 
 	mutex_lock(&msm_iommu_lock);
 
 	BUG_ON(len & (SZ_4K - 1));
 
 	priv = domain->priv;
-	fl_table = priv->pgtable;
+	fl_table = priv->pt.fl_table;
 
 	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
 	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
@@ -1040,7 +1140,7 @@
 
 			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
 			clean_pte(sl_table + sl_start, sl_table + sl_end,
-					priv->redirect);
+					priv->pt.redirect);
 
 			offset += (sl_end - sl_start) * SZ_4K;
 			va += (sl_end - sl_start) * SZ_4K;
@@ -1065,13 +1165,14 @@
 				free_page((unsigned long)sl_table);
 				*fl_pte = 0;
 
-				clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+				clean_pte(fl_pte, fl_pte + 1,
+					  priv->pt.redirect);
 			}
 
 			sl_start = 0;
 		} else {
 			*fl_pte = 0;
-			clean_pte(fl_pte, fl_pte + 1, priv->redirect);
+			clean_pte(fl_pte, fl_pte + 1, priv->pt.redirect);
 			va += SZ_1M;
 			offset += SZ_1M;
 			sl_start = 0;
@@ -1087,7 +1188,7 @@
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 					  unsigned long va)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	unsigned int par;
@@ -1212,7 +1313,12 @@
 		}
 
 		SET_FSR(base, num, fsr);
-		SET_RESUME(base, num, 1);
+		/*
+		 * Only resume fetches if the registered fault handler
+		 * allows it
+		 */
+		if (ret != -EBUSY)
+			SET_RESUME(base, num, 1);
 
 		ret = IRQ_HANDLED;
 	} else
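This makes the fault handler's return value meaningful on MSM: returning -EBUSY leaves the faulting context stalled instead of resuming fetches. A hypothetical handler relying on that (the deferral mechanism is an assumption):

    static int stall_and_defer(struct iommu_domain *domain, struct device *dev,
                               unsigned long iova, int flags, void *token)
    {
            /*
             * Hand the faulting iova to a worker; the hardware stays
             * stalled until something later sets the resume register.
             */
            return -EBUSY;
    }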
@@ -1228,8 +1334,8 @@
 
 static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
 {
-	struct msm_priv *priv = domain->priv;
-	return __pa(priv->pgtable);
+	struct msm_iommu_priv *priv = domain->priv;
+	return __pa(priv->pt.fl_table);
 }
 
 static struct iommu_ops msm_iommu_ops = {
@@ -1287,7 +1393,7 @@
 
 static int __init msm_iommu_init(void)
 {
-	if (!msm_soc_version_supports_iommu_v1())
+	if (!msm_soc_version_supports_iommu_v0())
 		return -ENODEV;
 
 	msm_iommu_lock_initialize();
diff --git a/drivers/iommu/msm_iommu-v2.c b/drivers/iommu/msm_iommu-v1.c
similarity index 64%
rename from drivers/iommu/msm_iommu-v2.c
rename to drivers/iommu/msm_iommu-v1.c
index 15de300..8a26003 100644
--- a/drivers/iommu/msm_iommu-v2.c
+++ b/drivers/iommu/msm_iommu-v1.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,9 +28,10 @@
 #include <linux/regulator/consumer.h>
 #include <asm/sizes.h>
 
-#include <mach/iommu_hw-v2.h>
+#include <mach/iommu_hw-v1.h>
 #include <mach/iommu.h>
-
+#include <mach/msm_iommu_priv.h>
+#include <mach/iommu_perfmon.h>
 #include "msm_iommu_pagetable.h"
 
 /* bitmap of the page sizes currently supported */
@@ -38,10 +39,28 @@
 
 static DEFINE_MUTEX(msm_iommu_lock);
 
-struct msm_priv {
-	struct iommu_pt pt;
-	struct list_head list_attached;
-};
+static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+	int ret = regulator_enable(drvdata->gdsc);
+	if (ret)
+		goto fail;
+
+	if (drvdata->alt_gdsc)
+		ret = regulator_enable(drvdata->alt_gdsc);
+
+	if (ret)
+		regulator_disable(drvdata->gdsc);
+fail:
+	return ret;
+}
+
+static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+	if (drvdata->alt_gdsc)
+		regulator_disable(drvdata->alt_gdsc);
+
+	regulator_disable(drvdata->gdsc);
+}
 
 static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
 {
@@ -62,6 +81,16 @@
 			clk_disable_unprepare(drvdata->pclk);
 		}
 	}
+
+	if (drvdata->clk_reg_virt) {
+		unsigned int value;
+
+		value = readl_relaxed(drvdata->clk_reg_virt);
+		value &= ~0x1;
+		writel_relaxed(value, drvdata->clk_reg_virt);
+		/* Ensure clock is on before continuing */
+		mb();
+	}
 fail:
 	return ret;
 }
@@ -74,6 +103,56 @@
 	clk_disable_unprepare(drvdata->pclk);
 }
 
+static void _iommu_lock_acquire(void)
+{
+	mutex_lock(&msm_iommu_lock);
+}
+
+static void _iommu_lock_release(void)
+{
+	mutex_unlock(&msm_iommu_lock);
+}
+
+struct iommu_access_ops iommu_access_ops_v1 = {
+	.iommu_power_on = __enable_regulators,
+	.iommu_power_off = __disable_regulators,
+	.iommu_clk_on = __enable_clocks,
+	.iommu_clk_off = __disable_clocks,
+	.iommu_lock_acquire = _iommu_lock_acquire,
+	.iommu_lock_release = _iommu_lock_release,
+};
+EXPORT_SYMBOL(iommu_access_ops_v1);
+
+void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata)
+{
+	if (iommu_drvdata->halt_enabled) {
+		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 1);
+
+		while (GET_MICRO_MMU_CTRL_IDLE(iommu_drvdata->base) == 0)
+			cpu_relax();
+		/* Ensure device is idle before continuing */
+		mb();
+	}
+}
+
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
+{
+	if (iommu_drvdata->halt_enabled) {
+		/*
+		 * Ensure transactions have completed before releasing
+		 * the halt
+		 */
+		mb();
+		SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
+		/*
+		 * Ensure write is complete before continuing to ensure
+		 * we don't turn off clocks while transaction is still
+		 * pending.
+		 */
+		mb();
+	}
+}
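
A sketch of how a consumer such as the perfmon code is expected to drive iommu_access_ops_v1, assuming the ops take the drvdata pointer as the initializer above suggests (function name is illustrative):

static int example_with_iommu_powered(struct msm_iommu_drvdata *drvdata,
				      struct iommu_access_ops *ops)
{
	int ret;

	ops->iommu_lock_acquire();
	ret = ops->iommu_power_on(drvdata);
	if (ret)
		goto unlock;
	ret = ops->iommu_clk_on(drvdata);
	if (ret)
		goto power_off;

	/* ... IOMMU registers may safely be accessed here ... */

	ops->iommu_clk_off(drvdata);
power_off:
	ops->iommu_power_off(drvdata);
unlock:
	ops->iommu_lock_release();
	return ret;
}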
+
 static void __sync_tlb(void __iomem *base, int ctx)
 {
 	SET_TLBSYNC(base, ctx, 0);
@@ -87,11 +166,10 @@
 
 static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
 {
-	struct msm_priv *priv = domain->priv;
+	struct msm_iommu_priv *priv = domain->priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret = 0;
-	int asid;
 
 	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
 		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
@@ -104,11 +182,8 @@
 		if (ret)
 			goto fail;
 
-		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
-					   ctx_drvdata->num);
-
 		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
-			   asid | (va & CB_TLBIVA_VA));
+			   ctx_drvdata->asid | (va & CB_TLBIVA_VA));
 		mb();
 		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
 		__disable_clocks(iommu_drvdata);
@@ -119,11 +194,10 @@
 
 static int __flush_iotlb(struct iommu_domain *domain)
 {
-	struct msm_priv *priv = domain->priv;
+	struct msm_iommu_priv *priv = domain->priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret = 0;
-	int asid;
 
 	list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
 		BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
@@ -135,10 +209,8 @@
 		if (ret)
 			goto fail;
 
-		asid = GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base,
-					   ctx_drvdata->num);
-
-		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
+		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
+			     ctx_drvdata->asid);
 		mb();
 		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
 		__disable_clocks(iommu_drvdata);
@@ -148,20 +220,19 @@
 	return ret;
 }
 
-static void __reset_iommu(void __iomem *base, int smt_size)
+/*
+ * May only be called for non-secure iommus
+ */
+static void __reset_iommu(void __iomem *base)
 {
-	int i;
+	int i, smt_size;
 
 	SET_ACR(base, 0);
-	SET_NSACR(base, 0);
 	SET_CR2(base, 0);
-	SET_NSCR2(base, 0);
 	SET_GFAR(base, 0);
 	SET_GFSRRESTORE(base, 0);
 	SET_TLBIALLNSNH(base, 0);
-	SET_PMCR(base, 0);
-	SET_SCR1(base, 0);
-	SET_SSDR_N(base, 0, 0);
+	smt_size = GET_IDR0_NUMSMRG(base);
 
 	for (i = 0; i < smt_size; i++)
 		SET_SMR_VALID(base, i, 0);
@@ -169,9 +240,12 @@
 	mb();
 }
 
-static void __program_iommu(void __iomem *base, int smt_size)
+/*
+ * May only be called for non-secure iommus
+ */
+static void __program_iommu(void __iomem *base)
 {
-	__reset_iommu(base, smt_size);
+	__reset_iommu(base);
 
 	SET_CR0_SMCFCFG(base, 1);
 	SET_CR0_USFCFG(base, 1);
@@ -181,7 +255,20 @@
 	SET_CR0_GFIE(base, 1);
 	SET_CR0_GFRE(base, 1);
 	SET_CR0_CLIENTPD(base, 0);
-	mb();	/* Make sure writes complete before returning */
+
+	mb(); /* Make sure writes complete before returning */
+}
+
+void program_iommu_bfb_settings(void __iomem *base,
+			const struct msm_iommu_bfb_settings *bfb_settings)
+{
+	unsigned int i;
+
+	if (bfb_settings)
+		for (i = 0; i < bfb_settings->length; i++)
+			SET_GLOBAL_REG(base, bfb_settings->regs[i],
+				       bfb_settings->data[i]);
+
+	mb(); /* Make sure writes complete before returning */
 }
 
 static void __reset_context(void __iomem *base, int ctx)
@@ -200,13 +287,69 @@
 	mb();
 }
 
-static void __program_context(void __iomem *base, int ctx, int ncb,
-				phys_addr_t pgtable, int redirect,
-				u32 *sids, int len, int smt_size)
+static void __release_smg(void __iomem *base, int ctx)
+{
+	int i, smt_size;
+	smt_size = GET_IDR0_NUMSMRG(base);
+
+	/* Invalidate any SMGs associated with this context */
+	for (i = 0; i < smt_size; i++)
+		if (GET_SMR_VALID(base, i) &&
+		    GET_S2CR_CBNDX(base, i) == ctx)
+			SET_SMR_VALID(base, i, 0);
+}
+
+static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
+				  struct msm_iommu_ctx_drvdata *curr_ctx,
+				  struct msm_iommu_priv *priv)
+{
+	unsigned int found = 0;
+	void __iomem *base = iommu_drvdata->base;
+	unsigned int i;
+	unsigned int ncb = iommu_drvdata->ncb;
+	struct msm_iommu_ctx_drvdata *tmp_drvdata;
+
+	/* Find if this page table is used elsewhere, and re-use ASID */
+	if (!list_empty(&priv->list_attached)) {
+		tmp_drvdata = list_first_entry(&priv->list_attached,
+				struct msm_iommu_ctx_drvdata, attached_elm);
+
+		++iommu_drvdata->asid[tmp_drvdata->asid - 1];
+		curr_ctx->asid = tmp_drvdata->asid;
+
+		SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num, curr_ctx->asid);
+		found = 1;
+	}
+
+	/* If page table is new, find an unused ASID */
+	if (!found) {
+		for (i = 0; i < ncb; ++i) {
+			if (iommu_drvdata->asid[i] == 0) {
+				++iommu_drvdata->asid[i];
+				curr_ctx->asid = i + 1;
+
+				SET_CB_CONTEXTIDR_ASID(base, curr_ctx->num,
+						       curr_ctx->asid);
+				found = 1;
+				break;
+			}
+		}
+		BUG_ON(!found);
+	}
+}
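
msm_iommu_assign_ASID() above replaces the old register-scanning search with a software scheme: drvdata->asid[] is a per-IOMMU use count indexed by (asid - 1), so valid ASIDs run 1..ncb and a count of 0 means free; contexts attached to the same domain (same page table) share one ASID so TLBIASID invalidation covers them all. Illustrative helpers restating the invariant (not code in this patch; the matching decrement is in msm_iommu_detach_dev below):

static inline bool example_asid_is_free(const struct msm_iommu_drvdata *d,
					int asid)
{
	return d->asid[asid - 1] == 0;	/* use count of zero == unused */
}

static inline void example_asid_get(struct msm_iommu_drvdata *d, int asid)
{
	++d->asid[asid - 1];		/* detach drops the count again */
}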
+
+static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
+			      struct msm_iommu_ctx_drvdata *ctx_drvdata,
+			      struct msm_iommu_priv *priv, bool is_secure)
 {
 	unsigned int prrr, nmrr;
 	unsigned int pn;
-	int i, j, found, num = 0;
+	int num = 0, i, smt_size;
+	void __iomem *base = iommu_drvdata->base;
+	unsigned int ctx = ctx_drvdata->num;
+	u32 *sids = ctx_drvdata->sids;
+	int len = ctx_drvdata->nsid;
+	phys_addr_t pgtable = __pa(priv->pt.fl_table);
 
 	__reset_context(base, ctx);
 
@@ -237,7 +380,7 @@
 	/* Configure page tables as inner-cacheable and shareable to reduce
 	 * the TLB miss penalty.
 	 */
-	if (redirect) {
+	if (priv->pt.redirect) {
 		SET_CB_TTBR0_S(base, ctx, 1);
 		SET_CB_TTBR0_NOS(base, ctx, 1);
 		SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
@@ -245,57 +388,45 @@
 		SET_CB_TTBR0_RGN(base, ctx, 1);   /* WB, WA */
 	}
 
-	/* Program the M2V tables for this context */
-	for (i = 0; i < len / sizeof(*sids); i++) {
-		for (; num < smt_size; num++)
-			if (GET_SMR_VALID(base, num) == 0)
-				break;
-		BUG_ON(num >= smt_size);
+	if (!is_secure) {
+		smt_size = GET_IDR0_NUMSMRG(base);
+		/* Program the M2V tables for this context */
+		for (i = 0; i < len / sizeof(*sids); i++) {
+			for (; num < smt_size; num++)
+				if (GET_SMR_VALID(base, num) == 0)
+					break;
+			BUG_ON(num >= smt_size);
 
-		SET_SMR_VALID(base, num, 1);
-		SET_SMR_MASK(base, num, 0);
-		SET_SMR_ID(base, num, sids[i]);
+			SET_SMR_VALID(base, num, 1);
+			SET_SMR_MASK(base, num, 0);
+			SET_SMR_ID(base, num, sids[i]);
 
-		/* Set VMID = 0 */
-		SET_S2CR_N(base, num, 0);
-		SET_S2CR_CBNDX(base, num, ctx);
-		/* Set security bit override to be Non-secure */
-		SET_S2CR_NSCFG(base, num, 3);
+			SET_S2CR_N(base, num, 0);
+			SET_S2CR_CBNDX(base, num, ctx);
+			SET_S2CR_MEMATTR(base, num, 0x0A);
+			/* Set security bit override to be Non-secure */
+			SET_S2CR_NSCFG(base, num, 3);
+		}
+		SET_CBAR_N(base, ctx, 0);
+
+		/* Stage 1 Context with Stage 2 bypass */
+		SET_CBAR_TYPE(base, ctx, 1);
+
+		/* Route page faults to the non-secure interrupt */
+		SET_CBAR_IRPTNDX(base, ctx, 1);
+
+		/* Set VMID to non-secure HLOS */
+		SET_CBAR_VMID(base, ctx, 3);
+
+		/* Bypass is treated as inner-shareable */
+		SET_CBAR_BPSHCFG(base, ctx, 2);
+
+		/* Do not downgrade memory attributes */
+		SET_CBAR_MEMATTR(base, ctx, 0x0A);
 	}
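
Condensed restatement of the non-secure M2V programming above: every SID listed for the context gets an exact-match stream-match entry whose S2CR steers it to context bank ctx. Illustrative helper using the same register accessors:

static void example_route_sid(void __iomem *base, int smr_idx, u32 sid,
			      int ctx)
{
	SET_SMR_VALID(base, smr_idx, 1);
	SET_SMR_MASK(base, smr_idx, 0);		/* mask 0: match every SID bit */
	SET_SMR_ID(base, smr_idx, sid);
	SET_S2CR_CBNDX(base, smr_idx, ctx);	/* route matches to bank ctx */
}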
 
-	SET_CBAR_N(base, ctx, 0);
-	/* Stage 1 Context with Stage 2 bypass */
-	SET_CBAR_TYPE(base, ctx, 1);
-	/* Route page faults to the non-secure interrupt */
-	SET_CBAR_IRPTNDX(base, ctx, 1);
-
-       /* Find if this page table is used elsewhere, and re-use ASID */
-	found = 0;
-	for (i = 0; i < ncb; i++)
-		if ((GET_CB_TTBR0_ADDR(base, i) == pn) && (i != ctx)) {
-			SET_CB_CONTEXTIDR_ASID(base, ctx, \
-					GET_CB_CONTEXTIDR_ASID(base, i));
-			found = 1;
-			break;
-		}
-
-	/* If page table is new, find an unused ASID */
-	if (!found) {
-		for (i = 0; i < ncb; i++) {
-			found = 0;
-			for (j = 0; j < ncb; j++) {
-				if (GET_CB_CONTEXTIDR_ASID(base, j) == i &&
-				    j != ctx)
-					found = 1;
-			}
-
-			if (!found) {
-				SET_CB_CONTEXTIDR_ASID(base, ctx, i);
-				break;
-			}
-		}
-		BUG_ON(found);
-	}
+	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);
 
 	/* Enable the MMU */
 	SET_CB_SCTLR_M(base, ctx, 1);
@@ -304,7 +435,7 @@
 
 static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -328,7 +459,7 @@
 
 static void msm_iommu_domain_destroy(struct iommu_domain *domain)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 
 	mutex_lock(&msm_iommu_lock);
 	priv = domain->priv;
@@ -341,32 +472,14 @@
 	mutex_unlock(&msm_iommu_lock);
 }
 
-static int msm_iommu_ctx_attached(struct device *dev)
-{
-	struct platform_device *pdev;
-	struct device_node *child;
-	struct msm_iommu_ctx_drvdata *ctx;
-
-	for_each_child_of_node(dev->of_node, child) {
-		pdev = of_find_device_by_node(child);
-
-		ctx = dev_get_drvdata(&pdev->dev);
-		if (ctx->attached_domain) {
-			of_node_put(child);
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	struct msm_iommu_ctx_drvdata *tmp_drvdata;
 	int ret;
+	int is_secure;
 
 	mutex_lock(&msm_iommu_lock);
 
@@ -394,28 +507,53 @@
 			goto fail;
 		}
 
-	ret = regulator_enable(iommu_drvdata->gdsc);
+	is_secure = iommu_drvdata->sec_id != -1;
+
+	ret = __enable_regulators(iommu_drvdata);
 	if (ret)
 		goto fail;
 
 	ret = __enable_clocks(iommu_drvdata);
 	if (ret) {
-		regulator_disable(iommu_drvdata->gdsc);
+		__disable_regulators(iommu_drvdata);
 		goto fail;
 	}
 
-	if (!msm_iommu_ctx_attached(dev->parent))
-		__program_iommu(iommu_drvdata->base, iommu_drvdata->nsmr);
+	/* We can only do this once */
+	if (!iommu_drvdata->ctx_attach_count) {
+		if (!is_secure) {
+			iommu_halt(iommu_drvdata);
+			__program_iommu(iommu_drvdata->base);
+			iommu_resume(iommu_drvdata);
+		} else {
+			ret = msm_iommu_sec_program_iommu(
+				iommu_drvdata->sec_id);
+			if (ret) {
+				__disable_clocks(iommu_drvdata);
+				__disable_regulators(iommu_drvdata);
+				goto fail;
+			}
+		}
+		program_iommu_bfb_settings(iommu_drvdata->base,
+					   iommu_drvdata->bfb_settings);
+	}
 
-	__program_context(iommu_drvdata->base, ctx_drvdata->num,
-		iommu_drvdata->ncb, __pa(priv->pt.fl_table),
-		priv->pt.redirect, ctx_drvdata->sids, ctx_drvdata->nsid,
-		iommu_drvdata->nsmr);
+	iommu_halt(iommu_drvdata);
+
+	__program_context(iommu_drvdata, ctx_drvdata, priv, is_secure);
+
+	iommu_resume(iommu_drvdata);
+
 	__disable_clocks(iommu_drvdata);
 
 	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
 	ctx_drvdata->attached_domain = domain;
+	++iommu_drvdata->ctx_attach_count;
 
+	mutex_unlock(&msm_iommu_lock);
+
+	msm_iommu_attached(dev->parent);
+	return ret;
 fail:
 	mutex_unlock(&msm_iommu_lock);
 	return ret;
@@ -424,10 +562,13 @@
 static void msm_iommu_detach_dev(struct iommu_domain *domain,
 				 struct device *dev)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret;
+	int is_secure;
+
+	msm_iommu_detached(dev->parent);
 
 	mutex_lock(&msm_iommu_lock);
 	priv = domain->priv;
@@ -443,17 +584,30 @@
 	if (ret)
 		goto fail;
 
-	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num,
-		GET_CB_CONTEXTIDR_ASID(iommu_drvdata->base, ctx_drvdata->num));
+	is_secure = iommu_drvdata->sec_id != -1;
+
+	SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, ctx_drvdata->asid);
+
+	BUG_ON(iommu_drvdata->asid[ctx_drvdata->asid - 1] == 0);
+	iommu_drvdata->asid[ctx_drvdata->asid - 1]--;
+	ctx_drvdata->asid = -1;
+
+	iommu_halt(iommu_drvdata);
 
 	__reset_context(iommu_drvdata->base, ctx_drvdata->num);
+	if (!is_secure)
+		__release_smg(iommu_drvdata->base, ctx_drvdata->num);
+
+	iommu_resume(iommu_drvdata);
+
 	__disable_clocks(iommu_drvdata);
 
-	regulator_disable(iommu_drvdata->gdsc);
+	__disable_regulators(iommu_drvdata);
 
 	list_del_init(&ctx_drvdata->attached_elm);
 	ctx_drvdata->attached_domain = NULL;
-
+	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+	--iommu_drvdata->ctx_attach_count;
 fail:
 	mutex_unlock(&msm_iommu_lock);
 }
@@ -461,7 +615,7 @@
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 			 phys_addr_t pa, size_t len, int prot)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	int ret = 0;
 
 	mutex_lock(&msm_iommu_lock);
@@ -485,7 +639,7 @@
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 			    size_t len)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	int ret = -ENODEV;
 
 	mutex_lock(&msm_iommu_lock);
@@ -512,7 +666,7 @@
 			       int prot)
 {
 	int ret;
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 
 	mutex_lock(&msm_iommu_lock);
 
@@ -536,7 +690,7 @@
 static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
 				 unsigned int len)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 
 	mutex_lock(&msm_iommu_lock);
 
@@ -551,7 +705,7 @@
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 					  unsigned long va)
 {
-	struct msm_priv *priv;
+	struct msm_iommu_priv *priv;
 	struct msm_iommu_drvdata *iommu_drvdata;
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	unsigned int par;
@@ -688,7 +842,7 @@
 
 static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
 {
-	struct msm_priv *priv = domain->priv;
+	struct msm_iommu_priv *priv = domain->priv;
 	return __pa(priv->pt.fl_table);
 }
 
diff --git a/drivers/iommu/msm_iommu_dev-v0.c b/drivers/iommu/msm_iommu_dev-v0.c
new file mode 100644
index 0000000..549800f
--- /dev/null
+++ b/drivers/iommu/msm_iommu_dev-v0.c
@@ -0,0 +1,695 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include <mach/iommu_perfmon.h>
+#include <mach/iommu_hw-v0.h>
+#include <mach/iommu.h>
+
+static DEFINE_MUTEX(iommu_list_lock);
+static LIST_HEAD(iommu_list);
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv)
+{
+	mutex_lock(&iommu_list_lock);
+	list_add(&drv->list, &iommu_list);
+	mutex_unlock(&iommu_list_lock);
+}
+
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv)
+{
+	mutex_lock(&iommu_list_lock);
+	list_del(&drv->list);
+	mutex_unlock(&iommu_list_lock);
+}
+
+static int find_iommu_ctx(struct device *dev, void *data)
+{
+	struct msm_iommu_ctx_drvdata *c;
+
+	c = dev_get_drvdata(dev);
+	if (!c || !c->name)
+		return 0;
+
+	return !strcmp(data, c->name);
+}
+
+static struct device *find_context(struct device *dev, const char *name)
+{
+	return device_find_child(dev, (void *)name, find_iommu_ctx);
+}
+
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+	struct msm_iommu_drvdata *drv;
+	struct device *dev = NULL;
+
+	mutex_lock(&iommu_list_lock);
+	list_for_each_entry(drv, &iommu_list, list) {
+		dev = find_context(drv->dev, ctx_name);
+		if (dev)
+			break;
+	}
+	mutex_unlock(&iommu_list_lock);
+
+	if (!dev || !dev_get_drvdata(dev))
+		pr_err("Could not find context <%s>\n", ctx_name);
+	put_device(dev);
+
+	return dev;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
+static void msm_iommu_reset(void __iomem *base, void __iomem *glb_base, int ncb)
+{
+	int ctx;
+
+	SET_RPUE(glb_base, 0);
+	SET_RPUEIE(glb_base, 0);
+	SET_ESRRESTORE(glb_base, 0);
+	SET_TBE(glb_base, 0);
+	SET_CR(glb_base, 0);
+	SET_SPDMBE(glb_base, 0);
+	SET_TESTBUSCR(glb_base, 0);
+	SET_TLBRSW(glb_base, 0);
+	SET_GLOBAL_TLBIALL(glb_base, 0);
+	SET_RPU_ACR(glb_base, 0);
+	SET_TLBLKCRWE(glb_base, 1);
+
+	for (ctx = 0; ctx < ncb; ctx++) {
+		SET_BPRCOSH(glb_base, ctx, 0);
+		SET_BPRCISH(glb_base, ctx, 0);
+		SET_BPRCNSH(glb_base, ctx, 0);
+		SET_BPSHCFG(glb_base, ctx, 0);
+		SET_BPMTCFG(glb_base, ctx, 0);
+		SET_ACTLR(base, ctx, 0);
+		SET_SCTLR(base, ctx, 0);
+		SET_FSRRESTORE(base, ctx, 0);
+		SET_TTBR0(base, ctx, 0);
+		SET_TTBR1(base, ctx, 0);
+		SET_TTBCR(base, ctx, 0);
+		SET_BFBCR(base, ctx, 0);
+		SET_PAR(base, ctx, 0);
+		SET_FAR(base, ctx, 0);
+		SET_TLBFLPTER(base, ctx, 0);
+		SET_TLBSLPTER(base, ctx, 0);
+		SET_TLBLKCR(base, ctx, 0);
+		SET_CTX_TLBIALL(base, ctx, 0);
+		SET_TLBIVA(base, ctx, 0);
+		SET_PRRR(base, ctx, 0);
+		SET_NMRR(base, ctx, 0);
+		SET_CONTEXTIDR(base, ctx, 0);
+	}
+	mb();
+}
+
+static int msm_iommu_parse_dt(struct platform_device *pdev,
+				struct msm_iommu_drvdata *drvdata)
+{
+#ifdef CONFIG_OF_DEVICE
+	struct device_node *child;
+	struct resource *r;
+	u32 glb_offset = 0;
+	int ret;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		pr_err("%s: Missing property reg\n", __func__);
+		return -EINVAL;
+	}
+	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+	if (!drvdata->base) {
+		pr_err("%s: Unable to ioremap %pr\n", __func__, r);
+		return -ENOMEM;
+	}
+	drvdata->glb_base = drvdata->base;
+
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,glb-offset",
+			&glb_offset)) {
+		drvdata->glb_base += glb_offset;
+	} else {
+		pr_err("%s: Missing property qcom,glb-offset\n", __func__);
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(pdev->dev.of_node, child) {
+		drvdata->ncb++;
+		if (!of_platform_device_create(child, NULL, &pdev->dev))
+			pr_err("Failed to create %s device\n", child->name);
+	}
+
+	ret = of_property_read_string(pdev->dev.of_node, "label",
+			&drvdata->name);
+	if (ret) {
+		pr_err("%s: Missing property label\n", __func__);
+		return -EINVAL;
+	}
+	drvdata->sec_id = -1;
+	drvdata->ttbr_split = 0;
+#endif
+	return 0;
+}
+
+static int __get_clocks(struct platform_device *pdev,
+				 struct msm_iommu_drvdata *drvdata)
+{
+	int ret = 0;
+
+	drvdata->pclk = clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(drvdata->pclk)) {
+		ret = PTR_ERR(drvdata->pclk);
+		drvdata->pclk = NULL;
+		pr_err("Unable to get iface_clk for %s IOMMU device\n",
+			drvdata->name);
+		goto fail;
+	}
+
+	drvdata->clk = clk_get(&pdev->dev, "core_clk");
+
+	if (!IS_ERR(drvdata->clk)) {
+		if (clk_get_rate(drvdata->clk) == 0) {
+			ret = clk_round_rate(drvdata->clk, 1000);
+			clk_set_rate(drvdata->clk, ret);
+		}
+	} else {
+		drvdata->clk = NULL;
+	}
+	return 0;
+fail:
+	return ret;
+}
+
+static void __put_clocks(struct msm_iommu_drvdata *drvdata)
+{
+	if (drvdata->clk)
+		clk_put(drvdata->clk);
+	clk_put(drvdata->pclk);
+}
+
+static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drvdata->pclk);
+	if (ret)
+		goto fail;
+
+	if (drvdata->clk) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			clk_disable_unprepare(drvdata->pclk);
+	}
+fail:
+	return ret;
+}
+
+static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+	if (drvdata->clk)
+		clk_disable_unprepare(drvdata->clk);
+	clk_disable_unprepare(drvdata->pclk);
+}
+
+/*
+ * Do a basic check of the IOMMU by performing an ATS operation
+ * on context bank 0.
+ */
+static int iommu_sanity_check(struct msm_iommu_drvdata *drvdata)
+{
+	int par;
+	int ret = 0;
+
+	SET_M(drvdata->base, 0, 1);
+	SET_PAR(drvdata->base, 0, 0);
+	SET_V2PCFG(drvdata->base, 0, 1);
+	SET_V2PPR(drvdata->base, 0, 0);
+	mb();
+	par = GET_PAR(drvdata->base, 0);
+	SET_V2PCFG(drvdata->base, 0, 0);
+	SET_M(drvdata->base, 0, 0);
+	mb();
+
+	if (!par) {
+		pr_err("%s: Invalid PAR value detected\n", drvdata->name);
+		ret = -ENODEV;
+	}
+	return ret;
+}
+
+static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
+					struct iommu_pmon *pmon_info)
+{
+	int ret = 0;
+	int irq = platform_get_irq(pdev, 0);
+	unsigned int cls_prop_size;
+
+	if (irq > 0) {
+		pmon_info->iommu.evt_irq = irq;
+
+		ret = of_property_read_u32(pdev->dev.of_node,
+					   "qcom,iommu-pmu-ngroups",
+					   &pmon_info->num_groups);
+		if (ret) {
+			pr_err("Error reading qcom,iommu-pmu-ngroups\n");
+			goto fail;
+		}
+		ret = of_property_read_u32(pdev->dev.of_node,
+					   "qcom,iommu-pmu-ncounters",
+					   &pmon_info->num_counters);
+		if (ret) {
+			pr_err("Error reading qcom,iommu-pmu-ncounters\n");
+			goto fail;
+		}
+
+		if (!of_get_property(pdev->dev.of_node,
+				     "qcom,iommu-pmu-event-classes",
+				     &cls_prop_size)) {
+			pr_err("Error reading qcom,iommu-pmu-event-classes\n");
+			return -EINVAL;
+		}
+
+		pmon_info->event_cls_supported =
+			   devm_kzalloc(&pdev->dev, cls_prop_size, GFP_KERNEL);
+
+		if (!pmon_info->event_cls_supported) {
+			pr_err("Unable to get memory for event class array\n");
+			return -ENOMEM;
+		}
+
+		pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);
+
+		ret = of_property_read_u32_array(pdev->dev.of_node,
+					"qcom,iommu-pmu-event-classes",
+					pmon_info->event_cls_supported,
+					pmon_info->nevent_cls_supported);
+		if (ret) {
+			pr_err("Error reading qcom,iommu-pmu-event-classes\n");
+			return ret;
+		}
+	} else {
+		pmon_info->iommu.evt_irq = -1;
+		ret = irq;
+	}
+
+fail:
+	return ret;
+}
+
+static int msm_iommu_probe(struct platform_device *pdev)
+{
+	struct iommu_pmon *pmon_info;
+	struct msm_iommu_drvdata *drvdata;
+	struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
+	int ret;
+
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+
+	if (!drvdata) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (pdev->dev.of_node) {
+		ret = msm_iommu_parse_dt(pdev, drvdata);
+		if (ret)
+			goto fail;
+	} else if (pdev->dev.platform_data) {
+		struct resource *r, *r2;
+		resource_size_t	len;
+
+		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"physbase");
+
+		if (!r) {
+			ret = -ENODEV;
+			goto fail;
+		}
+
+		len = resource_size(r);
+
+		r2 = request_mem_region(r->start, len, r->name);
+		if (!r2) {
+			pr_err("Could not request memory region: %pr\n", r);
+			ret = -EBUSY;
+			goto fail;
+		}
+
+		drvdata->base = devm_ioremap(&pdev->dev, r2->start, len);
+
+		if (!drvdata->base) {
+			pr_err("Could not ioremap: %pr\n", r);
+			ret = -EBUSY;
+			goto fail;
+		}
+		/*
+		 * Global register space offset for this legacy (v0) IOMMU
+		 * hardware is always 0xFF000
+		 */
+		drvdata->glb_base = drvdata->base + 0xFF000;
+		drvdata->name = iommu_dev->name;
+		drvdata->ncb = iommu_dev->ncb;
+		drvdata->ttbr_split = iommu_dev->ttbr_split;
+	} else {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	drvdata->dev = &pdev->dev;
+
+	ret = __get_clocks(pdev, drvdata);
+
+	if (ret)
+		goto fail;
+
+	__enable_clocks(drvdata);
+
+	msm_iommu_reset(drvdata->base, drvdata->glb_base, drvdata->ncb);
+
+	ret = iommu_sanity_check(drvdata);
+	if (ret)
+		goto fail_clk;
+
+	pr_info("device %s mapped at %p, with %d ctx banks\n",
+		drvdata->name, drvdata->base, drvdata->ncb);
+
+	msm_iommu_add_drv(drvdata);
+	platform_set_drvdata(pdev, drvdata);
+
+	__disable_clocks(drvdata);
+
+	pmon_info = msm_iommu_pm_alloc(&pdev->dev);
+	if (pmon_info != NULL) {
+		ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
+		if (ret) {
+			msm_iommu_pm_free(&pdev->dev);
+			pr_info("%s: pmon not available.\n", drvdata->name);
+		} else {
+			pmon_info->iommu.base = drvdata->base;
+			pmon_info->iommu.ops = &iommu_access_ops_v0;
+			pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v0();
+			pmon_info->iommu.iommu_name = drvdata->name;
+			ret = msm_iommu_pm_iommu_register(pmon_info);
+			if (ret) {
+				pr_err("%s iommu register fail\n",
+								drvdata->name);
+				msm_iommu_pm_free(&pdev->dev);
+			} else {
+				pr_debug("%s iommu registered for pmon\n",
+						pmon_info->iommu.iommu_name);
+			}
+		}
+	}
+
+	return 0;
+
+fail_clk:
+	__disable_clocks(drvdata);
+	__put_clocks(drvdata);
+fail:
+	return ret;
+}
+
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+	struct msm_iommu_drvdata *drv = NULL;
+
+	drv = platform_get_drvdata(pdev);
+	if (drv) {
+		msm_iommu_remove_drv(drv);
+		if (drv->clk)
+			clk_put(drv->clk);
+		clk_put(drv->pclk);
+		platform_set_drvdata(pdev, NULL);
+	}
+	return 0;
+}
+
+static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
+				struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+	struct resource *r, rp;
+	int irq, ret;
+	u32 nmid_array_size;
+	u32 nmid;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq > 0) {
+		ret = request_threaded_irq(irq, NULL,
+				msm_iommu_fault_handler,
+				IRQF_ONESHOT | IRQF_SHARED,
+				"msm_iommu_nonsecure_irq", ctx_drvdata);
+		if (ret) {
+			pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
+			return ret;
+		}
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		pr_err("Could not find reg property for context bank\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
+	if (ret) {
+		pr_err("of_address_to_resource failed\n");
+		return -EINVAL;
+	}
+
+	/* Calculate the context bank number using the base addresses. CB0
+	 * starts at the base address.
+	 */
+	ctx_drvdata->num = ((r->start - rp.start) >> CTX_SHIFT);
+
+	if (of_property_read_string(pdev->dev.of_node, "label",
+					&ctx_drvdata->name)) {
+		pr_err("Could not find label property\n");
+		return -EINVAL;
+	}
+
+	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-mids",
+			     &nmid_array_size)) {
+		pr_err("Could not find iommu-ctx-mids property\n");
+		return -EINVAL;
+	}
+	if (nmid_array_size >= sizeof(ctx_drvdata->sids)) {
+		pr_err("Too many mids defined - array size: %u, mids size: %zu\n",
+			nmid_array_size, sizeof(ctx_drvdata->sids));
+		return -EINVAL;
+	}
+	nmid = nmid_array_size / sizeof(*ctx_drvdata->sids);
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-mids",
+				       ctx_drvdata->sids, nmid)) {
+		pr_err("Could not find iommu-ctx-mids property\n");
+		return -EINVAL;
+	}
+	ctx_drvdata->nsid = nmid;
+
+	return 0;
+}
+
+static void __program_m2v_tables(struct msm_iommu_drvdata *drvdata,
+				struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+	int i;
+
+	/* Program the M2V tables for this context */
+	for (i = 0; i < ctx_drvdata->nsid; i++) {
+		int sid = ctx_drvdata->sids[i];
+		int num = ctx_drvdata->num;
+
+		SET_M2VCBR_N(drvdata->glb_base, sid, 0);
+		SET_CBACR_N(drvdata->glb_base, num, 0);
+
+		/* Route page faults to the non-secure interrupt */
+		SET_IRPTNDX(drvdata->glb_base, num, 1);
+
+		/* Set VMID = 0 */
+		SET_VMID(drvdata->glb_base, sid, 0);
+
+		/* Set the context number for that SID to this context */
+		SET_CBNDX(drvdata->glb_base, sid, num);
+
+		/* Set SID associated with this context bank to 0 */
+		SET_CBVMID(drvdata->glb_base, num, 0);
+
+		/* Set the ASID for TLB tagging for this context to 0 */
+		SET_CONTEXTIDR_ASID(drvdata->base, num, 0);
+
+		/* Set security bit override to be Non-secure */
+		SET_NSCFG(drvdata->glb_base, sid, 3);
+	}
+	mb();
+}
+
+static int msm_iommu_ctx_probe(struct platform_device *pdev)
+{
+	struct msm_iommu_drvdata *drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL;
+	int i, ret, irq;
+	if (!pdev->dev.parent) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	drvdata = dev_get_drvdata(pdev->dev.parent);
+
+	if (!drvdata) {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
+					GFP_KERNEL);
+	if (!ctx_drvdata) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ctx_drvdata->pdev = pdev;
+	INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
+	platform_set_drvdata(pdev, ctx_drvdata);
+	ctx_drvdata->attach_count = 0;
+
+	if (pdev->dev.of_node) {
+		ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
+		if (ret)
+			goto fail;
+	} else if (pdev->dev.platform_data) {
+		struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
+
+		ctx_drvdata->num = c->num;
+		ctx_drvdata->name = c->name;
+
+		for (i = 0; i < MAX_NUM_MIDS; ++i) {
+			if (c->mids[i] == -1) {
+				ctx_drvdata->nsid = i;
+				break;
+			}
+			ctx_drvdata->sids[i] = c->mids[i];
+		}
+		irq = platform_get_irq_byname(
+					to_platform_device(pdev->dev.parent),
+					"nonsecure_irq");
+		if (irq < 0) {
+			ret = -ENODEV;
+			goto fail;
+		}
+
+		ret = request_threaded_irq(irq, NULL, msm_iommu_fault_handler,
+					IRQF_ONESHOT | IRQF_SHARED,
+					"msm_iommu_nonsecure_irq", ctx_drvdata);
+
+		if (ret) {
+			pr_err("request_threaded_irq %d failed: %d\n", irq,
+								       ret);
+			goto fail;
+		}
+	} else {
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	__enable_clocks(drvdata);
+	__program_m2v_tables(drvdata, ctx_drvdata);
+	__disable_clocks(drvdata);
+
+	dev_info(&pdev->dev, "context %s using bank %d\n", ctx_drvdata->name,
+							   ctx_drvdata->num);
+	return 0;
+fail:
+	return ret;
+}
+
+static int __devexit msm_iommu_ctx_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+
+static struct of_device_id msm_iommu_match_table[] = {
+	{ .compatible = "qcom,msm-smmu-v0", },
+	{}
+};
+
+static struct platform_driver msm_iommu_driver = {
+	.driver = {
+		.name	= "msm_iommu-v0",
+		.of_match_table = msm_iommu_match_table,
+	},
+	.probe		= msm_iommu_probe,
+	.remove		= __devexit_p(msm_iommu_remove),
+};
+
+static struct of_device_id msm_iommu_ctx_match_table[] = {
+	{ .name = "qcom,iommu-ctx", },
+	{}
+};
+
+static struct platform_driver msm_iommu_ctx_driver = {
+	.driver = {
+		.name	= "msm_iommu_ctx",
+		.of_match_table = msm_iommu_ctx_match_table,
+	},
+	.probe		= msm_iommu_ctx_probe,
+	.remove		= __devexit_p(msm_iommu_ctx_remove),
+};
+
+static int __init msm_iommu_driver_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&msm_iommu_driver);
+	if (ret != 0) {
+		pr_err("Failed to register IOMMU driver\n");
+		goto error;
+	}
+
+	ret = platform_driver_register(&msm_iommu_ctx_driver);
+	if (ret != 0) {
+		platform_driver_unregister(&msm_iommu_driver);
+		pr_err("Failed to register IOMMU context driver\n");
+		goto error;
+	}
+
+error:
+	return ret;
+}
+
+static void __exit msm_iommu_driver_exit(void)
+{
+	platform_driver_unregister(&msm_iommu_ctx_driver);
+	platform_driver_unregister(&msm_iommu_driver);
+}
+
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
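
A hypothetical client of the lookup API this file provides: resolve a context bank by its DT label, attach a domain, and map one page. The context name, IOVA and flags are made up; iommu_domain_alloc() is shown with its mainline-3.4 signature (this tree's variant may take an extra flags argument):

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <asm/sizes.h>
#include <mach/iommu.h>

static int example_map_one_page(phys_addr_t pa)
{
	struct device *ctx = msm_iommu_get_ctx("example_ctx");
	struct iommu_domain *domain;
	int ret;

	if (!ctx)
		return -ENODEV;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, ctx);
	if (ret)
		goto free_domain;

	ret = iommu_map(domain, 0x10000000, pa, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		iommu_detach_device(domain, ctx);
free_domain:
	if (ret)
		iommu_domain_free(domain);
	return ret;
}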
diff --git a/drivers/iommu/msm_iommu_dev-v1.c b/drivers/iommu/msm_iommu_dev-v1.c
new file mode 100644
index 0000000..418a086
--- /dev/null
+++ b/drivers/iommu/msm_iommu_dev-v1.c
@@ -0,0 +1,462 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include <mach/iommu_hw-v1.h>
+#include <mach/iommu.h>
+#include <mach/iommu_perfmon.h>
+
+static int msm_iommu_parse_bfb_settings(struct platform_device *pdev,
+				    struct msm_iommu_drvdata *drvdata)
+{
+	struct msm_iommu_bfb_settings *bfb_settings;
+	u32 nreg, nval;
+	int ret;
+
+	/*
+	 * It is not valid for a device to have the qcom,iommu-bfb-regs
+	 * property but not the qcom,iommu-bfb-data property, and vice versa.
+	 */
+	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-bfb-regs", &nreg)) {
+		if (of_get_property(pdev->dev.of_node, "qcom,iommu-bfb-data",
+				    &nval))
+			return -EINVAL;
+		return 0;
+	}
+
+	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-bfb-data", &nval))
+		return -EINVAL;
+
+	if (nreg >= sizeof(bfb_settings->regs))
+		return -EINVAL;
+
+	if (nval >= sizeof(bfb_settings->data))
+		return -EINVAL;
+
+	if (nval != nreg)
+		return -EINVAL;
+
+	bfb_settings = devm_kzalloc(&pdev->dev, sizeof(*bfb_settings),
+				    GFP_KERNEL);
+	if (!bfb_settings)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(pdev->dev.of_node,
+					 "qcom,iommu-bfb-regs",
+					 bfb_settings->regs,
+					 nreg / sizeof(*bfb_settings->regs));
+	if (ret)
+		return ret;
+
+	ret = of_property_read_u32_array(pdev->dev.of_node,
+					 "qcom,iommu-bfb-data",
+					 bfb_settings->data,
+					 nval / sizeof(*bfb_settings->data));
+	if (ret)
+		return ret;
+
+	bfb_settings->length = nreg / sizeof(*bfb_settings->regs);
+
+	drvdata->bfb_settings = bfb_settings;
+	return 0;
+}
+
+static int msm_iommu_parse_dt(struct platform_device *pdev,
+				struct msm_iommu_drvdata *drvdata)
+{
+	struct device_node *child;
+	int ret = 0;
+	struct resource *r;
+
+	drvdata->dev = &pdev->dev;
+	msm_iommu_add_drv(drvdata);
+
+	ret = msm_iommu_parse_bfb_settings(pdev, drvdata);
+	if (ret)
+		goto fail;
+
+	for_each_child_of_node(pdev->dev.of_node, child) {
+		drvdata->ncb++;
+		if (!of_platform_device_create(child, NULL, &pdev->dev))
+			pr_err("Failed to create %s device\n", child->name);
+	}
+
+	drvdata->asid = devm_kzalloc(&pdev->dev, drvdata->ncb * sizeof(int),
+				     GFP_KERNEL);
+
+	if (!drvdata->asid) {
+		pr_err("Unable to get memory for asid array\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = of_property_read_string(pdev->dev.of_node, "label",
+				      &drvdata->name);
+	if (ret)
+		goto fail;
+
+	drvdata->sec_id = -1;
+	of_property_read_u32(pdev->dev.of_node, "qcom,iommu-secure-id",
+				&drvdata->sec_id);
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clk_base");
+	if (r) {
+		drvdata->clk_reg_virt = devm_ioremap(&pdev->dev, r->start,
+						     resource_size(r));
+		if (!drvdata->clk_reg_virt) {
+			pr_err("Failed to map resource for iommu clk: %pr\n",
+				r);
+			ret = -ENOMEM;
+			goto fail;
+		}
+	}
+
+	drvdata->halt_enabled = of_property_read_bool(pdev->dev.of_node,
+						      "qcom,iommu-enable-halt");
+
+	return 0;
+fail:
+	msm_iommu_remove_drv(drvdata);
+	return ret;
+}
+
+static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
+					struct iommu_pmon *pmon_info)
+{
+	int ret = 0;
+	int irq = platform_get_irq(pdev, 0);
+	unsigned int cls_prop_size;
+
+	if (irq > 0) {
+		pmon_info->iommu.evt_irq = irq;
+
+		ret = of_property_read_u32(pdev->dev.of_node,
+					   "qcom,iommu-pmu-ngroups",
+					   &pmon_info->num_groups);
+		if (ret) {
+			pr_err("Error reading qcom,iommu-pmu-ngroups\n");
+			goto fail;
+		}
+		ret = of_property_read_u32(pdev->dev.of_node,
+					   "qcom,iommu-pmu-ncounters",
+					   &pmon_info->num_counters);
+		if (ret) {
+			pr_err("Error reading qcom,iommu-pmu-ncounters\n");
+			goto fail;
+		}
+
+		if (!of_get_property(pdev->dev.of_node,
+				     "qcom,iommu-pmu-event-classes",
+				     &cls_prop_size)) {
+			pr_err("Error reading qcom,iommu-pmu-event-classes\n");
+			return -EINVAL;
+		}
+
+		pmon_info->event_cls_supported =
+			   devm_kzalloc(&pdev->dev, cls_prop_size, GFP_KERNEL);
+
+		if (!pmon_info->event_cls_supported) {
+			pr_err("Unable to get memory for event class array\n");
+			return -ENOMEM;
+		}
+
+		pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);
+
+		ret = of_property_read_u32_array(pdev->dev.of_node,
+					"qcom,iommu-pmu-event-classes",
+					pmon_info->event_cls_supported,
+					pmon_info->nevent_cls_supported);
+		if (ret) {
+			pr_err("Error reading qcom,iommu-pmu-event-classes\n");
+			return ret;
+		}
+	} else {
+		pmon_info->iommu.evt_irq = -1;
+		ret = irq;
+	}
+
+fail:
+	return ret;
+}
+
+static int __devinit msm_iommu_probe(struct platform_device *pdev)
+{
+	struct iommu_pmon *pmon_info;
+	struct msm_iommu_drvdata *drvdata;
+	struct resource *r;
+	int ret, needs_alt_core_clk;
+
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
+	if (!r)
+		return -EINVAL;
+
+	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	drvdata->glb_base = drvdata->base;
+
+	drvdata->gdsc = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(drvdata->gdsc))
+		return -EINVAL;
+
+	drvdata->alt_gdsc = devm_regulator_get(&pdev->dev, "qcom,alt-vdd");
+	if (IS_ERR(drvdata->alt_gdsc))
+		drvdata->alt_gdsc = NULL;
+
+	drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(drvdata->pclk))
+		return PTR_ERR(drvdata->pclk);
+
+	drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	needs_alt_core_clk = of_property_read_bool(pdev->dev.of_node,
+						   "qcom,needs-alt-core-clk");
+	if (needs_alt_core_clk) {
+		drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
+		if (IS_ERR(drvdata->aclk))
+			return PTR_ERR(drvdata->aclk);
+	}
+
+	if (clk_get_rate(drvdata->clk) == 0) {
+		ret = clk_round_rate(drvdata->clk, 1000);
+		clk_set_rate(drvdata->clk, ret);
+	}
+
+	if (drvdata->aclk && clk_get_rate(drvdata->aclk) == 0) {
+		ret = clk_round_rate(drvdata->aclk, 1000);
+		clk_set_rate(drvdata->aclk, ret);
+	}
+
+	ret = msm_iommu_parse_dt(pdev, drvdata);
+	if (ret)
+		return ret;
+
+	dev_info(&pdev->dev, "device %s mapped at %p, with %d ctx banks\n",
+		drvdata->name, drvdata->base, drvdata->ncb);
+
+	platform_set_drvdata(pdev, drvdata);
+
+	msm_iommu_sec_set_access_ops(&iommu_access_ops_v1);
+
+	pmon_info = msm_iommu_pm_alloc(&pdev->dev);
+	if (pmon_info != NULL) {
+		ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
+		if (ret) {
+			msm_iommu_pm_free(&pdev->dev);
+			pr_info("%s: pmon not available.\n", drvdata->name);
+		} else {
+			pmon_info->iommu.base = drvdata->base;
+			pmon_info->iommu.ops = &iommu_access_ops_v1;
+			pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
+			pmon_info->iommu.iommu_name = drvdata->name;
+			ret = msm_iommu_pm_iommu_register(pmon_info);
+			if (ret) {
+				pr_err("%s iommu register fail\n",
+								drvdata->name);
+				msm_iommu_pm_free(&pdev->dev);
+			} else {
+				pr_debug("%s iommu registered for pmon\n",
+						pmon_info->iommu.iommu_name);
+			}
+		}
+	}
+	return 0;
+}
+
+static int __devexit msm_iommu_remove(struct platform_device *pdev)
+{
+	struct msm_iommu_drvdata *drv = NULL;
+
+	msm_iommu_pm_iommu_unregister(&pdev->dev);
+	msm_iommu_pm_free(&pdev->dev);
+
+	drv = platform_get_drvdata(pdev);
+	if (drv) {
+		msm_iommu_remove_drv(drv);
+		if (drv->clk)
+			clk_put(drv->clk);
+		clk_put(drv->pclk);
+		platform_set_drvdata(pdev, NULL);
+	}
+	return 0;
+}
+
+static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
+				struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+	struct resource *r, rp;
+	int irq, ret;
+	u32 nsid;
+
+	ctx_drvdata->secure_context = of_property_read_bool(pdev->dev.of_node,
+							"qcom,secure-context");
+
+	if (!ctx_drvdata->secure_context) {
+		irq = platform_get_irq(pdev, 0);
+		if (irq > 0) {
+			ret = request_threaded_irq(irq, NULL,
+					msm_iommu_fault_handler_v2,
+					IRQF_ONESHOT | IRQF_SHARED,
+					"msm_iommu_nonsecure_irq", pdev);
+			if (ret) {
+				pr_err("Request IRQ %d failed with ret=%d\n",
+					irq, ret);
+				return ret;
+			}
+		}
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r)
+		return -EINVAL;
+
+	ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
+	if (ret)
+		return -EINVAL;
+
+	/* Calculate the context bank number using the base addresses. The
+	 * first 8 pages belong to the global address space which is followed
+ * by the context banks, hence subtract 8 to get the context bank
+	 * number.
+	 */
+	ctx_drvdata->num = ((r->start - rp.start) >> CTX_SHIFT) - 8;
+
+	if (of_property_read_string(pdev->dev.of_node, "label",
+					&ctx_drvdata->name))
+		ctx_drvdata->name = dev_name(&pdev->dev);
+
+	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid))
+		return -EINVAL;
+
+	if (nsid >= sizeof(ctx_drvdata->sids))
+		return -EINVAL;
+
+	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
+				       ctx_drvdata->sids,
+				       nsid / sizeof(*ctx_drvdata->sids))) {
+		return -EINVAL;
+	}
+	ctx_drvdata->nsid = nsid;
+
+	ctx_drvdata->asid = -1;
+	return 0;
+}
+
+static int __devinit msm_iommu_ctx_probe(struct platform_device *pdev)
+{
+	struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL;
+	int ret;
+
+	if (!pdev->dev.parent)
+		return -EINVAL;
+
+	ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
+					GFP_KERNEL);
+	if (!ctx_drvdata)
+		return -ENOMEM;
+
+	ctx_drvdata->pdev = pdev;
+	INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
+	platform_set_drvdata(pdev, ctx_drvdata);
+
+	ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
+	if (!ret)
+		dev_info(&pdev->dev, "context %s using bank %d\n",
+			 ctx_drvdata->name, ctx_drvdata->num);
+
+	return ret;
+}
+
+static int __devexit msm_iommu_ctx_remove(struct platform_device *pdev)
+{
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static struct of_device_id msm_iommu_match_table[] = {
+	{ .compatible = "qcom,msm-smmu-v1", },
+	{}
+};
+
+static struct platform_driver msm_iommu_driver = {
+	.driver = {
+		.name	= "msm_iommu_v1",
+		.of_match_table = msm_iommu_match_table,
+	},
+	.probe		= msm_iommu_probe,
+	.remove		= __devexit_p(msm_iommu_remove),
+};
+
+static struct of_device_id msm_iommu_ctx_match_table[] = {
+	{ .name = "qcom,iommu-ctx", },
+	{}
+};
+
+static struct platform_driver msm_iommu_ctx_driver = {
+	.driver = {
+		.name	= "msm_iommu_ctx_v1",
+		.of_match_table = msm_iommu_ctx_match_table,
+	},
+	.probe		= msm_iommu_ctx_probe,
+	.remove		= __devexit_p(msm_iommu_ctx_remove),
+};
+
+static int __init msm_iommu_driver_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&msm_iommu_driver);
+	if (ret != 0) {
+		pr_err("Failed to register IOMMU driver\n");
+		goto error;
+	}
+
+	ret = platform_driver_register(&msm_iommu_ctx_driver);
+	if (ret != 0) {
+		platform_driver_unregister(&msm_iommu_driver);
+		pr_err("Failed to register IOMMU context driver\n");
+		goto error;
+	}
+
+error:
+	return ret;
+}
+
+static void __exit msm_iommu_driver_exit(void)
+{
+	platform_driver_unregister(&msm_iommu_ctx_driver);
+	platform_driver_unregister(&msm_iommu_driver);
+}
+
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
+MODULE_LICENSE("GPL v2");
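
Both new dev files bootstrap an unconfigured core clock by rounding a nominal 1000 Hz request; the in-tree code ignores the clk_round_rate()/clk_set_rate() results. A defensively written variant, assuming only the standard clk API:

static int example_bootstrap_clk_rate(struct clk *clk)
{
	long rate;

	if (clk_get_rate(clk) != 0)
		return 0;			/* already configured */

	rate = clk_round_rate(clk, 1000);
	if (rate <= 0)
		return rate < 0 ? (int)rate : -EINVAL;

	return clk_set_rate(clk, rate);
}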
diff --git a/drivers/iommu/msm_iommu_dev-v2.c b/drivers/iommu/msm_iommu_dev-v2.c
deleted file mode 100644
index 4c69c8c..0000000
--- a/drivers/iommu/msm_iommu_dev-v2.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/* Copyright (c) 2012 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/iommu.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-
-#include <mach/iommu_hw-v2.h>
-#include <mach/iommu.h>
-
-static int msm_iommu_parse_dt(struct platform_device *pdev,
-				struct msm_iommu_drvdata *drvdata)
-{
-	struct device_node *child;
-	int ret = 0;
-	u32 nsmr;
-
-	ret = device_move(&pdev->dev, &msm_iommu_root_dev->dev, DPM_ORDER_NONE);
-	if (ret)
-		goto fail;
-
-	ret = of_property_read_u32(pdev->dev.of_node, "qcom,iommu-smt-size",
-				   &nsmr);
-	if (ret)
-		goto fail;
-
-	if (nsmr > MAX_NUM_SMR) {
-		pr_err("Invalid SMT size: %d\n", nsmr);
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	drvdata->nsmr = nsmr;
-	for_each_child_of_node(pdev->dev.of_node, child) {
-		drvdata->ncb++;
-		if (!of_platform_device_create(child, NULL, &pdev->dev))
-			pr_err("Failed to create %s device\n", child->name);
-	}
-
-	drvdata->name = dev_name(&pdev->dev);
-fail:
-	return ret;
-}
-
-static atomic_t msm_iommu_next_id = ATOMIC_INIT(-1);
-
-static int __devinit msm_iommu_probe(struct platform_device *pdev)
-{
-	struct msm_iommu_drvdata *drvdata;
-	struct resource *r;
-	int ret, needs_alt_core_clk;
-
-	if (msm_iommu_root_dev == pdev)
-		return 0;
-
-	if (pdev->id == -1)
-		pdev->id = atomic_inc_return(&msm_iommu_next_id) - 1;
-
-	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
-	if (!drvdata)
-		return -ENOMEM;
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!r)
-		return -EINVAL;
-
-	drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
-	if (!drvdata->base)
-		return -ENOMEM;
-
-	drvdata->gdsc = devm_regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(drvdata->gdsc))
-		return -EINVAL;
-
-	drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
-	if (IS_ERR(drvdata->pclk))
-		return PTR_ERR(drvdata->pclk);
-
-	drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
-	if (IS_ERR(drvdata->clk))
-		return PTR_ERR(drvdata->clk);
-
-	needs_alt_core_clk = of_property_read_bool(pdev->dev.of_node,
-						   "qcom,needs-alt-core-clk");
-	if (needs_alt_core_clk) {
-		drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
-		if (IS_ERR(drvdata->aclk))
-			return PTR_ERR(drvdata->aclk);
-	}
-
-	if (clk_get_rate(drvdata->clk) == 0) {
-		ret = clk_round_rate(drvdata->clk, 1);
-		clk_set_rate(drvdata->clk, ret);
-	}
-
-	if (drvdata->aclk && clk_get_rate(drvdata->aclk) == 0) {
-		ret = clk_round_rate(drvdata->aclk, 1);
-		clk_set_rate(drvdata->aclk, ret);
-	}
-
-	ret = msm_iommu_parse_dt(pdev, drvdata);
-	if (ret)
-		return ret;
-
-	pr_info("device %s mapped at %p, with %d ctx banks\n",
-		drvdata->name, drvdata->base, drvdata->ncb);
-
-	platform_set_drvdata(pdev, drvdata);
-
-	return 0;
-}
-
-static int __devexit msm_iommu_remove(struct platform_device *pdev)
-{
-	struct msm_iommu_drvdata *drv = NULL;
-
-	drv = platform_get_drvdata(pdev);
-	if (drv) {
-		if (drv->clk)
-			clk_put(drv->clk);
-		clk_put(drv->pclk);
-		platform_set_drvdata(pdev, NULL);
-	}
-	return 0;
-}
-
-static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
-				struct msm_iommu_ctx_drvdata *ctx_drvdata)
-{
-	struct resource *r, rp;
-	int irq, ret;
-	u32 nsid;
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq > 0) {
-		ret = request_threaded_irq(irq, NULL,
-				msm_iommu_fault_handler_v2,
-				IRQF_ONESHOT | IRQF_SHARED,
-				"msm_iommu_nonsecure_irq", pdev);
-		if (ret) {
-			pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
-			return ret;
-		}
-	}
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!r)
-		return -EINVAL;
-
-	ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
-	if (ret)
-		return -EINVAL;
-
-	/* Calculate the context bank number using the base addresses. The
-	 * first 8 pages belong to the global address space which is followed
-	 * by the context banks, hence subtract by 8 to get the context bank
-	 * number.
-	 */
-	ctx_drvdata->num = ((r->start - rp.start) >> CTX_SHIFT) - 8;
-
-	if (of_property_read_string(pdev->dev.of_node, "label",
-					&ctx_drvdata->name))
-		ctx_drvdata->name = dev_name(&pdev->dev);
-
-	if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid))
-		return -EINVAL;
-
-	if (nsid >= sizeof(ctx_drvdata->sids))
-		return -EINVAL;
-
-	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
-				       ctx_drvdata->sids,
-				       nsid / sizeof(*ctx_drvdata->sids))) {
-		return -EINVAL;
-	}
-	ctx_drvdata->nsid = nsid;
-
-	return 0;
-}
-
-static int __devinit msm_iommu_ctx_probe(struct platform_device *pdev)
-{
-	struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL;
-	int ret;
-
-	if (!pdev->dev.parent)
-		return -EINVAL;
-
-	ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
-					GFP_KERNEL);
-	if (!ctx_drvdata)
-		return -ENOMEM;
-
-	ctx_drvdata->pdev = pdev;
-	INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
-	platform_set_drvdata(pdev, ctx_drvdata);
-
-	ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
-	if (!ret)
-		dev_info(&pdev->dev, "context %s using bank %d\n",
-			 ctx_drvdata->name, ctx_drvdata->num);
-
-	return ret;
-}
-
-static int __devexit msm_iommu_ctx_remove(struct platform_device *pdev)
-{
-	platform_set_drvdata(pdev, NULL);
-	return 0;
-}
-
-static struct of_device_id msm_iommu_match_table[] = {
-	{ .compatible = "qcom,msm-smmu-v2", },
-	{}
-};
-
-static struct platform_driver msm_iommu_driver = {
-	.driver = {
-		.name	= "msm_iommu_v2",
-		.of_match_table = msm_iommu_match_table,
-	},
-	.probe		= msm_iommu_probe,
-	.remove		= __devexit_p(msm_iommu_remove),
-};
-
-static struct of_device_id msm_iommu_ctx_match_table[] = {
-	{ .name = "qcom,iommu-ctx", },
-	{}
-};
-
-static struct platform_driver msm_iommu_ctx_driver = {
-	.driver = {
-		.name	= "msm_iommu_ctx_v2",
-		.of_match_table = msm_iommu_ctx_match_table,
-	},
-	.probe		= msm_iommu_ctx_probe,
-	.remove		= __devexit_p(msm_iommu_ctx_remove),
-};
-
-static int __init msm_iommu_driver_init(void)
-{
-	struct device_node *node;
-	int ret;
-
-	node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v2");
-	if (!node)
-		return -ENODEV;
-
-	of_node_put(node);
-
-	msm_iommu_root_dev = platform_device_register_simple(
-						"msm_iommu", -1, 0, 0);
-	if (!msm_iommu_root_dev) {
-		pr_err("Failed to create root IOMMU device\n");
-		ret = -ENODEV;
-		goto error;
-	}
-
-	atomic_inc(&msm_iommu_next_id);
-
-	ret = platform_driver_register(&msm_iommu_driver);
-	if (ret != 0) {
-		pr_err("Failed to register IOMMU driver\n");
-		goto error;
-	}
-
-	ret = platform_driver_register(&msm_iommu_ctx_driver);
-	if (ret != 0) {
-		pr_err("Failed to register IOMMU context driver\n");
-		goto error;
-	}
-
-error:
-	return ret;
-}
-
-static void __exit msm_iommu_driver_exit(void)
-{
-	platform_driver_unregister(&msm_iommu_ctx_driver);
-	platform_driver_unregister(&msm_iommu_driver);
-	platform_device_unregister(msm_iommu_root_dev);
-}
-
-subsys_initcall(msm_iommu_driver_init);
-module_exit(msm_iommu_driver_exit);
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
deleted file mode 100644
index fca102a..0000000
--- a/drivers/iommu/msm_iommu_dev.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/iommu.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-
-#include <mach/iommu_hw-8xxx.h>
-#include <mach/iommu.h>
-
-struct iommu_ctx_iter_data {
-	/* input */
-	const char *name;
-
-	/* output */
-	struct device *dev;
-};
-
-struct platform_device *msm_iommu_root_dev;
-
-static int each_iommu_ctx(struct device *dev, void *data)
-{
-	struct iommu_ctx_iter_data *res = data;
-	struct msm_iommu_ctx_drvdata *c;
-
-	c = dev_get_drvdata(dev);
-	if (!res || !c || !c->name || !res->name)
-		return -EINVAL;
-
-	if (!strcmp(res->name, c->name)) {
-		res->dev = dev;
-		return 1;
-	}
-	return 0;
-}
-
-static int each_iommu(struct device *dev, void *data)
-{
-	return device_for_each_child(dev, data, each_iommu_ctx);
-}
-
-struct device *msm_iommu_get_ctx(const char *ctx_name)
-{
-	struct iommu_ctx_iter_data r;
-	int found;
-
-	if (!msm_iommu_root_dev) {
-		pr_err("No root IOMMU device.\n");
-		goto fail;
-	}
-
-	r.name = ctx_name;
-	found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu);
-
-	if (found <= 0 || !dev_get_drvdata(r.dev)) {
-		pr_err("Could not find context <%s>\n", ctx_name);
-		goto fail;
-	}
-
-	return r.dev;
-fail:
-	return NULL;
-}
-EXPORT_SYMBOL(msm_iommu_get_ctx);
-
-static void msm_iommu_reset(void __iomem *base, int ncb)
-{
-	int ctx;
-
-	SET_RPUE(base, 0);
-	SET_RPUEIE(base, 0);
-	SET_ESRRESTORE(base, 0);
-	SET_TBE(base, 0);
-	SET_CR(base, 0);
-	SET_SPDMBE(base, 0);
-	SET_TESTBUSCR(base, 0);
-	SET_TLBRSW(base, 0);
-	SET_GLOBAL_TLBIALL(base, 0);
-	SET_RPU_ACR(base, 0);
-	SET_TLBLKCRWE(base, 1);
-
-	for (ctx = 0; ctx < ncb; ctx++) {
-		SET_BPRCOSH(base, ctx, 0);
-		SET_BPRCISH(base, ctx, 0);
-		SET_BPRCNSH(base, ctx, 0);
-		SET_BPSHCFG(base, ctx, 0);
-		SET_BPMTCFG(base, ctx, 0);
-		SET_ACTLR(base, ctx, 0);
-		SET_SCTLR(base, ctx, 0);
-		SET_FSRRESTORE(base, ctx, 0);
-		SET_TTBR0(base, ctx, 0);
-		SET_TTBR1(base, ctx, 0);
-		SET_TTBCR(base, ctx, 0);
-		SET_BFBCR(base, ctx, 0);
-		SET_PAR(base, ctx, 0);
-		SET_FAR(base, ctx, 0);
-		SET_TLBFLPTER(base, ctx, 0);
-		SET_TLBSLPTER(base, ctx, 0);
-		SET_TLBLKCR(base, ctx, 0);
-		SET_CTX_TLBIALL(base, ctx, 0);
-		SET_TLBIVA(base, ctx, 0);
-		SET_PRRR(base, ctx, 0);
-		SET_NMRR(base, ctx, 0);
-		SET_CONTEXTIDR(base, ctx, 0);
-	}
-	mb();
-}
-
-static int msm_iommu_probe(struct platform_device *pdev)
-{
-	struct resource *r, *r2;
-	struct clk *iommu_clk = NULL;
-	struct clk *iommu_pclk = NULL;
-	struct msm_iommu_drvdata *drvdata;
-	struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
-	void __iomem *regs_base;
-	resource_size_t	len;
-	int ret, par;
-
-	if (pdev->id == -1) {
-		msm_iommu_root_dev = pdev;
-		return 0;
-	}
-
-	drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
-
-	if (!drvdata) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	if (!iommu_dev) {
-		ret = -ENODEV;
-		goto fail;
-	}
-
-	iommu_pclk = clk_get_sys("msm_iommu", "iface_clk");
-	if (IS_ERR(iommu_pclk)) {
-		ret = -ENODEV;
-		goto fail;
-	}
-
-	ret = clk_prepare_enable(iommu_pclk);
-	if (ret)
-		goto fail_enable;
-
-	iommu_clk = clk_get(&pdev->dev, "core_clk");
-
-	if (!IS_ERR(iommu_clk))	{
-		if (clk_get_rate(iommu_clk) == 0) {
-			ret = clk_round_rate(iommu_clk, 1);
-			clk_set_rate(iommu_clk, ret);
-		}
-
-		ret = clk_prepare_enable(iommu_clk);
-		if (ret) {
-			clk_put(iommu_clk);
-			goto fail_pclk;
-		}
-	} else
-		iommu_clk = NULL;
-
-	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
-
-	if (!r) {
-		ret = -ENODEV;
-		goto fail_clk;
-	}
-
-	len = resource_size(r);
-
-	r2 = request_mem_region(r->start, len, r->name);
-	if (!r2) {
-		pr_err("Could not request memory region: start=%p, len=%d\n",
-							(void *) r->start, len);
-		ret = -EBUSY;
-		goto fail_clk;
-	}
-
-	regs_base = ioremap(r2->start, len);
-
-	if (!regs_base) {
-		pr_err("Could not ioremap: start=%p, len=%d\n",
-			 (void *) r2->start, len);
-		ret = -EBUSY;
-		goto fail_mem;
-	}
-
-	msm_iommu_reset(regs_base, iommu_dev->ncb);
-
-	SET_M(regs_base, 0, 1);
-	SET_PAR(regs_base, 0, 0);
-	SET_V2PCFG(regs_base, 0, 1);
-	SET_V2PPR(regs_base, 0, 0);
-	mb();
-	par = GET_PAR(regs_base, 0);
-	SET_V2PCFG(regs_base, 0, 0);
-	SET_M(regs_base, 0, 0);
-	mb();
-
-	if (!par) {
-		pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
-		ret = -ENODEV;
-		goto fail_io;
-	}
-
-	drvdata->pclk = iommu_pclk;
-	drvdata->clk = iommu_clk;
-	drvdata->base = regs_base;
-	drvdata->ncb = iommu_dev->ncb;
-	drvdata->ttbr_split = iommu_dev->ttbr_split;
-	drvdata->name = iommu_dev->name;
-
-	pr_info("device %s mapped at %p, with %d ctx banks\n",
-		iommu_dev->name, regs_base, iommu_dev->ncb);
-
-	platform_set_drvdata(pdev, drvdata);
-
-	if (iommu_clk)
-		clk_disable_unprepare(iommu_clk);
-
-	clk_disable_unprepare(iommu_pclk);
-
-	return 0;
-fail_io:
-	iounmap(regs_base);
-fail_mem:
-	release_mem_region(r->start, len);
-fail_clk:
-	if (iommu_clk) {
-		clk_disable_unprepare(iommu_clk);
-		clk_put(iommu_clk);
-	}
-fail_pclk:
-	clk_disable_unprepare(iommu_pclk);
-fail_enable:
-	clk_put(iommu_pclk);
-fail:
-	kfree(drvdata);
-	return ret;
-}
-
-static int msm_iommu_remove(struct platform_device *pdev)
-{
-	struct msm_iommu_drvdata *drv = NULL;
-
-	drv = platform_get_drvdata(pdev);
-	if (drv) {
-		if (drv->clk)
-			clk_put(drv->clk);
-		clk_put(drv->pclk);
-		memset(drv, 0, sizeof(*drv));
-		kfree(drv);
-		platform_set_drvdata(pdev, NULL);
-	}
-	return 0;
-}
-
-static int msm_iommu_ctx_probe(struct platform_device *pdev)
-{
-	struct msm_iommu_ctx_dev *c = pdev->dev.platform_data;
-	struct msm_iommu_drvdata *drvdata;
-	struct msm_iommu_ctx_drvdata *ctx_drvdata = NULL;
-	int i, ret, irq;
-	if (!c || !pdev->dev.parent) {
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	drvdata = dev_get_drvdata(pdev->dev.parent);
-
-	if (!drvdata) {
-		ret = -ENODEV;
-		goto fail;
-	}
-
-	ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL);
-	if (!ctx_drvdata) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-	ctx_drvdata->num = c->num;
-	ctx_drvdata->pdev = pdev;
-	ctx_drvdata->name = c->name;
-
-	irq = platform_get_irq_byname(to_platform_device(pdev->dev.parent),
-				      "nonsecure_irq");
-	if (irq < 0) {
-		ret = -ENODEV;
-		goto fail;
-	}
-
-	ret = request_threaded_irq(irq, NULL, msm_iommu_fault_handler,
-				   IRQF_ONESHOT | IRQF_SHARED,
-				   "msm_iommu_nonsecure_irq", ctx_drvdata);
-
-	if (ret) {
-		pr_err("request_threaded_irq %d failed: %d\n", irq, ret);
-		goto fail;
-	}
-
-	INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
-	platform_set_drvdata(pdev, ctx_drvdata);
-
-	ret = clk_prepare_enable(drvdata->pclk);
-	if (ret)
-		goto fail;
-
-	if (drvdata->clk) {
-		ret = clk_prepare_enable(drvdata->clk);
-		if (ret) {
-			clk_disable_unprepare(drvdata->pclk);
-			goto fail;
-		}
-	}
-
-	/* Program the M2V tables for this context */
-	for (i = 0; i < MAX_NUM_MIDS; i++) {
-		int mid = c->mids[i];
-		if (mid == -1)
-			break;
-
-		SET_M2VCBR_N(drvdata->base, mid, 0);
-		SET_CBACR_N(drvdata->base, c->num, 0);
-
-		/* Route page faults to the non-secure interrupt */
-		SET_IRPTNDX(drvdata->base, c->num, 1);
-
-		/* Set VMID = 0 */
-		SET_VMID(drvdata->base, mid, 0);
-
-		/* Set the context number for that MID to this context */
-		SET_CBNDX(drvdata->base, mid, c->num);
-
-		/* Set MID associated with this context bank to 0 */
-		SET_CBVMID(drvdata->base, c->num, 0);
-
-		/* Set the ASID for TLB tagging for this context to 0 */
-		SET_CONTEXTIDR_ASID(drvdata->base, c->num, 0);
-
-		/* Set security bit override to be Non-secure */
-		SET_NSCFG(drvdata->base, mid, 3);
-	}
-	mb();
-
-	if (drvdata->clk)
-		clk_disable_unprepare(drvdata->clk);
-	clk_disable_unprepare(drvdata->pclk);
-
-	dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
-	return 0;
-fail:
-	kfree(ctx_drvdata);
-	return ret;
-}
-
-static int msm_iommu_ctx_remove(struct platform_device *pdev)
-{
-	struct msm_iommu_ctx_drvdata *drv = NULL;
-	drv = platform_get_drvdata(pdev);
-	if (drv) {
-		memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
-		kfree(drv);
-		platform_set_drvdata(pdev, NULL);
-	}
-	return 0;
-}
-
-static struct platform_driver msm_iommu_driver = {
-	.driver = {
-		.name	= "msm_iommu",
-	},
-	.probe		= msm_iommu_probe,
-	.remove		= msm_iommu_remove,
-};
-
-static struct platform_driver msm_iommu_ctx_driver = {
-	.driver = {
-		.name	= "msm_iommu_ctx",
-	},
-	.probe		= msm_iommu_ctx_probe,
-	.remove		= msm_iommu_ctx_remove,
-};
-
-static int __init msm_iommu_driver_init(void)
-{
-	int ret;
-	ret = platform_driver_register(&msm_iommu_driver);
-	if (ret != 0) {
-		pr_err("Failed to register IOMMU driver\n");
-		goto error;
-	}
-
-	ret = platform_driver_register(&msm_iommu_ctx_driver);
-	if (ret != 0) {
-		pr_err("Failed to register IOMMU context driver\n");
-		goto error;
-	}
-
-error:
-	return ret;
-}
-
-static void __exit msm_iommu_driver_exit(void)
-{
-	platform_driver_unregister(&msm_iommu_ctx_driver);
-	platform_driver_unregister(&msm_iommu_driver);
-}
-
-subsys_initcall(msm_iommu_driver_init);
-module_exit(msm_iommu_driver_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/msm_iommu_pagetable.c b/drivers/iommu/msm_iommu_pagetable.c
index 2ee9ba6..b62bb76 100644
--- a/drivers/iommu/msm_iommu_pagetable.c
+++ b/drivers/iommu/msm_iommu_pagetable.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 
 #include <mach/iommu.h>
+#include <mach/msm_iommu_priv.h>
 #include "msm_iommu_pagetable.h"
 
 /* Sharability attributes of MSM IOMMU mappings */
@@ -41,7 +42,7 @@
 		dmac_flush_range(start, end);
 }
 
-int msm_iommu_pagetable_alloc(struct iommu_pt *pt)
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
 {
 	pt->fl_table = (unsigned long *)__get_free_pages(GFP_KERNEL,
 							  get_order(SZ_16K));
@@ -54,7 +55,7 @@
 	return 0;
 }
 
-void msm_iommu_pagetable_free(struct iommu_pt *pt)
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
 {
 	unsigned long *fl_table;
 	int i;
@@ -110,7 +111,91 @@
 	return pgprot;
 }
 
-int msm_iommu_pagetable_map(struct iommu_pt *pt, unsigned long va,
+static unsigned long *make_second_level(struct msm_iommu_pt *pt,
+					unsigned long *fl_pte)
+{
+	unsigned long *sl;
+	sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
+			get_order(SZ_4K));
+
+	if (!sl) {
+		pr_debug("Could not allocate second level table\n");
+		goto fail;
+	}
+	memset(sl, 0, SZ_4K);
+	clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);
+
+	*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
+			FL_TYPE_TABLE);
+
+	clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+fail:
+	return sl;
+}
+
+static int sl_4k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int ret = 0;
+
+	if (*sl_pte) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
+		| SL_TYPE_SMALL | pgprot;
+fail:
+	return ret;
+}
+
+static int sl_64k(unsigned long *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < 16; i++)
+		if (*(sl_pte+i)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+
+	for (i = 0; i < 16; i++)
+		*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
+				| SL_SHARED | SL_TYPE_LARGE | pgprot;
+
+fail:
+	return ret;
+}
+
+static inline int fl_1m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
+{
+	if (*fl_pte)
+		return -EBUSY;
+
+	*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
+		| pgprot;
+
+	return 0;
+}
+
+static inline int fl_16m(unsigned long *fl_pte, phys_addr_t pa, int pgprot)
+{
+	int i;
+	int ret = 0;
+	for (i = 0; i < 16; i++)
+		if (*(fl_pte+i)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+	for (i = 0; i < 16; i++)
+		*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
+			| FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+fail:
+	return ret;
+}
+
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
 			phys_addr_t pa, size_t len, int prot)
 {
 	unsigned long *fl_pte;
@@ -144,28 +229,16 @@
 	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
 
 	if (len == SZ_16M) {
-		int i = 0;
-
-		for (i = 0; i < 16; i++)
-			if (*(fl_pte+i)) {
-				ret = -EBUSY;
-				goto fail;
-			}
-
-		for (i = 0; i < 16; i++)
-			*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
-				  FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+		ret = fl_16m(fl_pte, pa, pgprot);
+		if (ret)
+			goto fail;
 		clean_pte(fl_pte, fl_pte + 16, pt->redirect);
 	}
 
 	if (len == SZ_1M) {
-		if (*fl_pte) {
-			ret = -EBUSY;
+		ret = fl_1m(fl_pte, pa, pgprot);
+		if (ret)
 			goto fail;
-		}
-
-		*fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT
-					| FL_SHARED | pgprot;
 		clean_pte(fl_pte, fl_pte + 1, pt->redirect);
 	}
 
@@ -173,21 +246,10 @@
 	if (len == SZ_4K || len == SZ_64K) {
 
 		if (*fl_pte == 0) {
-			unsigned long *sl;
-			sl = (unsigned long *) __get_free_pages(GFP_KERNEL,
-							get_order(SZ_4K));
-
-			if (!sl) {
-				pr_debug("Could not allocate second level table\n");
+			if (make_second_level(pt, fl_pte) == NULL) {
 				ret = -ENOMEM;
 				goto fail;
 			}
-			memset(sl, 0, SZ_4K);
-			clean_pte(sl, sl + NUM_SL_PTE, pt->redirect);
-
-			*fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
-						      FL_TYPE_TABLE);
-			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
 		}
 
 		if (!(*fl_pte & FL_TYPE_TABLE)) {
@@ -201,29 +263,16 @@
 	sl_pte = sl_table + sl_offset;
 
 	if (len == SZ_4K) {
-		if (*sl_pte) {
-			ret = -EBUSY;
+		ret = sl_4k(sl_pte, pa, pgprot);
+		if (ret)
 			goto fail;
-		}
-
-		*sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
-						| SL_TYPE_SMALL | pgprot;
 		clean_pte(sl_pte, sl_pte + 1, pt->redirect);
 	}
 
 	if (len == SZ_64K) {
-		int i;
-
-		for (i = 0; i < 16; i++)
-			if (*(sl_pte+i)) {
-				ret = -EBUSY;
-				goto fail;
-			}
-
-		for (i = 0; i < 16; i++)
-			*(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
-					| SL_SHARED | SL_TYPE_LARGE | pgprot;
-
+		ret = sl_64k(sl_pte, pa, pgprot);
+		if (ret)
+			goto fail;
 		clean_pte(sl_pte, sl_pte + 16, pt->redirect);
 	}
 
@@ -231,7 +280,7 @@
 	return ret;
 }
 
-size_t msm_iommu_pagetable_unmap(struct iommu_pt *pt, unsigned long va,
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
 				size_t len)
 {
 	unsigned long *fl_pte;
@@ -309,77 +358,165 @@
 	return ret;
 }
 
-static unsigned int get_phys_addr(struct scatterlist *sg)
+static phys_addr_t get_phys_addr(struct scatterlist *sg)
 {
 	/*
 	 * Try sg_dma_address first so that we can
 	 * map carveout regions that do not have a
 	 * struct page associated with them.
 	 */
-	unsigned int pa = sg_dma_address(sg);
+	phys_addr_t pa = sg_dma_address(sg);
 	if (pa == 0)
 		pa = sg_phys(sg);
 	return pa;
 }
 
-int msm_iommu_pagetable_map_range(struct iommu_pt *pt, unsigned int va,
-		       struct scatterlist *sg, unsigned int len, int prot)
+static int check_range(unsigned long *fl_table, unsigned int va,
+				 unsigned int len)
 {
-	unsigned int pa;
 	unsigned int offset = 0;
-	unsigned int pgprot;
 	unsigned long *fl_pte;
 	unsigned long fl_offset;
 	unsigned long *sl_table;
+	unsigned long sl_start, sl_end;
+	int i;
+
+	fl_offset = FL_OFFSET(va);	/* Upper 12 bits */
+	fl_pte = fl_table + fl_offset;	/* int pointers, 4 bytes */
+
+	while (offset < len) {
+		if (*fl_pte & FL_TYPE_TABLE) {
+			sl_start = SL_OFFSET(va);
+			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+			sl_end = ((len - offset) / SZ_4K) + sl_start;
+
+			if (sl_end > NUM_SL_PTE)
+				sl_end = NUM_SL_PTE;
+
+			for (i = sl_start; i < sl_end; i++) {
+				if (sl_table[i] != 0) {
+					pr_err("%08x - %08x already mapped\n",
+						va, va + SZ_4K);
+					return -EBUSY;
+				}
+				offset += SZ_4K;
+				va += SZ_4K;
+			}
+
+			sl_start = 0;
+		} else {
+			if (*fl_pte != 0) {
+				pr_err("%08x - %08x already mapped\n",
+				       va, va + SZ_1M);
+				return -EBUSY;
+			}
+			va += SZ_1M;
+			offset += SZ_1M;
+			sl_start = 0;
+		}
+		fl_pte++;
+	}
+	return 0;
+}
+
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+				   int align)
+{
+	return  IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+		&& (len >= align);
+}
+
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+		       struct scatterlist *sg, unsigned int len, int prot)
+{
+	phys_addr_t pa;
+	unsigned int offset = 0;
+	unsigned long *fl_pte;
+	unsigned long fl_offset;
+	unsigned long *sl_table = NULL;
 	unsigned long sl_offset, sl_start;
-	unsigned int chunk_offset = 0;
-	unsigned int chunk_pa;
+	unsigned int chunk_size, chunk_offset = 0;
 	int ret = 0;
+	unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
 
 	BUG_ON(len & (SZ_4K - 1));
 
-	pgprot = __get_pgprot(prot, SZ_4K);
-	if (!pgprot) {
+	pgprot4k = __get_pgprot(prot, SZ_4K);
+	pgprot64k = __get_pgprot(prot, SZ_64K);
+	pgprot1m = __get_pgprot(prot, SZ_1M);
+	pgprot16m = __get_pgprot(prot, SZ_16M);
+	if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
 		ret = -EINVAL;
 		goto fail;
 	}
 
 	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
 	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
+	pa = get_phys_addr(sg);
 
-	sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
-	sl_offset = SL_OFFSET(va);
-
-	chunk_pa = get_phys_addr(sg);
-	if (chunk_pa == 0) {
-		pr_debug("No dma address for sg %p\n", sg);
-		ret = -EINVAL;
+	ret = check_range(pt->fl_table, va, len);
+	if (ret)
 		goto fail;
-	}
 
 	while (offset < len) {
-		/* Set up a 2nd level page table if one doesn't exist */
-		if (*fl_pte == 0) {
-			sl_table = (unsigned long *)
-				 __get_free_pages(GFP_KERNEL, get_order(SZ_4K));
+		chunk_size = SZ_4K;
 
-			if (!sl_table) {
-				pr_debug("Could not allocate second level table\n");
+		if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+				     SZ_16M))
+			chunk_size = SZ_16M;
+		else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					  SZ_1M))
+			chunk_size = SZ_1M;
+		/* 64k or 4k determined later */
+
+		/* for 1M and 16M, only first level entries are required */
+		if (chunk_size >= SZ_1M) {
+			if (chunk_size == SZ_16M) {
+				ret = fl_16m(fl_pte, pa, pgprot16m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 16, pt->redirect);
+				fl_pte += 16;
+			} else if (chunk_size == SZ_1M) {
+				ret = fl_1m(fl_pte, pa, pgprot1m);
+				if (ret)
+					goto fail;
+				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+				fl_pte++;
+			}
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
+
+			if (chunk_offset >= sg->length && offset < len) {
+				chunk_offset = 0;
+				sg = sg_next(sg);
+				pa = get_phys_addr(sg);
+				if (pa == 0) {
+					pr_debug("No dma address for sg %p\n",
+							sg);
+					ret = -EINVAL;
+					goto fail;
+				}
+			}
+			continue;
+		}
+		/* for 4K or 64K, make sure there is a second level table */
+		if (*fl_pte == 0) {
+			if (!make_second_level(pt, fl_pte)) {
 				ret = -ENOMEM;
 				goto fail;
 			}
-
-			memset(sl_table, 0, SZ_4K);
-			clean_pte(sl_table, sl_table + NUM_SL_PTE,
-					pt->redirect);
-
-			*fl_pte = ((((int)__pa(sl_table)) & FL_BASE_MASK) |
-							    FL_TYPE_TABLE);
-			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
-		} else
-			sl_table = (unsigned long *)
-					       __va(((*fl_pte) & FL_BASE_MASK));
-
+		}
+		if (!(*fl_pte & FL_TYPE_TABLE)) {
+			ret = -EBUSY;
+			goto fail;
+		}
+		sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+		sl_offset = SL_OFFSET(va);
 		/* Keep track of initial position so we
 		 * don't clean more than we have to
 		 */
@@ -387,21 +524,38 @@
 
 		/* Build the 2nd level page table */
 		while (offset < len && sl_offset < NUM_SL_PTE) {
-			pa = chunk_pa + chunk_offset;
-			sl_table[sl_offset] = (pa & SL_BASE_MASK_SMALL) |
-			      pgprot | SL_NG | SL_SHARED | SL_TYPE_SMALL;
-			sl_offset++;
-			offset += SZ_4K;
+			/* Map a large 64K page if the chunk is large enough and
+			 * the pa and va are aligned
+			 */
 
-			chunk_offset += SZ_4K;
+			if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+					     SZ_64K))
+				chunk_size = SZ_64K;
+			else
+				chunk_size = SZ_4K;
+
+			if (chunk_size == SZ_4K) {
+				sl_4k(&sl_table[sl_offset], pa, pgprot4k);
+				sl_offset++;
+			} else {
+				BUG_ON(sl_offset + 16 > NUM_SL_PTE);
+				sl_64k(&sl_table[sl_offset], pa, pgprot64k);
+				sl_offset += 16;
+			}
+
+			offset += chunk_size;
+			chunk_offset += chunk_size;
+			va += chunk_size;
+			pa += chunk_size;
 
 			if (chunk_offset >= sg->length && offset < len) {
 				chunk_offset = 0;
 				sg = sg_next(sg);
-				chunk_pa = get_phys_addr(sg);
-				if (chunk_pa == 0) {
+				pa = get_phys_addr(sg);
+				if (pa == 0) {
 					pr_debug("No dma address for sg %p\n",
-						sg);
+							sg);
 					ret = -EINVAL;
 					goto fail;
 				}
@@ -418,7 +572,7 @@
 	return ret;
 }
 
-void msm_iommu_pagetable_unmap_range(struct iommu_pt *pt, unsigned int va,
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
 				 unsigned int len)
 {
 	unsigned int offset = 0;
@@ -433,44 +587,53 @@
 	fl_offset = FL_OFFSET(va);		/* Upper 12 bits */
 	fl_pte = pt->fl_table + fl_offset;	/* int pointers, 4 bytes */
 
-	sl_start = SL_OFFSET(va);
-
 	while (offset < len) {
-		sl_table = (unsigned long *) __va(((*fl_pte) & FL_BASE_MASK));
-		sl_end = ((len - offset) / SZ_4K) + sl_start;
+		if (*fl_pte & FL_TYPE_TABLE) {
+			sl_start = SL_OFFSET(va);
+			sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+			sl_end = ((len - offset) / SZ_4K) + sl_start;
 
-		if (sl_end > NUM_SL_PTE)
-			sl_end = NUM_SL_PTE;
+			if (sl_end > NUM_SL_PTE)
+				sl_end = NUM_SL_PTE;
 
-		memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
-		clean_pte(sl_table + sl_start, sl_table + sl_end,
-				pt->redirect);
+			memset(sl_table + sl_start, 0, (sl_end - sl_start) * 4);
+			clean_pte(sl_table + sl_start, sl_table + sl_end,
+					pt->redirect);
 
-		offset += (sl_end - sl_start) * SZ_4K;
+			offset += (sl_end - sl_start) * SZ_4K;
+			va += (sl_end - sl_start) * SZ_4K;
 
-		/* Unmap and free the 2nd level table if all mappings in it
-		 * were removed. This saves memory, but the table will need
-		 * to be re-allocated the next time someone tries to map these
-		 * VAs.
-		 */
-		used = 0;
+			/* Unmap and free the 2nd level table if all mappings
+			 * in it were removed. This saves memory, but the table
+			 * will need to be re-allocated the next time someone
+			 * tries to map these VAs.
+			 */
+			used = 0;
 
-		/* If we just unmapped the whole table, don't bother
-		 * seeing if there are still used entries left.
-		 */
-		if (sl_end - sl_start != NUM_SL_PTE)
-			for (i = 0; i < NUM_SL_PTE; i++)
-				if (sl_table[i]) {
-					used = 1;
-					break;
-				}
-		if (!used) {
-			free_page((unsigned long)sl_table);
+			/* If we just unmapped the whole table, don't bother
+			 * seeing if there are still used entries left.
+			 */
+			if (sl_end - sl_start != NUM_SL_PTE)
+				for (i = 0; i < NUM_SL_PTE; i++)
+					if (sl_table[i]) {
+						used = 1;
+						break;
+					}
+			if (!used) {
+				free_page((unsigned long)sl_table);
+				*fl_pte = 0;
+
+				clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+			}
+
+			sl_start = 0;
+		} else {
 			*fl_pte = 0;
 			clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+			va += SZ_1M;
+			offset += SZ_1M;
+			sl_start = 0;
 		}
-
-		sl_start = 0;
 		fl_pte++;
 	}
 }
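
The map_range rework above derives its mapping granularity from alignment
alone: 16M supersections or 1M sections when va, pa and the remaining length
allow it, otherwise 64K large pages or 4K small pages in a second-level
table. A minimal sketch of that selection rule (pick_chunk_size is an
illustrative helper, not part of the patch):

	#include <linux/kernel.h>
	#include <linux/sizes.h>

	static size_t pick_chunk_size(unsigned long va, phys_addr_t pa,
				      size_t remaining)
	{
		static const size_t sizes[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K };
		int i;

		/* largest size for which va, pa and length all qualify */
		for (i = 0; i < ARRAY_SIZE(sizes); i++)
			if (IS_ALIGNED(va, sizes[i]) &&
			    IS_ALIGNED(pa, sizes[i]) &&
			    remaining >= sizes[i])
				return sizes[i];
		/* not reached: map_range BUG_ON()s unless len is 4K-aligned */
		return 0;
	}
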
diff --git a/drivers/iommu/msm_iommu_pagetable.h b/drivers/iommu/msm_iommu_pagetable.h
index 3266681..7513aa5 100644
--- a/drivers/iommu/msm_iommu_pagetable.h
+++ b/drivers/iommu/msm_iommu_pagetable.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -71,20 +71,17 @@
 #define RCP15_PRRR(reg)   MRC(reg, p15, 0, c10, c2, 0)
 #define RCP15_NMRR(reg)   MRC(reg, p15, 0, c10, c2, 1)
 
-struct iommu_pt {
-	unsigned long *fl_table;
-	int redirect;
-};
+struct iommu_pt;
 
 void msm_iommu_pagetable_init(void);
-int msm_iommu_pagetable_alloc(struct iommu_pt *pt);
-void msm_iommu_pagetable_free(struct iommu_pt *pt);
-int msm_iommu_pagetable_map(struct iommu_pt *pt, unsigned long va,
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt);
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt);
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
 			phys_addr_t pa, size_t len, int prot);
-size_t msm_iommu_pagetable_unmap(struct iommu_pt *pt, unsigned long va,
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
 				size_t len);
-int msm_iommu_pagetable_map_range(struct iommu_pt *pt, unsigned int va,
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
 			struct scatterlist *sg, unsigned int len, int prot);
-void msm_iommu_pagetable_unmap_range(struct iommu_pt *pt, unsigned int va,
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
 				unsigned int len);
 #endif
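
For reference, the FL_OFFSET()/SL_OFFSET() index math used by this header's
callers follows the ARMv7 short-descriptor layout: 4096 first-level entries
covering 1M each, and 256 second-level entries covering 4K each (hence
NUM_SL_PTE is 256). A sketch of the arithmetic; the EX_-prefixed names are
illustrative, not the header's own macros:

	#define EX_FL_OFFSET(va)  (((va) & 0xFFF00000) >> 20)  /* upper 12 bits */
	#define EX_SL_OFFSET(va)  (((va) & 0x000FF000) >> 12)  /* next 8 bits */

	/*
	 * e.g. va 0x12345000 -> first-level index 0x123,
	 * second-level index 0x45, page offset 0x000
	 */
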
diff --git a/drivers/iommu/msm_iommu_perfmon-v0.c b/drivers/iommu/msm_iommu_perfmon-v0.c
new file mode 100644
index 0000000..1073623
--- /dev/null
+++ b/drivers/iommu/msm_iommu_perfmon-v0.c
@@ -0,0 +1,313 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This file contains the part of the IOMMUv0 PMU driver that actually touches
+ * IOMMU PMU registers.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <mach/iommu_hw-v0.h>
+#include <mach/iommu_perfmon.h>
+#include <mach/iommu.h>
+
+#define PM_RESET_MASK		(0xF)
+#define PM_RESET_SHIFT		(0x8)
+#define PM_RESET		(PM_RESET_MASK << PM_RESET_SHIFT)
+
+#define PM_ENABLE_MASK		(0x1)
+#define PM_ENABLE_SHIFT		(0x0)
+#define PM_ENABLE		(PM_ENABLE_MASK << PM_ENABLE_SHIFT)
+
+#define PM_OVFL_FLAG_MASK	(0xF)
+#define PM_OVFL_FLAG_SHIFT	(0x0)
+#define PM_OVFL_FLAG		(PM_OVFL_FLAG_MASK << PM_OVFL_FLAG_SHIFT)
+
+#define PM_EVENT_TYPE_MASK	(0x1F)
+#define PM_EVENT_TYPE_SHIFT	(0x2)
+#define PM_EVENT_TYPE		(PM_EVENT_TYPE_MASK << PM_EVENT_TYPE_SHIFT)
+
+#define PM_INT_EN_MASK		(0x1)
+#define PM_INT_EN_SHIFT		(0x0)
+#define PM_INT_EN		(PM_INT_EN_MASK << PM_INT_EN_SHIFT)
+
+#define PM_INT_POL_MASK		(0x1)
+#define PM_INT_POL_SHIFT	(0x2)
+#define PM_INT_ACTIVE_HIGH	(0x1)
+
+#define PMEVCNTR_(n)		(EMC_N + n*4)
+#define PMEVTYPER_(n)		(EMCC_N + n*4)
+
+/*
+ * Translate between SMMUv0 event classes and standard ARM SMMU event classes
+ */
+static int iommu_pm_event_class_translation_table[] = {
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	0x8,
+	0x9,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	0x80,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	0x12,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	MSM_IOMMU_PMU_NO_EVENT_CLASS,
+	0x10,
+};
+
+static int iommu_pm_translate_event_class(int event_class)
+{
+	const unsigned int TBL_LEN =
+			ARRAY_SIZE(iommu_pm_event_class_translation_table);
+	unsigned int i;
+
+	if (event_class < 0)
+		return event_class;
+
+	for (i = 0; i < TBL_LEN; ++i) {
+		if (iommu_pm_event_class_translation_table[i] == event_class)
+			return i;
+	}
+	return MSM_IOMMU_PMU_NO_EVENT_CLASS;
+}
+
+static unsigned int iommu_pm_is_hw_access_OK(const struct iommu_pmon *pmon)
+{
+	/*
+	 * IOMMUv0 is in an always-on power domain, so we don't care
+	 * whether we are attached or not. We only care whether the PMU
+	 * is enabled, i.e. whether its clocks are turned on.
+	 */
+	return pmon->enabled;
+}
+
+static void iommu_pm_grp_enable(struct iommu_info *iommu, unsigned int grp_no)
+{
+	/* No group concept in v0. */
+}
+
+static void iommu_pm_grp_disable(struct iommu_info *iommu, unsigned int grp_no)
+{
+	/* No group concept in v0. */
+}
+
+static void iommu_pm_set_int_active_high(const struct iommu_info *iommu)
+{
+	unsigned int emmc;
+	emmc = readl_relaxed(iommu->base + EMMC);
+	emmc |= (PM_INT_ACTIVE_HIGH & PM_INT_POL_MASK) << PM_INT_POL_SHIFT;
+	writel_relaxed(emmc, iommu->base + EMMC);
+}
+
+static void iommu_pm_enable(struct iommu_info *iommu)
+{
+	unsigned int emmc;
+	emmc = readl_relaxed(iommu->base + EMMC);
+	emmc |= PM_ENABLE;
+	writel_relaxed(emmc, iommu->base + EMMC);
+}
+
+static void iommu_pm_disable(struct iommu_info *iommu)
+{
+	unsigned int emmc;
+	emmc = readl_relaxed(iommu->base + EMMC);
+	emmc &= ~PM_ENABLE;
+	writel_relaxed(emmc, iommu->base + EMMC);
+}
+
+static void iommu_pm_reset_counters(const struct iommu_info *iommu)
+{
+	unsigned int emmc;
+	emmc = readl_relaxed(iommu->base + EMMC);
+	emmc |= PM_RESET;
+	writel_relaxed(emmc, iommu->base + EMMC);
+}
+
+static void iommu_pm_check_for_overflow(struct iommu_pmon *pmon)
+{
+	struct iommu_pmon_counter *counter;
+	struct iommu_info *iommu = &pmon->iommu;
+	unsigned int reg_value;
+	unsigned int j;
+	struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[0];
+
+	reg_value = readl_relaxed(iommu->base + EMCS);
+	reg_value &= PM_OVFL_FLAG;
+
+	for (j = 0; j < cnt_grp->num_counters; ++j) {
+		counter = &cnt_grp->counters[j];
+
+		if (counter->enabled) {
+			if (reg_value & (1 << counter->absolute_counter_no))
+				counter->overflow_count++;
+		}
+	}
+
+	/* Clear overflow */
+	writel_relaxed(reg_value, iommu->base + EMCS);
+}
+
+static irqreturn_t iommu_pm_evt_ovfl_int_handler(int irq, void *dev_id)
+{
+	struct iommu_pmon *pmon = dev_id;
+	struct iommu_info *iommu = &pmon->iommu;
+
+	mutex_lock(&pmon->lock);
+
+	if (!iommu_pm_is_hw_access_OK(pmon)) {
+		mutex_unlock(&pmon->lock);
+		goto out;
+	}
+
+	iommu->ops->iommu_lock_acquire();
+	iommu_pm_check_for_overflow(pmon);
+	iommu->ops->iommu_lock_release();
+
+	mutex_unlock(&pmon->lock);
+
+out:
+	return IRQ_HANDLED;
+}
+
+static void iommu_pm_counter_enable(struct iommu_info *iommu,
+				    struct iommu_pmon_counter *counter)
+{
+	unsigned int bit_no = counter->absolute_counter_no;
+	unsigned int reg_value;
+
+	/* Clear overflow of counter */
+	reg_value = readl_relaxed(iommu->base + EMCS);
+	reg_value &= (1 << bit_no);
+	writel_relaxed(reg_value, iommu->base + EMCS);
+
+	/* Enable counter */
+	counter->enabled = 1;
+}
+
+static void iommu_pm_counter_disable(struct iommu_info *iommu,
+				     struct iommu_pmon_counter *counter)
+{
+	unsigned int bit_no = counter->absolute_counter_no;
+	unsigned int reg_value;
+
+	/* Disable counter */
+	counter->enabled = 0;
+
+	/* Clear overflow of counter */
+	reg_value = readl_relaxed(iommu->base + EMCS);
+	reg_value &= (1 << bit_no);
+	writel_relaxed(reg_value, iommu->base + EMCS);
+}
+
+/*
+ * Must be called after iommu_start_access() is called
+ */
+static void iommu_pm_ovfl_int_enable(struct iommu_info *iommu,
+				     const struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no;
+	unsigned int reg_value;
+
+	/* Enable overflow interrupt for counter */
+	reg_value = readl_relaxed(iommu->base + PMEVTYPER_(reg_no));
+	reg_value |= PM_INT_EN;
+	writel_relaxed(reg_value, iommu->base + PMEVTYPER_(reg_no));
+}
+
+/*
+ * Must be called after iommu_start_access() is called
+ */
+static void iommu_pm_ovfl_int_disable(struct iommu_info *iommu,
+				      const struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no;
+	unsigned int reg_value;
+
+	/* Disable overflow interrupt for counter */
+	reg_value = readl_relaxed(iommu->base + PMEVTYPER_(reg_no));
+	reg_value &= ~PM_INT_EN;
+	writel_relaxed(reg_value, iommu->base + PMEVTYPER_(reg_no));
+}
+
+static void iommu_pm_set_event_class(struct iommu_pmon *pmon,
+				    unsigned int count_no,
+				    unsigned int event_class)
+{
+	unsigned int reg_no = count_no;
+	unsigned int reg_value;
+	int event = iommu_pm_translate_event_class(event_class);
+
+	if (event == MSM_IOMMU_PMU_NO_EVENT_CLASS)
+		event = 0;
+
+	reg_value = readl_relaxed(pmon->iommu.base + PMEVTYPER_(reg_no));
+	reg_value &= ~(PM_EVENT_TYPE_MASK << PM_EVENT_TYPE_SHIFT);
+	reg_value |= (event & PM_EVENT_TYPE_MASK) << PM_EVENT_TYPE_SHIFT;
+	writel_relaxed(reg_value, pmon->iommu.base + PMEVTYPER_(reg_no));
+}
+
+static unsigned int iommu_pm_read_counter(struct iommu_pmon_counter *counter)
+{
+	struct iommu_pmon *pmon = counter->cnt_group->pmon;
+	struct iommu_info *info = &pmon->iommu;
+	unsigned int cnt_no = counter->absolute_counter_no;
+	return readl_relaxed(info->base + PMEVCNTR_(cnt_no));
+}
+
+static void iommu_pm_initialize_hw(const struct iommu_pmon *pmon)
+{
+	const struct iommu_info *iommu = &pmon->iommu;
+	struct msm_iommu_drvdata *iommu_drvdata =
+					dev_get_drvdata(iommu->iommu_dev);
+
+	/* This is called during bootup device initialization so no need
+	 * for locking here.
+	 */
+	iommu->ops->iommu_power_on(iommu_drvdata);
+	iommu->ops->iommu_clk_on(iommu_drvdata);
+	iommu_pm_set_int_active_high(iommu);
+	iommu->ops->iommu_clk_off(iommu_drvdata);
+	iommu->ops->iommu_power_off(iommu_drvdata);
+}
+
+static struct iommu_pm_hw_ops iommu_pm_hw_ops = {
+	.initialize_hw = iommu_pm_initialize_hw,
+	.is_hw_access_OK = iommu_pm_is_hw_access_OK,
+	.grp_enable = iommu_pm_grp_enable,
+	.grp_disable = iommu_pm_grp_disable,
+	.enable_pm = iommu_pm_enable,
+	.disable_pm = iommu_pm_disable,
+	.reset_counters = iommu_pm_reset_counters,
+	.check_for_overflow = iommu_pm_check_for_overflow,
+	.evt_ovfl_int_handler = iommu_pm_evt_ovfl_int_handler,
+	.counter_enable = iommu_pm_counter_enable,
+	.counter_disable = iommu_pm_counter_disable,
+	.ovfl_int_enable = iommu_pm_ovfl_int_enable,
+	.ovfl_int_disable = iommu_pm_ovfl_int_disable,
+	.set_event_class = iommu_pm_set_event_class,
+	.read_counter = iommu_pm_read_counter,
+};
+
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void)
+{
+	return &iommu_pm_hw_ops;
+}
+EXPORT_SYMBOL(iommu_pm_get_hw_ops_v0);
+
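
The translation table in the v0 hooks above is indexed by the hardware's
native event number and stores the matching standard ARM SMMU event class,
so iommu_pm_translate_event_class() turns an ARM class back into a native
number with a linear reverse scan. The same lookup, detached from the driver
types as a minimal sketch:

	/*
	 * Return the table index (native v0 event number) holding
	 * arm_class, or no_class if the class is not supported.
	 */
	static int reverse_lookup(const int *tbl, int len, int arm_class,
				  int no_class)
	{
		int i;

		for (i = 0; i < len; i++)
			if (tbl[i] == arm_class)
				return i;
		return no_class;
	}
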
diff --git a/drivers/iommu/msm_iommu_perfmon-v1.c b/drivers/iommu/msm_iommu_perfmon-v1.c
new file mode 100644
index 0000000..7d6dd34
--- /dev/null
+++ b/drivers/iommu/msm_iommu_perfmon-v1.c
@@ -0,0 +1,270 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This file contains the part of the IOMMUv1 PMU driver that actually touches
+ * IOMMU PMU registers.
+ */
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <mach/iommu_hw-v1.h>
+#include <mach/iommu_perfmon.h>
+#include <mach/iommu.h>
+
+#define PMCR_P_MASK		(0x1)
+#define PMCR_P_SHIFT		(1)
+#define PMCR_P			(PMCR_P_MASK << PMCR_P_SHIFT)
+#define PMCFGR_NCG_MASK		(0xFF)
+#define PMCFGR_NCG_SHIFT	(24)
+#define PMCFGR_NCG		(PMCFGR_NCG_MASK << PMCFGR_NCG_SHIFT)
+#define PMCFGR_N_MASK		(0xFF)
+#define PMCFGR_N_SHIFT		(0)
+#define PMCFGR_N		(PMCFGR_N_MASK << PMCFGR_N_SHIFT)
+#define CR_E			0x1
+#define CGCR_CEN		0x800
+#define CGCR_CEN_SHFT		(1 << 11)
+#define PMCGCR_CGNC_MASK	(0x0F)
+#define PMCGCR_CGNC_SHIFT	(24)
+#define PMCGCR_CGNC		(PMCGCR_CGNC_MASK << PMCGCR_CGNC_SHIFT)
+#define PMCGCR_(group)		(PMCGCR_N + group*4)
+
+#define PMOVSCLR_(n)		(PMOVSCLR_N + n*4)
+#define PMCNTENSET_(n)		(PMCNTENSET_N + n*4)
+#define PMCNTENCLR_(n)		(PMCNTENCLR_N + n*4)
+#define PMINTENSET_(n)		(PMINTENSET_N + n*4)
+#define PMINTENCLR_(n)		(PMINTENCLR_N + n*4)
+
+#define PMEVCNTR_(n)		(PMEVCNTR_N + n*4)
+#define PMEVTYPER_(n)		(PMEVTYPER_N + n*4)
+
+static unsigned int iommu_pm_is_hw_access_OK(const struct iommu_pmon *pmon)
+{
+	/*
+	 * IOMMUv1 is not in the always-on domain, so the regulators must be
+	 * turned on in addition to the clocks before we may touch the
+	 * hardware. We therefore check that we are attached to the IOMMU
+	 * as well as checking that the PMU is enabled.
+	 */
+	return pmon->enabled && (pmon->iommu_attach_count > 0);
+}
+
+static void iommu_pm_grp_enable(struct iommu_info *iommu, unsigned int grp_no)
+{
+	unsigned int pmcgcr;
+	pmcgcr = readl_relaxed(iommu->base + PMCGCR_(grp_no));
+	pmcgcr |= CGCR_CEN;
+	writel_relaxed(pmcgcr, iommu->base + PMCGCR_(grp_no));
+}
+
+static void iommu_pm_grp_disable(struct iommu_info *iommu, unsigned int grp_no)
+{
+	unsigned int pmcgcr;
+	pmcgcr = readl_relaxed(iommu->base + PMCGCR_(grp_no));
+	pmcgcr &= ~CGCR_CEN;
+	writel_relaxed(pmcgcr, iommu->base + PMCGCR_(grp_no));
+}
+
+static void iommu_pm_enable(struct iommu_info *iommu)
+{
+	unsigned int pmcr;
+	pmcr = readl_relaxed(iommu->base + PMCR);
+	pmcr |= CR_E;
+	writel_relaxed(pmcr, iommu->base + PMCR);
+}
+
+static void iommu_pm_disable(struct iommu_info *iommu)
+{
+	unsigned int pmcr;
+	pmcr = readl_relaxed(iommu->base + PMCR);
+	pmcr &= ~CR_E;
+	writel_relaxed(pmcr, iommu->base + PMCR);
+}
+
+static void iommu_pm_reset_counters(const struct iommu_info *iommu)
+{
+	unsigned int pmcr;
+	pmcr = readl_relaxed(iommu->base + PMCR);
+	pmcr |= PMCR_P;
+	writel_relaxed(pmcr, iommu->base + PMCR);
+}
+
+static void iommu_pm_check_for_overflow(struct iommu_pmon *pmon)
+{
+	struct iommu_pmon_counter *counter;
+	struct iommu_info *iommu = &pmon->iommu;
+	unsigned int reg_no = 0;
+	unsigned int bit_no;
+	unsigned int reg_value;
+	unsigned int i;
+	unsigned int j;
+	unsigned int curr_reg = 0;
+
+	reg_value = readl_relaxed(iommu->base + PMOVSCLR_(curr_reg));
+
+	for (i = 0; i < pmon->num_groups; ++i) {
+		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
+		for (j = 0; j < cnt_grp->num_counters; ++j) {
+			counter = &cnt_grp->counters[j];
+			reg_no = counter->absolute_counter_no / 32;
+			bit_no = counter->absolute_counter_no % 32;
+			if (reg_no != curr_reg) {
+				/* Clear overflow bits */
+				writel_relaxed(reg_value, iommu->base +
+					       PMOVSCLR_(reg_no));
+				curr_reg = reg_no;
+				reg_value = readl_relaxed(iommu->base +
+							  PMOVSCLR_(curr_reg));
+			}
+
+			if (counter->enabled) {
+				if (reg_value & (1 << bit_no))
+					counter->overflow_count++;
+			}
+		}
+	}
+
+	/* Clear overflow */
+	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
+}
+
+static irqreturn_t iommu_pm_evt_ovfl_int_handler(int irq, void *dev_id)
+{
+	struct iommu_pmon *pmon = dev_id;
+	struct iommu_info *iommu = &pmon->iommu;
+
+	mutex_lock(&pmon->lock);
+
+	if (!iommu_pm_is_hw_access_OK(pmon)) {
+		mutex_unlock(&pmon->lock);
+		goto out;
+	}
+
+	iommu->ops->iommu_lock_acquire();
+	iommu_pm_check_for_overflow(pmon);
+	iommu->ops->iommu_lock_release();
+
+	mutex_unlock(&pmon->lock);
+
+out:
+	return IRQ_HANDLED;
+}
+
+static void iommu_pm_counter_enable(struct iommu_info *iommu,
+				    struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no / 32;
+	unsigned int bit_no = counter->absolute_counter_no % 32;
+	unsigned int reg_value;
+
+	/* Clear overflow of counter */
+	reg_value = 1 << bit_no;
+	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
+
+	/* Enable counter */
+	writel_relaxed(reg_value, iommu->base + PMCNTENSET_(reg_no));
+	counter->enabled = 1;
+}
+
+static void iommu_pm_counter_disable(struct iommu_info *iommu,
+				     struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no / 32;
+	unsigned int bit_no = counter->absolute_counter_no % 32;
+	unsigned int reg_value;
+
+	counter->enabled = 0;
+
+	/* Disable counter */
+	reg_value = 1 << bit_no;
+	writel_relaxed(reg_value, iommu->base + PMCNTENCLR_(reg_no));
+
+	/* Clear overflow of counter */
+	writel_relaxed(reg_value, iommu->base + PMOVSCLR_(reg_no));
+}
+
+/*
+ * Must be called after iommu_start_access() is called
+ */
+static void iommu_pm_ovfl_int_enable(struct iommu_info *iommu,
+				     const struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no / 32;
+	unsigned int bit_no = counter->absolute_counter_no % 32;
+	unsigned int reg_value;
+
+	/* Enable overflow interrupt for counter */
+	reg_value = (1 << bit_no);
+	writel_relaxed(reg_value, iommu->base + PMINTENSET_(reg_no));
+}
+
+/*
+ * Must be called after iommu_start_access() is called
+ */
+static void iommu_pm_ovfl_int_disable(struct iommu_info *iommu,
+				      const struct iommu_pmon_counter *counter)
+{
+	unsigned int reg_no = counter->absolute_counter_no / 32;
+	unsigned int bit_no = counter->absolute_counter_no % 32;
+	unsigned int reg_value;
+
+	/* Disable overflow interrupt for counter */
+	reg_value = 1 << bit_no;
+	writel_relaxed(reg_value, iommu->base + PMINTENCLR_(reg_no));
+}
+
+static void iommu_pm_set_event_class(struct iommu_pmon *pmon,
+				    unsigned int count_no,
+				    unsigned int event_class)
+{
+	writel_relaxed(event_class, pmon->iommu.base + PMEVTYPER_(count_no));
+}
+
+static unsigned int iommu_pm_read_counter(struct iommu_pmon_counter *counter)
+{
+	struct iommu_pmon *pmon = counter->cnt_group->pmon;
+	struct iommu_info *info = &pmon->iommu;
+	unsigned int cnt_no = counter->absolute_counter_no;
+	return readl_relaxed(info->base + PMEVCNTR_(cnt_no));
+}
+
+static void iommu_pm_initialize_hw(const struct iommu_pmon *pmon)
+{
+	/* No initialization needed */
+}
+
+static struct iommu_pm_hw_ops iommu_pm_hw_ops = {
+	.initialize_hw = iommu_pm_initialize_hw,
+	.is_hw_access_OK = iommu_pm_is_hw_access_OK,
+	.grp_enable = iommu_pm_grp_enable,
+	.grp_disable = iommu_pm_grp_disable,
+	.enable_pm = iommu_pm_enable,
+	.disable_pm = iommu_pm_disable,
+	.reset_counters = iommu_pm_reset_counters,
+	.check_for_overflow = iommu_pm_check_for_overflow,
+	.evt_ovfl_int_handler = iommu_pm_evt_ovfl_int_handler,
+	.counter_enable = iommu_pm_counter_enable,
+	.counter_disable = iommu_pm_counter_disable,
+	.ovfl_int_enable = iommu_pm_ovfl_int_enable,
+	.ovfl_int_disable = iommu_pm_ovfl_int_disable,
+	.set_event_class = iommu_pm_set_event_class,
+	.read_counter = iommu_pm_read_counter,
+};
+
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void)
+{
+	return &iommu_pm_hw_ops;
+}
+EXPORT_SYMBOL(iommu_pm_get_hw_ops_v1);
+
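
Throughout the v1 hooks above, a counter's control bits live in banked
registers that pack 32 counters per 32-bit word, so the register bank and
bit position fall out of a divide and a modulo of the absolute counter
number. A sketch of that addressing (counter_reg_bit is illustrative, not a
driver helper):

	static inline void counter_reg_bit(unsigned int counter_no,
					   unsigned int *reg_no,
					   unsigned int *bit_no)
	{
		*reg_no = counter_no / 32;	/* which PMxxx_(n) bank */
		*bit_no = counter_no % 32;	/* which bit inside it */
	}

	/*
	 * e.g. enabling counter 37 becomes
	 * writel_relaxed(1 << 5, base + PMCNTENSET_(1));
	 */
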
diff --git a/drivers/iommu/msm_iommu_perfmon.c b/drivers/iommu/msm_iommu_perfmon.c
new file mode 100644
index 0000000..fee8a4a
--- /dev/null
+++ b/drivers/iommu/msm_iommu_perfmon.c
@@ -0,0 +1,819 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <mach/iommu.h>
+#include <mach/iommu_perfmon.h>
+
+static LIST_HEAD(iommu_list);
+static struct dentry *msm_iommu_root_debugfs_dir;
+static const char *NO_EVENT_CLASS_NAME = "none";
+static const unsigned int MAX_EVENT_CLASS_NAME_LEN = 36;
+
+struct event_class {
+	unsigned int event_number;
+	const char *desc;
+};
+
+static struct event_class pmu_event_classes[] = {
+	{ 0x00, "cycle_count"      },
+	{ 0x01, "cycle_count64"    },
+	{ 0x08, "tlb_refill"       },
+	{ 0x09, "tlb_refill_read"  },
+	{ 0x0A, "tlb_refill_write" },
+	{ 0x10, "access"           },
+	{ 0x11, "access_read"      },
+	{ 0x12, "access_write"     },
+	{ 0x80, "full_misses"      },
+	{ 0x81, "partial_miss_1lbfb_hit" },
+	{ 0x82, "partial_miss_2lbfb_hit" },
+	{ 0x83, "full_hit" },
+	{ 0x90, "pred_req_full_miss" },
+	{ 0x91, "pred_req_partial_miss_1lbfb_hit" },
+	{ 0x92, "pred_req_partial_miss_2lbfb_hit" },
+	{ 0xb0, "tot_num_miss_axi_htw_read_req" },
+	{ 0xb1, "tot_num_pred_axi_htw_read_req" },
+};
+
+static unsigned int iommu_pm_create_sup_cls_str(char **buf,
+						struct iommu_pmon *pmon)
+{
+	unsigned long buf_size = ARRAY_SIZE(pmu_event_classes) *
+				 MAX_EVENT_CLASS_NAME_LEN;
+	unsigned int pos = 0;
+	unsigned int nevent_cls = pmon->nevent_cls_supported;
+
+	*buf = kzalloc(buf_size, GFP_KERNEL);
+	if (*buf) {
+		unsigned int j;
+		int i;
+		struct event_class *ptr;
+		size_t array_len = ARRAY_SIZE(pmu_event_classes);
+		ptr = pmu_event_classes;
+
+		for (j = 0; j < nevent_cls; ++j) {
+			for (i = 0; i < array_len; ++i) {
+
+				if (ptr[i].event_number !=
+						pmon->event_cls_supported[j])
+					continue;
+
+				if (pos < buf_size) {
+					pos += snprintf(&(*buf)[pos],
+							buf_size-pos,
+							"[%u] %s\n",
+							ptr[i].event_number,
+							ptr[i].desc);
+				}
+				break;
+			}
+		}
+	}
+	return pos;
+}
+
+static const char *iommu_pm_find_event_class_name(int event_class)
+{
+	size_t array_len;
+	struct event_class *ptr;
+	int i;
+	const char *event_class_name = NO_EVENT_CLASS_NAME;
+	if (event_class < 0)
+		goto out;
+
+	array_len = ARRAY_SIZE(pmu_event_classes);
+	ptr = pmu_event_classes;
+
+	for (i = 0; i < array_len; ++i) {
+		if (ptr[i].event_number == event_class) {
+			event_class_name =  ptr[i].desc;
+			break;
+		}
+	}
+
+out:
+	return event_class_name;
+}
+
+static int iommu_pm_find_event_class(const char *event_class_name)
+{
+	size_t array_len;
+	struct event_class *ptr;
+	int i;
+	int event_class = MSM_IOMMU_PMU_NO_EVENT_CLASS;
+
+	if (strcmp(event_class_name, NO_EVENT_CLASS_NAME) == 0)
+		goto out;
+
+	array_len = ARRAY_SIZE(pmu_event_classes);
+	ptr = pmu_event_classes;
+
+	for (i = 0; i < array_len; ++i) {
+		if (strcmp(ptr[i].desc, event_class_name) == 0) {
+			event_class =  ptr[i].event_number;
+			goto out;
+		}
+	}
+
+out:
+	return event_class;
+}
+
+static inline void iommu_pm_add_to_iommu_list(struct iommu_pmon *iommu_pmon)
+{
+	list_add(&iommu_pmon->iommu_list, &iommu_list);
+}
+
+static inline void iommu_pm_del_from_iommu_list(struct iommu_pmon *iommu_pmon)
+{
+	list_del(&iommu_pmon->iommu_list);
+}
+
+static struct iommu_pmon *iommu_pm_get_pm_by_dev(struct device *dev)
+{
+	struct iommu_pmon *pmon;
+	struct iommu_info *info;
+	struct list_head *ent;
+	list_for_each(ent, &iommu_list) {
+		pmon = list_entry(ent, struct iommu_pmon, iommu_list);
+		info = &pmon->iommu;
+		if (dev == info->iommu_dev)
+			return pmon;
+	}
+	return NULL;
+}
+
+static void iommu_pm_set_event_type(struct iommu_pmon *pmon,
+				    struct iommu_pmon_counter *counter)
+{
+	int event_class;
+	unsigned int count_no;
+	struct iommu_info *iommu = &pmon->iommu;
+
+	event_class = counter->current_event_class;
+	count_no = counter->absolute_counter_no;
+
+	if (event_class == MSM_IOMMU_PMU_NO_EVENT_CLASS) {
+		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
+			iommu->ops->iommu_lock_acquire();
+			iommu->hw_ops->counter_disable(iommu, counter);
+			iommu->hw_ops->ovfl_int_disable(iommu, counter);
+			iommu->hw_ops->set_event_class(pmon, count_no, 0);
+			iommu->ops->iommu_lock_release();
+		}
+		counter->overflow_count = 0;
+		counter->value = 0;
+	} else {
+		counter->overflow_count = 0;
+		counter->value = 0;
+		if (iommu->hw_ops->is_hw_access_OK(pmon)) {
+			iommu->ops->iommu_lock_acquire();
+			iommu->hw_ops->set_event_class(pmon, count_no,
+					event_class);
+			iommu->hw_ops->ovfl_int_enable(iommu, counter);
+			iommu->hw_ops->counter_enable(iommu, counter);
+			iommu->ops->iommu_lock_release();
+		}
+	}
+}
+
+static void iommu_pm_reset_counts(struct iommu_pmon *pmon)
+{
+	unsigned int i;
+	unsigned int j;
+	for (i = 0; i < pmon->num_groups; ++i) {
+		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
+		for (j = 0; j < cnt_grp->num_counters; ++j) {
+			cnt_grp->counters[j].value = 0;
+			cnt_grp->counters[j].overflow_count = 0;
+		}
+	}
+}
+
+static void iommu_pm_set_all_counters(struct iommu_pmon *pmon)
+{
+	unsigned int i;
+	unsigned int j;
+	for (i = 0; i < pmon->num_groups; ++i) {
+		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
+		for (j = 0; j < cnt_grp->num_counters; ++j)
+			iommu_pm_set_event_type(pmon, &cnt_grp->counters[j]);
+	}
+}
+
+static void iommu_pm_read_all_counters(struct iommu_pmon *pmon)
+{
+	unsigned int i;
+	unsigned int j;
+	struct iommu_info *iommu = &pmon->iommu;
+	for (i = 0; i < pmon->num_groups; ++i) {
+		struct iommu_pmon_cnt_group *cnt_grp = &pmon->cnt_grp[i];
+		for (j = 0; j < cnt_grp->num_counters; ++j) {
+			struct iommu_pmon_counter *counter;
+			counter = &cnt_grp->counters[j];
+			counter->value = iommu->hw_ops->read_counter(counter);
+		}
+	}
+}
+
+static void iommu_pm_on(struct iommu_pmon *pmon)
+{
+	unsigned int i;
+	struct iommu_info *iommu = &pmon->iommu;
+	struct msm_iommu_drvdata *iommu_drvdata =
+					dev_get_drvdata(iommu->iommu_dev);
+
+	iommu->ops->iommu_power_on(iommu_drvdata);
+	iommu->ops->iommu_clk_on(iommu_drvdata);
+
+	/* Reset counters in HW */
+	iommu->ops->iommu_lock_acquire();
+	iommu->hw_ops->reset_counters(&pmon->iommu);
+	iommu->ops->iommu_lock_release();
+
+	/* Reset SW counters */
+	iommu_pm_reset_counts(pmon);
+
+	pmon->enabled = 1;
+
+	iommu_pm_set_all_counters(pmon);
+
+	iommu->ops->iommu_lock_acquire();
+
+	/* enable all counter group */
+	for (i = 0; i < pmon->num_groups; ++i)
+		iommu->hw_ops->grp_enable(iommu, i);
+
+	/* enable global counters */
+	iommu->hw_ops->enable_pm(iommu);
+	iommu->ops->iommu_lock_release();
+
+	pr_info("%s: TLB performance monitoring turned ON\n",
+		pmon->iommu.iommu_name);
+}
+
+static void iommu_pm_off(struct iommu_pmon *pmon)
+{
+	unsigned int i;
+	struct iommu_info *iommu = &pmon->iommu;
+	struct msm_iommu_drvdata *iommu_drvdata =
+					dev_get_drvdata(iommu->iommu_dev);
+
+	pmon->enabled = 0;
+
+	iommu->ops->iommu_lock_acquire();
+
+	/* disable global counters */
+	iommu->hw_ops->disable_pm(iommu);
+
+	/* Check if we overflowed just before turning off pmon */
+	iommu->hw_ops->check_for_overflow(pmon);
+
+	/* disable all counter group */
+	for (i = 0; i < pmon->num_groups; ++i)
+		iommu->hw_ops->grp_disable(iommu, i);
+
+	/* Update cached copy of counters before turning off power */
+	iommu_pm_read_all_counters(pmon);
+
+	iommu->ops->iommu_lock_release();
+	iommu->ops->iommu_clk_off(iommu_drvdata);
+	iommu->ops->iommu_power_off(iommu_drvdata);
+
+	pr_info("%s: TLB performance monitoring turned OFF\n",
+		pmon->iommu.iommu_name);
+}
+
+static int iommu_pm_debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t iommu_pm_count_value_read(struct file *fp,
+					 char __user *user_buff,
+					 size_t count, loff_t *pos)
+{
+	size_t rd_cnt;
+	unsigned long long full_count;
+
+	struct iommu_pmon_counter *counter = fp->private_data;
+	struct iommu_pmon *pmon = counter->cnt_group->pmon;
+	struct iommu_info *iommu = &pmon->iommu;
+	char buf[50];
+	size_t len;
+
+	mutex_lock(&pmon->lock);
+
+	if (iommu->hw_ops->is_hw_access_OK(pmon)) {
+		iommu->ops->iommu_lock_acquire();
+		counter->value = iommu->hw_ops->read_counter(counter);
+		iommu->ops->iommu_lock_release();
+	}
+	full_count = (unsigned long long) counter->value +
+		     ((unsigned long long)counter->overflow_count *
+			0x100000000ULL);
+
+	len = snprintf(buf, 50, "%llu\n", full_count);
+	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
+	mutex_unlock(&pmon->lock);
+
+	return rd_cnt;
+}
+
+static const struct file_operations cnt_value_file_ops = {
+	.open = iommu_pm_debug_open,
+	.read = iommu_pm_count_value_read,
+};
+
+static ssize_t iommu_pm_event_class_read(struct file *fp,
+					 char __user *user_buff,
+					 size_t count, loff_t *pos)
+{
+	size_t rd_cnt;
+	struct iommu_pmon_counter *counter = fp->private_data;
+	struct iommu_pmon *pmon = counter->cnt_group->pmon;
+	char buf[50];
+	const char *event_class_name;
+	size_t len;
+
+	mutex_lock(&pmon->lock);
+	event_class_name = iommu_pm_find_event_class_name(
+						counter->current_event_class);
+	len = snprintf(buf, 50, "%s\n", event_class_name);
+
+	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
+	mutex_unlock(&pmon->lock);
+	return rd_cnt;
+}
+
+static ssize_t iommu_pm_event_class_write(struct file *fp,
+					  const char __user *user_buff,
+					  size_t count, loff_t *pos)
+{
+	size_t wr_cnt;
+	char buf[50];
+	size_t buf_size = sizeof(buf);
+	struct iommu_pmon_counter *counter = fp->private_data;
+	struct iommu_pmon *pmon = counter->cnt_group->pmon;
+	int current_event_class;
+
+	if ((count + *pos) >= buf_size)
+		return -EINVAL;
+
+	mutex_lock(&pmon->lock);
+	current_event_class = counter->current_event_class;
+	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
+	if (wr_cnt >= 1) {
+		int rv;
+		long value;
+		buf[wr_cnt-1] = '\0';
+		rv = kstrtol(buf, 10, &value);
+		if (!rv) {
+			counter->current_event_class =
+				iommu_pm_find_event_class(
+					iommu_pm_find_event_class_name(value));
+		} else {
+			counter->current_event_class =
+						iommu_pm_find_event_class(buf);
+		}
+	}
+
+	if (current_event_class != counter->current_event_class)
+		iommu_pm_set_event_type(pmon, counter);
+
+	mutex_unlock(&pmon->lock);
+	return wr_cnt;
+}
+
+static const struct file_operations event_class_file_ops = {
+	.open = iommu_pm_debug_open,
+	.read = iommu_pm_event_class_read,
+	.write = iommu_pm_event_class_write,
+};
+
+static ssize_t iommu_reset_counters_write(struct file *fp,
+				    const char __user *user_buff,
+				    size_t count, loff_t *pos)
+{
+	size_t wr_cnt;
+	char buf[10];
+	size_t buf_size = sizeof(buf);
+	struct iommu_pmon *pmon = fp->private_data;
+	struct iommu_info *iommu = &pmon->iommu;
+
+	if ((count + *pos) >= buf_size)
+		return -EINVAL;
+
+	mutex_lock(&pmon->lock);
+	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
+	if (wr_cnt >= 1) {
+		unsigned long cmd = 0;
+		int rv;
+		buf[wr_cnt-1] = '\0';
+		rv = kstrtoul(buf, 10, &cmd);
+		if (!rv && (cmd == 1)) {
+			if (iommu->hw_ops->is_hw_access_OK(pmon)) {
+				iommu->ops->iommu_lock_acquire();
+				iommu->hw_ops->reset_counters(&pmon->iommu);
+				iommu->ops->iommu_lock_release();
+			}
+			iommu_pm_reset_counts(pmon);
+			pr_info("TLB performance counters reset\n");
+		} else {
+			pr_err("Unknown performance monitor command: %lu\n",
+				cmd);
+		}
+	}
+	mutex_unlock(&pmon->lock);
+	return wr_cnt;
+}
+
+static const struct file_operations reset_file_ops = {
+	.open = iommu_pm_debug_open,
+	.write = iommu_reset_counters_write,
+};
+
+static ssize_t iommu_pm_enable_counters_read(struct file *fp,
+					     char __user *user_buff,
+					     size_t count, loff_t *pos)
+{
+	size_t rd_cnt;
+	char buf[5];
+	size_t len;
+	struct iommu_pmon *pmon = fp->private_data;
+
+	mutex_lock(&pmon->lock);
+	len = snprintf(buf, 5, "%u\n", pmon->enabled);
+	rd_cnt = simple_read_from_buffer(user_buff, count, pos, buf, len);
+	mutex_unlock(&pmon->lock);
+	return rd_cnt;
+}
+
+static ssize_t iommu_pm_enable_counters_write(struct file *fp,
+				     const char __user *user_buff,
+				     size_t count, loff_t *pos)
+{
+	size_t wr_cnt;
+	char buf[10];
+	size_t buf_size = sizeof(buf);
+	struct iommu_pmon *pmon = fp->private_data;
+
+	if ((count + *pos) >= buf_size)
+		return -EINVAL;
+
+	mutex_lock(&pmon->lock);
+	wr_cnt = simple_write_to_buffer(buf, buf_size, pos, user_buff, count);
+	if (wr_cnt >= 1) {
+		unsigned long cmd;
+		int rv;
+		buf[wr_cnt-1] = '\0';
+		rv = kstrtoul(buf, 10, &cmd);
+		if (!rv && (cmd < 2)) {
+			if (pmon->enabled == 1 && cmd == 0) {
+				if (pmon->iommu_attach_count > 0)
+					iommu_pm_off(pmon);
+			} else if (pmon->enabled == 0 && cmd == 1) {
+				/* We can only turn on perf. monitoring if
+				 * iommu is attached. Delay turning on perf.
+				 * monitoring until we are attached.
+				 */
+				if (pmon->iommu_attach_count > 0)
+					iommu_pm_on(pmon);
+				else
+					pmon->enabled = 1;
+			}
+		} else {
+			pr_err("Unknown performance monitor command: %lu\n",
+				cmd);
+		}
+	}
+	mutex_unlock(&pmon->lock);
+	return wr_cnt;
+}
+
+static const struct file_operations event_enable_file_ops = {
+	.open = iommu_pm_debug_open,
+	.read = iommu_pm_enable_counters_read,
+	.write = iommu_pm_enable_counters_write,
+};
+
+static ssize_t iommu_pm_avail_event_cls_read(struct file *fp,
+					     char __user *user_buff,
+					     size_t count, loff_t *pos)
+{
+	size_t rd_cnt = 0;
+	struct iommu_pmon *pmon = fp->private_data;
+	char *buf;
+	size_t len;
+
+	mutex_lock(&pmon->lock);
+
+	len = iommu_pm_create_sup_cls_str(&buf, pmon);
+	if (buf) {
+		rd_cnt = simple_read_from_buffer(user_buff, count, pos,
+						 buf, len);
+		kfree(buf);
+	}
+	mutex_unlock(&pmon->lock);
+	return rd_cnt;
+}
+
+static const struct file_operations available_event_cls_file_ops = {
+	.open = iommu_pm_debug_open,
+	.read = iommu_pm_avail_event_cls_read,
+};
+
+static int iommu_pm_create_grp_debugfs_counters_hierarchy(
+					struct iommu_pmon_cnt_group *cnt_grp,
+					unsigned int *abs_counter_no)
+{
+	int ret = 0;
+	int j;
+	char name[20];
+
+	for (j = 0; j < cnt_grp->num_counters; ++j) {
+		struct dentry *grp_dir = cnt_grp->group_dir;
+		struct dentry *counter_dir;
+		cnt_grp->counters[j].cnt_group = cnt_grp;
+		cnt_grp->counters[j].counter_no = j;
+		cnt_grp->counters[j].absolute_counter_no = *abs_counter_no;
+		(*abs_counter_no)++;
+		cnt_grp->counters[j].value = 0;
+		cnt_grp->counters[j].overflow_count = 0;
+		cnt_grp->counters[j].current_event_class =
+						MSM_IOMMU_PMU_NO_EVENT_CLASS;
+
+		snprintf(name, 20, "counter%u", j);
+
+		counter_dir = debugfs_create_dir(name, grp_dir);
+
+		if (IS_ERR_OR_NULL(counter_dir)) {
+			pr_err("unable to create counter debugfs dir %s\n",
+				name);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		cnt_grp->counters[j].counter_dir = counter_dir;
+
+		if (!debugfs_create_file("value", 0644, counter_dir,
+					 &cnt_grp->counters[j],
+					 &cnt_value_file_ops)) {
+			ret = -EIO;
+			goto out;
+		}
+
+		if (!debugfs_create_file("current_event_class", 0644,
+				counter_dir, &cnt_grp->counters[j],
+				&event_class_file_ops)) {
+			ret = -EIO;
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+static int iommu_pm_create_group_debugfs_hierarchy(struct iommu_info *iommu,
+				   struct iommu_pmon *pmon_entry)
+{
+	int i;
+	int ret = 0;
+	char name[20];
+	unsigned int abs_counter_no = 0;
+
+	for (i = 0; i < pmon_entry->num_groups; ++i) {
+		pmon_entry->cnt_grp[i].pmon = pmon_entry;
+		pmon_entry->cnt_grp[i].grp_no = i;
+		pmon_entry->cnt_grp[i].num_counters = pmon_entry->num_counters;
+		pmon_entry->cnt_grp[i].counters =
+			kzalloc(sizeof(*pmon_entry->cnt_grp[i].counters)
+			* pmon_entry->cnt_grp[i].num_counters, GFP_KERNEL);
+
+		if (!pmon_entry->cnt_grp[i].counters) {
+			pr_err("Unable to allocate memory for counters\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+		snprintf(name, sizeof(name), "group%u", i);
+		pmon_entry->cnt_grp[i].group_dir = debugfs_create_dir(name,
+							pmon_entry->iommu_dir);
+		if (IS_ERR_OR_NULL(pmon_entry->cnt_grp[i].group_dir)) {
+			pr_err("unable to create group debugfs dir %s\n", name);
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = iommu_pm_create_grp_debugfs_counters_hierarchy(
+						&pmon_entry->cnt_grp[i],
+						&abs_counter_no);
+		if (ret)
+			goto out;
+	}
+out:
+	return ret;
+}
+
+int msm_iommu_pm_iommu_register(struct iommu_pmon *pmon_entry)
+{
+	int ret = 0;
+	struct iommu_info *iommu = &pmon_entry->iommu;
+	int i;
+
+	if (!iommu->ops || !iommu->iommu_name || !iommu->base
+					|| !iommu->iommu_dev) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!msm_iommu_root_debugfs_dir) {
+		msm_iommu_root_debugfs_dir = debugfs_create_dir("iommu", NULL);
+		if (IS_ERR_OR_NULL(msm_iommu_root_debugfs_dir)) {
+			pr_err("Failed creating iommu debugfs dir \"iommu\"\n");
+			ret = -EIO;
+			goto out;
+		}
+	}
+
+	pmon_entry->cnt_grp = kzalloc(sizeof(*pmon_entry->cnt_grp)
+				      * pmon_entry->num_groups, GFP_KERNEL);
+	if (!pmon_entry->cnt_grp) {
+		pr_err("Unable to allocate memory for counter groups\n");
+		ret = -ENOMEM;
+		goto file_err;
+	}
+	pmon_entry->iommu_dir = debugfs_create_dir(iommu->iommu_name,
+						   msm_iommu_root_debugfs_dir);
+	if (IS_ERR_OR_NULL(pmon_entry->iommu_dir)) {
+		pr_err("unable to create iommu debugfs dir %s\n",
+							iommu->iommu_name);
+		ret = -ENOMEM;
+		goto free_mem;
+	}
+
+	if (!debugfs_create_file("reset_counters", 0644,
+			pmon_entry->iommu_dir, pmon_entry, &reset_file_ops)) {
+		ret = -EIO;
+		goto free_mem;
+	}
+
+	if (!debugfs_create_file("enable_counters", 0644,
+		pmon_entry->iommu_dir, pmon_entry, &event_enable_file_ops)) {
+		ret = -EIO;
+		goto free_mem;
+	}
+
+	if (!debugfs_create_file("available_event_classes", 0644,
+			pmon_entry->iommu_dir, pmon_entry,
+			&available_event_cls_file_ops)) {
+		ret = -EIO;
+		goto free_mem;
+	}
+
+	ret = iommu_pm_create_group_debugfs_hierarchy(iommu, pmon_entry);
+	if (ret)
+		goto free_mem;
+
+	iommu->hw_ops->initialize_hw(pmon_entry);
+
+	if (iommu->evt_irq > 0) {
+		ret = request_threaded_irq(iommu->evt_irq, NULL,
+				iommu->hw_ops->evt_ovfl_int_handler,
+				IRQF_ONESHOT | IRQF_SHARED,
+				"msm_iommu_pmon_nonsecure_irq", pmon_entry);
+		if (ret) {
+			pr_err("Request IRQ %d failed with ret=%d\n",
+								iommu->evt_irq,
+								ret);
+			goto free_mem;
+		}
+	} else {
+		pr_info("%s: Overflow interrupt not available\n", __func__);
+	}
+
+	dev_dbg(iommu->iommu_dev, "%s iommu registered\n", iommu->iommu_name);
+
+	goto out;
+free_mem:
+	if (pmon_entry->cnt_grp) {
+		for (i = 0; i < pmon_entry->num_groups; ++i) {
+			kfree(pmon_entry->cnt_grp[i].counters);
+			pmon_entry->cnt_grp[i].counters = NULL;
+		}
+	}
+	kfree(pmon_entry->cnt_grp);
+	pmon_entry->cnt_grp = NULL;
+file_err:
+	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(msm_iommu_pm_iommu_register);
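+
+/*
+ * Resulting debugfs layout for a registered IOMMU, relative to the
+ * debugfs root:
+ *
+ *	iommu/<iommu_name>/reset_counters
+ *	iommu/<iommu_name>/enable_counters
+ *	iommu/<iommu_name>/available_event_classes
+ *	iommu/<iommu_name>/group<g>/counter<c>/value
+ *	iommu/<iommu_name>/group<g>/counter<c>/current_event_class
+ */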
+
+void msm_iommu_pm_iommu_unregister(struct device *dev)
+{
+	int i;
+	struct iommu_pmon *pmon_entry = iommu_pm_get_pm_by_dev(dev);
+
+	if (!pmon_entry)
+		return;
+
+	/* dev_id must match what request_threaded_irq() was given */
+	if (pmon_entry->iommu.evt_irq > 0)
+		free_irq(pmon_entry->iommu.evt_irq, pmon_entry);
+
+	if (pmon_entry->cnt_grp) {
+		for (i = 0; i < pmon_entry->num_groups; ++i)
+			kfree(pmon_entry->cnt_grp[i].counters);
+	}
+
+	kfree(pmon_entry->cnt_grp);
+
+	debugfs_remove_recursive(msm_iommu_root_debugfs_dir);
+}
+EXPORT_SYMBOL(msm_iommu_pm_iommu_unregister);
+
+struct iommu_pmon *msm_iommu_pm_alloc(struct device *dev)
+{
+	struct iommu_pmon *pmon_entry;
+	struct iommu_info *info;
+	pmon_entry = devm_kzalloc(dev, sizeof(*pmon_entry), GFP_KERNEL);
+	if (!pmon_entry)
+		return NULL;
+	info = &pmon_entry->iommu;
+	info->iommu_dev = dev;
+	mutex_init(&pmon_entry->lock);
+	iommu_pm_add_to_iommu_list(pmon_entry);
+	return pmon_entry;
+}
+EXPORT_SYMBOL(msm_iommu_pm_alloc);
+
+void msm_iommu_pm_free(struct device *dev)
+{
+	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
+	if (pmon)
+		iommu_pm_del_from_iommu_list(pmon);
+}
+EXPORT_SYMBOL(msm_iommu_pm_free);
+
+void msm_iommu_attached(struct device *dev)
+{
+	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
+	if (pmon) {
+		mutex_lock(&pmon->lock);
+		++pmon->iommu_attach_count;
+		if (pmon->iommu_attach_count == 1) {
+			/* If perf. mon was enabled before we attached, we do
+			 * the actual enabling after we attach.
+			 */
+			if (pmon->enabled)
+				iommu_pm_on(pmon);
+		}
+		mutex_unlock(&pmon->lock);
+	}
+}
+EXPORT_SYMBOL(msm_iommu_attached);
+
+void msm_iommu_detached(struct device *dev)
+{
+	struct iommu_pmon *pmon = iommu_pm_get_pm_by_dev(dev);
+	if (pmon) {
+		mutex_lock(&pmon->lock);
+		if (pmon->iommu_attach_count == 1) {
+			/* If perf. mon is still enabled we have to disable
+			 * before we do the detach.
+			 */
+			if (pmon->enabled)
+				iommu_pm_off(pmon);
+		}
+		BUG_ON(pmon->iommu_attach_count == 0);
+		--pmon->iommu_attach_count;
+		mutex_unlock(&pmon->lock);
+	}
+}
+EXPORT_SYMBOL(msm_iommu_detached);
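+
+/*
+ * A minimal sketch of the expected pairing, mirroring what
+ * msm_iommu_attach_dev() and msm_iommu_detach_dev() in msm_iommu_sec.c
+ * do (the attach/detach function names here are placeholders):
+ *
+ *	ret = do_the_actual_attach(domain, dev);
+ *	if (!ret)
+ *		msm_iommu_attached(dev->parent);
+ *	...
+ *	msm_iommu_detached(dev->parent);
+ *	do_the_actual_detach(domain, dev);
+ *
+ * This keeps iommu_attach_count balanced and lets a perf-monitor enable
+ * requested before the first attach take effect at attach time.
+ */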
+
diff --git a/drivers/iommu/msm_iommu_sec.c b/drivers/iommu/msm_iommu_sec.c
new file mode 100644
index 0000000..74d8b48
--- /dev/null
+++ b/drivers/iommu/msm_iommu_sec.c
@@ -0,0 +1,567 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/kmemleak.h>
+
+#include <asm/sizes.h>
+
+#include <mach/iommu_perfmon.h>
+#include <mach/iommu_hw-v1.h>
+#include <mach/msm_iommu_priv.h>
+#include <mach/iommu.h>
+#include <mach/scm.h>
+
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)
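+/* i.e. bits 12, 16, 20 and 24: each set bit k advertises 2^k-byte pages */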
+
+#define IOMMU_SECURE_CFG	2
+#define IOMMU_SECURE_PTBL_SIZE  3
+#define IOMMU_SECURE_PTBL_INIT  4
+#define IOMMU_SECURE_MAP	6
+#define IOMMU_SECURE_UNMAP      7
+#define IOMMU_SECURE_MAP2	0x0B
+#define IOMMU_SECURE_UNMAP2	0x0C
+#define IOMMU_TLBINVAL_FLAG	0x00000001
+
+static struct iommu_access_ops *iommu_access_ops;
+
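+/*
+ * Marshalling structures for the SCM map/unmap calls below: 'list' is
+ * the physical address of an array of 'list_size' chunk addresses, and
+ * 'size' is the chunk size (the whole mapping length for a single-entry
+ * list, SZ_1M for the chunked case in msm_iommu_sec_ptbl_map_range()).
+ */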
+struct msm_scm_paddr_list {
+	unsigned int list;
+	unsigned int list_size;
+	unsigned int size;
+};
+
+struct msm_scm_mapping_info {
+	unsigned int id;
+	unsigned int ctx_id;
+	unsigned int va;
+	unsigned int size;
+};
+
+struct msm_scm_map2_req {
+	struct msm_scm_paddr_list plist;
+	struct msm_scm_mapping_info info;
+	unsigned int flags;
+};
+
+struct msm_scm_unmap2_req {
+	struct msm_scm_mapping_info info;
+	unsigned int flags;
+};
+
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
+{
+	iommu_access_ops = access_ops;
+}
+
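+/*
+ * Secure page-table bootstrap: first ask the secure environment how much
+ * memory it needs (IOMMU_SECURE_PTBL_SIZE), then hand it a buffer of
+ * that size (IOMMU_SECURE_PTBL_INIT).  The buffer belongs to the secure
+ * side from then on, hence the kmemleak_not_leak() annotation.
+ */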
+static int msm_iommu_sec_ptbl_init(void)
+{
+	struct device_node *np;
+	struct msm_scm_ptbl_init {
+		unsigned int paddr;
+		unsigned int size;
+		unsigned int spare;
+	} pinit;
+	unsigned int *buf;
+	int psize[2] = {0, 0};
+	unsigned int spare;
+	int ret, ptbl_ret = 0;
+
+	for_each_compatible_node(np, NULL, "qcom,msm-smmu-v1")
+		if (of_find_property(np, "qcom,iommu-secure-id", NULL))
+			break;
+
+	if (!np)
+		return 0;
+
+	of_node_put(np);
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &spare,
+			sizeof(spare), psize, sizeof(psize));
+	if (ret) {
+		pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+		goto fail;
+	}
+
+	if (psize[1]) {
+		pr_err("scm call IOMMU_SECURE_PTBL_SIZE extended ret fail\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	buf = kmalloc(psize[0], GFP_KERNEL);
+	if (!buf) {
+		pr_err("%s: Failed to allocate %d bytes for PTBL\n",
+			__func__, psize[0]);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	pinit.paddr = virt_to_phys(buf);
+	pinit.size = psize[0];
+
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &pinit,
+			sizeof(pinit), &ptbl_ret, sizeof(ptbl_ret));
+	if (ret) {
+		pr_err("scm call IOMMU_SECURE_PTBL_INIT failed\n");
+		goto fail_mem;
+	}
+	if (ptbl_ret) {
+		pr_err("scm call IOMMU_SECURE_PTBL_INIT extended ret fail\n");
+		ret = -EINVAL;
+		goto fail_mem;
+	}
+
+	kmemleak_not_leak(buf);
+
+	return 0;
+
+fail_mem:
+	kfree(buf);
+fail:
+	return ret;
+}
+
+int msm_iommu_sec_program_iommu(int sec_id)
+{
+	struct msm_scm_sec_cfg {
+		unsigned int id;
+		unsigned int spare;
+	} cfg;
+	int ret, scm_ret = 0;
+
+	cfg.id = sec_id;
+
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_CFG, &cfg, sizeof(cfg),
+			&scm_ret, sizeof(scm_ret));
+	if (ret || scm_ret) {
+		pr_err("scm call IOMMU_SECURE_CFG failed\n");
+		return ret ? ret : -EINVAL;
+	}
+
+	return ret;
+}
+
+static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
+			struct msm_iommu_ctx_drvdata *ctx_drvdata,
+			unsigned long va, phys_addr_t pa, size_t len)
+{
+	struct msm_scm_map2_req map;
+	int ret = 0;
+
+	map.plist.list = virt_to_phys(&pa);
+	map.plist.list_size = 1;
+	map.plist.size = len;
+	map.info.id = iommu_drvdata->sec_id;
+	map.info.ctx_id = ctx_drvdata->num;
+	map.info.va = va;
+	map.info.size = len;
+	map.flags = IOMMU_TLBINVAL_FLAG;
+
+	if (scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map), &ret,
+								sizeof(ret)))
+		return -EINVAL;
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
+static unsigned int get_phys_addr(struct scatterlist *sg)
+{
+	/*
+	 * Try sg_dma_address first so that we can
+	 * map carveout regions that do not have a
+	 * struct page associated with them.
+	 */
+	unsigned int pa = sg_dma_address(sg);
+	if (pa == 0)
+		pa = sg_phys(sg);
+	return pa;
+}
+
+static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
+			struct msm_iommu_ctx_drvdata *ctx_drvdata,
+			unsigned long va, struct scatterlist *sg, size_t len)
+{
+	struct scatterlist *sgiter;
+	struct msm_scm_map2_req map;
+	unsigned int *pa_list = NULL;
+	unsigned int pa, cnt;
+	unsigned int offset = 0, chunk_offset = 0;
+	int ret, scm_ret;
+
+	map.info.id = iommu_drvdata->sec_id;
+	map.info.ctx_id = ctx_drvdata->num;
+	map.info.va = va;
+	map.info.size = len;
+	map.flags = IOMMU_TLBINVAL_FLAG;
+
+	if (sg->length == len) {
+		pa = get_phys_addr(sg);
+		map.plist.list = virt_to_phys(&pa);
+		map.plist.list_size = 1;
+		map.plist.size = len;
+	} else {
+		sgiter = sg;
+		cnt = sg->length / SZ_1M;
+		while ((sgiter = sg_next(sgiter)))
+			cnt += sgiter->length / SZ_1M;
+
+		pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
+		if (!pa_list)
+			return -ENOMEM;
+
+		sgiter = sg;
+		cnt = 0;
+		pa = get_phys_addr(sgiter);
+		while (offset < len) {
+			pa += chunk_offset;
+			pa_list[cnt] = pa;
+			chunk_offset += SZ_1M;
+			offset += SZ_1M;
+			cnt++;
+
+			if (chunk_offset >= sgiter->length && offset < len) {
+				chunk_offset = 0;
+				sgiter = sg_next(sgiter);
+				pa = get_phys_addr(sgiter);
+			}
+		}
+
+		map.plist.list = virt_to_phys(pa_list);
+		map.plist.list_size = cnt;
+		map.plist.size = SZ_1M;
+	}
+
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &map, sizeof(map),
+			&scm_ret, sizeof(scm_ret));
+	kfree(pa_list);
+	if (!ret && scm_ret)
+		ret = -EINVAL;	/* the secure side reported failure */
+	return ret;
+}
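+
+/*
+ * A worked example of the chunking above, assuming (as the code does)
+ * that every scatterlist segment is a whole number of 1MB chunks: for
+ * len = 3MB over segments of 1MB (starting at pa0) and 2MB (starting at
+ * pa1), the loop emits pa_list = { pa0, pa1, pa1 + SZ_1M }, so
+ * plist.list_size = 3 and plist.size = SZ_1M.
+ */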
+
+static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
+			struct msm_iommu_ctx_drvdata *ctx_drvdata,
+			unsigned long va, size_t len)
+{
+	struct msm_scm_unmap2_req unmap;
+	int ret, scm_ret;
+
+	unmap.info.id = iommu_drvdata->sec_id;
+	unmap.info.ctx_id = ctx_drvdata->num;
+	unmap.info.va = va;
+	unmap.info.size = len;
+	unmap.flags = IOMMU_TLBINVAL_FLAG;
+
+	ret = scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2, &unmap, sizeof(unmap),
+			&scm_ret, sizeof(scm_ret));
+	if (!ret && scm_ret)
+		ret = -EINVAL;	/* the secure side reported failure */
+	return ret;
+}
+
+static int msm_iommu_domain_init(struct iommu_domain *domain, int flags)
+{
+	struct msm_iommu_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&priv->list_attached);
+	domain->priv = priv;
+	return 0;
+}
+
+static void msm_iommu_domain_destroy(struct iommu_domain *domain)
+{
+	struct msm_iommu_priv *priv;
+
+	iommu_access_ops->iommu_lock_acquire();
+	priv = domain->priv;
+	domain->priv = NULL;
+
+	kfree(priv);
+	iommu_access_ops->iommu_lock_release();
+}
+
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct msm_iommu_priv *priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	struct msm_iommu_ctx_drvdata *tmp_drvdata;
+	int ret = 0;
+
+	iommu_access_ops->iommu_lock_acquire();
+
+	priv = domain->priv;
+	if (!priv || !dev) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	iommu_drvdata = dev_get_drvdata(dev->parent);
+	ctx_drvdata = dev_get_drvdata(dev);
+	if (!iommu_drvdata || !ctx_drvdata) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (!list_empty(&ctx_drvdata->attached_elm)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+		if (tmp_drvdata == ctx_drvdata) {
+			ret = -EBUSY;
+			goto fail;
+		}
+
+	ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
+	if (ret)
+		goto fail;
+
+	/* Program the secure environment only once, on the first attach */
+	if (!iommu_drvdata->ctx_attach_count) {
+		ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
+		if (ret) {
+			iommu_access_ops->iommu_power_off(iommu_drvdata);
+			goto fail;
+		}
+
+		ret = msm_iommu_sec_program_iommu(iommu_drvdata->sec_id);
+
+		/* bfb settings are always programmed by HLOS */
+		program_iommu_bfb_settings(iommu_drvdata->base,
+					   iommu_drvdata->bfb_settings);
+
+		iommu_access_ops->iommu_clk_off(iommu_drvdata);
+		if (ret) {
+			iommu_access_ops->iommu_power_off(iommu_drvdata);
+			goto fail;
+		}
+	}
+
+	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+	ctx_drvdata->attached_domain = domain;
+	++iommu_drvdata->ctx_attach_count;
+
+	iommu_access_ops->iommu_lock_release();
+
+	msm_iommu_attached(dev->parent);
+	return ret;
+fail:
+	iommu_access_ops->iommu_lock_release();
+	return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+				 struct device *dev)
+{
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+	msm_iommu_detached(dev->parent);
+
+	iommu_access_ops->iommu_lock_acquire();
+	if (!dev)
+		goto fail;
+
+	iommu_drvdata = dev_get_drvdata(dev->parent);
+	ctx_drvdata = dev_get_drvdata(dev);
+	if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+		goto fail;
+
+	list_del_init(&ctx_drvdata->attached_elm);
+	ctx_drvdata->attached_domain = NULL;
+
+	iommu_access_ops->iommu_power_off(iommu_drvdata);
+	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+	--iommu_drvdata->ctx_attach_count;
+fail:
+	iommu_access_ops->iommu_lock_release();
+}
+
+static int get_drvdata(struct iommu_domain *domain,
+			struct msm_iommu_drvdata **iommu_drvdata,
+			struct msm_iommu_ctx_drvdata **ctx_drvdata)
+{
+	struct msm_iommu_priv *priv = domain->priv;
+	struct msm_iommu_ctx_drvdata *ctx;
+
+	list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
+		if (ctx->attached_domain == domain) {
+			*ctx_drvdata = ctx;
+			*iommu_drvdata =
+				dev_get_drvdata(ctx->pdev->dev.parent);
+			return 0;
+		}
+	}
+
+	/* An empty (or exhausted) walk leaves the iterator pointing at the
+	 * list head rather than a real entry, so don't dereference it.
+	 */
+	return -EINVAL;
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+			 phys_addr_t pa, size_t len, int prot)
+{
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret = 0;
+
+	iommu_access_ops->iommu_lock_acquire();
+
+	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+	if (ret)
+		goto fail;
+
+	iommu_access_ops->iommu_clk_on(iommu_drvdata);
+	ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
+					va, pa, len);
+	iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+	iommu_access_ops->iommu_lock_release();
+	return ret;
+}
+
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			    size_t len)
+{
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret;
+
+	iommu_access_ops->iommu_lock_acquire();
+
+	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+	if (ret)
+		goto fail;
+
+	iommu_access_ops->iommu_clk_on(iommu_drvdata);
+	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
+					va, len);
+	iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+	iommu_access_ops->iommu_lock_release();
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
+}
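+
+/*
+ * A caller-side sketch of the contract above, assuming the standard
+ * Linux IOMMU API: a return of 0 means "nothing was unmapped", so
+ * failures surface as a short count rather than a negative errno.
+ *
+ *	size_t done = iommu_unmap(domain, va, len);
+ *	if (done != len)
+ *		pr_err("unmapped only %zu of %zu bytes\n", done, len);
+ */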
+
+static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
+			       struct scatterlist *sg, unsigned int len,
+			       int prot)
+{
+	int ret;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+	iommu_access_ops->iommu_lock_acquire();
+
+	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+	if (ret)
+		goto fail;
+	iommu_access_ops->iommu_clk_on(iommu_drvdata);
+	ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
+						va, sg, len);
+	iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+	iommu_access_ops->iommu_lock_release();
+	return ret;
+}
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+				 unsigned int len)
+{
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret;
+
+	iommu_access_ops->iommu_lock_acquire();
+
+	ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+	if (ret)
+		goto fail;
+
+	iommu_access_ops->iommu_clk_on(iommu_drvdata);
+	ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
+	iommu_access_ops->iommu_clk_off(iommu_drvdata);
+
+fail:
+	iommu_access_ops->iommu_lock_release();
+	return ret;
+}
+
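+/*
+ * The page tables live in, and are only readable by, the secure
+ * environment, so translations and capability queries cannot be
+ * answered here; each query below reports "unknown".
+ */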
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+					  unsigned long va)
+{
+	return 0;
+}
+
+static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
+				    unsigned long cap)
+{
+	return 0;
+}
+
+static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
+{
+	return 0;
+}
+
+static struct iommu_ops msm_iommu_ops = {
+	.domain_init = msm_iommu_domain_init,
+	.domain_destroy = msm_iommu_domain_destroy,
+	.attach_dev = msm_iommu_attach_dev,
+	.detach_dev = msm_iommu_detach_dev,
+	.map = msm_iommu_map,
+	.unmap = msm_iommu_unmap,
+	.map_range = msm_iommu_map_range,
+	.unmap_range = msm_iommu_unmap_range,
+	.iova_to_phys = msm_iommu_iova_to_phys,
+	.domain_has_cap = msm_iommu_domain_has_cap,
+	.get_pt_base_addr = msm_iommu_get_pt_base_addr,
+	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
+};
+
+static int __init msm_iommu_sec_init(void)
+{
+	int ret;
+
+	ret = bus_register(&msm_iommu_sec_bus_type);
+	if (ret)
+		goto fail;
+
+	bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
+	ret = msm_iommu_sec_ptbl_init();
+fail:
+	return ret;
+}
+
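+/*
+ * Run at subsys_initcall time so the secure bus and its iommu ops are
+ * registered before client device drivers start probing against them.
+ */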
+subsys_initcall(msm_iommu_sec_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU Secure Driver");
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index b7ae510..5ceb423 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -38,7 +38,6 @@
 #include <linux/rcupdate.h>
 #include <linux/notifier.h>
 
-extern void show_meminfo(void);
 static uint32_t lowmem_debug_level = 2;
 static int lowmem_adj[6] = {
 	0,
@@ -234,7 +233,6 @@
 		lowmem_deathpending_timeout = jiffies + HZ;
 		if (selected_oom_adj < 7)
 		{
-			show_meminfo();
 			dump_tasks();
 		}
 		send_sig(SIGKILL, selected, 0);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 5204851..af16884 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -361,16 +361,6 @@
 	  If you have a Pandora console, say Y to enable the
 	  backlight driver.
 
-config BACKLIGHT_LM3530
-	tristate "select lm3530 device as LGE backlight management"
-	help
-	  Say Y to enable the backlight driver for TI LM3530.
-
-config BACKLIGHT_LM3533
-	tristate "select lm3533 device as LGE backlight management"
-	help
-	  Say Y to enable the backlight driver for TI LM3533.
-
 endif # BACKLIGHT_CLASS_DEVICE
 
 endif # BACKLIGHT_LCD_SUPPORT
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index c65121a..36855ae 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -42,5 +42,3 @@
 obj-$(CONFIG_BACKLIGHT_PCF50633)	+= pcf50633-backlight.o
 obj-$(CONFIG_BACKLIGHT_AAT2870) += aat2870_bl.o
 obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o
-obj-$(CONFIG_BACKLIGHT_LM3530) += lm3530_bl.o
-obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533.o
diff --git a/drivers/video/backlight/lm3530_bl.c b/drivers/video/backlight/lm3530_bl.c
deleted file mode 100644
index c87fec0..0000000
--- a/drivers/video/backlight/lm3530_bl.c
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
- *  LM3530 backlight device driver
- *
- *  Copyright (C) 2011-2012, LG Eletronics,Inc. All rights reservced.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/lm35xx_bl.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/mutex.h>
-
-#define I2C_BL_NAME             "lm3530"
-
-#define BL_ON                   1
-#define BL_OFF                  0
-
-static DEFINE_MUTEX(backlight_mtx);
-
-static struct i2c_client *lm3530_i2c_client;
-
-struct lm3530_device {
-	struct i2c_client *client;
-	struct backlight_device *bl_dev;
-	int gpio;
-	int max_current;
-	int min_brightness;
-	int max_brightness;
-	int default_brightness;
-	char *blmap;
-	int blmap_size;
-};
-
-static const struct i2c_device_id lm3530_bl_id[] = {
-	{ I2C_BL_NAME, 0 },
-	{ },
-};
-
-static int lm3530_write_reg(struct i2c_client *client,
-		unsigned char reg, unsigned char val);
-
-static int cur_main_lcd_level;
-static int saved_main_lcd_level;
-static int backlight_status = BL_ON;
-
-static void lm3530_hw_reset(struct i2c_client *client)
-{
-	struct lm3530_device *dev = i2c_get_clientdata(client);
-	int gpio = dev->gpio;
-
-	if (gpio_is_valid(gpio)) {
-		gpio_direction_output(gpio, 1);
-		gpio_set_value_cansleep(gpio, 1);
-		mdelay(1);
-	}
-}
-
-static int lm3530_write_reg(struct i2c_client *client,
-		unsigned char reg, unsigned char val)
-{
-	u8 buf[2];
-	struct i2c_msg msg = {
-		client->addr, 0, 2, buf
-	};
-
-	buf[0] = reg;
-	buf[1] = val;
-
-	if (i2c_transfer(client->adapter, &msg, 1) < 0)
-		dev_err(&client->dev, "i2c write error\n");
-
-	return 0;
-}
-
-static void lm3530_set_main_current_level(struct i2c_client *client, int level)
-{
-	struct lm3530_device *dev = i2c_get_clientdata(client);
-	int cal_value = 0;
-	int min_brightness = dev->min_brightness;
-	int max_brightness = dev->max_brightness;
-
-	dev->bl_dev->props.brightness = cur_main_lcd_level = level;
-
-	if (level != 0) {
-		if (level > 0 && level <= min_brightness)
-			cal_value = min_brightness;
-		else if (level > min_brightness && level <= max_brightness)
-			cal_value = level;
-		else if (level > max_brightness)
-			cal_value = max_brightness;
-
-		if (dev->blmap) {
-			if (cal_value < dev->blmap_size)
-				lm3530_write_reg(client, 0xA0,
-						dev->blmap[cal_value]);
-			else
-				dev_warn(&client->dev, "invalid index %d:%d\n",
-						dev->blmap_size,
-						cal_value);
-		} else {
-			lm3530_write_reg(client, 0xA0, cal_value);
-		}
-	} else
-		lm3530_write_reg(client, 0x10, 0x00);
-
-	mdelay(1);
-}
-
-static bool first_boot = true;
-static void lm3530_backlight_on(struct i2c_client *client, int level)
-{
-	struct lm3530_device *dev = i2c_get_clientdata(client);
-
-	mutex_lock(&backlight_mtx);
-	if (backlight_status == BL_OFF) {
-		pr_info("%s, ++ lm3530_backlight_on  \n",__func__);
-		lm3530_hw_reset(client);
-
-		lm3530_write_reg(dev->client, 0xA0, 0x00);
-		lm3530_write_reg(dev->client, 0x10, dev->max_current);
-	}
-
-	if (first_boot) {
-		lm3530_write_reg(dev->client, 0x10, dev->max_current);
-		first_boot = false;
-	}
-
-	lm3530_set_main_current_level(dev->client, level);
-	backlight_status = BL_ON;
-	mutex_unlock(&backlight_mtx);
-}
-
-static void lm3530_backlight_off(struct i2c_client *client)
-{
-	struct lm3530_device *dev = i2c_get_clientdata(client);
-	int gpio = dev->gpio;
-
-	pr_info("%s, on: %d\n", __func__, backlight_status);
-
-	mutex_lock(&backlight_mtx);
-	if (backlight_status == BL_OFF) {
-		mutex_unlock(&backlight_mtx);
-		return;
-	}
-
-	saved_main_lcd_level = cur_main_lcd_level;
-	lm3530_set_main_current_level(dev->client, 0);
-	backlight_status = BL_OFF;
-
-	gpio_tlmm_config(GPIO_CFG(gpio, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL,
-			GPIO_CFG_2MA), GPIO_CFG_ENABLE);
-	gpio_direction_output(gpio, 0);
-	msleep(6);
-	mutex_unlock(&backlight_mtx);
-}
-
-void lm3530_lcd_backlight_set_level(int level)
-{
-	struct i2c_client *client = lm3530_i2c_client;
-	struct lm3530_device *dev = i2c_get_clientdata(client);
-
-	if (!client) {
-		pr_warn("%s: not yet enabled\n", __func__);
-		return;
-	}
-
-	if (level > dev->max_brightness)
-		level = dev->max_brightness;
-
-	pr_debug("%s: level %d\n", __func__, level);
-	if (level)
-		lm3530_backlight_on(client, level);
-	else
-		lm3530_backlight_off(client);
-}
-EXPORT_SYMBOL(lm3530_lcd_backlight_set_level);
-
-void lm3530_lcd_backlight_pwm_disable(void)
-{
-	struct i2c_client *client = lm3530_i2c_client;
-	struct lm3530_device *dev = i2c_get_clientdata(client);
-
-	if (backlight_status == BL_OFF)
-		return;
-
-	lm3530_write_reg(client, 0x10, dev->max_current & 0x1F);
-}
-EXPORT_SYMBOL(lm3530_lcd_backlight_pwm_disable);
-
-int lm3530_lcd_backlight_on_status(void)
-{
-	return backlight_status;
-}
-EXPORT_SYMBOL(lm3530_lcd_backlight_on_status);
-
-static int bl_set_intensity(struct backlight_device *bd)
-{
-	struct i2c_client *client = lm3530_i2c_client;
-	struct lm3530_device *dev = i2c_get_clientdata(client);
-	int brightness = bd->props.brightness;
-
-	if ((bd->props.state & BL_CORE_FBBLANK) ||
-			(bd->props.state & BL_CORE_SUSPENDED))
-		brightness = 0;
-	else if (brightness == 0)
-		brightness = dev->default_brightness;
-
-	lm3530_lcd_backlight_set_level(brightness);
-	return 0;
-}
-
-static int bl_get_intensity(struct backlight_device *bd)
-{
-	return cur_main_lcd_level;
-}
-
-static ssize_t lcd_backlight_show_level(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	return snprintf(buf, PAGE_SIZE, "LCD Backlight Level is : %d\n",
-			cur_main_lcd_level);
-}
-
-static ssize_t lcd_backlight_store_level(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	int level;
-
-	if (!count)
-		return -EINVAL;
-
-	level = simple_strtoul(buf, NULL, 10);
-	lm3530_lcd_backlight_set_level(level);
-
-	return count;
-}
-
-static int lm3530_bl_resume(struct i2c_client *client)
-{
-	lm3530_backlight_on(client, saved_main_lcd_level);
-	return 0;
-}
-
-static int lm3530_bl_suspend(struct i2c_client *client, pm_message_t state)
-{
-	pr_info("%s: new state: %d\n", __func__, state.event);
-
-	lm3530_backlight_off(client);
-
-	return 0;
-}
-
-static ssize_t lcd_backlight_show_on_off(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	pr_info("%s received (prev backlight_status: %s)\n", __func__,
-			backlight_status ? "ON" : "OFF");
-	return 0;
-}
-
-static ssize_t lcd_backlight_store_on_off(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
-{
-	int on_off;
-	struct i2c_client *client = to_i2c_client(dev);
-
-	if (!count)
-		return -EINVAL;
-
-	pr_info("%s received (prev backlight_status: %s)\n", __func__,
-			backlight_status ? "ON" : "OFF");
-
-	on_off = simple_strtoul(buf, NULL, 10);
-
-	pr_info("%d", on_off);
-
-	if (on_off == 1)
-		lm3530_bl_resume(client);
-	else if (on_off == 0)
-		lm3530_bl_suspend(client, PMSG_SUSPEND);
-
-	return count;
-
-}
-DEVICE_ATTR(lm3530_level, 0644, lcd_backlight_show_level,
-		lcd_backlight_store_level);
-DEVICE_ATTR(lm3530_backlight_on_off, 0644, lcd_backlight_show_on_off,
-		lcd_backlight_store_on_off);
-
-static struct backlight_ops lm3530_bl_ops = {
-	.update_status = bl_set_intensity,
-	.get_brightness = bl_get_intensity,
-};
-
-static int __devinit lm3530_probe(struct i2c_client *i2c_dev,
-		const struct i2c_device_id *id)
-{
-	struct backlight_platform_data *pdata;
-	struct lm3530_device *dev;
-	struct backlight_device *bl_dev;
-	struct backlight_properties props;
-	int err = 0;
-
-	pdata = i2c_dev->dev.platform_data;
-	if (!pdata)
-		return -ENODEV;
-
-	dev = kzalloc(sizeof(struct lm3530_device), GFP_KERNEL);
-	if (!dev) {
-		dev_err(&i2c_dev->dev, "out of memory\n");
-		return -ENOMEM;
-	}
-
-	memset(&props, 0, sizeof(struct backlight_properties));
-	props.type = BACKLIGHT_RAW;
-	props.max_brightness = pdata->max_brightness;
-
-	bl_dev = backlight_device_register(I2C_BL_NAME, &i2c_dev->dev, NULL,
-			&lm3530_bl_ops, &props);
-	if (IS_ERR(bl_dev)) {
-		dev_err(&i2c_dev->dev, "failed to register backlight\n");
-		err = PTR_ERR(bl_dev);
-		goto err_backlight_device_register;
-	}
-	bl_dev->props.max_brightness = pdata->max_brightness;
-	bl_dev->props.brightness = pdata->default_brightness;
-	bl_dev->props.power = FB_BLANK_UNBLANK;
-
-	dev->bl_dev = bl_dev;
-	dev->client = i2c_dev;
-	dev->gpio = pdata->gpio;
-	dev->max_current = pdata->max_current;
-	dev->min_brightness = pdata->min_brightness;
-	dev->max_brightness = pdata->max_brightness;
-	dev->default_brightness = pdata->default_brightness;
-	dev->blmap = pdata->blmap;
-	dev->blmap_size = pdata->blmap_size;
-	i2c_set_clientdata(i2c_dev, dev);
-
-	if (gpio_is_valid(dev->gpio)) {
-		err = gpio_request(dev->gpio, "lm3530 reset");
-		if (err < 0) {
-			dev_err(&i2c_dev->dev, "failed to request gpio\n");
-			goto err_gpio_request;
-		}
-	}
-
-	err = device_create_file(&i2c_dev->dev, &dev_attr_lm3530_level);
-	if (err < 0) {
-		dev_err(&i2c_dev->dev, "failed to create 1st sysfs\n");
-		goto err_device_create_file_1;
-	}
-	err = device_create_file(&i2c_dev->dev,
-			&dev_attr_lm3530_backlight_on_off);
-	if (err < 0) {
-		dev_err(&i2c_dev->dev, "failed to create 2nd sysfs\n");
-		goto err_device_create_file_2;
-	}
-
-	lm3530_i2c_client = i2c_dev;
-
-	pr_info("lm3530 probed\n");
-	return 0;
-
-err_device_create_file_2:
-	device_remove_file(&i2c_dev->dev, &dev_attr_lm3530_level);
-err_device_create_file_1:
-	if (gpio_is_valid(dev->gpio))
-		gpio_free(dev->gpio);
-err_gpio_request:
-	backlight_device_unregister(bl_dev);
-err_backlight_device_register:
-	kfree(dev);
-
-	return err;
-}
-
-static int __devexit lm3530_remove(struct i2c_client *i2c_dev)
-{
-	struct lm3530_device *dev = i2c_get_clientdata(i2c_dev);
-
-	lm3530_i2c_client = NULL;
-	device_remove_file(&i2c_dev->dev, &dev_attr_lm3530_level);
-	device_remove_file(&i2c_dev->dev, &dev_attr_lm3530_backlight_on_off);
-	i2c_set_clientdata(i2c_dev, NULL);
-
-	if (gpio_is_valid(dev->gpio))
-		gpio_free(dev->gpio);
-
-	backlight_device_unregister(dev->bl_dev);
-	kfree(dev);
-
-	return 0;
-}
-
-static struct i2c_driver main_lm3530_driver = {
-	.probe = lm3530_probe,
-	.remove = lm3530_remove,
-	.id_table = lm3530_bl_id,
-	.driver = {
-		.name = I2C_BL_NAME,
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init lcd_backlight_init(void)
-{
-	return i2c_add_driver(&main_lm3530_driver);
-}
-
-module_init(lcd_backlight_init);
-
-MODULE_DESCRIPTION("LM3530 Backlight Control");
-MODULE_AUTHOR("Jaeseong Gim <jaeseong.gim@lge.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/lm3533.c b/drivers/video/backlight/lm3533.c
deleted file mode 100644
index 006712b..0000000
--- a/drivers/video/backlight/lm3533.c
+++ /dev/null
@@ -1,552 +0,0 @@
-/*
- *  Copyright (C) 2011-2012, LG Eletronics,Inc. All rights reserved.
- *      LM3533 backlight device driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <mach/board.h>
-
-#include <mach/board_lge.h>
-#include <linux/earlysuspend.h>
-
-#define MAX_BRIGHTNESS_lm3533   0xFF
-#define MAX_BRIGHTNESS_lm3528   0x7F
-#define DEFAULT_BRIGHTNESS      0xA5
-#define MIN_BRIGHTNESS          0x0F
-#define I2C_BL_NAME             "lm3533"
-
-#define DEFAULT_FTM_BRIGHTNESS  0x0F
-
-#define BL_ON                   1
-#define BL_OFF                  0
-
-static struct i2c_client *lm3533_i2c_client;
-
-struct backlight_platform_data {
-	void (*platform_init)(void);
-	int gpio;
-	unsigned int mode;
-	int max_current;
-	int init_on_boot;
-	int min_brightness;
-	int max_brightness;
-	int default_brightness;
-	int factory_brightness;
-};
-
-struct lm3533_device {
-	struct i2c_client *client;
-	struct backlight_device *bl_dev;
-	int gpio;
-	int max_current;
-	int min_brightness;
-	int max_brightness;
-	int default_brightness;
-	int factory_brightness;
-	struct mutex bl_mutex;
-};
-
-static const struct i2c_device_id lm3533_bl_id[] = {
-	{ I2C_BL_NAME, 0 },
-	{ },
-};
-
-static int lm3533_write_reg(struct i2c_client *client,
-		unsigned char reg, unsigned char val);
-
-static int cur_main_lcd_level = DEFAULT_BRIGHTNESS;
-static int saved_main_lcd_level = DEFAULT_BRIGHTNESS;
-
-static int backlight_status = BL_ON;
-static struct lm3533_device *main_lm3533_dev;
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static struct early_suspend early_suspend;
-
-#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) || \
-	defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
-static int is_early_suspended = false;
-static int requested_in_early_suspend_lcd_level= 0;
-#endif
-
-#endif /* CONFIG_HAS_EARLYSUSPEND */
-
-#if !defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) && \
-	!defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
-static struct early_suspend * h;
-#endif
-
-static void lm3533_hw_reset(void)
-{
-	int gpio = main_lm3533_dev->gpio;
-
-	if (gpio_is_valid(gpio)) {
-		gpio_direction_output(gpio, 1);
-		gpio_set_value_cansleep(gpio, 1);
-		mdelay(1);
-	} else {
-		pr_err("%s: gpio is not valid !!\n", __func__);
-	}
-}
-
-static int lm3533_write_reg(struct i2c_client *client,
-		unsigned char reg, unsigned char val)
-{
-	int err;
-	u8 buf[2];
-	struct i2c_msg msg = {
-		client->addr, 0, 2, buf
-	};
-
-	buf[0] = reg;
-	buf[1] = val;
-
-	err = i2c_transfer(client->adapter, &msg, 1);
-	if (err < 0)
-		dev_err(&client->dev, "i2c write error\n");
-	return 0;
-}
-
-static int exp_min_value = 150;
-static int cal_value;
-static char mapped_value[256] = {
-	1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  1,  4,
-	10, 16, 21, 26, 31, 35, 39, 43, 47, 51, 54, 58, 61, 64, 67,
-	70, 73, 76, 78, 81, 83, 86, 88, 91, 93, 95, 97, 99, 101,103,
-	105,107,109,111,113,114,116,118,119,121,123,124,126,127,129,
-	130,132,133,134,136,137,138,140,141,142,144,145,146,147,148,
-	149,151,152,153,154,155,156,157,158,159,160,161,162,163,164,
-	165,166,167,168,169,170,171,172,173,174,174,175,176,177,178,
-	179,179,180,181,182,183,183,184,185,186,187,187,188,189,189,
-	190,191,192,192,193,194,194,195,196,196,197,198,198,199,200,
-	200,201,202,202,203,204,204,205,205,206,207,207,208,208,209,
-	210,210,211,211,212,212,213,213,214,215,215,216,216,217,217,
-	218,218,219,219,220,220,221,221,222,222,223,223,224,224,225,
-	225,226,226,227,227,228,228,229,229,230,230,230,231,231,232,
-	232,233,233,234,234,234,235,235,236,236,237,237,237,238,238,
-	239,239,240,240,240,241,241,242,242,242,243,243,243,244,244,
-	245,245,245,246,246,247,247,247,248,248,248,249,249,250,250,
-	250,251,251,251,252,252,252,253,253,253,254,254,254,255,255,
-	255
-};
-
-static void lm3533_set_main_current_level(struct i2c_client *client, int level)
-{
-	struct lm3533_device *dev;
-	dev = (struct lm3533_device *)i2c_get_clientdata(client);
-
-	if (lge_get_factory_boot() &&
-			((lge_pm_get_cable_type() == CABLE_56K) ||
-			(lge_pm_get_cable_type() == CABLE_130K) ||
-			(lge_pm_get_cable_type() == CABLE_910K))) {
-		level = dev->factory_brightness;
-	}
-
-	if (level == -1)
-		level = dev->default_brightness;
-
-	cur_main_lcd_level = level;
-	dev->bl_dev->props.brightness = cur_main_lcd_level;
-
-	mutex_lock(&main_lm3533_dev->bl_mutex);
-	if (level != 0) {
-		cal_value = mapped_value[level];
-		lm3533_write_reg(main_lm3533_dev->client, 0x40, cal_value);
-	} else {
-		lm3533_write_reg(client, 0x27, 0x00);
-	}
-	mutex_unlock(&main_lm3533_dev->bl_mutex);
-}
-
-void lm3533_backlight_on(int level)
-{
-
-#if defined(CONFIG_HAS_EARLYSUSPEND) && \
-	(defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) || \
-	defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL))
-
-	if (is_early_suspended) {
-		requested_in_early_suspend_lcd_level = level;
-		return;
-	}
-#endif /* CONFIG_HAS_EARLYSUSPEND */
-	if (backlight_status == BL_OFF) {
-		lm3533_hw_reset();
-		lm3533_write_reg(main_lm3533_dev->client, 0x10, 0x0);
-#if defined(CONFIG_LGE_BACKLIGHT_CABC)
-		lm3533_write_reg(main_lm3533_dev->client, 0x14, 0x1);
-#else
-		lm3533_write_reg(main_lm3533_dev->client, 0x14, 0x0);
-#endif
-		lm3533_write_reg(main_lm3533_dev->client, 0x1A, 0x00);
-		lm3533_write_reg(main_lm3533_dev->client, 0x1F, 0x13);
-		lm3533_write_reg(main_lm3533_dev->client, 0x27, 0x1);
-#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) || \
-		defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
-		lm3533_write_reg(main_lm3533_dev->client, 0x2C, 0xC);
-		lm3533_write_reg(main_lm3533_dev->client, 0x12, 0x9);
-		lm3533_write_reg(main_lm3533_dev->client, 0x13, 0x9);
-#else
-		lm3533_write_reg(main_lm3533_dev->client, 0x2C, 0xE);
-#endif
-	}
-
-	lm3533_set_main_current_level(main_lm3533_dev->client, level);
-	backlight_status = BL_ON;
-
-	return;
-}
-
-#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) || \
-	defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL) || \
-	!defined(CONFIG_HAS_EARLYSUSPEND)
-void lm3533_backlight_off(void)
-#else
-void lm3533_backlight_off(struct early_suspend * h)
-#endif
-{
-	int gpio = main_lm3533_dev->gpio;
-
-	if (backlight_status == BL_OFF)
-		return;
-	saved_main_lcd_level = cur_main_lcd_level;
-	lm3533_set_main_current_level(main_lm3533_dev->client, 0);
-	backlight_status = BL_OFF;
-
-	gpio_direction_output(gpio, 0);
-	msleep(6);
-
-	return;
-}
-
-void lm3533_lcd_backlight_set_level(int level)
-{
-	if (level > MAX_BRIGHTNESS_lm3533)
-		level = MAX_BRIGHTNESS_lm3533;
-
-	if (lm3533_i2c_client != NULL) {
-		if (level == 0) {
-#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) || \
-	defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL) || \
-	!defined(CONFIG_HAS_EARLYSUSPEND)
-			lm3533_backlight_off();
-#else
-			lm3533_backlight_off(h);
-#endif
-		} else {
-			lm3533_backlight_on(level);
-		}
-	} else {
-		pr_err("%s(): No client\n", __func__);
-	}
-}
-EXPORT_SYMBOL(lm3533_lcd_backlight_set_level);
-
-#if defined(CONFIG_HAS_EARLYSUSPEND) && \
-	(defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) || \
-	defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL))
-
-void lm3533_early_suspend(struct early_suspend * h)
-{
-	is_early_suspended = true;
-
-	pr_info("%s[Start] backlight_status: %d\n", __func__,
-			backlight_status);
-	if (backlight_status == BL_OFF)
-		return;
-
-	lm3533_lcd_backlight_set_level(0);
-}
-
-void lm3533_late_resume(struct early_suspend * h)
-{
-	is_early_suspended = false;
-
-	pr_info("%s[Start] backlight_status: %d\n", __func__,
-			backlight_status);
-	if (backlight_status == BL_ON)
-		return;
-
-	lm3533_lcd_backlight_set_level(requested_in_early_suspend_lcd_level);
-	return;
-}
-#endif /* CONFIG_HAS_EARLYSUSPEND */
-
-static int bl_set_intensity(struct backlight_device *bd)
-{
-	struct i2c_client *client = to_i2c_client(bd->dev.parent);
-
-	lm3533_set_main_current_level(client, bd->props.brightness);
-	cur_main_lcd_level = bd->props.brightness;
-
-	return 0;
-}
-
-static int bl_get_intensity(struct backlight_device *bd)
-{
-	unsigned char val = 0;
-	val &= 0x1f;
-	return (int)val;
-}
-
-static ssize_t lcd_backlight_show_level(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	int r;
-	r = snprintf(buf, PAGE_SIZE, "LCD Backlight Level is "
-			": %d\n", cal_value);
-	return r;
-}
-
-static ssize_t lcd_backlight_store_level(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	int level;
-	struct i2c_client *client = to_i2c_client(dev);
-
-	if (!count)
-		return -EINVAL;
-
-	level = simple_strtoul(buf, NULL, 10);
-
-	if (level > MAX_BRIGHTNESS_lm3533)
-		level = MAX_BRIGHTNESS_lm3533;
-
-	lm3533_set_main_current_level(client, level);
-	cur_main_lcd_level = level;
-
-	return count;
-}
-
-static int lm3533_bl_resume(struct i2c_client *client)
-{
-#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) || \
-	defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
-	lm3533_lcd_backlight_set_level(saved_main_lcd_level);
-#else
-	lm3533_backlight_on(saved_main_lcd_level);
-#endif
-	return 0;
-}
-
-static int lm3533_bl_suspend(struct i2c_client *client, pm_message_t state)
-{
-	printk(KERN_INFO"%s: new state: %d\n", __func__, state.event);
-
-#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) || \
-	defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL) || \
-	!defined(CONFIG_HAS_EARLYSUSPEND)
-	lm3533_lcd_backlight_set_level(saved_main_lcd_level);
-#else
-	lm3533_backlight_off(h);
-#endif
-	return 0;
-}
-
-static ssize_t lcd_backlight_show_on_off(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	int r = 0;
-	pr_info("%s received (prev backlight_status: %s)\n", __func__,
-			backlight_status ? "ON" : "OFF");
-	return r;
-}
-
-static ssize_t lcd_backlight_store_on_off(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	int on_off;
-	struct i2c_client *client = to_i2c_client(dev);
-
-	if (!count)
-		return -EINVAL;
-
-	pr_info("%s received (prev backlight_status: %s)\n", __func__,
-			backlight_status ? "ON" : "OFF");
-
-	on_off = simple_strtoul(buf, NULL, 10);
-
-	printk(KERN_ERR "%d", on_off);
-
-	if (on_off == 1) {
-		lm3533_bl_resume(client);
-	} else if (on_off == 0)
-		lm3533_bl_suspend(client, PMSG_SUSPEND);
-
-	return count;
-}
-static ssize_t lcd_backlight_show_exp_min_value(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	int r;
-	r = snprintf(buf, PAGE_SIZE, "LCD Backlight : %d\n", exp_min_value);
-	return r;
-}
-
-static ssize_t lcd_backlight_store_exp_min_value(struct device *dev,
-		struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	int value;
-
-	if (!count)
-		return -EINVAL;
-
-	value = simple_strtoul(buf, NULL, 10);
-	exp_min_value = value;
-
-	return count;
-}
-
-DEVICE_ATTR(lm3533_level, 0644, lcd_backlight_show_level,
-		lcd_backlight_store_level);
-DEVICE_ATTR(lm3533_backlight_on_off, 0644, lcd_backlight_show_on_off,
-		lcd_backlight_store_on_off);
-DEVICE_ATTR(lm3533_exp_min_value, 0644, lcd_backlight_show_exp_min_value,
-		lcd_backlight_store_exp_min_value);
-
-static struct backlight_ops lm3533_bl_ops = {
-	.update_status = bl_set_intensity,
-	.get_brightness = bl_get_intensity,
-};
-
-static int lm3533_probe(struct i2c_client *i2c_dev,
-		const struct i2c_device_id *id)
-{
-	struct backlight_platform_data *pdata;
-	struct lm3533_device *dev;
-	struct backlight_device *bl_dev;
-	struct backlight_properties props;
-	int err;
-
-	pr_info("%s: i2c probe start\n", __func__);
-
-	pdata = i2c_dev->dev.platform_data;
-	lm3533_i2c_client = i2c_dev;
-
-	dev = kzalloc(sizeof(struct lm3533_device), GFP_KERNEL);
-	if (dev == NULL) {
-		dev_err(&i2c_dev->dev, "fail alloc for lm3533_device\n");
-		return 0;
-	}
-
-	pr_info("%s: gpio = %d\n", __func__,pdata->gpio);
-
-	if (pdata->gpio && gpio_request(pdata->gpio, "lm3533 reset") != 0) {
-		return -ENODEV;
-	}
-
-	main_lm3533_dev = dev;
-
-	memset(&props, 0, sizeof(struct backlight_properties));
-	props.type = BACKLIGHT_RAW;
-
-	props.max_brightness = MAX_BRIGHTNESS_lm3533;
-	bl_dev = backlight_device_register(I2C_BL_NAME, &i2c_dev->dev,
-			NULL, &lm3533_bl_ops, &props);
-	bl_dev->props.max_brightness = MAX_BRIGHTNESS_lm3533;
-	bl_dev->props.brightness = DEFAULT_BRIGHTNESS;
-	bl_dev->props.power = FB_BLANK_UNBLANK;
-
-	dev->bl_dev = bl_dev;
-	dev->client = i2c_dev;
-	dev->gpio = pdata->gpio;
-	dev->max_current = pdata->max_current;
-	dev->min_brightness = pdata->min_brightness;
-	dev->default_brightness = pdata->default_brightness;
-	dev->max_brightness = pdata->max_brightness;
-	i2c_set_clientdata(i2c_dev, dev);
-
-	if (pdata->factory_brightness <= 0)
-		dev->factory_brightness = DEFAULT_FTM_BRIGHTNESS;
-	else
-		dev->factory_brightness = pdata->factory_brightness;
-
-	mutex_init(&dev->bl_mutex);
-
-	err = device_create_file(&i2c_dev->dev,
-			&dev_attr_lm3533_level);
-	err = device_create_file(&i2c_dev->dev,
-			&dev_attr_lm3533_backlight_on_off);
-	err = device_create_file(&i2c_dev->dev,
-			&dev_attr_lm3533_exp_min_value);
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
-#if defined(CONFIG_FB_MSM_MIPI_LGIT_CMD_WVGA_INVERSE_PT_PANEL) || \
-	defined(CONFIG_FB_MSM_MIPI_LGIT_VIDEO_WVGA_INVERSE_PT_PANEL)
-	early_suspend.suspend = lm3533_early_suspend;
-	early_suspend.resume = lm3533_late_resume;
-#else
-	early_suspend.suspend = lm3533_backlight_off;
-#endif
-	register_early_suspend(&early_suspend);
-#endif /* CONFIG_HAS_EARLYSUSPEND */
-	return 0;
-}
-
-static int lm3533_remove(struct i2c_client *i2c_dev)
-{
-	struct lm3533_device *dev;
-	int gpio = main_lm3533_dev->gpio;
-
-	device_remove_file(&i2c_dev->dev, &dev_attr_lm3533_level);
-	device_remove_file(&i2c_dev->dev, &dev_attr_lm3533_backlight_on_off);
-	dev = (struct lm3533_device *)i2c_get_clientdata(i2c_dev);
-	backlight_device_unregister(dev->bl_dev);
-	i2c_set_clientdata(i2c_dev, NULL);
-
-	if (gpio_is_valid(gpio))
-		gpio_free(gpio);
-	return 0;
-}
-
-static struct i2c_driver main_lm3533_driver = {
-	.probe = lm3533_probe,
-	.remove = lm3533_remove,
-	.suspend = NULL,
-	.resume = NULL,
-	.id_table = lm3533_bl_id,
-	.driver = {
-		.name = I2C_BL_NAME,
-		.owner = THIS_MODULE,
-	},
-};
-
-
-static int __init lcd_backlight_init(void)
-{
-	static int err;
-
-	err = i2c_add_driver(&main_lm3533_driver);
-
-	return err;
-}
-
-module_init(lcd_backlight_init);
-
-MODULE_DESCRIPTION("LM3533 Backlight Control");
-MODULE_AUTHOR("Jaeseong Gim <jaeseong.gim@lge.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/msm/Kconfig b/drivers/video/msm/Kconfig
index 1c16071..590723a 100644
--- a/drivers/video/msm/Kconfig
+++ b/drivers/video/msm/Kconfig
@@ -10,6 +10,8 @@
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
+	select SYNC
+	select SW_SYNC
 	---help---
 	  Support for MSM Framebuffer.
 
@@ -39,6 +41,9 @@
 	bool
 	default n
 
+config FB_MSM_MDSS_COMMON
+	bool
+
 choice
 	prompt "MDP HW version"
 	default FB_MSM_MDP22
@@ -86,6 +91,7 @@
 
 config FB_MSM_MDSS
 	bool "MDSS HW"
+	select FB_MSM_MDSS_COMMON
 	---help---
 	The Mobile Display Sub System (MDSS) driver supports devices which
 	contain MDSS hardware block.
@@ -101,6 +107,10 @@
 
 endchoice
 
+config FB_MSM_QPIC
+	bool
+	select FB_MSM_MDSS_COMMON
+
 config FB_MSM_EBI2
 	bool
 	default n
@@ -122,7 +132,7 @@
 	default n
 
 config FB_MSM_OVERLAY
-	depends on FB_MSM_MDP40 && ANDROID_PMEM
+	depends on FB_MSM_MDP40
 	bool "MDP4 overlay support"
 	default n
 
@@ -179,11 +189,6 @@
 	select FB_MSM_MIPI_DSI
 	default n
 
-config FB_MSM_MIPI_DSI_LGIT
-	bool
-	select FB_MSM_MIPI_DSI
-	default n
-
 config FB_MSM_MIPI_DSI_RENESAS
 	bool
 	select FB_MSM_MIPI_DSI
@@ -307,12 +312,12 @@
 	select FB_MSM_LCDC_PANEL
 	default n
 
-config FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT
+config FB_MSM_LVDS_CHIMEI_WXGA
 	bool
-	select FB_MSM_MIPI_DSI_LGIT
+	select FB_MSM_LVDS
 	default n
 
-config FB_MSM_LVDS_CHIMEI_WXGA
+config FB_MSM_LVDS_FRC_FHD
 	bool
 	select FB_MSM_LVDS
 	default n
@@ -655,10 +660,6 @@
 	---help---
 	  Support for MDDI Sharp QVGA (240x320) and 128x128 dual panel
 
-config FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT_PANEL
-	bool "MIPI LG Display WXGA PT Panel"
-	select FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT
-
 config FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT_PANEL
 	bool "MIPI Toshiba WVGA PT Panel"
 	select FB_MSM_MIPI_TOSHIBA_VIDEO_WVGA_PT
@@ -745,14 +746,6 @@
 	  This will disable LCD panel
 endchoice
 
-config LGIT_VIDEO_WXGA_CABC
-	bool "Content Adaptive Backlight Control on LGIT WXGA Panel"
-	default y
-	depends on FB_MSM_MIPI_LGIT_VIDEO_WXGA_PT_PANEL
-	help
-	  Enable CABC(Content Apaptive Backlight Control) on LGIT Panel
-	  to reduce power consumption
-
 choice
 	prompt "Secondary LCD Panel"
 	depends on  FB_MSM_MDP31
@@ -965,6 +958,21 @@
 	---help---
 	  Support for EBI2 panel auto detect
 
+config FB_MSM_QPIC_ILI_QVGA_PANEL
+	bool "Qpic MIPI ILI QVGA Panel"
+	select FB_MSM_QPIC
+	---help---
+	  Support for MIPI ILI QVGA (240x320) panel
+	  ILI TECHNOLOGY 9341
+	  with on-chip full display RAM
+	  use parallel interface
+
+config FB_MSM_QPIC_PANEL_DETECT
+	bool "Qpic Panel Detect"
+	select FB_MSM_QPIC_ILI_QVGA_PANEL
+	---help---
+	  Support for Qpic panel auto detect
+
 if FB_MSM_MDSS
 	source "drivers/video/msm/mdss/Kconfig"
 endif
diff --git a/drivers/video/msm/Makefile b/drivers/video/msm/Makefile
index e49e2ba..67c6b48 100644
--- a/drivers/video/msm/Makefile
+++ b/drivers/video/msm/Makefile
@@ -1,4 +1,4 @@
-ifeq ($(CONFIG_FB_MSM_MDSS),y)
+ifeq ($(CONFIG_FB_MSM_MDSS_COMMON),y)
 obj-y += mdss/
 else
 obj-y := msm_fb.o
diff --git a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c
index 578f34e..7aeca6e 100644
--- a/drivers/video/msm/external_common.c
+++ b/drivers/video/msm/external_common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,7 +14,6 @@
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
-#include <linux/slimport.h>
 
 /* #define DEBUG */
 #define DEV_DBG_PREFIX "EXT_COMMON: "
@@ -245,6 +244,7 @@
 	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x480p240_16_9),
 	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i240_4_3),
 	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i240_16_9),
+	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1280x1024p60_5_4)
 };
 EXPORT_SYMBOL(hdmi_common_supported_video_mode_lut);
 
@@ -309,6 +309,7 @@
 	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_720x480p240_16_9),
 	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i240_4_3),
 	VFRMT_NOT_SUPPORTED(HDMI_VFRMT_1440x480i240_16_9),
+	HDMI_SETTINGS_1280x1024p60_5_4
 };
 EXPORT_SYMBOL(hdmi_mhl_supported_video_mode_lut);
 
@@ -833,10 +834,17 @@
 static ssize_t hdmi_common_rda_audio_data_block(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
-	int adb_size = external_common_state->adb_size;
-	int adb_count = 1;
-	ssize_t ret = sizeof(adb_count) + sizeof(adb_size) + adb_size;
-	char *data = buf;
+	int adb_size	= 0;
+	int adb_count	= 0;
+	ssize_t ret	= 0;
+	char *data	= buf;
+
+	if (!external_common_state)
+		return 0;
+
+	adb_count = 1;
+	adb_size  = external_common_state->adb_size;
+	ret       = sizeof(adb_count) + sizeof(adb_size) + adb_size;
 
 	if (ret > PAGE_SIZE) {
 		DEV_DBG("%s: Insufficient buffer size\n", __func__);
@@ -860,10 +868,17 @@
 static ssize_t hdmi_common_rda_spkr_alloc_data_block(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
-	int sadb_size = external_common_state->sadb_size;
-	int sadb_count = 1;
-	ssize_t ret = sizeof(sadb_count) + sizeof(sadb_size) + sadb_size;
-	char *data = buf;
+	int sadb_size	= 0;
+	int sadb_count	= 0;
+	ssize_t ret	= 0;
+	char *data	= buf;
+
+	if (!external_common_state)
+		return 0;
+
+	sadb_count = 1;
+	sadb_size  = external_common_state->sadb_size;
+	ret        = sizeof(sadb_count) + sizeof(sadb_size) + sadb_size;
 
 	if (ret > PAGE_SIZE) {
 		DEV_DBG("%s: Insufficient buffer size\n", __func__);
@@ -1062,6 +1077,8 @@
 	 89909, 119880, 148352, 119880, FALSE},
 	{HDMI_VFRMT_1280x720p120_16_9, 1280, 720, FALSE, 1650, 370, 750, 30,
 	 90000, 120000, 148500, 120000, FALSE},
+	{HDMI_VFRMT_1280x1024p60_5_4, 1280, 1024, FALSE, 1688, 408, 1066, 42,
+	 63981, 60020, 108000, 60000, FALSE},
 
 	/* All 1440 H Active */
 	{HDMI_VFRMT_1440x576i50_4_3, 1440, 576, TRUE,  1728, 288, 625, 24,
@@ -1452,38 +1469,6 @@
 		DEV_INFO("%s: *no mode* found\n", __func__);
 }
 
-static void limit_supported_video_format(uint32 *video_format)
-{
-	switch(sp_get_link_bw()){
-	case 0x0a:
-		if((*video_format == HDMI_VFRMT_1920x1080p60_16_9) ||
-			(*video_format == HDMI_VFRMT_2880x480p60_4_3)||
-			(*video_format == HDMI_VFRMT_2880x480p60_16_9) ||
-			(*video_format == HDMI_VFRMT_1280x720p120_16_9))
-
-			*video_format = HDMI_VFRMT_1280x720p60_16_9;
-		else if((*video_format == HDMI_VFRMT_1920x1080p50_16_9) ||
-			(*video_format == HDMI_VFRMT_2880x576p50_4_3)||
-			(*video_format == HDMI_VFRMT_2880x576p50_16_9) ||
-			(*video_format == HDMI_VFRMT_1280x720p100_16_9))
-
-			*video_format = HDMI_VFRMT_1280x720p50_16_9;
-		else if (*video_format == HDMI_VFRMT_1920x1080i100_16_9)
-			*video_format = HDMI_VFRMT_1920x1080i50_16_9;
-
-		else if (*video_format == HDMI_VFRMT_1920x1080i120_16_9)
-			*video_format = HDMI_VFRMT_1920x1080i60_16_9;
-		break;
-	case 0x06:
-		if(*video_format != HDMI_VFRMT_640x480p60_4_3)
-			*video_format = HDMI_VFRMT_640x480p60_4_3;
-		break;
-	case 0x14:
-	default:
-		break;
-	}
-}
-
 static void add_supported_video_format(
 	struct hdmi_disp_mode_list_type *disp_mode_list,
 	uint32 video_format)
@@ -1491,7 +1476,6 @@
 	const struct hdmi_disp_mode_timing_type *timing;
 	boolean supported = false;
 	boolean mhl_supported = true;
-	limit_supported_video_format(&video_format);
 
 	if (video_format >= HDMI_VFRMT_MAX)
 		return;
@@ -1502,7 +1486,7 @@
 		video_format, video_format_2string(video_format),
 		supported ? "Supported" : "Not-Supported");
 
-	if (mhl_is_connected()) {
+	if (mhl_is_enabled()) {
 		const struct hdmi_disp_mode_timing_type *mhl_timing =
 			hdmi_mhl_get_supported_mode(video_format);
 		mhl_supported = mhl_timing != NULL;
@@ -1589,7 +1573,8 @@
 	struct hdmi_disp_mode_list_type *disp_mode_list,
 	uint32 num_og_cea_blocks)
 {
-	uint8 len, offset, present_multi_3d, hdmi_vic_len, hdmi_3d_len;
+	uint8 len, offset, present_multi_3d, hdmi_vic_len;
+	int hdmi_3d_len;
 	uint16 structure_all, structure_mask;
 	const uint8 *vsd = num_og_cea_blocks ?
 		hdmi_edid_find_block(data_buf+0x80, DBC_START_OFFSET,
@@ -1713,7 +1698,7 @@
 	struct hdmi_disp_mode_list_type *disp_mode_list,
 	uint32 num_og_cea_blocks)
 {
-	uint8 i			= 0;
+	uint8 i			= 0, offset = 0, std_blk = 0;
 	uint32 video_format	= HDMI_VFRMT_640x480p60_4_3;
 	boolean has480p		= FALSE;
 	uint8 len;
@@ -1844,6 +1829,66 @@
 		}
 	}
 
+
+	/*
+	 * Check the Standard Timings for 1280x1024@60Hz.
+	 * There can be at most 8 Standard Timings, each 2 bytes
+	 * in size, starting at offset 0x26 of block 0.
+	 */
+	std_blk = 0;
+	offset  = 0;
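+	/*
+	 * A Standard Timing of (0x81, 0x80) decodes as:
+	 * hactive = (0x81 + 31) * 8 = 1280, aspect ratio bits = 5:4
+	 * (1024 lines), refresh = (0x80 & 0x3f) + 60 = 60Hz.
+	 */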
+	while (std_blk < 8) {
+		if ((edid_blk0[0x26 + offset] == 0x81) &&
+			(edid_blk0[0x26 + offset + 1] == 0x80)) {
+			add_supported_video_format(disp_mode_list,
+					HDMI_VFRMT_1280x1024p60_5_4);
+			break;
+		} else {
+			offset += 2;
+		}
+		std_blk++;
+	}
+
+	/* check if the EDID revision is 4 (version 1.4) */
+	if (edid_blk0[0x13] == 4) {
+		uint8  start = 0x36;
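+		/*
+		 * The four 18-byte display descriptors of block 0 are
+		 * at offsets 0x36, 0x48, 0x5a and 0x6c, hence the
+		 * 0x12 stride below.
+		 */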
+
+		i = 0;
+
+		/* Check each of the four 18-byte descriptors */
+		while (i < 4) {
+			uint8  itrate   = start;
+			uint32 header_1 = 0;
+			uint8  header_2 = 0;
+
+			/*
+			 * The first 5 bytes are the header.  If they
+			 * match 0x00 0x00 0x00 0xF7 0x00, this is an
+			 * Established Timing III descriptor.
+			 */
+			header_1 = edid_blk0[itrate++];
+			header_1 = header_1 << 8 | edid_blk0[itrate++];
+			header_1 = header_1 << 8 | edid_blk0[itrate++];
+			header_1 = header_1 << 8 | edid_blk0[itrate++];
+			header_2 = edid_blk0[itrate];
+
+			if (header_1 == 0x000000F7 &&
+			    header_2 == 0x00) {
+				itrate++; /* VESA DMT Standard Version (0x0A) */
+				itrate++; /* First set of supported formats */
+				itrate++; /* Second set of supported formats */
+				/* BIT(1) indicates 1280x1024@60Hz */
+				if (edid_blk0[itrate] & 0x02) {
+					add_supported_video_format(
+						disp_mode_list,
+						HDMI_VFRMT_1280x1024p60_5_4);
+					break;
+				}
+			}
+			i++;
+			start += 0x12;
+		}
+	}
+
-	/* mandaroty 3d format */
+	/* mandatory 3D format */
 	if (external_common_state->present_3d) {
 		if (has60hz_mode) {
@@ -1955,13 +2000,13 @@
 	memset(&external_common_state->disp_mode_list, 0,
 		sizeof(external_common_state->disp_mode_list));
 	memset(edid_buf, 0, sizeof(edid_buf));
+	external_common_state->default_res_supported = false;
 	memset(external_common_state->audio_data_block, 0,
 		sizeof(external_common_state->audio_data_block));
 	memset(external_common_state->spkr_alloc_data_block, 0,
 		sizeof(external_common_state->spkr_alloc_data_block));
 	external_common_state->adb_size = 0;
 	external_common_state->sadb_size = 0;
-	external_common_state->default_res_supported = false;
 
 	status = hdmi_common_read_edid_block(0, edid_buf);
 	if (status || !check_edid_header(edid_buf)) {
@@ -2067,7 +2112,6 @@
 	external_common_state->disp_mode_list.num_of_elements = 1;
 	external_common_state->disp_mode_list.disp_mode_list[0] =
 		external_common_state->video_resolution;
-	external_common_state->default_res_supported = true;
 	return status;
 }
 EXPORT_SYMBOL(hdmi_common_read_edid);
@@ -2077,18 +2121,15 @@
 	uint32 format =  external_common_state->video_resolution;
 	struct fb_var_screeninfo *var = &mfd->fbi->var;
 	bool changed = TRUE;
-	uint32_t userformat = 0;
-	userformat = var->reserved[3] >> 16;
 
-	if (userformat) {
-		format = userformat-1;
+	if (var->reserved[3]) {
+		format = var->reserved[3]-1;
 		DEV_DBG("reserved format is %d\n", format);
 	} else if (hdmi_prim_resolution) {
 		format = hdmi_prim_resolution - 1;
 	} else {
-		DEV_DBG("detecting resolution from %dx%d use top 2 bytes of"
-			" var->reserved[3] to specify mode", mfd->var_xres,
-			mfd->var_yres);
+		DEV_DBG("detecting resolution from %dx%d use var->reserved[3]"
+			" to specify mode", mfd->var_xres, mfd->var_yres);
 		switch (mfd->var_xres) {
 		default:
 		case  640:
@@ -2100,7 +2141,9 @@
 				: HDMI_VFRMT_720x576p50_16_9;
 			break;
 		case 1280:
-			if (mfd->var_frame_rate == 50000)
+			if (mfd->var_yres == 1024)
+				format = HDMI_VFRMT_1280x1024p60_5_4;
+			else if (mfd->var_frame_rate == 50000)
 				format = HDMI_VFRMT_1280x720p50_16_9;
 			else
 				format = HDMI_VFRMT_1280x720p60_16_9;
diff --git a/drivers/video/msm/external_common.h b/drivers/video/msm/external_common.h
index 318c95e..d117898 100644
--- a/drivers/video/msm/external_common.h
+++ b/drivers/video/msm/external_common.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -109,9 +109,19 @@
 #define HDMI_VFRMT_1440x480i240_4_3	HDMI_VFRMT_720x480i240_4_3
 #define HDMI_VFRMT_720x480i240_16_9	58
 #define HDMI_VFRMT_1440x480i240_16_9	HDMI_VFRMT_720x480i240_16_9
-#define HDMI_VFRMT_MAX			59
 #define HDMI_VFRMT_FORCE_32BIT		0x7FFFFFFF
 
+/* Video Identification Codes 65-127 are reserved for future use */
+#define HDMI_VFRMT_END			127
+
+/* VESA DMT TIMINGS */
+/* DMT ID: 23h, STD code: (81h, 80h), also a part of Established Timing III */
+#define HDMI_VFRMT_1280x1024p60_5_4	(HDMI_VFRMT_END + 1)
+#define DMT_VFRMT_END                   HDMI_VFRMT_1280x1024p60_5_4
+
+#define HDMI_VFRMT_MAX	                (DMT_VFRMT_END + 1)
+
 extern int ext_resolution;
 
 struct hdmi_disp_mode_timing_type {
@@ -185,6 +195,9 @@
 #define HDMI_SETTINGS_1920x1080p30_16_9					\
 	{HDMI_VFRMT_1920x1080p30_16_9,   1920,  88,   44,  148,  FALSE,	\
 	 1080, 4, 5, 36, FALSE, 74250, 30000, FALSE, TRUE}
+#define HDMI_SETTINGS_1280x1024p60_5_4					\
+	{HDMI_VFRMT_1280x1024p60_5_4,   1280,  48,  112,  248,  FALSE, \
+	 1024, 1, 3, 38, FALSE, 108000, 60000, FALSE, TRUE}
 
 /* A lookup table for all the supported display modes by the HDMI
  * hardware and driver.  Use HDMI_SETUP_LUT in the module init to
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 3e7a431..fa57992 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,7 +22,6 @@
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/mutex.h>
-#include <linux/slimport.h>
 #include <mach/msm_hdmi_audio.h>
 #include <mach/clk.h>
 #include <mach/msm_iomap.h>
@@ -58,8 +57,8 @@
 #define HDCP_DDC_CTRL_1		0x0124
 #define HDMI_DDC_CTRL		0x020C
 
-#define HPD_DISCONNECT_POLARITY	0
-#define HPD_CONNECT_POLARITY	1
+#define HPD_EVENT_OFFLINE 0
+#define HPD_EVENT_ONLINE  1
 
 #define SWITCH_SET_HDMI_AUDIO(d, force) \
 	do {\
@@ -648,6 +647,7 @@
 	HDMI_SETUP_LUT(1920x1080p24_16_9);
 	HDMI_SETUP_LUT(1920x1080p25_16_9);
 	HDMI_SETUP_LUT(1920x1080p30_16_9);
+	HDMI_SETUP_LUT(1280x1024p60_5_4);
 }
 
 #ifdef PORT_DEBUG
@@ -771,6 +771,14 @@
 static int hdmi_msm_read_edid(void);
 static void hdmi_msm_hpd_off(void);
 
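+/*
+ * Everything checked here is set up by probe/HPD init; the IRQ,
+ * uevent and power paths use this guard before touching HDMI
+ * registers.
+ */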
+static bool hdmi_ready(void)
+{
+	return MSM_HDMI_BASE && hdmi_msm_state &&
+		hdmi_msm_state->hdmi_app_clk &&
+		hdmi_msm_state->hpd_initialized;
+}
+
 static void hdmi_msm_send_event(boolean on)
 {
 	char *envp[2];
@@ -807,12 +815,14 @@
 		kobject_uevent(external_common_state->uevent_kobj,
 			KOBJ_OFFLINE);
 	}
+
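+	/*
+	 * Wake up any waiter (e.g. hdmi_msm_power_ctrl()) blocked on
+	 * this HPD event being processed.
+	 */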
+	if (!completion_done(&hdmi_msm_state->hpd_event_processed))
+		complete(&hdmi_msm_state->hpd_event_processed);
 }
 
 static void hdmi_msm_hpd_state_work(struct work_struct *work)
 {
-	if (!hdmi_msm_state || !hdmi_msm_state->hpd_initialized ||
-		!MSM_HDMI_BASE) {
+	if (!hdmi_ready()) {
 		DEV_ERR("hdmi: %s: ignored, probe failed\n", __func__);
 		return;
 	}
@@ -1002,8 +1012,7 @@
 	static uint32 sample_drop_int_occurred;
 	const uint32 occurrence_limit = 5;
 
-	if (!hdmi_msm_state || !hdmi_msm_state->hpd_initialized ||
-		!MSM_HDMI_BASE) {
+	if (!hdmi_ready()) {
 		DEV_DBG("ISR ignored, probe failed\n");
 		return IRQ_HANDLED;
 	}
@@ -2073,10 +2082,6 @@
 	}
 
 	external_common_state->read_edid_block = hdmi_msm_read_edid_block;
-#ifdef CONFIG_SLIMPORT_ANX7808
-	external_common_state->read_edid_block = slimport_read_edid_block;
-#endif
-
 	status = hdmi_common_read_edid();
 	if (!status)
 		DEV_DBG("EDID: successfully read\n");
@@ -3614,36 +3619,35 @@
 	return 0;
 }
 
-
-static uint8 hdmi_msm_avi_iframe_lut[][16] = {
+static uint8 hdmi_msm_avi_iframe_lut[][17] = {
-/*	480p60	480i60	576p50	576i50	720p60	 720p50	1080p60	1080i60	1080p50
-	1080i50	1080p24	1080p30	1080p25	640x480p 480p60_16_9 576p50_4_3 */
+/*	480p60	480i60	576p50	576i50	720p60	 720p50	1080p60	1080i60	1080p50
+	1080i50	1080p24	1080p30	1080p25	640x480p 480p60_16_9 576p50_4_3
+	1280x1024p60_5_4 */
 	{0x10,	0x10,	0x10,	0x10,	0x10,	 0x10,	0x10,	0x10,	0x10,
-	 0x10,	0x10,	0x10,	0x10,	0x10, 0x10, 0x10}, /*00*/
+	 0x10,	0x10,	0x10,	0x10,	0x10, 0x10, 0x10, 0x10}, /*00*/
 	{0x18,	0x18,	0x28,	0x28,	0x28,	 0x28,	0x28,	0x28,	0x28,
-	 0x28,	0x28,	0x28,	0x28,	0x18, 0x28, 0x18}, /*01*/
+	 0x28,	0x28,	0x28,	0x28,	0x18, 0x28, 0x18, 0x08}, /*01*/
 	{0x00,	0x04,	0x04,	0x04,	0x04,	 0x04,	0x04,	0x04,	0x04,
-	 0x04,	0x04,	0x04,	0x04,	0x88, 0x00, 0x04}, /*02*/
+	 0x04,	0x04,	0x04,	0x04,	0x88, 0x00, 0x04, 0x04}, /*02*/
 	{0x02,	0x06,	0x11,	0x15,	0x04,	 0x13,	0x10,	0x05,	0x1F,
-	 0x14,	0x20,	0x22,	0x21,	0x01, 0x03, 0x11}, /*03*/
+	 0x14,	0x20,	0x22,	0x21,	0x01, 0x03, 0x11, 0x00}, /*03*/
 	{0x00,	0x01,	0x00,	0x01,	0x00,	 0x00,	0x00,	0x00,	0x00,
-	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*04*/
+	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00, 0x00}, /*04*/
 	{0x00,	0x00,	0x00,	0x00,	0x00,	 0x00,	0x00,	0x00,	0x00,
-	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*05*/
+	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00, 0x00}, /*05*/
 	{0x00,	0x00,	0x00,	0x00,	0x00,	 0x00,	0x00,	0x00,	0x00,
-	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*06*/
+	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00, 0x00}, /*06*/
 	{0xE1,	0xE1,	0x41,	0x41,	0xD1,	 0xd1,	0x39,	0x39,	0x39,
-	 0x39,	0x39,	0x39,	0x39,	0xe1, 0xE1, 0x41}, /*07*/
+	 0x39,	0x39,	0x39,	0x39,	0xe1, 0xE1, 0x41, 0x01}, /*07*/
 	{0x01,	0x01,	0x02,	0x02,	0x02,	 0x02,	0x04,	0x04,	0x04,
-	 0x04,	0x04,	0x04,	0x04,	0x01, 0x01, 0x02}, /*08*/
+	 0x04,	0x04,	0x04,	0x04,	0x01, 0x01, 0x02, 0x04}, /*08*/
 	{0x00,	0x00,	0x00,	0x00,	0x00,	 0x00,	0x00,	0x00,	0x00,
-	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*09*/
+	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00, 0x00}, /*09*/
 	{0x00,	0x00,	0x00,	0x00,	0x00,	 0x00,	0x00,	0x00,	0x00,
-	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00}, /*10*/
+	 0x00,	0x00,	0x00,	0x00,	0x00, 0x00, 0x00, 0x00}, /*10*/
 	{0xD1,	0xD1,	0xD1,	0xD1,	0x01,	 0x01,	0x81,	0x81,	0x81,
-	 0x81,	0x81,	0x81,	0x81,	0x81, 0xD1, 0xD1}, /*11*/
+	 0x81,	0x81,	0x81,	0x81,	0x81, 0xD1, 0xD1, 0x01}, /*11*/
 	{0x02,	0x02,	0x02,	0x02,	0x05,	 0x05,	0x07,	0x07,	0x07,
-	 0x07,	0x07,	0x07,	0x07,	0x02, 0x02, 0x02}  /*12*/
+	 0x07,	0x07,	0x07,	0x07,	0x02, 0x02, 0x02, 0x05}  /*12*/
 };
 
 static void hdmi_msm_avi_info_frame(void)
@@ -3706,6 +3710,9 @@
 	case HDMI_VFRMT_720x576p50_4_3:
 		mode = 15;
 		break;
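+	/* index 16 selects the 1280x1024p60 column added to the AVI LUT */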
+	case HDMI_VFRMT_1280x1024p60_5_4:
+		mode = 16;
+		break;
 	default:
 		DEV_INFO("%s: mode %d not supported\n", __func__,
 			external_common_state->video_resolution);
@@ -4172,19 +4179,28 @@
 }
 #endif
 
-static void hdmi_msm_hpd_polarity_setup(bool polarity, bool trigger)
+static void hdmi_msm_hpd_polarity_setup(void)
 {
 	u32 cable_sense;
+	bool polarity = !external_common_state->hpd_state;
+	bool trigger = false;
+
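+	/*
+	 * Listen for the opposite of the current cable state so the
+	 * next connect/disconnect transition raises an HPD interrupt.
+	 */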
 	if (polarity)
 		HDMI_OUTP(0x0254, BIT(2) | BIT(1));
 	else
 		HDMI_OUTP(0x0254, BIT(2));
 
 	cable_sense = (HDMI_INP(0x0250) & BIT(1)) >> 1;
-	DEV_DBG("%s: listen=%s, sense=%s\n", __func__,
+
+	if (cable_sense == polarity)
+		trigger = true;
+
+	DEV_DBG("%s: listen=%s, sense=%s, trigger=%s\n", __func__,
 		polarity ? "connect" : "disconnect",
-		cable_sense ? "connect" : "disconnect");
-	if (trigger && (cable_sense == polarity)) {
+		cable_sense ? "connect" : "disconnect",
+		trigger ? "Yes" : "No");
+
+	if (trigger) {
 		u32 reg_val = HDMI_INP(0x0258);
 
 		/* Toggle HPD circuit to trigger HPD sense */
@@ -4285,8 +4301,8 @@
 		/* Turn on HPD HW circuit */
 		HDMI_OUTP(0x0258, hpd_ctrl | BIT(28));
 
-		/* Set up HPD_CTRL to sense HPD event */
-		hdmi_msm_hpd_polarity_setup(HPD_CONNECT_POLARITY, true);
+		/* Set HPD cable sense polarity */
+		hdmi_msm_hpd_polarity_setup();
 	}
 
 	DEV_DBG("%s: (IRQ, 5V on)\n", __func__);
@@ -4302,7 +4318,8 @@
 
 static int hdmi_msm_power_ctrl(boolean enable)
 {
-	int rc = 0;
+	int rc   = 0;
+	int time = 0;
 
 	if (enable) {
 		/*
@@ -4312,7 +4329,22 @@
 		if (hdmi_prim_display ||
 			external_common_state->hpd_feature_on) {
 			DEV_DBG("%s: Turning HPD ciruitry on\n", __func__);
+
 			rc = hdmi_msm_hpd_on();
+			if (rc) {
+				DEV_ERR("%s: HPD ON FAILED\n", __func__);
+				return rc;
+			}
+
+			/* Wait for HPD initialization to complete */
+			INIT_COMPLETION(hdmi_msm_state->hpd_event_processed);
+			time = wait_for_completion_interruptible_timeout(
+				&hdmi_msm_state->hpd_event_processed, HZ);
+			if (!time && !external_common_state->hpd_state) {
+				DEV_DBG("%s: cable not detected\n", __func__);
+				queue_work(hdmi_work_queue,
+				    &hdmi_msm_state->hpd_state_work);
+			}
 		}
 	} else {
 		DEV_DBG("%s: Turning HPD ciruitry off\n", __func__);
@@ -4325,32 +4357,33 @@
 static int hdmi_msm_power_on(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+	int ret = 0;
 	bool changed;
 
-	if (!hdmi_msm_state || !hdmi_msm_state->hdmi_app_clk || !MSM_HDMI_BASE)
-		return -ENODEV;
-
-	if (!hdmi_msm_state->hpd_initialized ||
-		!external_common_state->hpd_state) {
-		DEV_DBG("%s: HPD not initialized/cable not conn. Returning\n",
-				__func__);
-		return 0;
+	if (!hdmi_ready()) {
+		DEV_ERR("%s: HDMI/HPD not initialized\n", __func__);
+		return ret;
 	}
 
-	DEV_INFO("power: ON (%dx%d %d)\n", mfd->var_xres, mfd->var_yres,
-		mfd->var_pixclock);
+	if (!external_common_state->hpd_state) {
+		DEV_DBG("%s:HDMI cable not connected\n", __func__);
+		goto error;
+	}
 
 	/* Only start transmission with supported resolution */
 	changed = hdmi_common_get_video_format_from_drv_data(mfd);
 	if (changed || external_common_state->default_res_supported) {
-		hdmi_msm_audio_info_setup(TRUE, 0, 0, 0, FALSE);
 		mutex_lock(&external_common_state_hpd_mutex);
-		hdmi_msm_state->panel_power_on = TRUE;
 		if (external_common_state->hpd_state &&
 				hdmi_msm_is_power_on()) {
-			DEV_DBG("%s: Turning HDMI on\n", __func__);
 			mutex_unlock(&external_common_state_hpd_mutex);
+
+			DEV_INFO("HDMI cable connected %s (%dx%d, %d)\n",
+				__func__, mfd->var_xres, mfd->var_yres,
+				mfd->var_pixclock);
+
 			hdmi_msm_turn_on();
+			hdmi_msm_state->panel_power_on = TRUE;
 
 			if (hdmi_msm_state->hdcp_enable) {
 				/* Kick off HDCP Authentication */
@@ -4375,10 +4408,11 @@
 				external_common_state->video_resolution);
 	}
 
-	/* Enable HPD interrupt and listen to disconnect interrupts */
-	hdmi_msm_hpd_polarity_setup(HPD_DISCONNECT_POLARITY,
-			external_common_state->hpd_state);
-	return 0;
+error:
+	/* Set HPD cable sense polarity */
+	hdmi_msm_hpd_polarity_setup();
+
+	return ret;
 }
 
 void mhl_connect_api(boolean on)
@@ -4427,12 +4461,16 @@
  */
 static int hdmi_msm_power_off(struct platform_device *pdev)
 {
-	if (!hdmi_msm_state->hdmi_app_clk)
-		return -ENODEV;
+	int ret = 0;
+
+	if (!hdmi_ready()) {
+		DEV_ERR("%s: HDMI/HPD not initialized\n", __func__);
+		return ret;
+	}
 
 	if (!hdmi_msm_state->panel_power_on) {
-		DEV_DBG("%s: panel not on. returning\n", __func__);
-		return 0;
+		DEV_DBG("%s: panel not ON\n", __func__);
+		goto error;
 	}
 
 	if (hdmi_msm_state->hdcp_enable) {
@@ -4454,6 +4492,7 @@
 		cancel_work_sync(&hdmi_msm_state->hdcp_reauth_work);
 		cancel_work_sync(&hdmi_msm_state->hdcp_work);
 		del_timer_sync(&hdmi_msm_state->hdcp_timer);
+		hdmi_msm_state->reauth = FALSE;
 
 		hdcp_deauthenticate();
 	}
@@ -4468,11 +4507,18 @@
 	hdmi_msm_state->panel_power_on = FALSE;
 	DEV_INFO("power: OFF (audio off)\n");
 
-	/* Enable HPD interrupt and listen to connect interrupts */
-	hdmi_msm_hpd_polarity_setup(HPD_CONNECT_POLARITY,
-				!external_common_state->hpd_state);
+	if (!completion_done(&hdmi_msm_state->hpd_event_processed))
+		complete(&hdmi_msm_state->hpd_event_processed);
+error:
+	/* Set HPD cable sense polarity */
+	hdmi_msm_hpd_polarity_setup();
 
-	return 0;
+	return ret;
+}
+
+bool mhl_is_enabled(void)
+{
+	return hdmi_msm_state->is_mhl_enabled;
 }
 
 void hdmi_msm_config_hdcp_feature(void)
@@ -4669,6 +4715,12 @@
 		goto error;
 	}
 
+	/* Set the default video resolution for MHL-enabled display */
+	if (hdmi_msm_state->is_mhl_enabled) {
+		DEV_DBG("MHL Enabled. Restricting default video resolution\n");
+		external_common_state->video_resolution =
+			HDMI_VFRMT_1920x1080p30_16_9;
+	}
 	return 0;
 
 error:
@@ -4744,9 +4796,19 @@
 	if (on) {
 		rc = hdmi_msm_hpd_on();
 	} else {
-		external_common_state->hpd_state = 0;
+		if (external_common_state->hpd_state) {
+			external_common_state->hpd_state = 0;
+
+			/* Send offline event to switch OFF HDMI and HAL FD */
+			hdmi_msm_send_event(HPD_EVENT_OFFLINE);
+
+			/* Wait for HDMI and FD to close */
+			INIT_COMPLETION(hdmi_msm_state->hpd_event_processed);
+			wait_for_completion_interruptible_timeout(
+				&hdmi_msm_state->hpd_event_processed, HZ);
+		}
+
 		hdmi_msm_hpd_off();
-		SWITCH_SET_HDMI_AUDIO(0, 0);
 
 		/* Set HDMI switch node to 0 on HPD feature disable */
 		switch_set_state(&external_common_state->sdev, 0);
@@ -4842,6 +4904,7 @@
 
 	hdmi_common_init_panel_info(&hdmi_msm_panel_data.panel_info);
 	init_completion(&hdmi_msm_state->ddc_sw_done);
+	init_completion(&hdmi_msm_state->hpd_event_processed);
 	INIT_WORK(&hdmi_msm_state->hpd_state_work, hdmi_msm_hpd_state_work);
 
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_CEC_SUPPORT
diff --git a/drivers/video/msm/hdmi_msm.h b/drivers/video/msm/hdmi_msm.h
index a44ec20..ce01830 100644
--- a/drivers/video/msm/hdmi_msm.h
+++ b/drivers/video/msm/hdmi_msm.h
@@ -106,6 +106,7 @@
 
 	struct external_common_state_type common;
 	boolean is_mhl_enabled;
+	struct completion hpd_event_processed;
 };
 
 extern struct hdmi_msm_state_type *hdmi_msm_state;
diff --git a/drivers/video/msm/mddi_quickvx.c b/drivers/video/msm/mddi_quickvx.c
index e4b7736..a5d9ea3 100644
--- a/drivers/video/msm/mddi_quickvx.c
+++ b/drivers/video/msm/mddi_quickvx.c
@@ -263,22 +263,10 @@
 
 int ql_mddi_write(uint32 address, uint32 value)
 {
-	uint32 regval = 0;
 	int ret = 0;
 
 	ret = mddi_queue_register_write(address, value, TRUE, 0);
 
-	if (!ret) {
-		ret = mddi_queue_register_read(address, &regval, TRUE, 0);
-		if (regval != value) {
-			MDDI_MSG_DEBUG("\nMismatch: ql_mddi_write[0x%x]->0x%x "
-				"r0x%x\n", address, value, regval);
-		} else {
-			MDDI_MSG_DEBUG("\nMatch: ql_mddi_write[0x%x]->0x%x "
-				"r0x%x\n", address, value, regval);
-		}
-	}
-
 	return ret;
 }
 
@@ -294,8 +282,6 @@
 
 int ql_send_spi_cmd_to_lcd(uint32 index, uint32 cmd)
 {
-	int retry, ret;
-	uint32 readval;
 
 	MDDI_MSG_DEBUG("\n %s(): index 0x%x, cmd 0x%x", __func__, index, cmd);
 	/* do the index phase */
@@ -308,18 +294,6 @@
 
 	/* set start */
 	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
-	retry = 0;
-
-	do {
-		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
-
-		if (ret || ++retry > 5) {
-			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
-				"timeout at index phase, ret = %d", ret);
-			return -EIO;
-		}
-		mddi_wait(1);
-	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
 
 	/* do the command phase */
 	/* send 24 bits in the cmd phase */
@@ -331,18 +305,6 @@
 
 	/* set start */
 	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
-	retry = 0;
-
-	do {
-		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
-
-		if (ret || ++retry > 5) {
-			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
-				"timeout at cmd phase, ret = %d", ret);
-			return -EIO;
-		}
-		mddi_wait(1);
-	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
 
 	return 0;
 }
@@ -350,8 +312,6 @@
 
 int ql_send_spi_data_from_lcd(uint32 index, uint32 *value)
 {
-	int retry, ret;
-	uint32 readval;
 
 	MDDI_MSG_DEBUG("\n %s(): index 0x%x", __func__, index);
 	/* do the index phase */
@@ -364,19 +324,6 @@
 
 	/* set start */
 	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
-	retry = 0;
-
-	do {
-		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
-
-		if (ret || ++retry > 5) {
-			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
-				"timeout at index phase, ret = %d", ret);
-			return -EIO;
-		}
-		mddi_wait(1);
-	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
-
 	/* do the command phase */
 	/* send 8 bits  and read 24 bits in the cmd phase, so total 32 bits */
 	ql_mddi_write(QUICKVX_SPI_TLEN_REG, 31);
@@ -387,29 +334,9 @@
 
 	/* set start */
 	ql_mddi_write(QUICKVX_SPI_CTRL_REG,  QL_SPI_CTRL_LCD_START);
-	retry = 0;
 
-	do {
-		ret = ql_mddi_read(QUICKVX_SPI_CTRL_REG, &readval);
+	return 0;
 
-		if (ret || ++retry > 5) {
-			MDDI_MSG_DEBUG("\n ql_send_spi_cmd_to_lcd: retry "
-				"timeout at cmd phase, ret = %d", ret);
-			return -EIO;
-		}
-		mddi_wait(1);
-	} while ((readval & QL_SPI_CTRL_MASK_rTxDone) == 0);
-
-	/* value will appear at lower 16 bits */
-	ret = ql_mddi_read(QUICKVX_SPI_RX0_REG, value);
-
-	if (!ret) {
-		*value = *value & 0xffff;
-		MDDI_MSG_DEBUG("\n QUICKVX_SPI_RX0_REG value = 0x%x", *value);
-	} else
-		MDDI_MSG_DEBUG("\n Read QUICKVX_SPI_RX0_REG Failed");
-
-	return ret;
 }
 
 /* Global Variables */
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index 9c6563e..63a842d 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -2,7 +2,7 @@
  *
  * MSM MDP Interface (used by framebuffer core)
  *
- * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
  * Copyright (C) 2007 Google Incorporated
  *
  * This software is licensed under the terms of the GNU General Public
@@ -51,11 +51,11 @@
 static struct clk *mdp_lut_clk;
 int mdp_rev;
 int mdp_iommu_split_domain;
-u32 mdp_max_clk = 266667000;
+u32 mdp_max_clk = 200000000;
 u64 mdp_max_bw = 2000000000;
 
 static struct platform_device *mdp_init_pdev;
-static struct regulator *footswitch;
+static struct regulator *footswitch, *dsi_pll_vdda, *dsi_pll_vddio;
 static unsigned int mdp_footswitch_on;
 
 struct completion mdp_ppp_comp;
@@ -483,7 +483,6 @@
 
 DEFINE_MUTEX(mdp_lut_push_sem);
 static int mdp_lut_i;
-
 static int mdp_lut_hw_update(struct fb_cmap *cmap)
 {
 	int i;
@@ -564,43 +563,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_UPDATE_LCDC_LUT
-int mdp_preset_lut_update_lcdc(struct fb_cmap *cmap, uint32_t *internal_lut)
-{
-	uint32_t out;
-	int i;
-	u16 r, g, b;
-
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	mdp_clk_ctrl(1);
-
-	for (i = 0; i < cmap->len; i++) {
-		r = lut2r(internal_lut[i]);
-		g = lut2g(internal_lut[i]);
-		b = lut2b(internal_lut[i]);
-#ifdef CONFIG_LCD_KCAL
-		r = scaled_by_kcal(r, *(cmap->red));
-		g = scaled_by_kcal(g, *(cmap->green));
-		b = scaled_by_kcal(b, *(cmap->blue));
-#endif
-		MDP_OUTP(MDP_BASE + 0x94800 +
-			(0x400*mdp_lut_i) + cmap->start*4 + i*4,
-				((g & 0xff) |
-				 ((b & 0xff) << 8) |
-				 ((r & 0xff) << 16)));
-	}
-
-	/*mask off non LUT select bits*/
-	out = inpdw(MDP_BASE + 0x90070) & ~((0x1 << 10) | 0x7);
-	MDP_OUTP(MDP_BASE + 0x90070, (mdp_lut_i << 10) | 0x7 | out);
-	mdp_clk_ctrl(0);
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-	mdp_lut_i = (mdp_lut_i + 1)%2;
-
-	return 0;
-}
-#endif
-
 static void mdp_lut_enable(void)
 {
 	uint32_t out;
@@ -951,10 +913,12 @@
 	int ret = 0;
 
 	mutex_lock(&mgmt->mdp_hist_mutex);
-	if (mgmt->mdp_is_hist_start && !mgmt->mdp_is_hist_data && en)
-		ret = mdp_histogram_enable(mgmt);
-	else if (mgmt->mdp_is_hist_data && !en)
-		ret = mdp_histogram_disable(mgmt);
+	if (mgmt->mdp_is_hist_start == TRUE) {
+		if (en)
+			ret = mdp_histogram_enable(mgmt);
+		else
+			ret = mdp_histogram_disable(mgmt);
+	}
 	mutex_unlock(&mgmt->mdp_hist_mutex);
 
 	if (en == false)
@@ -1052,6 +1016,7 @@
 	mgmt->mdp_is_hist_start = FALSE;
 
 	if (!mfd->panel_power_on) {
+		mgmt->mdp_is_hist_data = FALSE;
 		if (mgmt->hist != NULL) {
 			mgmt->hist = NULL;
 			complete(&mgmt->mdp_hist_comp);
@@ -1452,7 +1417,7 @@
 
 	if (frame_rate == 0) {
 		frame_rate = DEFAULT_FRAME_RATE;
-		pr_debug("%s frame rate=%d is default\n", __func__, frame_rate);
+		pr_warn("%s frame rate=%d is default\n", __func__, frame_rate);
 	}
 	pr_debug("%s frame rate=%d total_pixel=%d, pixel_rate=%d\n", __func__,
 		frame_rate, total_pixel, pixel_rate);
@@ -1727,7 +1692,9 @@
 			mdp_clk_cnt--;
 			if (mdp_clk_cnt == 0)
 				mdp_clk_disable_unprepare();
-		}
+		} else {
+			pr_err("%s: %d: mdp clk off is invalid\n",
+			       __func__, __LINE__);
+		}
 	}
 	pr_debug("%s: on=%d cnt=%d\n", __func__, on, mdp_clk_cnt);
 	mutex_unlock(&mdp_suspend_mutex);
@@ -1932,7 +1899,7 @@
 	unsigned long flag;
 	struct mdp_hist_mgmt *mgmt = NULL;
 	int i, ret;
-	int vsync_isr;
+	int vsync_isr, disabled_clocks;
 	/* Ensure all the register write are complete */
 	mb();
 
@@ -1955,17 +1922,21 @@
 	if (mdp_interrupt & MDP_PRIM_RDPTR) {
 		spin_lock_irqsave(&mdp_spin_lock, flag);
 		vsync_isr = vsync_cntrl.vsync_irq_enabled;
-		if (!vsync_isr) {
+		disabled_clocks = vsync_cntrl.disabled_clocks;
+		if ((!vsync_isr && !vsync_cntrl.disabled_clocks)
+			|| (!vsync_isr && vsync_cntrl.vsync_dma_enabled)) {
 			mdp_intr_mask &= ~MDP_PRIM_RDPTR;
 			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
 			mdp_disable_irq_nosync(MDP_VSYNC_TERM);
 			vsync_cntrl.disabled_clocks = 1;
-		} else {
+		} else if (vsync_isr) {
 			vsync_isr_handler();
 		}
+		vsync_cntrl.vsync_dma_enabled = 0;
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
 
-		if (!vsync_isr)
+		complete(&vsync_cntrl.vsync_comp);
+		if (!vsync_isr && !disabled_clocks)
 			mdp_pipe_ctrl(MDP_CMD_BLOCK,
 				MDP_BLOCK_POWER_OFF, TRUE);
 
@@ -2081,6 +2052,7 @@
 			spin_unlock_irqrestore(&mdp_spin_lock, flag);
 			mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_OFF,
 				TRUE);
+			mdp_disable_irq_nosync(MDP_DMA2_TERM);
 			complete(&dma->comp);
 		}
 #endif
@@ -2132,6 +2104,7 @@
 	dma2_data.dmap_busy = FALSE;
 	dma2_data.waiting = FALSE;
 	init_completion(&dma2_data.comp);
+	init_completion(&vsync_cntrl.vsync_comp);
 	init_completion(&dma2_data.dmap_comp);
 	sema_init(&dma2_data.mutex, 1);
 	mutex_init(&dma2_data.ov_mutex);
@@ -2244,12 +2217,6 @@
 	},
 };
 
-static int mdp_fps_level_change(struct platform_device *pdev, u32 fps_level)
-{
-	int ret = 0;
-	ret = panel_next_fps_level_change(pdev, fps_level);
-	return ret;
-}
 static int mdp_off(struct platform_device *pdev)
 {
 	int ret = 0;
@@ -2269,8 +2236,8 @@
 			mfd->panel.type == LCDC_PANEL ||
 			mfd->panel.type == LVDS_PANEL)
 		mdp4_lcdc_off(pdev);
-	else if (mfd->panel.type == WRITEBACK_PANEL)
-		mdp4_overlay_writeback_off(pdev);
+	else if (mfd->panel.type == MDDI_PANEL)
+		mdp4_mddi_off(pdev);
 
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	ret = panel_next_off(pdev);
@@ -2296,9 +2263,6 @@
 }
 
 #endif
-
-static int mdp_bus_scale_restore_request(void);
-
 static int mdp_on(struct platform_device *pdev)
 {
 	int ret = 0;
@@ -2307,26 +2271,9 @@
 
 	pr_debug("%s:+\n", __func__);
 
-	if (!(mfd->cont_splash_done)) {
-		if (mfd->panel.type == MIPI_VIDEO_PANEL)
-			mdp4_dsi_video_splash_done();
-
-		/* Clks are enabled in probe.
-		Disabling clocks now */
-		mdp_clk_ctrl(0);
-		mfd->cont_splash_done = 1;
-	}
-
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
-	ret = panel_next_on(pdev);
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
-
 	if (mdp_rev >= MDP_REV_40) {
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		mdp_clk_ctrl(1);
-		mdp_bus_scale_restore_request();
 		mdp4_hw_init();
 		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
 		if (mfd->panel.type == MIPI_CMD_PANEL) {
@@ -2338,19 +2285,27 @@
 				mfd->panel.type == LCDC_PANEL ||
 				mfd->panel.type == LVDS_PANEL) {
 			mdp4_lcdc_on(pdev);
+		} else if (mfd->panel.type == MDDI_PANEL) {
+			mdp_vsync_cfg_regs(mfd, FALSE);
+			mdp4_mddi_on(pdev);
 		}
 
 		mdp_clk_ctrl(0);
-		mdp4_overlay_reset();
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 	}
 
 	if (mdp_rev == MDP_REV_303 && mfd->panel.type == MIPI_CMD_PANEL) {
 
 		vsync_cntrl.dev = mfd->fbi->dev;
-		atomic_set(&vsync_cntrl.suspend, 1);
+		atomic_set(&vsync_cntrl.suspend, 0);
+		atomic_set(&vsync_cntrl.vsync_resume, 1);
 	}
 
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	ret = panel_next_on(pdev);
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
 	mdp_histogram_ctrl_all(TRUE);
 
 	if (ret == 0)
@@ -2438,21 +2393,18 @@
 		mdp_bus_usecases[i].num_paths = 1;
 		mdp_bus_usecases[i].vectors = &mdp_bus_vectors[i];
 	}
-
+	mdp_bus_scale_handle = msm_bus_scale_register_client(bus_pdata);
 	if (!mdp_bus_scale_handle) {
-		mdp_bus_scale_handle = msm_bus_scale_register_client(bus_pdata);
-		if (!mdp_bus_scale_handle) {
-			pr_err("%s: not able to get bus scale!\n", __func__);
-			return -ENOMEM;
-		}
+		pr_err("%s: not able to get bus scale!\n", __func__);
+		return -ENOMEM;
 	}
-
 	return 0;
 }
 
-static int bus_index = 1;
 int mdp_bus_scale_update_request(u64 ab, u64 ib)
 {
+	static int bus_index = 1;
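+	/* alternate use-case slots so each request is seen as a change */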
+
 	if (mdp_bus_scale_handle < 1) {
 		pr_err("%s invalid bus handle\n", __func__);
 		return -EINVAL;
@@ -2478,20 +2430,6 @@
 	return msm_bus_scale_client_update_request
 		(mdp_bus_scale_handle, bus_index);
 }
-static int mdp_bus_scale_restore_request(void)
-{
-	pr_debug("%s: index=%d ab=%llu ib=%llu\n", __func__, bus_index,
-		mdp_bus_usecases[bus_index].vectors->ab,
-		mdp_bus_usecases[bus_index].vectors->ib);
-	return mdp_bus_scale_update_request
-		(mdp_bus_usecases[bus_index].vectors->ab,
-		 mdp_bus_usecases[bus_index].vectors->ib);
-}
-#else
-static int mdp_bus_scale_restore_request(void)
-{
-	return 0;
-}
 #endif
 DEFINE_MUTEX(mdp_clk_lock);
 int mdp_set_core_clk(u32 rate)
@@ -2540,18 +2478,40 @@
 	}
 	disable_irq(mdp_irq);
 
+	dsi_pll_vdda = regulator_get(&pdev->dev, "dsi_pll_vdda");
+	if (IS_ERR(dsi_pll_vdda)) {
+		dsi_pll_vdda = NULL;
+	} else {
+		if (mdp_rev == MDP_REV_42 || mdp_rev == MDP_REV_44) {
+			ret = regulator_set_voltage(dsi_pll_vdda, 1200000,
+				1200000);
+			if (ret) {
+				pr_err("set_voltage failed for dsi_pll_vdda, ret=%d\n",
+					ret);
+			}
+		}
+	}
+
+	dsi_pll_vddio = regulator_get(&pdev->dev, "dsi_pll_vddio");
+	if (IS_ERR(dsi_pll_vddio)) {
+		dsi_pll_vddio = NULL;
+	} else {
+		if (mdp_rev == MDP_REV_42) {
+			ret = regulator_set_voltage(dsi_pll_vddio, 1800000,
+				1800000);
+			if (ret) {
+				pr_err("set_voltage failed for dsi_pll_vddio, ret=%d\n",
+					ret);
+			}
+		}
+	}
+
 	footswitch = regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR(footswitch))
+	if (IS_ERR(footswitch)) {
 		footswitch = NULL;
-	else {
+	} else {
 		regulator_enable(footswitch);
 		mdp_footswitch_on = 1;
-
-		if (mdp_rev == MDP_REV_42 && !cont_splashScreen) {
-			regulator_disable(footswitch);
-			msleep(20);
-			regulator_enable(footswitch);
-		}
 	}
 
 	mdp_clk = clk_get(&pdev->dev, "core_clk");
@@ -2602,6 +2562,17 @@
 
 	MSM_FB_DEBUG("mdp_clk: mdp_clk=%d\n", (int)clk_get_rate(mdp_clk));
 #endif
+
+	if (mdp_rev == MDP_REV_42 && !cont_splashScreen) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		/* DSI Video Timing generator disable */
+		outpdw(MDP_BASE + 0xE0000, 0x0);
+		/* Clear MDP Interrupt Enable register */
+		outpdw(MDP_BASE + 0x50, 0x0);
+		/* Set Overlay Proc 0 to reset state */
+		outpdw(MDP_BASE + 0x10004, 0x3);
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	}
 	return 0;
 }
 
@@ -2620,7 +2591,6 @@
 #if defined(CONFIG_FB_MSM_MIPI_DSI) && defined(CONFIG_FB_MSM_MDP40)
 	struct mipi_panel_info *mipi;
 #endif
-	static int contSplash_update_done;
 
 	if ((pdev->id == 0) && (pdev->num_resources > 0)) {
 		mdp_init_pdev = pdev;
@@ -2650,6 +2620,8 @@
 		if (rc)
 			return rc;
 
+		mdp_clk_ctrl(1);
+
 		mdp_hw_version();
 
 		/* initializing mdp hw */
@@ -2657,12 +2629,16 @@
 		if (!(mdp_pdata->cont_splash_enabled))
 			mdp4_hw_init();
 #else
-		mdp_hw_init();
+		mdp_hw_init(mdp_pdata->cont_splash_enabled);
 #endif
 
 #ifdef CONFIG_FB_MSM_OVERLAY
 		mdp_hw_cursor_init();
 #endif
+
+		if (!(mdp_pdata->cont_splash_enabled))
+			mdp_clk_ctrl(0);
+
 		mdp_resource_initialized = 1;
 		return 0;
 	}
@@ -2690,21 +2666,6 @@
 	mfd->mdp_rev = mdp_rev;
 	mfd->vsync_init = NULL;
 
-	if (mdp_pdata) {
-		if (mdp_pdata->cont_splash_enabled) {
-			mfd->cont_splash_done = 0;
-			if (!contSplash_update_done) {
-				if (mfd->panel.type == MIPI_VIDEO_PANEL ||
-				    mfd->panel.type == LCDC_PANEL)
-					mdp_pipe_ctrl(MDP_CMD_BLOCK,
-						MDP_BLOCK_POWER_ON, FALSE);
-				mdp_clk_ctrl(1);
-				contSplash_update_done = 1;
-			}
-		} else
-			mfd->cont_splash_done = 1;
-	}
-
 	mfd->ov0_wb_buf = MDP_ALLOC(sizeof(struct mdp_buf_type));
 	mfd->ov1_wb_buf = MDP_ALLOC(sizeof(struct mdp_buf_type));
 	memset((void *)mfd->ov0_wb_buf, 0, sizeof(struct mdp_buf_type));
@@ -2714,10 +2675,12 @@
 		mfd->ov0_wb_buf->size = mdp_pdata->ov0_wb_size;
 		mfd->ov1_wb_buf->size = mdp_pdata->ov1_wb_size;
 		mfd->mem_hid = mdp_pdata->mem_hid;
+		mfd->avtimer_phy = mdp_pdata->avtimer_phy;
 	} else {
 		mfd->ov0_wb_buf->size = 0;
 		mfd->ov1_wb_buf->size = 0;
 		mfd->mem_hid = 0;
+		mfd->avtimer_phy = 0;
 	}
 
 	/* initialize Post Processing data*/
@@ -2733,11 +2696,57 @@
 		rc = -ENOMEM;
 		goto mdp_probe_err;
 	}
+
+	if (mdp_pdata) {
+		if (mdp_pdata->cont_splash_enabled &&
+				 mfd->panel_info.pdest == DISPLAY_1) {
+			char *cp;
+			uint32 bpp = 3;
+			/*
+			 * Read the panel WxH and calculate the splash
+			 * screen size.
+			 */
+			mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
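+			/*
+			 * DMA_P size register: height in bits 27:16,
+			 * width in bits 11:0.  0x90008 is the DMA_P
+			 * buffer address programmed by the bootloader.
+			 */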
+
+			mdp_pdata->splash_screen_size =
+				inpdw(MDP_BASE + 0x90004);
+			mdp_pdata->splash_screen_size =
+				(((mdp_pdata->splash_screen_size >> 16) &
+				  0x00000FFF) * (
+					  mdp_pdata->splash_screen_size &
+					  0x00000FFF)) * bpp;
+
+			mdp_pdata->splash_screen_addr =
+				inpdw(MDP_BASE + 0x90008);
+
+			mfd->copy_splash_buf = dma_alloc_coherent(NULL,
+					mdp_pdata->splash_screen_size,
+					(dma_addr_t *) &(mfd->copy_splash_phys),
+					GFP_KERNEL);
+
+			if (!mfd->copy_splash_buf) {
+				pr_err("DMA ALLOC FAILED for SPLASH\n");
+				return -ENOMEM;
+			}
+			cp = (char *)ioremap(
+					mdp_pdata->splash_screen_addr,
+					mdp_pdata->splash_screen_size);
+			if (!cp) {
+				pr_err("IOREMAP FAILED for SPLASH\n");
+				return -ENOMEM;
+			}
+			memcpy(mfd->copy_splash_buf, cp,
+					mdp_pdata->splash_screen_size);
+
+			MDP_OUTP(MDP_BASE + 0x90008,
+					mfd->copy_splash_phys);
+		}
+
+		mfd->cont_splash_done = (1 - mdp_pdata->cont_splash_enabled);
+	}
+
 	/* data chain */
 	pdata = msm_fb_dev->dev.platform_data;
 	pdata->on = mdp_on;
 	pdata->off = mdp_off;
-	pdata->fps_level_change = mdp_fps_level_change;
 	pdata->late_init = NULL;
 	pdata->next = pdev;
 
@@ -2754,6 +2763,9 @@
 			  mdp_vsync_resync_workqueue_handler);
 		mfd->hw_refresh = FALSE;
 
+		if (mfd->panel.type == MDDI_PANEL)
+			mdp4_mddi_rdptr_init(0);
+
 		if (mfd->panel.type == EXT_MDDI_PANEL) {
 			/* 15 fps -> 66 msec */
 			mfd->refresh_timer_duration = (66 * HZ / 1000);
@@ -2908,7 +2920,7 @@
 		pdata->on = mdp4_dtv_on;
 		pdata->off = mdp4_dtv_off;
 		mfd->hw_refresh = TRUE;
-		mfd->cursor_update = mdp_hw_cursor_update;
+		mfd->cursor_update = mdp_hw_cursor_sync_update;
 		mfd->dma_fnc = mdp4_dtv_overlay;
 		mfd->dma = &dma_e_data;
 		mfd->do_histogram = mdp_do_histogram;
@@ -3029,7 +3041,9 @@
 		return -ENOMEM;
 
 	/* req bus bandwidth immediately */
-	mdp_bus_scale_update_request(mdp_max_bw, mdp_max_bw);
+	if (!(mfd->cont_splash_done))
+		mdp_bus_scale_update_request
+			(MDP_BUS_SCALE_INIT, MDP_BUS_SCALE_INIT);
 #endif
 
 	/* set driver data */
@@ -3045,9 +3059,13 @@
 
 	pdev_list[pdev_list_cnt++] = pdev;
 	mdp4_extn_disp = 0;
-
-	if (mfd->vsync_init != NULL) {
-		mfd->vsync_init(0);
+	/*
+	 * A vsync_init call is not required for MDP3 targets but is
+	 * required for MDP4 targets.
+	 */
+	if ((mfd->vsync_init != NULL) || (mdp_rev < MDP_REV_40)) {
+		if (mdp_rev >= MDP_REV_40)
+			mfd->vsync_init(0);
 
 		if (!mfd->vsync_sysfs_created) {
 			mfd->dev_attr.attr.name = "vsync_event";
@@ -3073,10 +3091,8 @@
       mdp_probe_err:
 	platform_device_put(msm_fb_dev);
 #ifdef CONFIG_MSM_BUS_SCALING
-	if (mdp_bus_scale_handle > 0) {
+	if (mdp_bus_scale_handle > 0)
 		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
-		mdp_bus_scale_handle = 0;
-	}
 #endif
 	return rc;
 }
@@ -3090,6 +3106,17 @@
 		return;
 	}
 
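+	/*
+	 * Keep the DSI PLL supplies and clocks up across the
+	 * footswitch toggle so the PLL does not lose state while the
+	 * MDP rail bounces.
+	 */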
+	if (dsi_pll_vddio)
+		regulator_enable(dsi_pll_vddio);
+
+	if (dsi_pll_vdda)
+		regulator_enable(dsi_pll_vdda);
+
+	mipi_dsi_prepare_clocks();
+	mipi_dsi_ahb_ctrl(1);
+	mipi_dsi_phy_ctrl(1);
+	mipi_dsi_clk_enable();
+
 	if (on && !mdp_footswitch_on) {
 		pr_debug("Enable MDP FS\n");
 		regulator_enable(footswitch);
@@ -3100,9 +3127,31 @@
 		mdp_footswitch_on = 0;
 	}
 
+	mipi_dsi_clk_disable();
+	mipi_dsi_phy_ctrl(0);
+	mipi_dsi_ahb_ctrl(0);
+	mipi_dsi_unprepare_clocks();
+
+	if (dsi_pll_vdda)
+		regulator_disable(dsi_pll_vdda);
+
+	if (dsi_pll_vddio)
+		regulator_disable(dsi_pll_vddio);
+
 	mutex_unlock(&mdp_suspend_mutex);
 }
 
+void mdp_free_splash_buffer(struct msm_fb_data_type *mfd)
+{
+	if (mfd->copy_splash_buf) {
+		dma_free_coherent(NULL,	mdp_pdata->splash_screen_size,
+			mfd->copy_splash_buf,
+			(dma_addr_t) mfd->copy_splash_phys);
+
+		mfd->copy_splash_buf = NULL;
+	}
+}
+
 #ifdef CONFIG_PM
 static void mdp_suspend_sub(void)
 {
@@ -3145,6 +3194,7 @@
 {
 	mdp_suspend_sub();
 #ifdef CONFIG_FB_MSM_DTV
+	mdp4_solidfill_commit(MDP4_MIXER1);
 	mdp4_dtv_set_black_screen();
 #endif
 	mdp_footswitch_ctrl(FALSE);
@@ -3172,10 +3222,8 @@
 	iounmap(msm_mdp_base);
 	pm_runtime_disable(&pdev->dev);
 #ifdef CONFIG_MSM_BUS_SCALING
-	if (mdp_bus_scale_handle > 0) {
+	if (mdp_bus_scale_handle > 0)
 		msm_bus_scale_unregister_client(mdp_bus_scale_handle);
-		mdp_bus_scale_handle = 0;
-	}
 #endif
 	return 0;
 }
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index 730b1d5..6574d9e 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -73,6 +73,7 @@
 #define MDPOP_SHARPENING	BIT(11) /* enable sharpening */
 #define MDPOP_BLUR		BIT(12) /* enable blur */
 #define MDPOP_FG_PM_ALPHA       BIT(13)
+#define MDPOP_LAYER_IS_FG       BIT(14)
 #define MDP_ALLOC(x)  kmalloc(x, GFP_KERNEL)
 
 struct mdp_buf_type {
@@ -94,9 +95,11 @@
 
 struct vsync {
 	ktime_t vsync_time;
+	struct completion vsync_comp;
 	struct device *dev;
 	struct work_struct vsync_work;
 	int vsync_irq_enabled;
+	int vsync_dma_enabled;
 	int disabled_clocks;
 	struct completion vsync_wait;
 	atomic_t suspend;
@@ -737,7 +740,7 @@
 #define MDP_DMA_P_LUT_C2_EN   BIT(2)
 #define MDP_DMA_P_LUT_POST    BIT(4)
 
-void mdp_hw_init(void);
+void mdp_hw_init(int splash);
 int mdp_ppp_pipe_wait(void);
 void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd);
 void mdp_clk_ctrl(int on);
@@ -782,7 +785,7 @@
 int mdp_lcdc_on(struct platform_device *pdev);
 int mdp_lcdc_off(struct platform_device *pdev);
 void mdp_lcdc_update(struct msm_fb_data_type *mfd);
-
+void mdp_free_splash_buffer(struct msm_fb_data_type *mfd);
 #ifdef CONFIG_FB_MSM_MDP303
 int mdp_dsi_video_on(struct platform_device *pdev);
 int mdp_dsi_video_off(struct platform_device *pdev);
@@ -800,6 +803,10 @@
 {
 	return 0;
 }
+static inline int mdp4_mddi_off(struct platform_device *pdev)
+{
+	return 0;
+}
 static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
 {
 	return 0;
@@ -812,6 +819,19 @@
 {
 	return 0;
 }
+static inline int mdp4_mddi_on(struct platform_device *pdev)
+{
+	return 0;
+}
+#endif
+
+
+#ifndef CONFIG_FB_MSM_MDDI
+static inline void mdp4_mddi_rdptr_init(int cndx)
+{
+	/* empty */
+}
+
 #endif
 
 void set_cont_splashScreen_status(int);
@@ -914,7 +934,7 @@
 
 int mdp_ppp_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req);
 int mdp_ppp_v4l2_overlay_clear(void);
-int mdp_ppp_v4l2_overlay_play(struct fb_info *info,
+int mdp_ppp_v4l2_overlay_play(struct fb_info *info, bool bUserPtr,
 	unsigned long srcp0_addr, unsigned long srcp0_size,
 	unsigned long srcp1_addr, unsigned long srcp1_size);
 void mdp_update_pm(struct msm_fb_data_type *mfd, ktime_t pre_vsync);
@@ -929,25 +949,4 @@
 	/* empty */
 }
 #endif
-
-#ifdef CONFIG_UPDATE_LCDC_LUT
-#define R_MASK    0x00ff0000
-#define G_MASK    0x000000ff
-#define B_MASK    0x0000ff00
-#define R_SHIFT   16
-#define G_SHIFT   0
-#define B_SHIFT   8
-#define lut2r(lut) ((lut & R_MASK) >> R_SHIFT)
-#define lut2g(lut) ((lut & G_MASK) >> G_SHIFT)
-#define lut2b(lut) ((lut & B_MASK) >> B_SHIFT)
-
-#ifdef CONFIG_LCD_KCAL
-#define NUM_QLUT  256
-#define MAX_KCAL_V (NUM_QLUT-1)
-#define scaled_by_kcal(rgb, kcal) \
-		(((((unsigned int)(rgb) * (unsigned int)(kcal)) << 16) / \
-		(unsigned int)MAX_KCAL_V) >> 16)
-#endif
-int mdp_preset_lut_update_lcdc(struct fb_cmap *cmap, uint32_t *internal_lut);
-#endif
 #endif /* MDP_H */
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index 0c6d545..a3d8d7e 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,11 +28,14 @@
 extern char *mmss_cc_base;	/* mutimedia sub system clock control */
 extern spinlock_t dsi_clk_lock;
 extern u32 mdp_max_clk;
+extern u32 dbg_force_ov0_blt;
+extern u32 dbg_force_ov1_blt;
 
 extern u64 mdp_max_bw;
 #define MDP4_BW_AB_FACTOR (115)	/* 1.15 */
 #define MDP4_BW_IB_FACTOR (125)	/* 1.25 */
 #define MDP_BUS_SCALE_AB_STEP (0x4000000)
+#define MDP_BUS_SCALE_INIT (0x10000000)
 
 #define MDP4_OVERLAYPROC0_BASE	0x10000
 #define MDP4_OVERLAYPROC1_BASE	0x18000
@@ -92,17 +95,6 @@
 #define MDP4_PANEL_WRITEBACK		BIT(6)
 
 enum {
-	OVERLAY_BLT_SWITCH_TG_ON,
-	OVERLAY_BLT_SWITCH_TG_OFF,
-	OVERLAY_BLT_ALWAYS_ON
-};
-
-enum {
-	OVERLAY_MODE_NONE,
-	OVERLAY_MODE_BLT
-};
-
-enum {
 	OVERLAY_REFRESH_ON_DEMAND,
 	OVERLAY_REFRESH_VSYNC,
 	OVERLAY_REFRESH_VSYNC_HALF,
@@ -284,6 +276,7 @@
 	struct mdp4_overlay_pipe *solidfill_pipe;
 };
 
+
 struct mdp4_overlay_pipe {
 	uint32 pipe_used;
 	uint32 pipe_type;		/* rgb, video/graphic */
@@ -365,12 +358,11 @@
 	uint32 ov_cnt;
 	uint32 dmap_cnt;
 	uint32 dmae_cnt;
-	uint32 blt_end;
+	uint32 blt_end;	/* used by mddi only */
 	uint32 blt_ov_koff;
 	uint32 blt_ov_done;
 	uint32 blt_dmap_koff;
 	uint32 blt_dmap_done;
-	uint32 blt_forced;
 	uint32 req_clk;
 	uint64 bw_ab_quota;
 	uint64 bw_ib_quota;
@@ -487,7 +479,7 @@
 int mdp4_overlay_dtv_unset(struct msm_fb_data_type *mfd,
 			struct mdp4_overlay_pipe *pipe);
 void mdp4_dmae_done_dtv(void);
-void mdp4_dtv_wait4vsync(int cndx);
+void mdp4_dtv_wait4vsync(int cndx, long long *vtime);
 void mdp4_dtv_vsync_ctrl(struct fb_info *info, int enable);
 void mdp4_dtv_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
 #else
@@ -520,7 +512,7 @@
 {
     /* empty */
 }
-static inline void mdp4_dtv_wait4vsync(int cndx)
+static inline void mdp4_dtv_wait4vsync(int cndx, long long *vtime)
 {
     /* empty */
 }
@@ -561,9 +553,6 @@
 void mdp4_overlay0_done_dsi_cmd(int cndx);
 void mdp4_primary_rdptr(void);
 void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd);
-int mdp4_overlay_commit(struct fb_info *info);
-int mdp4_dsi_video_pipe_commit(int cndx, int wait);
-int mdp4_dsi_cmd_pipe_commit(int cndx, int wait);
 int mdp4_lcdc_pipe_commit(int cndx, int wait);
 int mdp4_dtv_pipe_commit(int cndx, int wait);
 int mdp4_dsi_cmd_update_cnt(int cndx);
@@ -596,16 +585,18 @@
 int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe);
 int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req);
 int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req);
-int mdp4_overlay_wait4vsync(struct fb_info *info);
+int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime);
 int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable);
 int mdp4_overlay_unset(struct fb_info *info, int ndx);
 int mdp4_overlay_unset_mixer(int mixer);
 int mdp4_overlay_play_wait(struct fb_info *info,
 	struct msmfb_overlay_data *req);
 int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req);
+int mdp4_overlay_commit(struct fb_info *info);
 struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer);
 void mdp4_overlay_dma_commit(int mixer);
 void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe);
+void mdp4_solidfill_commit(int mixer);
 void mdp4_mixer_stage_commit(int mixer);
 void mdp4_dsi_cmd_do_update(int cndx, struct mdp4_overlay_pipe *pipe);
 void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
@@ -618,19 +609,21 @@
 int mdp4_overlay_pipe_staged(struct mdp4_overlay_pipe *pipe);
 void mdp4_lcdc_primary_vsyn(void);
 void mdp4_overlay0_done_lcdc(int cndx);
-void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma);
+void mdp4_overlay0_done_mddi(int cndx);
 void mdp4_dma_p_done_mddi(struct mdp_dma_data *dma);
 void mdp4_dmap_done_dsi_cmd(int cndx);
+void mdp4_dmap_done_mddi(int cndx);
 void mdp4_dmap_done_dsi_video(int cndx);
 void mdp4_dmap_done_lcdc(int cndx);
 void mdp4_overlay1_done_dtv(void);
 void mdp4_overlay1_done_atv(void);
 void mdp4_primary_vsync_lcdc(void);
 void mdp4_external_vsync_dtv(void);
-void mdp4_lcdc_wait4vsync(int cndx);
+void mdp4_lcdc_wait4vsync(int cndx, long long *vtime);
 void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
 				struct mdp4_overlay_pipe *pipe);
 void mdp4_mddi_overlay_dmas_restore(void);
+void mdp4_dtv_set_avparams(struct mdp4_overlay_pipe *pipe, int id);
 
 #ifndef CONFIG_FB_MSM_MIPI_DSI
 void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd);
@@ -694,6 +687,12 @@
 void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req);
 void mdp4_dsi_video_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
+static inline void mdp4_mddi_blt_start(struct msm_fb_data_type *mfd)
+{
+}
+static inline void mdp4_mddi_blt_stop(struct msm_fb_data_type *mfd)
+{
+}
 
 #ifdef CONFIG_FB_MSM_MDP40
 static inline void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
@@ -702,6 +701,8 @@
 }
 #endif
 #else     /* CONFIG_FB_MSM_MIPI_DSI */
+void mdp4_mddi_blt_start(struct msm_fb_data_type *mfd);
+void mdp4_mddi_blt_stop(struct msm_fb_data_type *mfd);
 int mdp4_mddi_overlay_blt_offset(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req);
 void mdp4_mddi_overlay_blt(struct msm_fb_data_type *mfd,
@@ -709,6 +710,7 @@
 int mdp4_mddi_overlay_blt_start(struct msm_fb_data_type *mfd);
 int mdp4_mddi_overlay_blt_stop(struct msm_fb_data_type *mfd);
 void mdp4_mddi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd);
+void mdp4_mddi_rdptr_init(int cndx);
 static inline int mdp4_dsi_overlay_blt_start(struct msm_fb_data_type *mfd)
 {
 	return -ENODEV;
@@ -783,13 +785,14 @@
 int mdp4_dsi_cmd_off(struct platform_device *pdev);
 int mdp4_dsi_video_off(struct platform_device *pdev);
 int mdp4_dsi_video_on(struct platform_device *pdev);
-int mdp4_dsi_video_splash_done(void);
 void mdp4_primary_vsync_dsi_video(void);
 void mdp4_dsi_cmd_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
-void mdp4_dsi_cmd_wait4vsync(int cndx);
-void mdp4_dsi_video_wait4vsync(int cndx);
+void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime);
+void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime);
 void mdp4_dsi_cmd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
 void mdp4_dsi_video_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+int mdp4_dsi_video_pipe_commit(int cndx, int wait);
+int mdp4_dsi_cmd_pipe_commit(int cndx, int wait);
 void mdp4_dsi_cmd_vsync_ctrl(struct fb_info *info, int enable);
 void mdp4_dsi_video_vsync_ctrl(struct fb_info *info, int enable);
 #ifdef CONFIG_FB_MSM_MDP303
@@ -797,11 +800,37 @@
 {
 	/* empty */
 }
-#else /* CONFIG_FB_MSM_MIPI_DSI */
+#else /* CONFIG_FB_MSM_MDP303 */
 void mdp4_dsi_cmd_del_timer(void);
+static inline int mdp4_mddi_on(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline int mdp4_mddi_off(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline void mdp4_mddi_wait4vsync(int cndx, long long *vtime)
+{
+}
+static inline void mdp4_mddi_vsync_ctrl(struct fb_info *info,
+				int enable)
+{
+}
+static inline void mdp4_mddi_pipe_queue(int cndx,
+			struct mdp4_overlay_pipe *pipe)
+{
+}
 #endif
 #else  /* CONFIG_FB_MSM_MIPI_DSI */
 
+int mdp4_mddi_off(struct platform_device *pdev);
+int mdp4_mddi_on(struct platform_device *pdev);
+void mdp4_mddi_wait4vsync(int cndx, long long *vtime);
+void mdp4_mddi_vsync_ctrl(struct fb_info *info, int enable);
+void mdp4_mddi_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_overlay_update_mddi(struct msm_fb_data_type *mfd);
+
 static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
 {
 	return 0;
@@ -825,10 +854,10 @@
 	struct mdp4_overlay_pipe *pipe)
 {
 }
-static inline void mdp4_dsi_cmd_wait4vsync(int cndx)
+static inline void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime)
 {
 }
-static inline void mdp4_dsi_video_wait4vsync(int cndx)
+static inline void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime)
 {
 }
 static inline void mdp4_dsi_cmd_pipe_queue(int cndx,
@@ -839,6 +868,14 @@
 			struct mdp4_overlay_pipe *pipe)
 {
 }
+static inline int mdp4_dsi_video_pipe_commit(int cndx, int wait)
+{
+	return 0;
+}
+static inline int mdp4_dsi_cmd_pipe_commit(int cndx, int wait)
+{
+	return 0;
+}
 static inline void mdp4_dsi_cmd_vsync_ctrl(struct fb_info *info,
 				int enable)
 {
@@ -852,11 +889,6 @@
 {
 	/* empty */
 }
-
-static int mdp4_dsi_video_splash_done(void)
-{
-}
-
 #ifdef CONFIG_FB_MSM_MDP40
 static inline void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd)
 {
@@ -913,7 +945,6 @@
 void mdp4_writeback_dma_stop(struct msm_fb_data_type *mfd);
 int mdp4_writeback_init(struct fb_info *info);
 int mdp4_writeback_terminate(struct fb_info *info);
-int mdp4_writeback_set_mirroring_hint(struct fb_info *info, int hint);
 
 uint32_t mdp_block2base(uint32_t block);
 int mdp_hist_lut_config(struct mdp_hist_lut_data *data);
@@ -925,6 +956,7 @@
 int mdp4_pcc_cfg(struct mdp_pcc_cfg_data *cfg_ptr);
 int mdp4_argc_cfg(struct mdp_pgc_lut_data *pgc_ptr);
 int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *cfg);
+int mdp4_calib_config(struct mdp_calib_config_data *cfg);
 int mdp4_qseed_access_cfg(struct mdp_qseed_cfg *cfg, uint32_t base);
 u32  mdp4_allocate_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
 void mdp4_init_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
@@ -950,7 +982,11 @@
 int mdp4_overlay_mdp_perf_req(struct msm_fb_data_type *mfd,
 				struct mdp4_overlay_pipe *plist);
 void mdp4_overlay_mdp_perf_upd(struct msm_fb_data_type *mfd, int flag);
-int mdp4_overlay_reset(void);
+int mdp4_update_base_blend(struct msm_fb_data_type *mfd,
+				struct mdp_blend_cfg *mdp_blend_cfg);
+int mdp4_update_writeback_format(struct msm_fb_data_type *mfd,
+			struct mdp_mixer_cfg *mdp_mixer_cfg);
+u32 mdp4_get_mixer_num(u32 panel_type);
 
 #ifndef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
 static inline void mdp4_wfd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index ba9e33b..e8d7489 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -30,7 +29,6 @@
 #include <linux/fb.h>
 #include <linux/msm_mdp.h>
 #include <linux/file.h>
-#include <linux/android_pmem.h>
 #include <linux/major.h>
 #include <asm/system.h>
 #include <asm/mach-types.h>
@@ -49,6 +47,8 @@
 	struct mdp4_overlay_pipe *stage[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
 	struct mdp4_overlay_pipe *baselayer[MDP4_MIXER_MAX];
 	struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
+	struct mdp4_overlay_pipe sf_plist[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+	struct mdp_mixer_cfg mdp_mixer_cfg[MDP4_MIXER_MAX];
 	uint32 mixer_cfg[MDP4_MIXER_MAX];
 	uint32 flush[MDP4_MIXER_MAX];
 	struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
@@ -119,6 +119,7 @@
 
 static struct ion_client *display_iclient;
 
+static void mdp4_overlay_bg_solidfill(struct blend_cfg *blend);
 
 /*
  * mdp4_overlay_iommu_unmap_freelist()
@@ -191,7 +192,6 @@
 
 	if (pipe->flags & MDP_MEMORY_ID_TYPE_FB) {
 		pipe->flags &= ~MDP_MEMORY_ID_TYPE_FB;
-
 		if (pipe->put0_need) {
 			fput_light(pipe->srcp0_file, pipe->put0_need);
 			pipe->put0_need = 0;
@@ -244,7 +244,8 @@
 		pr_err("ion_import_dma_buf() failed\n");
 		return PTR_ERR(*srcp_ihdl);
 	}
-	pr_debug("%s(): ion_hdl %p, ion_buf %d\n", __func__, *srcp_ihdl, mem_id);
+	pr_debug("%s(): ion_hdl %p, ion_buf %d\n", __func__, *srcp_ihdl,
+		ion_share_dma_buf(display_iclient, *srcp_ihdl));
 	pr_debug("mixer %u, pipe %u, plane %u\n", pipe->mixer_num,
 		pipe->pipe_ndx, plane);
 	if (ion_map_iommu(display_iclient, *srcp_ihdl,
@@ -726,41 +727,61 @@
 	*luma_off = 0;
 	*chroma_off = 0;
 
-	if (pipe->src_x && (pipe->frame_format ==
+	if ((pipe->src_x || pipe->src_y) && (pipe->frame_format ==
 		MDP4_FRAME_FORMAT_LINEAR)) {
-		src_xy = (pipe->src_y << 16) | pipe->src_x;
-		src_xy &= 0xffff0000;
+		src_xy = 0;
 		outpdw(vg_base + 0x0004, src_xy);	/* MDP_RGB_SRC_XY */
 
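+		/*
+		 * src_xy is forced to (0, 0); the crop offset is folded
+		 * into the per-plane base addresses below, scaled by
+		 * each plane's chroma subsampling.
+		 */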
 		switch (pipe->src_format) {
 		case MDP_Y_CR_CB_H2V2:
 		case MDP_Y_CR_CB_GH2V2:
 		case MDP_Y_CB_CR_H2V2:
-				*luma_off = pipe->src_x;
-				*chroma_off = pipe->src_x/2;
+			*luma_off = pipe->src_x +
+				(pipe->src_y * pipe->srcp0_ystride);
+			*chroma_off = pipe->src_x / 2 +
+				((pipe->src_y / 2) * pipe->srcp1_ystride);
 			break;
 
 		case MDP_Y_CBCR_H2V2_TILE:
 		case MDP_Y_CRCB_H2V2_TILE:
 		case MDP_Y_CBCR_H2V2:
 		case MDP_Y_CRCB_H2V2:
+			*luma_off = pipe->src_x +
+				(pipe->src_y * pipe->srcp0_ystride);
+			*chroma_off = pipe->src_x +
+				((pipe->src_y / 2) * pipe->srcp1_ystride);
+			break;
+
 		case MDP_Y_CRCB_H1V1:
 		case MDP_Y_CBCR_H1V1:
+			*luma_off = pipe->src_x +
+				(pipe->src_y * pipe->srcp0_ystride);
+			*chroma_off = pipe->src_x +
+				((pipe->src_y * 2) * pipe->srcp1_ystride);
+			break;
+
 		case MDP_Y_CRCB_H2V1:
 		case MDP_Y_CBCR_H2V1:
-			*luma_off = pipe->src_x;
-			*chroma_off = pipe->src_x;
+		case MDP_Y_CRCB_H1V2:
+		case MDP_Y_CBCR_H1V2:
+			*luma_off = pipe->src_x +
+				(pipe->src_y * pipe->srcp0_ystride);
+			*chroma_off = pipe->src_x +
+				(pipe->src_y * pipe->srcp1_ystride);
 			break;
 
 		case MDP_YCRYCB_H2V1:
+		case MDP_CBYCRY_H2V1:
 			if (pipe->src_x & 0x1)
 				pipe->src_x += 1;
-			*luma_off += pipe->src_x * 2;
+			*luma_off += pipe->src_x * 2 +
+				((pipe->src_y * 2) * pipe->srcp0_ystride);
 			break;
 
 		case MDP_ARGB_8888:
 		case MDP_RGBA_8888:
 		case MDP_BGRA_8888:
+		case MDP_BGRX_8888:
 		case MDP_RGBX_8888:
 		case MDP_RGB_565:
 		case MDP_BGR_565:
@@ -768,7 +789,8 @@
 		case MDP_RGB_888:
 		case MDP_YCBCR_H1V1:
 		case MDP_YCRCB_H1V1:
-			*luma_off = pipe->src_x * pipe->bpp;
+			*luma_off = (pipe->src_x * pipe->bpp) +
+					(pipe->src_y * pipe->srcp0_ystride);
 			break;
 
 		default:
@@ -947,11 +969,15 @@
 	case MDP_ARGB_8888:
 	case MDP_RGBA_8888:
 	case MDP_BGRA_8888:
+	case MDP_BGRX_8888:
 	case MDP_RGBX_8888:
 		return OVERLAY_TYPE_RGB;
 	case MDP_YCRYCB_H2V1:
+	case MDP_CBYCRY_H2V1:
 	case MDP_Y_CRCB_H2V1:
 	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H1V2:
+	case MDP_Y_CBCR_H1V2:
 	case MDP_Y_CRCB_H2V2:
 	case MDP_Y_CBCR_H2V2:
 	case MDP_Y_CBCR_H2V2_TILE:
@@ -1115,6 +1141,23 @@
 		pipe->element0 = C1_B_Cb;	/* B */
 		pipe->bpp = 4;		/* 4 bpp */
 		break;
+	case MDP_BGRX_8888:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 3;        /* alpha, 8 bits */
+		pipe->r_bit = 3;        /* R, 8 bits */
+		pipe->b_bit = 3;        /* B, 8 bits */
+		pipe->g_bit = 3;        /* G, 8 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 3;
+		pipe->element3 = C3_ALPHA;      /* alpha */
+		pipe->element2 = C2_R_Cr;       /* R */
+		pipe->element1 = C0_G_Y;        /* G */
+		pipe->element0 = C1_B_Cb;       /* B */
+		pipe->bpp = 4;          /* 4 bpp */
+		break;
 	case MDP_YCRYCB_H2V1:
 		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
 		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
@@ -1126,6 +1169,24 @@
 		pipe->unpack_tight = 1;
 		pipe->unpack_align_msb = 0;
 		pipe->unpack_count = 3;
+		pipe->element3 = C1_B_Cb;	/* B */
+		pipe->element2 = C0_G_Y;	/* G */
+		pipe->element1 = C2_R_Cr;	/* R */
+		pipe->element0 = C0_G_Y;	/* G */
+		pipe->bpp = 2;		/* 2 bpp */
+		pipe->chroma_sample = MDP4_CHROMA_H2V1;
+		break;
+	case MDP_CBYCRY_H2V1:
+		pipe->frame_format = MDP4_FRAME_FORMAT_LINEAR;
+		pipe->fetch_plane = OVERLAY_PLANE_INTERLEAVED;
+		pipe->a_bit = 0;	/* alpha, 4 bits */
+		pipe->r_bit = 3;	/* R, 8 bits */
+		pipe->b_bit = 3;	/* B, 8 bits */
+		pipe->g_bit = 3;	/* G, 8 bits */
+		pipe->alpha_enable = 0;
+		pipe->unpack_tight = 1;
+		pipe->unpack_align_msb = 0;
+		pipe->unpack_count = 3;
 		pipe->element3 = C0_G_Y;	/* G */
 		pipe->element2 = C2_R_Cr;	/* R */
 		pipe->element1 = C0_G_Y;	/* G */
@@ -1135,6 +1196,8 @@
 		break;
 	case MDP_Y_CRCB_H2V1:
 	case MDP_Y_CBCR_H2V1:
+	case MDP_Y_CRCB_H1V2:
+	case MDP_Y_CBCR_H1V2:
 	case MDP_Y_CRCB_H2V2:
 	case MDP_Y_CBCR_H2V2:
 	case MDP_Y_CRCB_H1V1:
@@ -1171,6 +1234,14 @@
 				pipe->chroma_sample = MDP4_CHROMA_H1V2;
 			else
 				pipe->chroma_sample = MDP4_CHROMA_RGB;
+		} else if (pipe->src_format == MDP_Y_CRCB_H1V2) {
+			pipe->element1 = C1_B_Cb;
+			pipe->element0 = C2_R_Cr;
+			pipe->chroma_sample = MDP4_CHROMA_H1V2;
+		} else if (pipe->src_format == MDP_Y_CBCR_H1V2) {
+			pipe->element1 = C2_R_Cr;
+			pipe->element0 = C1_B_Cb;
+			pipe->chroma_sample = MDP4_CHROMA_H1V2;
 		} else if (pipe->src_format == MDP_Y_CRCB_H2V2) {
 			pipe->element1 = C1_B_Cb;
 			pipe->element0 = C2_R_Cr;
@@ -1292,6 +1363,7 @@
 	case MDP_XRGB_8888:
 	case MDP_ARGB_8888:
 	case MDP_BGRA_8888:
+	case MDP_BGRX_8888:
 		b_start = 0;
 		g_start = 8;
 		r_start = 16;
@@ -1331,6 +1403,8 @@
 	case MDP_Y_CR_CB_GH2V2:
 	case MDP_Y_CRCB_H2V2:
 	case MDP_Y_CRCB_H2V1:
+	case MDP_Y_CRCB_H1V2:
+	case MDP_Y_CBCR_H1V2:
 	case MDP_Y_CRCB_H1V1:
 	case MDP_Y_CBCR_H1V1:
 	case MDP_YCRCB_H1V1:
@@ -1402,6 +1476,87 @@
 			(pipe->element1 << 8) | pipe->element0;
 }
 
+static uint32 mdp4_overlayproc_cfg_wb_panel(struct mdp4_overlay_pipe *pipe,
+					char *overlay_base, uint32 curr)
+{
+	int off, bpp;
+	uint32 flag;
+	bool is_rgb = false;
+	struct mdp_mixer_cfg *mixer_cfg;
+
+	off = 0;
+	mixer_cfg = &ctrl->mdp_mixer_cfg[MDP4_MIXER2];
+
+	switch (mixer_cfg->writeback_format) {
+	case WB_FORMAT_RGB_888:
+		bpp = 3; /* RGB888 */
+		flag = 0x0;
+		is_rgb = true;
+		break;
+	case WB_FORMAT_RGB_565:
+		bpp = 2; /* RGB565 */
+		flag = 0x1;
+		is_rgb = true;
+		break;
+	case WB_FORMAT_xRGB_8888:
+		bpp = 4; /* xRGB8888 */
+		flag = 0x3;
+		is_rgb = true;
+		break;
+	case WB_FORMAT_ARGB_8888:
+		bpp = 4; /* ARGB8888 */
+		flag = 0x80000003;
+		is_rgb = true;
+		break;
+	case WB_FORMAT_ARGB_8888_INPUT_ALPHA:
+		pr_warn("currently not supported ARGB_8888_INPUT_ALPHA\n");
+	default:
+		bpp = 1; /* NV12 */
+		is_rgb = false;
+		break;
+	}
+
+	if (is_rgb) {
+		if (pipe->ov_cnt & 0x01)
+			off = pipe->src_height * pipe->src_width * bpp;
+
+		outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+		/* overlay output stride, in bytes */
+		outpdw(overlay_base + 0x0010, pipe->src_width * bpp);
+		outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+		/* MDDI - BLT + on demand */
+		outpdw(overlay_base + 0x0004, 0x08);
+
+		curr = inpdw(overlay_base + 0x0014);
+		curr &= 0x4;
+
+		outpdw(overlay_base + 0x0014, curr | flag);
+	} else {
+		if (pipe->ov_cnt & 0x01)
+			off = pipe->src_height * pipe->src_width * bpp;
+
+		outpdw(overlay_base + 0x000c, pipe->ov_blt_addr + off);
+		/* overlay output is pseudo planar: Y and CbCr strides */
+		outpdw(overlay_base + 0x0010, ((pipe->src_width << 16) |
+				pipe->src_width));
+		outpdw(overlay_base + 0x001c, pipe->ov_blt_addr + off);
+		off = pipe->src_height * pipe->src_width;
+		/* align chroma to 2k address */
+		off = (off + 2047) & ~2047;
+		/* UV plane address */
+		outpdw(overlay_base + 0x0020, pipe->ov_blt_addr + off);
+		/* MDDI - BLT + on demand */
+		outpdw(overlay_base + 0x0004, 0x08);
+		/* pseudo planar + writeback */
+		curr = inpdw(overlay_base + 0x0014);
+		curr &= 0x4;
+		outpdw(overlay_base + 0x0014, curr | 0x012);
+		/* rgb->yuv */
+		outpdw(overlay_base + 0x0200, 0x05);
+	}
+	return curr;
+}
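/*
 * Editor's note: sketch of the blt buffer layout both branches above
 * assume; the writeback buffer is double buffered, and for NV12 the
 * CbCr plane starts at the next 2 KiB boundary after luma. Hypothetical
 * helpers, not driver code:
 */
static uint32 wb_frame_offset(struct mdp4_overlay_pipe *pipe, int bpp)
{
	/* odd overlay kickoffs render into the second frame */
	return (pipe->ov_cnt & 0x01) ?
		pipe->src_height * pipe->src_width * bpp : 0;
}

static uint32 wb_chroma_offset(struct mdp4_overlay_pipe *pipe)
{
	uint32 off = pipe->src_height * pipe->src_width;

	return (off + 2047) & ~2047;	/* align chroma to 2k */
}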
+
 /*
  * mdp4_overlayproc_cfg: only be called from base layer
  */
@@ -1461,34 +1616,8 @@
 #endif
 		} else if (pipe->mixer_num == MDP4_MIXER2) {
 			if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
-				off = 0;
-				bpp = 1;
-				if (pipe->ov_cnt & 0x01)
-					off = pipe->src_height *
-							pipe->src_width * bpp;
-
-				outpdw(overlay_base + 0x000c,
-						pipe->ov_blt_addr + off);
-				/* overlay ouput is RGB888 */
-				outpdw(overlay_base + 0x0010,
-					((pipe->src_width << 16) |
-					 pipe->src_width));
-				outpdw(overlay_base + 0x001c,
-						pipe->ov_blt_addr + off);
-				off = pipe->src_height * pipe->src_width;
-				/* align chroma to 2k address */
-				off = (off + 2047) & ~2047;
-				/* UV plane adress */
-				outpdw(overlay_base + 0x0020,
-						pipe->ov_blt_addr + off);
-				/* MDDI - BLT + on demand */
-				outpdw(overlay_base + 0x0004, 0x08);
-				/* pseudo planar + writeback */
-				curr = inpdw(overlay_base + 0x0014);
-				curr &= 0x4;
-				outpdw(overlay_base + 0x0014, curr | 0x012);
-				/* rgb->yuv */
-				outpdw(overlay_base + 0x0200, 0x05);
+				curr = mdp4_overlayproc_cfg_wb_panel(pipe,
+							overlay_base, curr);
 			}
 		}
 	} else {
@@ -1563,6 +1692,35 @@
 	return cnt;
 }
 
+void mdp4_solidfill_commit(int mixer)
+{
+	struct blend_cfg bcfg;
+	struct mdp4_overlay_pipe *pp = NULL;
+	int i = 0;
+
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
+		pp = &ctrl->sf_plist[mixer][i];
+		if (pp->pipe_ndx && pp->solid_fill) {
+			bcfg.solidfill = 1;
+			bcfg.solidfill_pipe = pp;
+			mdp4_overlay_bg_solidfill(&bcfg);
+			mdp4_overlay_reg_flush(pp, 1);
+			mdp4_mixer_stage_up(pp, 0);
+		}
+	}
+	mdp4_mixer_stage_commit(MDP4_MIXER1);
+
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++) {
+		pp = &ctrl->sf_plist[mixer][i];
+		if (pp->pipe_ndx && pp->solid_fill) {
+			mdp4_overlay_reg_flush(pp, 1);
+			mdp4_mixer_stage_down(pp, 0);
+			pp->solid_fill = 0;
+		}
+	}
+	mdp4_mixer_stage_commit(MDP4_MIXER1);
+}
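/*
 * Editor's note: the stage_up/stage_down pair above flushes each
 * cloned solid-fill pipe for exactly one frame and then removes it.
 * Both commits go to MDP4_MIXER1, which appears intentional: the
 * sf_plist is only populated from the HDMI (mixer1) unset path later
 * in this diff.
 */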
+
 void mdp4_mixer_stage_commit(int mixer)
 {
 	struct mdp4_overlay_pipe *pipe;
@@ -1660,7 +1818,8 @@
 			ctrl->stage[mixer][i] = NULL;  /* clear it */
 	}
 
-	if (commit || (mixer > 0 && !hdmi_prim_display))
+	if (commit || ((mixer == MDP4_MIXER1) && !hdmi_prim_display) ||
+		(mixer == MDP4_MIXER2))
 		mdp4_mixer_stage_commit(mixer);
 }
 /*
@@ -1673,7 +1832,6 @@
 	struct mdp4_overlay_pipe *bspipe;
 	int ptype, pnum, pndx, mixer;
 	int format, alpha_enable, alpha;
-	struct mdp4_iommu_pipe_info iom;
 
 	if (pipe->pipe_type != OVERLAY_TYPE_BF)
 		return;
@@ -1694,7 +1852,6 @@
 	/* save original base layer */
 	ctrl->baselayer[mixer] = bspipe;
 
-	iom = pipe->iommu;
 	pipe->alpha = 0;	/* make sure bf pipe has alpha 0 */
 	ptype = pipe->pipe_type;
 	pnum = pipe->pipe_num;
@@ -1709,7 +1866,6 @@
 	pipe->src_format = format;
 	pipe->alpha_enable = alpha_enable;
 	pipe->alpha = alpha;
-	pipe->iommu = iom;
 
 	/* free original base layer pipe to be used as normal pipe */
 	bspipe->pipe_used = 0;
@@ -1720,10 +1876,8 @@
 		mdp4_dsi_cmd_base_swap(0, pipe);
 	else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
 		mdp4_lcdc_base_swap(0, pipe);
-#ifdef CONFIG_FB_MSM_DTV
 	else if (ctrl->panel_mode & MDP4_PANEL_DTV)
 		mdp4_dtv_base_swap(0, pipe);
-#endif
 
 	mdp4_overlay_reg_flush(bspipe, 1);
 	/* borderfill pipe as base layer */
@@ -1909,10 +2063,10 @@
 	struct mdp4_overlay_pipe *d_pipe;
 	struct mdp4_overlay_pipe *s_pipe;
 	struct blend_cfg *blend;
-	int i, off, ptype, alpha_drop;
+	int i, off, alpha_drop;
 	int d_alpha, s_alpha;
 	unsigned char *overlay_base;
-	uint32 c0, c1, c2;
+	uint32 c0, c1, c2, base_premulti;
 
 
 	d_pipe = ctrl->stage[mixer][MDP4_MIXER_STAGE_BASE];
@@ -1922,6 +2076,8 @@
 	}
 
 	blend = &ctrl->blend[mixer][MDP4_MIXER_STAGE0];
+	base_premulti = ctrl->blend[mixer][MDP4_MIXER_STAGE_BASE].op &
+		MDP4_BLEND_FG_ALPHA_BG_CONST;
 	for (i = MDP4_MIXER_STAGE0; i < MDP4_MIXER_STAGE_MAX; i++) {
 		blend->solidfill = 0;
 		blend->op = (MDP4_BLEND_FG_ALPHA_FG_CONST |
@@ -1966,7 +2122,9 @@
 		} else if (s_alpha) {
 			if (!alpha_drop) {
 				blend->op = MDP4_BLEND_BG_ALPHA_FG_PIXEL;
-				if (!(s_pipe->flags & MDP_BLEND_FG_PREMULT))
+				if ((!(s_pipe->flags & MDP_BLEND_FG_PREMULT)) &&
+						((i != MDP4_MIXER_STAGE0) ||
+							(!base_premulti)))
 					blend->op |=
 						MDP4_BLEND_FG_ALPHA_FG_PIXEL;
 				else
@@ -1975,21 +2133,17 @@
 			} else
 				blend->op = MDP4_BLEND_BG_ALPHA_FG_CONST;
 		} else if (d_alpha) {
-			ptype = mdp4_overlay_format2type(s_pipe->src_format);
-			if (ptype == OVERLAY_TYPE_VIDEO &&
-				(!(s_pipe->flags & MDP_BACKEND_COMPOSITION))) {
-				blend->op = (MDP4_BLEND_FG_ALPHA_BG_PIXEL |
-					MDP4_BLEND_FG_INV_ALPHA);
-				if (!(s_pipe->flags & MDP_BLEND_FG_PREMULT))
-					blend->op |=
-						MDP4_BLEND_BG_ALPHA_BG_PIXEL;
-				blend->co3_sel = 0; /* use bg alpha */
-			} else {
-				/* s_pipe is rgb without alpha */
-				blend->op = (MDP4_BLEND_FG_ALPHA_FG_CONST |
-					    MDP4_BLEND_BG_ALPHA_BG_CONST);
-				blend->bg_alpha = 0;
-			}
+			blend->op = (MDP4_BLEND_FG_ALPHA_BG_PIXEL |
+				MDP4_BLEND_FG_INV_ALPHA);
+			if ((!(d_pipe->flags & MDP_BLEND_FG_PREMULT)) &&
+					((i != MDP4_MIXER_STAGE0) ||
+						(!base_premulti)))
+				blend->op |=
+					MDP4_BLEND_BG_ALPHA_BG_PIXEL;
+			else
+				blend->fg_alpha = 0xff;
+
+			blend->co3_sel = 0; /* use bg alpha */
 		}
 
 		if (s_pipe->transp != MDP_TRANSP_NOP) {
@@ -2051,7 +2205,9 @@
 		outpdw(overlay_base + off + 0x108, blend->fg_alpha);
 		outpdw(overlay_base + off + 0x10c, blend->bg_alpha);
 
-		if (mdp_rev >= MDP_REV_42)
+		if (mdp_rev >= MDP_REV_42 ||
+			ctrl->panel_mode & MDP4_PANEL_MDDI ||
+			 ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 			outpdw(overlay_base + off + 0x104, blend->op);
 
 		outpdw(overlay_base + (off << 5) + 0x1004, blend->co3_sel);
@@ -2154,7 +2310,6 @@
 {
 	uint32 ptype, num, ndx, mixer;
 	struct mdp4_iommu_pipe_info iom;
-	struct mdp4_overlay_pipe *orgpipe;
 
 	pr_debug("%s: pipe=%x ndx=%d\n", __func__, (int)pipe, pipe->pipe_ndx);
 
@@ -2163,9 +2318,7 @@
 	ndx = pipe->pipe_ndx;
 	mixer = pipe->mixer_num;
 
-	/* No need for borderfill pipe */
-	if (pipe->pipe_type != OVERLAY_TYPE_BF)
-		mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+	mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
 
 	iom = pipe->iommu;
 
@@ -2176,11 +2329,6 @@
 	pipe->mixer_num = mixer;
 	pipe->iommu = iom;
 
-	/*Clear real pipe attributes as well */
-	orgpipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
-	if (orgpipe != NULL)
-		orgpipe->pipe_used = 0;
-
 }
 
 static int mdp4_overlay_req2pipe(struct mdp_overlay *req, int mixer,
@@ -2299,6 +2447,18 @@
 		return -ERANGE;
 	}
 
+	if (mdp_rev <= MDP_REV_41) {
+		if ((mdp4_overlay_format2type(req->src.format) ==
+			OVERLAY_TYPE_RGB) &&
+			!(req->flags & MDP_OV_PIPE_SHARE) &&
+			((req->src_rect.w > req->dst_rect.w) ||
+			 (req->src_rect.h > req->dst_rect.h))) {
+			mdp4_stat.err_size++;
+			pr_err("%s: downscale on RGB pipe!\n", __func__);
+			return -EINVAL;
+		}
+	}
+
 	if (mdp_hw_revision == MDP4_REVISION_V1) {
 		/*  non integer down scaling ratio smaller than 1/4
 		 *  is not supported
@@ -2444,7 +2604,6 @@
 	u32 hsync = 0;
 	u32 shift = 16;
 	u64 rst;
-	int ptype;
 	int ret = -EINVAL;
 
 	if (!pipe) {
@@ -2456,17 +2615,6 @@
 		return ret;
 	}
 
-	/*
-	 * Serveral special cases require the max mdp clk but cannot
-	 * be explained by mdp clk equation.
-	 */
-	if (pipe->flags & MDP_DEINTERLACE) {
-		pr_info("%s deinterlace requires max mdp clk.\n",
-			__func__);
-		pipe->req_clk = mdp_max_clk;
-		return 0;
-	}
-
 	pr_debug("%s: pipe sets: panel res(x,y)=(%d,%d)\n",
 		 __func__,  mfd->panel_info.xres, mfd->panel_info.yres);
 	pr_debug("%s: src(w,h)(%d,%d),src(x,y)(%d,%d)\n",
@@ -2478,6 +2626,11 @@
 		mfd->panel_info.type == MIPI_CMD_PANEL) ?
 		mfd->panel_info.mipi.dsi_pclk_rate :
 		mfd->panel_info.clk_rate;
+
+	if (mfd->panel_info.type == LVDS_PANEL &&
+		mfd->panel_info.lvds.channel_mode == LVDS_DUAL_CHANNEL_MODE)
+		pclk = pclk << 1;
+
 	if (!pclk) {
 		pipe->req_clk = mdp_max_clk;
 		pr_err("%s panel pixel clk is zero!\n", __func__);
@@ -2510,24 +2663,6 @@
 		return ret;
 	}
 
-	if (pipe->mixer_num == MDP4_MIXER0) {
-		if (pipe->blt_forced)
-			return 0;
-
-		ptype = mdp4_overlay_format2type(pipe->src_format);
-		if (ptype == OVERLAY_TYPE_VIDEO) {
-			if ((pipe->src_h >= 720) && (pipe->src_w >= 1080))  {
-				pipe->req_clk = (u32) mdp_max_clk + 100; 
-				pipe->blt_forced++;
-				return 0;
-			} else if ((pipe->src_h >= 1080) && (pipe->src_w >= 720))  {
-				pipe->req_clk = (u32) mdp_max_clk + 100; 
-				pipe->blt_forced++;
-				return 0;
-			}
-		}
-	}
-
 	/*
 	 * For the scaling cases, make more margin by removing porch
 	 * values and adding extra 20%.
@@ -2624,6 +2759,16 @@
 			__func__);
 	}
 
+	/*
+	 * Interlaced video requires the max mdp clk, which the mdp
+	 * clk equation does not account for.
+	 */
+	if (pipe->flags & MDP_DEINTERLACE) {
+		rst = (rst > mdp_max_clk) ? rst : mdp_max_clk;
+		pr_info("%s deinterlace requires max mdp clk.\n",
+			__func__);
+	}
+
 	pipe->req_clk = (u32) rst;
 
 	pr_debug("%s: required mdp clk %d mixer %d pipe ndx %d\n",
@@ -2885,7 +3030,8 @@
 		}
 
 		if ((mfd->panel_info.pdest == DISPLAY_1 &&
-		     perf_req->use_ov_blt[0] && !perf_cur->use_ov_blt[0])) {
+		     perf_req->use_ov_blt[0] && !perf_cur->use_ov_blt[0]) ||
+		    dbg_force_ov0_blt) {
 			if (mfd->panel_info.type == LCDC_PANEL ||
 			    mfd->panel_info.type == LVDS_PANEL)
 				mdp4_lcdc_overlay_blt_start(mfd);
@@ -2893,13 +3039,26 @@
 				mdp4_dsi_video_blt_start(mfd);
 			else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 				mdp4_dsi_cmd_blt_start(mfd);
-			pr_info("%s mixer0 start blt [%d] from %d to %d.\n",
+			else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+				mdp4_mddi_blt_start(mfd);
+			pr_debug("%s mixer0 start blt [%d] from %d to %d.\n",
 				__func__,
 				flag,
 				perf_cur->use_ov_blt[0],
 				perf_req->use_ov_blt[0]);
 			perf_cur->use_ov_blt[0] = perf_req->use_ov_blt[0];
 		}
+		if ((mfd->panel_info.pdest == DISPLAY_2 &&
+		     perf_req->use_ov_blt[1] && !perf_cur->use_ov_blt[1]) ||
+		    dbg_force_ov1_blt) {
+			mdp4_dtv_overlay_blt_start(mfd);
+			pr_debug("%s mixer1 start blt [%d] from %d to %d.\n",
+				__func__,
+				flag,
+				perf_cur->use_ov_blt[1],
+				perf_req->use_ov_blt[1]);
+			perf_cur->use_ov_blt[1] = perf_req->use_ov_blt[1];
+		}
 	} else {
 		if (perf_req->mdp_clk_rate < perf_cur->mdp_clk_rate) {
 			pr_info("%s mdp clk is changed [%d] from %d to %d\n",
@@ -2928,9 +3087,9 @@
 			perf_cur->mdp_ab_bw = perf_req->mdp_ab_bw;
 			perf_cur->mdp_ib_bw = perf_req->mdp_ib_bw;
 		}
-
 		if ((mfd->panel_info.pdest == DISPLAY_1 &&
-		     !perf_req->use_ov_blt[0] && perf_cur->use_ov_blt[0])) {
+		     !perf_req->use_ov_blt[0] && perf_cur->use_ov_blt[0]) ||
+		    dbg_force_ov0_blt) {
 			if (mfd->panel_info.type == LCDC_PANEL ||
 			    mfd->panel_info.type == LVDS_PANEL)
 				mdp4_lcdc_overlay_blt_stop(mfd);
@@ -2938,13 +3097,26 @@
 				mdp4_dsi_video_blt_stop(mfd);
 			else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
 				mdp4_dsi_cmd_blt_stop(mfd);
-			pr_info("%s mixer0 stop blt [%d] from %d to %d.\n",
+			else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+				mdp4_mddi_blt_stop(mfd);
+			pr_debug("%s mixer0 stop blt [%d] from %d to %d.\n",
 				__func__,
 				flag,
 				perf_cur->use_ov_blt[0],
 				perf_req->use_ov_blt[0]);
 			perf_cur->use_ov_blt[0] = perf_req->use_ov_blt[0];
 		}
+		if ((mfd->panel_info.pdest == DISPLAY_2 &&
+		     !perf_req->use_ov_blt[1] && perf_cur->use_ov_blt[1]) ||
+		    dbg_force_ov1_blt) {
+			mdp4_dtv_overlay_blt_stop(mfd);
+			pr_debug("%s mixer1 stop blt [%d] from %d to %d.\n",
+				__func__,
+				flag,
+				perf_cur->use_ov_blt[1],
+				perf_req->use_ov_blt[1]);
+			perf_cur->use_ov_blt[1] = perf_req->use_ov_blt[1];
+		}
 	}
 	return;
 }
@@ -2956,9 +3128,6 @@
 {
 	struct file *file;
 	int put_needed, ret = 0, fb_num;
-#ifdef CONFIG_ANDROID_PMEM
-	unsigned long vstart;
-#endif
 	*p_need = 0;
 
 	if (img->flags & MDP_BLIT_SRC_GEM) {
@@ -2993,13 +3162,6 @@
 	return mdp4_overlay_iommu_map_buf(img->memory_id, pipe, plane,
 		start, len, srcp_ihdl);
 #endif
-#ifdef CONFIG_ANDROID_PMEM
-	if (!get_pmem_file(img->memory_id, start, &vstart,
-					    len, srcp_file))
-		return 0;
-	else
-		return -EINVAL;
-#endif
 }
 
 #ifdef CONFIG_FB_MSM_MIPI_DSI
@@ -3111,10 +3273,10 @@
 		pr_debug("pipe->flags 0x%x\n", pipe->flags);
 		if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) {
 			mfd->mem_hid &= ~BIT(ION_IOMMU_HEAP_ID);
-			mfd->mem_hid |= ION_SECURE;
+			mfd->mem_hid |= ION_FLAG_SECURE;
 		} else {
 			mfd->mem_hid |= BIT(ION_IOMMU_HEAP_ID);
-			mfd->mem_hid &= ~ION_SECURE;
+			mfd->mem_hid &= ~ION_FLAG_SECURE;
 		}
 	}
 
@@ -3129,9 +3291,6 @@
 								__func__);
 	}
 
-	if (hdmi_prim_display)
-		fill_black_screen(FALSE, pipe->pipe_num, pipe->mixer_num);
-
 	mdp4_overlay_mdp_pipe_req(pipe, mfd);
 
 	mutex_unlock(&mfd->dma->ov_mutex);
@@ -3142,6 +3301,7 @@
 int mdp4_overlay_unset_mixer(int mixer)
 {
 	struct mdp4_overlay_pipe *pipe;
+	struct mdp4_overlay_pipe *orgpipe;
 	int i, cnt = 0;
 
 	/* free pipe besides base layer pipe */
@@ -3149,11 +3309,14 @@
 		pipe = ctrl->stage[mixer][i];
 		if (pipe == NULL)
 			continue;
-
 		pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
 		mdp4_overlay_reg_flush(pipe, 1);
 		mdp4_mixer_stage_down(pipe, 1);
 		mdp4_overlay_pipe_free(pipe);
+		/* Clear real pipe attributes as well */
+		orgpipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
+		if (orgpipe != NULL)
+			orgpipe->pipe_used = 0;
 		cnt++;
 	}
 
@@ -3164,7 +3327,6 @@
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 	struct mdp4_overlay_pipe *pipe;
-	int i, mixer;
 
 	if (mfd == NULL)
 		return -ENODEV;
@@ -3185,8 +3347,6 @@
 		return 0;
 	}
 
-	mixer = pipe->mixer_num;
-
 	if (pipe->mixer_num == MDP4_MIXER2)
 		ctrl->mixer2_played = 0;
 	else if (pipe->mixer_num == MDP4_MIXER1)
@@ -3194,33 +3354,23 @@
 	else {
 		/* mixer 0 */
 		ctrl->mixer0_played = 0;
-		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-			if (mfd->panel_power_on)
-				mdp4_mddi_blt_dmap_busy_wait(mfd);
-		}
+
 	}
 
 	mdp4_overlay_reg_flush(pipe, 1);
 	mdp4_mixer_stage_down(pipe, 0);
 
-	if (pipe->blt_forced) {
-		if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) {
-			pipe->blt_forced = 0;
-			pipe->req_clk = 0;
-			mdp4_overlay_mdp_perf_req(mfd, ctrl->plist);
-		}
-	}
-
 	if (pipe->mixer_num == MDP4_MIXER0) {
-		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-			if (mfd->panel_power_on)
-				mdp4_mddi_overlay_restore();
-		}
+
 	} else {	/* mixer1, DTV, ATV */
 		if (ctrl->panel_mode & MDP4_PANEL_DTV) {
-			if (hdmi_prim_display)
-				fill_black_screen(TRUE, pipe->pipe_num,
-					pipe->mixer_num);
+			if (hdmi_prim_display) {
+				struct mdp4_overlay_pipe *pp;
+				pp = &ctrl->sf_plist[pipe->mixer_num]
+					[pipe->pipe_ndx - 1];
+				*pp = *pipe; /* clone it */
+				pp->solid_fill = 1;
+			}
 			mdp4_overlay_dtv_unset(mfd, pipe);
 		}
 	}
@@ -3229,37 +3379,24 @@
 
 	mdp4_overlay_pipe_free(pipe);
 
-	/* mdp4_mixer_stage_down will remove pipe for mixer 1 and 2*/
-	if (mixer > MDP4_MIXER0 && !hdmi_prim_display) {
-		for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
-			pipe = ctrl->stage[mixer][i];
-			if (pipe && pipe->pipe_type != OVERLAY_TYPE_BF)
-				break;
-		}
-		/* only BF pipe is connected */
-		if (i == MDP4_MIXER_STAGE_MAX) {
-			/* make sure the operation has finished.*/
-			msleep(20);
-			msm_fb_release_timeline(mfd);
-		}
-	}
-
 	mutex_unlock(&mfd->dma->ov_mutex);
 
 	return 0;
 }
 
-int mdp4_overlay_wait4vsync(struct fb_info *info)
+int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime)
 {
 	if (!hdmi_prim_display && info->node == 0) {
 		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
-			mdp4_dsi_video_wait4vsync(0);
+			mdp4_dsi_video_wait4vsync(0, vtime);
 		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
-			mdp4_dsi_cmd_wait4vsync(0);
+			mdp4_dsi_cmd_wait4vsync(0, vtime);
 		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
-			mdp4_lcdc_wait4vsync(0);
+			mdp4_lcdc_wait4vsync(0, vtime);
+		else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+			mdp4_mddi_wait4vsync(0, vtime);
 	} else if (hdmi_prim_display || info->node == 1) {
-		mdp4_dtv_wait4vsync(0);
+		mdp4_dtv_wait4vsync(0, vtime);
 	}
 
 	return 0;
@@ -3281,6 +3418,8 @@
 			mdp4_dsi_cmd_vsync_ctrl(info, cmd);
 		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
 			mdp4_lcdc_vsync_ctrl(info, cmd);
+		else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+			mdp4_mddi_vsync_ctrl(info, cmd);
 	} else if (hdmi_prim_display || info->node == 1)
 		mdp4_dtv_vsync_ctrl(info, cmd);
 
@@ -3373,6 +3512,9 @@
 	if (mfd == NULL)
 		return -ENODEV;
 
+	if (!mfd->panel_power_on) /* suspended */
+		return -EPERM;
+
 	pipe = mdp4_overlay_ndx2pipe(req->id);
 	if (pipe == NULL) {
 		mdp4_stat.err_play++;
@@ -3432,7 +3574,9 @@
 		}
 		pipe->srcp0_ystride = pipe->src_width;
 		if ((pipe->src_format == MDP_Y_CRCB_H1V1) ||
-			(pipe->src_format == MDP_Y_CBCR_H1V1)) {
+			(pipe->src_format == MDP_Y_CBCR_H1V1) ||
+			(pipe->src_format == MDP_Y_CRCB_H1V2) ||
+			(pipe->src_format == MDP_Y_CBCR_H1V2)) {
 			if (pipe->src_width > YUV_444_MAX_WIDTH)
 				pipe->srcp1_ystride = pipe->src_width << 2;
 			else
@@ -3508,15 +3652,24 @@
 			/* cndx = 0 */
 			mdp4_lcdc_pipe_queue(0, pipe);
 		}
+		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+			/* cndx = 0 */
+			mdp4_mddi_pipe_queue(0, pipe);
+		}
 	} else if (pipe->mixer_num == MDP4_MIXER1) {
-		if (ctrl->panel_mode & MDP4_PANEL_DTV)
+		if (ctrl->panel_mode & MDP4_PANEL_DTV) {
 			mdp4_dtv_pipe_queue(0, pipe);/* cndx = 0 */
+			mdp4_dtv_set_avparams(pipe, img->memory_id);
+		}
 	} else if (pipe->mixer_num == MDP4_MIXER2) {
 		ctrl->mixer2_played++;
 		if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK)
 			mdp4_wfd_pipe_queue(0, pipe);/* cndx = 0 */
 	}
 
+	mutex_unlock(&mfd->dma->ov_mutex);
+	return ret;
+
 end:
 	mutex_unlock(&mfd->dma->ov_mutex);
 
@@ -3527,7 +3680,6 @@
 {
 	int ret = 0;
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-	int mixer;
 
 	if (mfd == NULL)
 		return -ENODEV;
@@ -3535,8 +3687,6 @@
 	if (!mfd->panel_power_on) /* suspended */
 		return -EINVAL;
 
-	mixer = mfd->panel_info.pdest;	/* DISPLAY_1 or DISPLAY_2 */
-
 	mutex_lock(&mfd->dma->ov_mutex);
 
 	mdp4_overlay_mdp_perf_upd(mfd, 1);
@@ -3550,7 +3700,6 @@
 	case MIPI_VIDEO_PANEL:
 		mdp4_dsi_video_pipe_commit(0, 1);
 		break;
-	case LVDS_PANEL:
 	case LCDC_PANEL:
 		mdp4_lcdc_pipe_commit(0, 1);
 		break;
@@ -3784,9 +3933,58 @@
 	mutex_unlock(&mfd->dma->ov_mutex);
 	return err;
 }
-int mdp4_overlay_reset()
+
+int mdp4_update_writeback_format(struct msm_fb_data_type *mfd,
+				struct mdp_mixer_cfg *mdp_mixer_cfg)
 {
-	memset(&perf_request, 0, sizeof(perf_request));
-	memset(&perf_current, 0, sizeof(perf_current));
-	return 0;
+	int ret = 0;
+	u32 mixer_num;
+	struct mdp_mixer_cfg *mixer;
+
+	mixer_num = mdp4_get_mixer_num(mfd->panel_info.type);
+	if (!ctrl) {
+		pr_warn("mdp4_overlay_ctrl is NULL\n");
+		return -EPERM;
+	}
+	mixer = &ctrl->mdp_mixer_cfg[mixer_num];
+
+	switch (mdp_mixer_cfg->writeback_format) {
+	case WB_FORMAT_RGB_888:
+	case WB_FORMAT_RGB_565:
+	case WB_FORMAT_NV12:
+	case WB_FORMAT_xRGB_8888:
+	case WB_FORMAT_ARGB_8888:
+		mixer->writeback_format = mdp_mixer_cfg->writeback_format;
+		break;
+	case WB_FORMAT_ARGB_8888_INPUT_ALPHA:
+		mixer->writeback_format = mdp_mixer_cfg->writeback_format;
+		mixer->alpha = mdp_mixer_cfg->alpha;
+		break;
+	default:
+		mixer->writeback_format = WB_FORMAT_NV12;
+		pr_warn("Unsupported format request, setting to NV12\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+int mdp4_update_base_blend(struct msm_fb_data_type *mfd,
+			struct mdp_blend_cfg *mdp_blend_cfg)
+{
+	int ret = 0;
+	u32 mixer_num;
+	struct blend_cfg *blend;
+	mixer_num = mdp4_get_mixer_num(mfd->panel_info.type);
+	if (!ctrl)
+		return -EPERM;
+	blend = &ctrl->blend[mixer_num][MDP4_MIXER_STAGE_BASE];
+	if (mdp_blend_cfg->is_premultiplied) {
+		blend->bg_alpha = 0xFF;
+		blend->op = MDP4_BLEND_FG_ALPHA_BG_CONST;
+	} else {
+		blend->op = MDP4_BLEND_FG_ALPHA_FG_PIXEL;
+		blend->bg_alpha = 0;
+	}
+	return ret;
 }
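/*
 * Editor's note: the intent of the two base-blend settings above,
 * spelled out per pixel (an interpretation for reference, not driver
 * text): with premultiplied content the fg pixels already carry their
 * alpha, so the fg weight is forced to a constant 0xFF,
 *
 *	out = fg + (1 - a_fg) * bg
 *
 * while with straight alpha the hardware scales fg by its per-pixel
 * alpha,
 *
 *	out = a_fg * fg + (1 - a_fg) * bg
 */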
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index 7711f48..5554d88 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -250,23 +250,7 @@
 	mdp4_stat.overlay_play[pipe->mixer_num]++;
 }
 
-static void mdp4_dsi_cmd_pipe_clean(struct vsync_update *vp)
-{
-	struct mdp4_overlay_pipe *pipe;
-	int i;
-
-	pipe = vp->plist;
-	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
-		if (pipe->pipe_used) {
-			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
-			pipe->pipe_used = 0; /* clear */
-		}
-	}
-	vp->update_cnt = 0;     /* empty queue */
-}
-
 static void mdp4_dsi_cmd_blt_ov_update(struct mdp4_overlay_pipe *pipe);
-static int mdp4_dsi_cmd_clk_check(struct vsycn_ctrl *vctrl);
 
 int mdp4_dsi_cmd_pipe_commit(int cndx, int wait)
 {
@@ -291,11 +275,10 @@
 
 	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
 
-	/*
-	 * allow stage_commit without pipes queued
-	 * (vp->update_cnt == 0) to unstage pipes after
-	 * overlay_unset
-	 */
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return cnt;
+	}
 
 	vctrl->update_ndx++;
 	vctrl->update_ndx &= 0x01;
@@ -307,9 +290,6 @@
 	}
 	mutex_unlock(&vctrl->update_lock);
 
-	if (mdp4_dsi_cmd_clk_check(vctrl) < 0)
-		return 0;
-
 	/* free previous committed iommu back to pool */
 	mdp4_overlay_iommu_unmap_freelist(mixer);
 
@@ -403,8 +383,11 @@
 
 	mdp4_stat.overlay_commit[pipe->mixer_num]++;
 
-	if (wait)
-		mdp4_dsi_cmd_wait4vsync(0);
+	if (wait) {
+		long long tick;
+
+		mdp4_dsi_cmd_wait4vsync(cndx, &tick);
+	}
 
 	return cnt;
 }
@@ -456,7 +439,7 @@
 	mutex_unlock(&vctrl->update_lock);
 }
 
-void mdp4_dsi_cmd_wait4vsync(int cndx)
+void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime)
 {
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
@@ -470,8 +453,10 @@
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
 
-	if (atomic_read(&vctrl->suspend) > 0)
+	if (atomic_read(&vctrl->suspend) > 0) {
+		*vtime = -1;
 		return;
+	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 	if (vctrl->wait_vsync_cnt == 0)
@@ -481,6 +466,8 @@
 
 	wait_for_completion(&vctrl->vsync_comp);
 	mdp4_stat.wait4vsync0++;
+
+	*vtime = ktime_to_ns(vctrl->vsync_time);
 }
 
 static void mdp4_dsi_cmd_wait4dmap(int cndx)
@@ -566,6 +553,8 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
+	if (pipe == NULL)
+		return;
 
 	 /* blt enabled */
 	spin_lock(&vctrl->spin_lock);
@@ -609,6 +598,8 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
+	if (pipe == NULL)
+		return;
 
 	spin_lock(&vctrl->spin_lock);
 	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
@@ -696,7 +687,10 @@
 		msecs_to_jiffies(VSYNC_PERIOD * 4));
 	if (ret <= 0) {
 		vctrl->wait_vsync_cnt = 0;
-		vctrl->vsync_time = ktime_get();
+		vsync_tick = ktime_to_ns(ktime_get());
+		ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
+		buf[strlen(buf) + 1] = '\0';
+		return ret;
 	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
@@ -704,6 +698,8 @@
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
 
 	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
+	pr_debug("%s: UEVENT\n", __func__);
+
 	buf[strlen(buf) + 1] = '\0';
 	return ret;
 }
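/*
 * Editor's note: a hypothetical userspace consumer of the node whose
 * show() function is patched above. Only the "VSYNC=<ns>" payload
 * format comes from this diff; the sysfs path and everything else
 * here is an assumption for illustration.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

static uint64_t read_vsync_ns(const char *path)
{
	char buf[64] = "";
	int fd = open(path, O_RDONLY);	/* e.g. .../fb0/vsync_event */

	if (fd < 0)
		return 0;
	/* the show() blocks, so this returns at the next vsync */
	pread(fd, buf, sizeof(buf) - 1, 0);
	close(fd);
	return strtoull(buf + sizeof("VSYNC=") - 1, NULL, 10);
}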
@@ -1025,7 +1021,6 @@
 	atomic_set(&vctrl->suspend, 0);
 
 	pr_debug("%s-:\n", __func__);
-
 	return ret;
 }
 
@@ -1088,15 +1083,8 @@
 	/* sanity check, free pipes besides base layer */
 	mdp4_overlay_unset_mixer(pipe->mixer_num);
 	mdp4_mixer_stage_down(pipe, 1);
-	if (mfd->ref_cnt == 0) {
-		mdp4_overlay_pipe_free(pipe);
-		vctrl->base_pipe = NULL;
-	}
-
-	if (vctrl->vsync_enabled) {
-		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
-		vctrl->vsync_enabled = 0;
-	}
+	mdp4_overlay_pipe_free(pipe);
+	vctrl->base_pipe = NULL;
 
 	undx =  vctrl->update_ndx;
 	vp = &vctrl->vlist[undx];
@@ -1105,8 +1093,7 @@
 		 * pipe's iommu will be freed at next overlay play
 		 * and iommu_drop statistic will be increased by one
 		 */
-		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
-		mdp4_dsi_cmd_pipe_clean(vp);
+		vp->update_cnt = 0;     /* empty queue */
 	}
 
 	pr_debug("%s-:\n", __func__);
@@ -1138,16 +1125,35 @@
 	}
 }
 
-static int mdp4_dsi_cmd_clk_check(struct vsycn_ctrl *vctrl)
+void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
 {
-	int clk_set_on = 0;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 	unsigned long flags;
+	int clk_set_on = 0;
+
+	mutex_lock(&mfd->dma->ov_mutex);
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (!mfd->panel_power_on) {
+		mutex_unlock(&mfd->dma->ov_mutex);
+		return;
+	}
+
+	pipe = vctrl->base_pipe;
+	if (pipe == NULL) {
+		pr_err("%s: NO base pipe\n", __func__);
+		mutex_unlock(&mfd->dma->ov_mutex);
+		return;
+	}
 
 	mutex_lock(&vctrl->update_lock);
 	if (atomic_read(&vctrl->suspend)) {
 		mutex_unlock(&vctrl->update_lock);
+		mutex_unlock(&mfd->dma->ov_mutex);
 		pr_err("%s: suspended, no more pan display\n", __func__);
-		return -EPERM;
+		return;
 	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
@@ -1169,35 +1175,6 @@
 
 	mutex_unlock(&vctrl->update_lock);
 
-	return 0;
-}
-
-void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
-{
-	int cndx = 0;
-	struct vsycn_ctrl *vctrl;
-	struct mdp4_overlay_pipe *pipe;
-
-	mutex_lock(&mfd->dma->ov_mutex);
-	vctrl = &vsync_ctrl_db[cndx];
-
-	if (!mfd->panel_power_on) {
-		mutex_unlock(&mfd->dma->ov_mutex);
-		return;
-	}
-
-	pipe = vctrl->base_pipe;
-	if (pipe == NULL) {
-		pr_err("%s: NO base pipe\n", __func__);
-		mutex_unlock(&mfd->dma->ov_mutex);
-		return;
-	}
-
-	if (mdp4_dsi_cmd_clk_check(vctrl) < 0) {
-		mutex_unlock(&mfd->dma->ov_mutex);
-		return;
-	}
-
 	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
 		mdp4_mipi_vsync_enable(mfd, pipe, 0);
 		mdp4_overlay_setup_pipe_addr(mfd, pipe);
@@ -1208,5 +1185,4 @@
 	mdp4_dsi_cmd_pipe_commit(cndx, 0);
 	mdp4_overlay_mdp_perf_upd(mfd, 0);
 	mutex_unlock(&mfd->dma->ov_mutex);
-
 }
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index cff788b..72a53fc 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,7 +56,8 @@
 	int wait_vsync_cnt;
 	int blt_change;
 	int blt_free;
-	int blt_ctrl;
+	u32 blt_ctrl;
+	u32 blt_mode;
 	int sysfs_created;
 	struct mutex update_lock;
 	struct completion ov_comp;
@@ -144,21 +145,6 @@
 	mdp4_stat.overlay_play[pipe->mixer_num]++;
 }
 
-static void mdp4_dsi_video_pipe_clean(struct vsync_update *vp)
-{
-	struct mdp4_overlay_pipe *pipe;
-	int i;
-
-	pipe = vp->plist;
-	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
-		if (pipe->pipe_used) {
-			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
-			pipe->pipe_used = 0; /* clear */
-		}
-	}
-	vp->update_cnt = 0;     /* empty queue */
-}
-
 static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe);
 static void mdp4_dsi_video_wait4dmap(int cndx);
 static void mdp4_dsi_video_wait4ov(int cndx);
@@ -185,11 +171,10 @@
 
 	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
 
-	/*
-	 * allow stage_commit without pipes queued
-	 * (vp->update_cnt == 0) to unstage pipes after
-	 * overlay_unset
-	 */
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return cnt;
+	}
 
 	vctrl->update_ndx++;
 	vctrl->update_ndx &= 0x01;
@@ -213,18 +198,6 @@
 	}
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
 
-	if (vctrl->blt_change) {
-		pipe = vctrl->base_pipe;
-		spin_lock_irqsave(&vctrl->spin_lock, flags);
-		INIT_COMPLETION(vctrl->dmap_comp);
-		INIT_COMPLETION(vctrl->ov_comp);
-		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
-		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
-		mdp4_dsi_video_wait4dmap(0);
-		if (pipe->ov_blt_addr)
-			mdp4_dsi_video_wait4ov(0);
-	}
-
 	pipe = vp->plist;
 
 	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
@@ -272,39 +245,14 @@
 
 	if (wait) {
 		if (pipe->ov_blt_addr)
-			mdp4_dsi_video_wait4ov(0);
+			mdp4_dsi_video_wait4ov(cndx);
 		else
-			mdp4_dsi_video_wait4dmap(0);
+			mdp4_dsi_video_wait4dmap(cndx);
 	}
 
 	return cnt;
 }
 
-static void mdp4_video_vsync_irq_ctrl(int cndx, int enable)
-{
-	struct vsycn_ctrl *vctrl;
-	static int vsync_irq_cnt;
-
-	vctrl = &vsync_ctrl_db[cndx];
-
-	mutex_lock(&vctrl->update_lock);
-	if (enable) {
-		if (vsync_irq_cnt == 0)
-			vsync_irq_enable(INTR_PRIMARY_VSYNC,
-						MDP_PRIM_VSYNC_TERM);
-		vsync_irq_cnt++;
-	} else {
-		if (vsync_irq_cnt) {
-			vsync_irq_cnt--;
-			if (vsync_irq_cnt == 0)
-				vsync_irq_disable(INTR_PRIMARY_VSYNC,
-						MDP_PRIM_VSYNC_TERM);
-		}
-	}
-	pr_debug("%s: enable=%d cnt=%d\n", __func__, enable, vsync_irq_cnt);
-	mutex_unlock(&vctrl->update_lock);
-}
-
 void mdp4_dsi_video_vsync_ctrl(struct fb_info *info, int enable)
 {
 	struct vsycn_ctrl *vctrl;
@@ -319,13 +267,16 @@
 
 	vctrl->vsync_irq_enabled = enable;
 
-	mdp4_video_vsync_irq_ctrl(cndx, enable);
+	if (enable)
+		vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+	else
+		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
 
 	if (vctrl->vsync_irq_enabled &&  atomic_read(&vctrl->suspend) == 0)
 		atomic_set(&vctrl->vsync_resume, 1);
 }
 
-void mdp4_dsi_video_wait4vsync(int cndx)
+void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime)
 {
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
@@ -339,10 +290,13 @@
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
 
-	if (atomic_read(&vctrl->suspend) > 0)
+	if (atomic_read(&vctrl->suspend) > 0) {
+		*vtime = -1;
 		return;
+	}
 
-	mdp4_video_vsync_irq_ctrl(cndx, 1);
+	/* start timing generator & mmu if they are not started yet */
+	mdp4_overlay_dsi_video_start();
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 	if (vctrl->wait_vsync_cnt == 0)
@@ -352,8 +306,9 @@
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
 
 	wait_for_completion(&vctrl->vsync_comp);
-	mdp4_video_vsync_irq_ctrl(cndx, 0);
 	mdp4_stat.wait4vsync0++;
+
+	*vtime = ktime_to_ns(vctrl->vsync_time);
 }
 
 static void mdp4_dsi_video_wait4dmap(int cndx)
@@ -435,7 +390,10 @@
 		msecs_to_jiffies(VSYNC_PERIOD * 4));
 	if (ret <= 0) {
 		vctrl->wait_vsync_cnt = 0;
-		vctrl->vsync_time = ktime_get();
+		vsync_tick = ktime_to_ns(ktime_get());
+		ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
+		buf[strlen(buf) + 1] = '\0';
+		return ret;
 	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
@@ -486,33 +444,6 @@
 	vctrl->base_pipe = pipe;
 }
 
-/* timing generator off */
-static void mdp4_dsi_video_tg_off(struct vsycn_ctrl *vctrl)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&vctrl->spin_lock, flags);
-	INIT_COMPLETION(vctrl->vsync_comp);
-	vctrl->wait_vsync_cnt++;
-	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0); /* turn off timing generator */
-	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
-
-	mdp4_dsi_video_wait4vsync(0);
-}
-
-int mdp4_dsi_video_splash_done(void)
-{
-	struct vsycn_ctrl *vctrl;
-	int cndx = 0;
-
-	vctrl = &vsync_ctrl_db[cndx];
-
-	mdp4_dsi_video_tg_off(vctrl);
-	mipi_dsi_controller_cfg(0);
-
-	return 0;
-}
-
 int mdp4_dsi_video_on(struct platform_device *pdev)
 {
 	int dsi_width;
@@ -559,7 +490,6 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
-	pinfo = &mfd->panel_info;
 
 	if (!mfd)
 		return -ENODEV;
@@ -569,7 +499,9 @@
 
 	vctrl->mfd = mfd;
 	vctrl->dev = mfd->fbi->dev;
+	pinfo = &mfd->panel_info;
 	vctrl->blt_ctrl = pinfo->lcd.blt_ctrl;
+	vctrl->blt_mode = pinfo->lcd.blt_mode;
 
 	/* mdp clock on */
 	mdp_clk_ctrl(1);
@@ -608,7 +540,16 @@
 		pipe = vctrl->base_pipe;
 	}
 
-	atomic_set(&vctrl->suspend, 0);
+	if (!(mfd->cont_splash_done)) {
+		mfd->cont_splash_done = 1;
+		mdp4_dsi_video_wait4dmap_done(0);
+		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
+		dsi_video_enabled = 0;
+		mipi_dsi_controller_cfg(0);
+		/* Clks are enabled in probe.
+		   Disabling clocks now */
+		mdp_clk_ctrl(0);
+	}
 
 	pipe->src_height = fbi->var.yres;
 	pipe->src_width = fbi->var.xres;
@@ -632,6 +573,8 @@
 	mdp4_overlay_mdp_pipe_req(pipe, mfd);
 	mdp4_calc_blt_mdp_bw(mfd, pipe);
 
+	atomic_set(&vctrl->suspend, 0);
+
 	mdp4_overlay_dmap_xy(pipe);	/* dma_p */
 	mdp4_overlay_dmap_cfg(mfd, 1);
 	mdp4_overlay_rgb_setup(pipe);
@@ -719,7 +662,6 @@
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
 	mdp_histogram_ctrl_all(TRUE);
-	mdp4_overlay_dsi_video_start();
 
 	return ret;
 }
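/*
 * Editor's note on the cont_splash_done block above: on a
 * continuous-splash boot the bootloader leaves the DSI video timing
 * generator and MDP clocks running, so the first on() waits for the
 * in-flight DMA_P, stops the timing generator and the DSI controller,
 * and releases the clock reference the probe path took.
 */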
@@ -739,10 +681,11 @@
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
 
-	mdp4_dsi_video_wait4vsync(cndx);
-
+	atomic_set(&vctrl->suspend, 1);
 	atomic_set(&vctrl->vsync_resume, 0);
 
+	msleep(20);	/* >= 17 ms */
+
 	complete_all(&vctrl->vsync_comp);
 
 	if (pipe->ov_blt_addr) {
@@ -756,8 +699,15 @@
 
 	mdp_histogram_ctrl_all(FALSE);
 
+	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
+
 	dsi_video_enabled = 0;
 
+	if (vctrl->vsync_irq_enabled) {
+		vctrl->vsync_irq_enabled = 0;
+		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+	}
+
 	undx =  vctrl->update_ndx;
 	vp = &vctrl->vlist[undx];
 	if (vp->update_cnt) {
@@ -765,8 +715,7 @@
 		 * pipe's iommu will be freed at next overlay play
 		 * and iommu_drop statistic will be increased by one
 		 */
-		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
-		mdp4_dsi_video_pipe_clean(vp);
+		vp->update_cnt = 0;     /* empty queue */
 	}
 
 	if (pipe) {
@@ -790,15 +739,6 @@
 		}
 	}
 
-	mdp4_dsi_video_tg_off(vctrl);
-
-	atomic_set(&vctrl->suspend, 1);
-
-	if (vctrl->vsync_irq_enabled) {
-		vctrl->vsync_irq_enabled = 0;
-		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
-	}
-
 	/* mdp clock off */
 	mdp_clk_ctrl(0);
 	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@@ -987,32 +927,16 @@
 	}
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
+	if (pipe == NULL)
+		return;
 
 	spin_lock(&vctrl->spin_lock);
 	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
-
-	if (pipe == NULL) {
-		spin_unlock(&vctrl->spin_lock);
-		return;
-	}
-
 	if (vctrl->blt_change) {
 		mdp4_overlayproc_cfg(pipe);
 		mdp4_overlay_dmap_xy(pipe);
-		if (pipe->ov_blt_addr) {
-			mdp4_dsi_video_blt_ov_update(pipe);
-			pipe->ov_cnt++;
-			/* Prefill one frame */
-			vsync_irq_enable(INTR_OVERLAY0_DONE,
-						MDP_OVERLAY0_TERM);
-			/* kickoff overlay0 engine */
-			mdp4_stat.kickoff_ov0++;
-			vctrl->ov_koff++;	/* make up for prefill */
-			outpdw(MDP_BASE + 0x0004, 0);
-		}
 		vctrl->blt_change = 0;
 	}
-
 	complete_all(&vctrl->dmap_comp);
 	mdp4_overlay_dma_commit(cndx);
 	spin_unlock(&vctrl->spin_lock);
@@ -1028,17 +952,13 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
+	if (pipe == NULL)
+		return;
 
 	spin_lock(&vctrl->spin_lock);
 	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
 	vctrl->ov_done++;
 	complete_all(&vctrl->ov_comp);
-
-	if (pipe == NULL) {
-		spin_unlock(&vctrl->spin_lock);
-		return;
-	}
-
 	if (pipe->ov_blt_addr == 0) {
 		spin_unlock(&vctrl->spin_lock);
 		return;
@@ -1059,19 +979,44 @@
 	int cndx = 0;
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
+	long long vtime;
+	u32 mode, ctrl;
 
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
 
-	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
+	mode = (dbg_force_ov0_blt & 0x0f) ?
+		(dbg_force_ov0_blt & 0x0f) : vctrl->blt_mode;
+	ctrl = (dbg_force_ov0_blt >> 4) ?
+		(dbg_force_ov0_blt >> 4) : vctrl->blt_ctrl;
 
-	if (mfd->ov0_wb_buf->write_addr == 0) {
-		pr_info("%s: no blt_base assigned\n", __func__);
+	pr_debug("%s: mode=%d, enable=%d ov_blt_addr=%x\n",
+		 __func__, mode, enable, (int)pipe->ov_blt_addr);
+
+	if ((mode == MDP4_OVERLAY_MODE_BLT_ALWAYS_OFF) &&
+	    !pipe->ov_blt_addr)
 		return;
+	else if ((mode == MDP4_OVERLAY_MODE_BLT_ALWAYS_ON) &&
+	    pipe->ov_blt_addr)
+		return;
+	else if (enable && pipe->ov_blt_addr)
+		return;
+	else if (!enable && !pipe->ov_blt_addr)
+		return;
+
+	if (pipe->ov_blt_addr == 0) {
+		mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
+		if (mfd->ov0_wb_buf->write_addr == 0) {
+			pr_warning("%s: no blt_base assigned\n", __func__);
+			return;
+		}
 	}
 
+	pr_debug("%s: mode=%d, enable=%d ov_blt_addr=%x\n",
+		 __func__, mode, enable, (int)pipe->ov_blt_addr);
+
 	spin_lock_irqsave(&vctrl->spin_lock, flag);
-	if (enable && pipe->ov_blt_addr == 0) {
+	if (pipe->ov_blt_addr == 0) {
 		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
 		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
 		pipe->ov_cnt = 0;
@@ -1080,48 +1025,57 @@
 		vctrl->ov_done = 0;
 		vctrl->blt_free = 0;
 		mdp4_stat.blt_dsi_video++;
-		vctrl->blt_change++;
-	} else if (enable == 0 && pipe->ov_blt_addr) {
+	} else {
 		pipe->ov_blt_addr = 0;
 		pipe->dma_blt_addr =  0;
 		vctrl->blt_free = 4;	/* 4 commits to free wb buf */
-		vctrl->blt_change++;
-	}
-
-	pr_info("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
-		vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
-
-	if (!vctrl->blt_change) {
-		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
-		return;
 	}
 	spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 
-	if (vctrl->blt_ctrl == OVERLAY_BLT_SWITCH_TG_OFF) {
-		int tg_enabled;
-
-		pr_info("%s: blt enabled by switching TG off\n", __func__);
-		vctrl->blt_change = 0;
-		tg_enabled = inpdw(MDP_BASE + DSI_VIDEO_BASE) & 0x01;
-		if (tg_enabled) {
-			mdp4_dsi_video_wait4vsync(cndx);
+	if (ctrl == MDP4_OVERLAY_BLT_SWITCH_TG_ON) {
+		spin_lock_irqsave(&vctrl->spin_lock, flag);
+		if (!dsi_video_enabled) {
+			pr_debug("%s: blt switched not in ISR dsi_video_enabled=%d\n",
+				__func__, dsi_video_enabled);
+			mdp4_overlayproc_cfg(pipe);
+			mdp4_overlay_dmap_xy(pipe);
+		} else {
+			pr_debug("%s: blt switched in ISR dsi_video_enabled=%d\n",
+				__func__, dsi_video_enabled);
+			vctrl->blt_change++;
+		}
+		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
+		if (dsi_video_enabled)
+			mdp4_dsi_video_wait4dmap_done(0);
+	} else if (ctrl == MDP4_OVERLAY_BLT_SWITCH_TG_OFF) {
+		pr_debug("%s: blt switched by turning TG off\n", __func__);
+		if (dsi_video_enabled) {
+			mdp4_dsi_video_wait4vsync(0, &vtime);
 			MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
 			mdp4_dsi_video_wait4dmap_done(0);
 		}
 		mdp4_overlayproc_cfg(pipe);
 		mdp4_overlay_dmap_xy(pipe);
-		if (tg_enabled) {
+		if (dsi_video_enabled) {
 			/*
-			 * need wait for more than 1 ms to
-			 * make sure lanes' fifo is empty and
-			 * lanes in stop state befroe reset
-			 * controller
-			 */
+			 * need to wait for more than 1 ms to
+			 * make sure the dsi lanes' fifo is empty and
+			 * the lanes are in stop state before resetting
+			 * the controller
+			 */
 			usleep(2000);
 			mipi_dsi_sw_reset();
 			MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
 		}
-	}
+	} else if (ctrl == MDP4_OVERLAY_BLT_SWITCH_POLL) {
+		pr_debug("%s: blt switched by polling mdp status\n", __func__);
+		if (dsi_video_enabled)
+			while (inpdw(MDP_BASE + 0x0018) & 0x05)
+				cpu_relax();
+		mdp4_overlayproc_cfg(pipe);
+		mdp4_overlay_dmap_xy(pipe);
+	} else
+		pr_err("%s: ctrl=%d is not supported\n", __func__, ctrl);
 }
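/*
 * Editor's note: the three blt switch strategies handled above, in one
 * place (names as introduced by this diff):
 *
 *   MDP4_OVERLAY_BLT_SWITCH_TG_ON:  reprogram immediately if the
 *	timing generator is off, otherwise defer to the DMA_P done ISR
 *	(blt_change++) and wait for it;
 *   MDP4_OVERLAY_BLT_SWITCH_TG_OFF: stop the timing generator, wait
 *	for DMA_P, reprogram, reset the DSI controller, restart;
 *   MDP4_OVERLAY_BLT_SWITCH_POLL:   spin until the MDP status register
 *	(0x0018, 0x05 mask) shows the relevant engines idle, then
 *	reprogram.
 */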
 
 void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
@@ -1178,7 +1132,7 @@
 	mdp4_overlay_mdp_perf_upd(mfd, 1);
 
 	cnt = mdp4_dsi_video_pipe_commit(cndx, 0);
-	if (cnt >= 0) {
+	if (cnt) {
 		if (pipe->ov_blt_addr)
 			mdp4_dsi_video_wait4ov(cndx);
 		else
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index 0aec147..8539c00 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -33,7 +33,6 @@
 #include "mdp4.h"
 
 #define DTV_BASE	0xD0000
-
 static int dtv_enabled;
 
 /*#define DEBUG*/
@@ -55,14 +54,6 @@
 static int first_pixel_start_x;
 static int first_pixel_start_y;
 
-void mdp4_dtv_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
-{
-#ifdef BYPASS4
-	if (hdmi_prim_display)
-		dtv_pipe = pipe;
-#endif
-}
-
 #define MAX_CONTROLLER	1
 
 static struct vsycn_ctrl {
@@ -75,6 +66,9 @@
 	int dmae_wait_cnt;
 	int wait_vsync_cnt;
 	int blt_change;
+	int blt_ctrl;
+	int blt_mode;
+	int blt_free;
 	int sysfs_created;
 	struct mutex update_lock;
 	struct completion ov_comp;
@@ -86,6 +80,10 @@
 	struct vsync_update vlist[2];
 	int vsync_irq_enabled;
 	ktime_t vsync_time;
+	uint32 *avtimer;
+	int vg1fd;
+	int vg2fd;
+	unsigned long long avtimer_tick;
 } vsync_ctrl_db[MAX_CONTROLLER];
 
 static void vsync_irq_enable(int intr, int term)
@@ -163,22 +161,8 @@
 	mdp4_stat.overlay_play[pipe->mixer_num]++;
 }
 
-static void mdp4_dtv_pipe_clean(struct vsync_update *vp)
-{
-	struct mdp4_overlay_pipe *pipe;
-	int i;
-
-	pipe = vp->plist;
-	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
-		if (pipe->pipe_used) {
-			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
-			pipe->pipe_used = 0; /* clear */
-		}
-	}
-	vp->update_cnt = 0;     /* empty queue */
-}
-
 static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+static void mdp4_dtv_wait4ov(int cndx);
 static void mdp4_dtv_wait4dmae(int cndx);
 
 int mdp4_dtv_pipe_commit(int cndx, int wait)
@@ -203,15 +187,21 @@
 
 	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
 
-	/*
-	 * allow stage_commit without pipes queued
-	 * (vp->update_cnt == 0) to unstage pipes after
-	 * overlay_unset
-	 */
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return 0;
+	}
 
 	vctrl->update_ndx++;
 	vctrl->update_ndx &= 0x01;
 	vp->update_cnt = 0;	/* reset */
+
+	if (vctrl->blt_free) {
+		vctrl->blt_free--;
+		if (vctrl->blt_free == 0)
+			mdp4_free_writeback_buf(vctrl->mfd, mixer);
+	}
+
 	mutex_unlock(&vctrl->update_lock);
 
 	pipe = vp->plist;
@@ -241,6 +231,7 @@
 	if (pipe->ov_blt_addr) {
 		mdp4_dtv_blt_ov_update(pipe);
 		pipe->blt_ov_done++;
+		INIT_COMPLETION(vctrl->ov_comp);
 		vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
 		mb();
 		pipe->blt_ov_koff++;
@@ -255,35 +246,13 @@
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
 	mdp4_stat.overlay_commit[pipe->mixer_num]++;
 
-	if (wait)
-		mdp4_dtv_wait4dmae(0);
-
-	return cnt;
-}
-
-static void mdp4_dtv_vsync_irq_ctrl(int cndx, int enable)
-{
-	struct vsycn_ctrl *vctrl;
-	static int vsync_irq_cnt;
-
-	vctrl = &vsync_ctrl_db[cndx];
-
-	mutex_lock(&vctrl->update_lock);
-	if (enable) {
-		if (vsync_irq_cnt == 0)
-			vsync_irq_enable(INTR_EXTERNAL_VSYNC,
-						MDP_EXTER_VSYNC_TERM);
-		vsync_irq_cnt++;
-	} else {
-		if (vsync_irq_cnt) {
-			vsync_irq_cnt--;
-			if (vsync_irq_cnt == 0)
-				vsync_irq_disable(INTR_EXTERNAL_VSYNC,
-						MDP_EXTER_VSYNC_TERM);
-		}
+	if (wait) {
+		if (pipe->ov_blt_addr)
+			mdp4_dtv_wait4ov(cndx);
+		else
+			mdp4_dtv_wait4dmae(cndx);
 	}
-	pr_debug("%s: enable=%d cnt=%d\n", __func__, enable, vsync_irq_cnt);
-	mutex_unlock(&vctrl->update_lock);
+	return cnt;
 }
 
 void mdp4_dtv_vsync_ctrl(struct fb_info *info, int enable)
@@ -303,13 +272,16 @@
 
 	vctrl->vsync_irq_enabled = enable;
 
-	mdp4_dtv_vsync_irq_ctrl(cndx, enable);
+	if (enable)
+		vsync_irq_enable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
+	else
+		vsync_irq_disable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
 
 	if (vctrl->vsync_irq_enabled &&  atomic_read(&vctrl->suspend) == 0)
 		atomic_set(&vctrl->vsync_resume, 1);
 }
 
-void mdp4_dtv_wait4vsync(int cndx)
+void mdp4_dtv_wait4vsync(int cndx, long long *vtime)
 {
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
@@ -326,8 +298,6 @@
 	if (atomic_read(&vctrl->suspend) > 0)
 		return;
 
-	mdp4_dtv_vsync_irq_ctrl(cndx, 1);
-
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 
 	if (vctrl->wait_vsync_cnt == 0)
@@ -336,8 +306,26 @@
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
 
 	wait_for_completion(&vctrl->vsync_comp);
-	mdp4_dtv_vsync_irq_ctrl(cndx, 0);
 	mdp4_stat.wait4vsync1++;
+
+	*vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+static void mdp4_dtv_wait4ov(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->ov_comp);
 }
 
 static void mdp4_dtv_wait4dmae(int cndx)
@@ -364,10 +352,14 @@
 	struct vsycn_ctrl *vctrl;
 	ssize_t ret = 0;
 	unsigned long flags;
-	u64 vsync_tick;
+	char ch = '\0';
+	int vg1fd = -1, vg2fd = -1;
+	unsigned long long avtimer_tick = 0;
+	u64 vsync_tick = 0;
 
 	cndx = 0;
 	vctrl = &vsync_ctrl_db[0];
+	memset(buf, 0, 64);
 
 	if (atomic_read(&vctrl->suspend) > 0 ||
 		!external_common_state->hpd_state ||
@@ -384,17 +376,54 @@
 		msecs_to_jiffies(VSYNC_PERIOD * 4));
 	if (ret <= 0) {
 		vctrl->wait_vsync_cnt = 0;
-		vctrl->vsync_time = ktime_get();
+		vsync_tick = ktime_to_ns(ktime_get());
+		ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
+		buf[strlen(buf) + 1] = '\0';
+		return ret;
 	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	vg1fd = vctrl->vg1fd;
+	vg2fd = vctrl->vg2fd;
+	avtimer_tick = vctrl->avtimer_tick;
 	vsync_tick = ktime_to_ns(vctrl->vsync_time);
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
 
-	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
-	buf[strlen(buf) + 1] = '\0';
+	ret = snprintf(buf, PAGE_SIZE,
+			"VSYNC=%llu%c"
+			"AVSYNCTP=%llu%c"
+			"VG1MEMID=%d%c"
+			"VG2MEMID=%d",
+			vsync_tick,
+			ch, avtimer_tick,
+			ch, vg1fd,
+			ch, vg2fd);
+
 	return ret;
 }
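/*
 * Editor's note: the %c separators above are emitted with ch = '\0',
 * so the event buffer is a NUL-delimited key=value list:
 *
 *	VSYNC=<ns>\0AVSYNCTP=<tick>\0VG1MEMID=<fd>\0VG2MEMID=<fd>
 *
 * A hypothetical reader (buffer layout from this hunk, everything
 * else an assumption):
 */
#include <string.h>

static const char *find_field(const char *buf, size_t len,
			      const char *key)
{
	size_t klen = strlen(key);
	const char *p = buf, *end = buf + len;

	while (p < end && *p) {
		if (!strncmp(p, key, klen))
			return p + klen;	/* start of the value */
		p += strlen(p) + 1;		/* skip past the NUL */
	}
	return NULL;
}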
+
+static void mdp4_dtv_wait4dmae_done(int cndx)
+{
+	unsigned long flags;
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	INIT_COMPLETION(vctrl->dmae_comp);
+	vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+	mdp4_dtv_wait4dmae(cndx);
+}
+
 void mdp4_dtv_vsync_init(int cndx)
 {
 	struct vsycn_ctrl *vctrl;
@@ -421,6 +450,24 @@
 	spin_lock_init(&vctrl->spin_lock);
 }
 
+void mdp4_dtv_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (!hdmi_prim_display) {
+		pr_err("%s: failed, hdmi is not primary\n", __func__);
+		return;
+	}
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->base_pipe = pipe;
+}
+
 static int mdp4_dtv_start(struct msm_fb_data_type *mfd)
 {
 	int dtv_width;
@@ -453,7 +500,6 @@
 	int data_en_polarity;
 	int hsync_start_x;
 	int hsync_end_x;
-	uint32_t userformat;
 	struct fb_info *fbi;
 	struct fb_var_screeninfo *var;
 	struct vsycn_ctrl *vctrl;
@@ -492,10 +538,9 @@
 	dtv_border_clr = mfd->panel_info.lcdc.border_clr;
 	dtv_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
 	dtv_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
-	userformat = var->reserved[3] >> 16;
 
 	pr_info("%s: <ID=%d %dx%d (%d,%d,%d), (%d,%d,%d) %dMHz>\n", __func__,
-		userformat, var->xres, var->yres,
+		var->reserved[3], var->xres, var->yres,
 		var->right_margin, var->hsync_len, var->left_margin,
 		var->lower_margin, var->vsync_len, var->upper_margin,
 		var->pixclock/1000/1000);
@@ -566,6 +611,22 @@
 
 	/* enable DTV block */
 	MDP_OUTP(MDP_BASE + DTV_BASE, 1);
+	dtv_enabled = 1;
+
+	return 0;
+}
+
+static int mdp4_dtv_stop(struct msm_fb_data_type *mfd)
+{
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->base_pipe == NULL)
+		return -EINVAL;
+
+	MDP_OUTP(MDP_BASE + DTV_BASE, 0);
+	dtv_enabled = 0;
 
 	return 0;
 }
@@ -576,6 +637,7 @@
 	int ret = 0;
 	int cndx = 0;
 	struct vsycn_ctrl *vctrl;
+	struct msm_panel_info *pinfo;
 
 	vctrl = &vsync_ctrl_db[cndx];
 
@@ -587,7 +649,12 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
+	vctrl->mfd = mfd;
 	vctrl->dev = mfd->fbi->dev;
+	pinfo = &mfd->panel_info;
+
+	vctrl->blt_ctrl = pinfo->lcd.blt_ctrl;
+	vctrl->blt_mode = pinfo->lcd.blt_mode;
 
 	mdp_footswitch_ctrl(TRUE);
 	/* Mdp clock enable */
@@ -609,26 +676,17 @@
 		pr_warn("%s: panel_next_on failed", __func__);
 
 	atomic_set(&vctrl->suspend, 0);
+	if (mfd->avtimer_phy && (vctrl->avtimer == NULL)) {
+		vctrl->avtimer = (uint32 *)ioremap(mfd->avtimer_phy, 8);
+		if (vctrl->avtimer == NULL)
+			pr_err(" avtimer ioremap fail\n");
+	}
 
 	pr_info("%s:\n", __func__);
 
 	return ret;
 }
 
-/* timing generator off */
-static void mdp4_dtv_tg_off(struct vsycn_ctrl *vctrl)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&vctrl->spin_lock, flags);
-	INIT_COMPLETION(vctrl->vsync_comp);
-	vctrl->wait_vsync_cnt++;
-	MDP_OUTP(MDP_BASE + DTV_BASE, 0); /* turn off timing generator */
-	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
-
-	mdp4_dtv_wait4vsync(0);
-}
-
 int mdp4_dtv_off(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
@@ -643,14 +701,19 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 
-	mdp4_dtv_wait4vsync(cndx);
-
+	atomic_set(&vctrl->suspend, 1);
 	atomic_set(&vctrl->vsync_resume, 0);
 
+	/* wait for one vsync period to make sure the
+	 * previous stage_commit has been kicked in
+	 */
+	msleep(20);     /* >= 17 ms */
+
 	complete_all(&vctrl->vsync_comp);
 
 	pipe = vctrl->base_pipe;
 	if (pipe != NULL) {
+		mdp4_dtv_stop(mfd);
 		/* sanity check, free pipes besides base layer */
 		mdp4_overlay_unset_mixer(pipe->mixer_num);
 		if (hdmi_prim_display && mfd->ref_cnt == 0) {
@@ -658,6 +721,10 @@
 			if (pipe->pipe_type == OVERLAY_TYPE_BF)
 				mdp4_overlay_borderfill_stage_down(pipe);
 
+			/* base pipe may change after borderfill_stage_down */
+			pipe = vctrl->base_pipe;
+			mdp4_mixer_stage_down(pipe, 1);
+			mdp4_overlay_pipe_free(pipe);
 			/* pipe == rgb2 */
 			vctrl->base_pipe = NULL;
 		} else {
@@ -667,12 +734,13 @@
 		}
 	}
 
-	mdp4_dtv_tg_off(vctrl);
-
-	atomic_set(&vctrl->suspend, 1);
-
 	mdp4_overlay_panel_mode_unset(MDP4_MIXER1, MDP4_PANEL_DTV);
 
+	if (vctrl->vsync_irq_enabled) {
+		vctrl->vsync_irq_enabled = 0;
+		vsync_irq_disable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
+	}
+
 	undx =  vctrl->update_ndx;
 	vp = &vctrl->vlist[undx];
 	if (vp->update_cnt) {
@@ -680,8 +748,12 @@
 		 * pipe's iommu will be freed at next overlay play
 		 * and iommu_drop statistic will be increased by one
 		 */
-		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
-		mdp4_dtv_pipe_clean(vp);
+		vp->update_cnt = 0;     /* empty queue */
+	}
+
+	if (vctrl->avtimer != NULL) {
+		iounmap(vctrl->avtimer);
+		vctrl->avtimer = NULL;
 	}
 
 	ret = panel_next_off(pdev);
@@ -741,11 +813,6 @@
 	MDP_OUTP(MDP_BASE + 0xb0008, addr);
 }
 
-void mdp4_overlay_dtv_set_perf(struct msm_fb_data_type *mfd)
-{
-
-}
-
 static void mdp4_overlay_dtv_alloc_pipe(struct msm_fb_data_type *mfd,
 		int32 ptype, struct vsycn_ctrl *vctrl)
 {
@@ -863,14 +930,20 @@
 	struct vsycn_ctrl *vctrl;
 
 	vctrl = &vsync_ctrl_db[cndx];
-	if (vctrl->base_pipe != NULL)
+	if (vctrl->base_pipe == NULL)
 		return 0;
 
 	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE &&
 			pipe->pipe_type == OVERLAY_TYPE_RGB) {
-		mdp4_dtv_tg_off(vctrl);
+		result = mdp4_dtv_stop(mfd);
 		vctrl->base_pipe = NULL;
 	}
+
+	if (pipe->pipe_num == OVERLAY_PIPE_VG1)
+		vctrl->vg1fd = -1;
+	else if (pipe->pipe_num == OVERLAY_PIPE_VG2)
+		vctrl->vg2fd = -1;
+
 	return result;
 }
 
@@ -880,6 +953,7 @@
 {
 	int cndx;
 	struct vsycn_ctrl *vctrl;
+	uint32 *tp, LSW;
 
 	cndx = 0;
 	vctrl = &vsync_ctrl_db[cndx];
@@ -887,6 +961,15 @@
 
 	spin_lock(&vctrl->spin_lock);
 	vctrl->vsync_time = ktime_get();
+	vctrl->avtimer_tick = 0;
+
+	if (vctrl->avtimer && ((vctrl->vg1fd > 0) || (vctrl->vg2fd > 0))) {
+		tp = vctrl->avtimer;
+		LSW = inpdw(tp);
+		tp++;
+		vctrl->avtimer_tick = (unsigned long long) inpdw(tp);
+		vctrl->avtimer_tick = ((vctrl->avtimer_tick << 32) | LSW);
+	}
 
 	if (vctrl->wait_vsync_cnt) {
 		complete_all(&vctrl->vsync_comp);
@@ -912,22 +995,14 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
+	if (pipe == NULL)
+		return;
 	pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
 
 	spin_lock(&vctrl->spin_lock);
 	if (vctrl->blt_change) {
-		if (pipe->ov_blt_addr) {
-			mdp4_overlayproc_cfg(pipe);
-			mdp4_overlay_dmae_xy(pipe);
-			mdp4_dtv_blt_ov_update(pipe);
-			pipe->blt_ov_done++;
-
-			/* Prefill one frame */
-			vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
-			/* kickoff overlay1 engine */
-			mdp4_stat.kickoff_ov1++;
-			outpdw(MDP_BASE + 0x0008, 0);
-		}
+		mdp4_overlayproc_cfg(pipe);
+		mdp4_overlay_dmae_xy(pipe);
 		vctrl->blt_change = 0;
 	}
 
@@ -952,6 +1027,8 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
+	if (pipe == NULL)
+		return;
 
 	spin_lock(&vctrl->spin_lock);
 	if (pipe->ov_blt_addr == 0) {
@@ -960,104 +1037,154 @@
 	}
 
 	mdp4_dtv_blt_dmae_update(pipe);
+	complete_all(&vctrl->ov_comp);
 	pipe->blt_dmap_done++;
 	vsync_irq_disable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
 	spin_unlock(&vctrl->spin_lock);
 }
 
-void mdp4_dtv_set_black_screen(void)
+void mdp4_dtv_set_black_screen(void)
 {
 	char *rgb_base;
 	/*Black color*/
 	uint32 color = 0x00000000;
 	uint32 temp_src_format;
-	int cndx = 0;
+	int commit = 1, cndx = 0;
+	int pipe_num = OVERLAY_PIPE_RGB1;
 	struct vsycn_ctrl *vctrl;
 
 	vctrl = &vsync_ctrl_db[cndx];
-	if (vctrl->base_pipe == NULL || !hdmi_prim_display) {
-		pr_err("dtv_pipe is not configured yet\n");
+	if (!hdmi_prim_display)
 		return;
-	}
-	rgb_base = MDP_BASE + MDP4_RGB_BASE;
-	rgb_base += (MDP4_RGB_OFF * vctrl->base_pipe->pipe_num);
 
-	/*
-	* RGB Constant Color
-	*/
+	if (vctrl->base_pipe == NULL)
+		commit = 0;
+	else
+		pipe_num = vctrl->base_pipe->pipe_num;
+
+	rgb_base = MDP_BASE;
+	rgb_base += (MDP4_RGB_OFF * (pipe_num + 2));
+
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	/* RGB Constant Color */
 	MDP_OUTP(rgb_base + 0x1008, color);
-	/*
-	* MDP_RGB_SRC_FORMAT
-	*/
+
+	/* MDP_RGB_SRC_FORMAT */
 	temp_src_format = inpdw(rgb_base + 0x0050);
 	MDP_OUTP(rgb_base + 0x0050, temp_src_format | BIT(22));
-	mdp4_overlay_reg_flush(vctrl->base_pipe, 1);
 
-	mdp4_mixer_stage_up(vctrl->base_pipe, 0);
-	mdp4_mixer_stage_commit(vctrl->base_pipe->mixer_num);
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	if (commit) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
+		mdp4_overlay_reg_flush(vctrl->base_pipe, 1);
+		mdp4_mixer_stage_up(vctrl->base_pipe, 0);
+		mdp4_mixer_stage_commit(vctrl->base_pipe->mixer_num);
+	} else {
+		/* MDP_OVERLAY_REG_FLUSH for pipe */
+		MDP_OUTP(MDP_BASE + 0x18000,
+			BIT(pipe_num + 2) | BIT(MDP4_MIXER1));
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	}
 }
 
 static void mdp4_dtv_do_blt(struct msm_fb_data_type *mfd, int enable)
 {
 	unsigned long flag;
-	int data;
 	int cndx = 0;
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
+	u32 mode, ctrl;
 
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
 
-	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER1);
+	mode = (dbg_force_ov1_blt & 0x0f) ?
+		(dbg_force_ov1_blt & 0x0f) : vctrl->blt_mode;
+	ctrl = (dbg_force_ov1_blt >> 4) ?
+		(dbg_force_ov1_blt >> 4) : vctrl->blt_ctrl;
 
-	if (!mfd->ov1_wb_buf->write_addr) {
-		pr_info("%s: ctrl=%d blt_base NOT assigned\n", __func__, cndx);
+	pr_debug("%s: mode=%d, ctrl = %d, enable=%d ov_blt_addr=%x\n",
+		 __func__, mode, ctrl, enable, (int)pipe->ov_blt_addr);
+
+	if ((mode == MDP4_OVERLAY_MODE_BLT_ALWAYS_OFF) &&
+	    !pipe->ov_blt_addr)
 		return;
+	else if ((mode == MDP4_OVERLAY_MODE_BLT_ALWAYS_ON) &&
+	    pipe->ov_blt_addr)
+		return;
+	else if (enable && pipe->ov_blt_addr)
+		return;
+	else if (!enable && !pipe->ov_blt_addr)
+		return;
+
+	if (pipe->ov_blt_addr == 0) {
+		mdp4_allocate_writeback_buf(vctrl->mfd, MDP4_MIXER1);
+		if (!vctrl->mfd->ov1_wb_buf->write_addr) {
+			pr_warning("%s: ctrl=%d blt_base NOT assigned\n",
+				__func__, cndx);
+			return;
+		}
 	}
 
+	pr_debug("%s: mode=%d, ctrl=%d, enable=%d ov_blt_addr=%x\n",
+		 __func__, mode, ctrl, enable, (int)pipe->ov_blt_addr);
+
 	spin_lock_irqsave(&vctrl->spin_lock, flag);
 	if (enable && pipe->ov_blt_addr == 0) {
-		pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
-		pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
+		pipe->ov_blt_addr = vctrl->mfd->ov1_wb_buf->write_addr;
+		pipe->dma_blt_addr = vctrl->mfd->ov1_wb_buf->read_addr;
 		pipe->blt_cnt = 0;
 		pipe->ov_cnt = 0;
 		pipe->blt_dmap_done = 0;
 		pipe->blt_ov_koff = 0;
 		pipe->blt_ov_done = 0;
 		mdp4_stat.blt_dtv++;
-		vctrl->blt_change++;
+		vctrl->blt_free = 0;
 	} else if (enable == 0 && pipe->ov_blt_addr) {
 		pipe->ov_blt_addr = 0;
 		pipe->dma_blt_addr = 0;
-		vctrl->blt_change++;
+		vctrl->blt_free = 4;
 	}
-
-	pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__,
-		enable, vctrl->blt_change, (int)pipe->ov_blt_addr);
-
-	if (!vctrl->blt_change) {
-		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
-		return;
-	}
-
-	atomic_set(&vctrl->suspend, 1);
 	spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 
-	data = inpdw(MDP_BASE + DTV_BASE);
-	data &= 0x01;
-	if (data)       /* timing generator enabled */
-		mdp4_dtv_wait4dmae(0);
+	if (ctrl == MDP4_OVERLAY_BLT_SWITCH_TG_ON) {
+		spin_lock_irqsave(&vctrl->spin_lock, flag);
+		if (!dtv_enabled) {
+			pr_debug("%s: blt switched not in isr dtv_enabled=%d\n",
+				 __func__, dtv_enabled);
+			mdp4_overlayproc_cfg(pipe);
+			mdp4_overlay_dmae_xy(pipe);
+		} else {
+			pr_debug("%s: blt switched in ISR dtv_enabled=%d\n",
+				__func__, dtv_enabled);
+			vctrl->blt_change++;
 
-	if (pipe->ov_blt_addr == 0) {
-		MDP_OUTP(MDP_BASE + DTV_BASE, 0);       /* stop dtv */
-		msleep(20);
+		}
+		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
+		if (dtv_enabled)
+			mdp4_dtv_wait4dmae_done(0);
+	} else if (ctrl == MDP4_OVERLAY_BLT_SWITCH_TG_OFF) {
+		pr_debug("%s: dtv blt switched by turning TG off\n",
+			 __func__);
+		if (dtv_enabled) {
+			mdp4_dtv_wait4dmae_done(0);
+			MDP_OUTP(MDP_BASE + DTV_BASE, 0);
+			msleep(20);
+		}
 		mdp4_overlayproc_cfg(pipe);
 		mdp4_overlay_dmae_xy(pipe);
-		MDP_OUTP(MDP_BASE + DTV_BASE, 1);       /* start dtv */
-	}
-
-	atomic_set(&vctrl->suspend, 0);
+		if (dtv_enabled)
+			MDP_OUTP(MDP_BASE + DTV_BASE, 1);
+	} else if (ctrl == MDP4_OVERLAY_BLT_SWITCH_POLL) {
+		pr_debug("%s: dtv blt change by polling status\n",
+			__func__);
+		while (inpdw(MDP_BASE + 0x0018) & 0x12)
+			cpu_relax();
+		mdp4_overlayproc_cfg(pipe);
+		mdp4_overlay_dmae_xy(pipe);
+	} else
+		pr_err("%s: ctrl=%d is not supported\n", __func__, ctrl);
 }
 
 void mdp4_dtv_overlay_blt_start(struct msm_fb_data_type *mfd)
@@ -1075,6 +1202,7 @@
 	int cndx = 0;
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
+	int wait = 0;
 
 	mutex_lock(&mfd->dma->ov_mutex);
 	if (!mfd->panel_power_on) {
@@ -1106,8 +1234,27 @@
 		mdp4_dtv_pipe_queue(0, pipe);
 	}
 
+	if (hdmi_prim_display)
+		wait = 1;
+
 	mdp4_overlay_mdp_perf_upd(mfd, 1);
-	mdp4_dtv_pipe_commit(0, 0);
+	mdp4_dtv_pipe_commit(cndx, wait);
 	mdp4_overlay_mdp_perf_upd(mfd, 0);
 	mutex_unlock(&mfd->dma->ov_mutex);
 }
+
+void mdp4_dtv_set_avparams(struct mdp4_overlay_pipe *pipe, int id)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (pipe == NULL) {
+		pr_warn("%s: dtv_pipe == NULL\n", __func__);
+		return;
+	}
+	vctrl = &vsync_ctrl_db[0];
+	if (pipe->pipe_num == OVERLAY_PIPE_VG1)
+		vctrl->vg1fd = id;
+	else if (pipe->pipe_num == OVERLAY_PIPE_VG2)
+		vctrl->vg2fd = id;
+}
+
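
Sketch (editorial, not part of the patch): the DTV vsync ISR above reads the
AV timer as two consecutive 32-bit registers, low word first, then composes a
64-bit tick. A minimal stand-alone illustration of that composition, with a
hypothetical register pointer standing in for vctrl->avtimer:

	/* illustrative only: mirrors the LSW/MSW composition in the ISR */
	#include <stdint.h>

	static inline uint64_t avtimer_read(volatile uint32_t *tp)
	{
		uint32_t lsw = tp[0];		/* low 32 bits, latched first */
		uint64_t msw = tp[1];		/* high 32 bits */

		return (msw << 32) | lsw;	/* 64-bit tick value */
	}

A general-purpose reader would typically re-read the high word to guard
against a low-word rollover between the two accesses; the ISR takes a single
pass since it samples once per vsync.
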
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index 629e51e..3702b59 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -148,21 +148,6 @@
 	mdp4_stat.overlay_play[pipe->mixer_num]++;
 }
 
-static void mdp4_lcdc_pipe_clean(struct vsync_update *vp)
-{
-	struct mdp4_overlay_pipe *pipe;
-	int i;
-
-	pipe = vp->plist;
-	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
-		if (pipe->pipe_used) {
-			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
-			pipe->pipe_used = 0; /* clear */
-		}
-	}
-	vp->update_cnt = 0;     /* empty queue */
-}
-
 static void mdp4_lcdc_blt_ov_update(struct mdp4_overlay_pipe *pipe);
 static void mdp4_lcdc_wait4dmap(int cndx);
 static void mdp4_lcdc_wait4ov(int cndx);
@@ -189,11 +174,10 @@
 
 	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);
 
-	/*
-	 * allow stage_commit without pipes queued
-	 * (vp->update_cnt == 0) to unstage pipes after
-	 * overlay_unset
-	 */
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return 0;
+	}
 
 	vctrl->update_ndx++;
 	vctrl->update_ndx &= 0x01;
@@ -277,39 +261,14 @@
 
 	if (wait) {
 		if (pipe->ov_blt_addr)
-			mdp4_lcdc_wait4ov(0);
+			mdp4_lcdc_wait4ov(cndx);
 		else
-			mdp4_lcdc_wait4dmap(0);
+			mdp4_lcdc_wait4dmap(cndx);
 	}
 
 	return cnt;
 }
 
-static void mdp4_lcdc_vsync_irq_ctrl(int cndx, int enable)
-{
-	struct vsycn_ctrl *vctrl;
-	static int vsync_irq_cnt;
-
-	vctrl = &vsync_ctrl_db[cndx];
-
-	mutex_lock(&vctrl->update_lock);
-	if (enable) {
-		if (vsync_irq_cnt == 0)
-			vsync_irq_enable(INTR_PRIMARY_VSYNC,
-						MDP_PRIM_VSYNC_TERM);
-		vsync_irq_cnt++;
-	} else {
-		if (vsync_irq_cnt) {
-			vsync_irq_cnt--;
-			if (vsync_irq_cnt == 0)
-				vsync_irq_disable(INTR_PRIMARY_VSYNC,
-						MDP_PRIM_VSYNC_TERM);
-		}
-	}
-	pr_debug("%s: enable=%d cnt=%d\n", __func__, enable, vsync_irq_cnt);
-	mutex_unlock(&vctrl->update_lock);
-}
-
 void mdp4_lcdc_vsync_ctrl(struct fb_info *info, int enable)
 {
 	struct vsycn_ctrl *vctrl;
@@ -324,13 +283,16 @@
 
 	vctrl->vsync_irq_enabled = enable;
 
-	mdp4_lcdc_vsync_irq_ctrl(cndx, enable);
+	if (enable)
+		vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+	else
+		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
 
 	if (vctrl->vsync_irq_enabled &&  atomic_read(&vctrl->suspend) == 0)
 		atomic_set(&vctrl->vsync_resume, 1);
 }
 
-void mdp4_lcdc_wait4vsync(int cndx)
+void mdp4_lcdc_wait4vsync(int cndx, long long *vtime)
 {
 	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
@@ -344,10 +306,13 @@
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
 
-	if (atomic_read(&vctrl->suspend) > 0)
+	if (atomic_read(&vctrl->suspend) > 0) {
+		*vtime = -1;
 		return;
+	}
 
-	mdp4_lcdc_vsync_irq_ctrl(cndx, 1);
+	/* start timing generator & mmu if they are not started yet */
+	mdp4_overlay_lcdc_start();
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
 
@@ -357,8 +322,9 @@
 	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
 
 	wait_for_completion(&vctrl->vsync_comp);
-	mdp4_lcdc_vsync_irq_ctrl(cndx, 0);
 	mdp4_stat.wait4vsync0++;
+
+	*vtime = vctrl->vsync_time.tv64;
 }
 
 static void mdp4_lcdc_wait4dmap(int cndx)
@@ -420,7 +386,10 @@
 		msecs_to_jiffies(VSYNC_PERIOD * 4));
 	if (ret <= 0) {
 		vctrl->wait_vsync_cnt = 0;
-		vctrl->vsync_time = ktime_get();
+		vsync_tick = ktime_to_ns(ktime_get());
+		ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
+		buf[strlen(buf) + 1] = '\0';
+		return ret;
 	}
 
 	spin_lock_irqsave(&vctrl->spin_lock, flags);
@@ -685,24 +654,10 @@
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
 	mdp_histogram_ctrl_all(TRUE);
-	mdp4_overlay_lcdc_start();
 
 	return ret;
 }
 
-/* timing generator off */
-static void mdp4_lcdc_tg_off(struct vsycn_ctrl *vctrl)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&vctrl->spin_lock, flags);
-	INIT_COMPLETION(vctrl->vsync_comp);
-	vctrl->wait_vsync_cnt++;
-	MDP_OUTP(MDP_BASE + LCDC_BASE, 0); /* turn off timing generator */
-	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
-
-	mdp4_lcdc_wait4vsync(0);
-}
 int mdp4_lcdc_off(struct platform_device *pdev)
 {
 	int ret = 0;
@@ -718,10 +673,11 @@
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
 
-	mdp4_lcdc_wait4vsync(cndx);
-
+	atomic_set(&vctrl->suspend, 1);
 	atomic_set(&vctrl->vsync_resume, 0);
 
+	msleep(20);	/* >= 17 ms */
+
 	complete_all(&vctrl->vsync_comp);
 
 	if (pipe->ov_blt_addr) {
@@ -735,8 +691,15 @@
 
 	mdp_histogram_ctrl_all(FALSE);
 
+	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
+
 	lcdc_enabled = 0;
 
+	if (vctrl->vsync_irq_enabled) {
+		vctrl->vsync_irq_enabled = 0;
+		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+	}
+
 	undx =  vctrl->update_ndx;
 	vp = &vctrl->vlist[undx];
 	if (vp->update_cnt) {
@@ -744,8 +707,7 @@
 		 * pipe's iommu will be freed at next overlay play
 		 * and iommu_drop statistic will be increased by one
 		 */
-		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
-		mdp4_lcdc_pipe_clean(vp);
+		vp->update_cnt = 0;     /* empty queue */
 	}
 
 	if (pipe) {
@@ -769,10 +731,6 @@
 		}
 	}
 
-	mdp4_lcdc_tg_off(vctrl);
-
-	atomic_set(&vctrl->suspend, 1);
-
 	/* MDP clock disable */
 	mdp_clk_ctrl(0);
 	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@@ -864,6 +822,8 @@
 	}
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
+	if (pipe == NULL)
+		return;
 
 	spin_lock(&vctrl->spin_lock);
 	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
@@ -902,6 +862,8 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
+	if (pipe == NULL)
+		return;
 
 	spin_lock(&vctrl->spin_lock);
 	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
@@ -1017,7 +979,7 @@
 	mdp4_overlay_mdp_perf_upd(mfd, 1);
 
 	cnt = mdp4_lcdc_pipe_commit(cndx, 0);
-	if (cnt >= 0) {
+	if (cnt) {
 		if (pipe->ov_blt_addr)
 			mdp4_lcdc_wait4ov(cndx);
 		else
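
Sketch (editorial, not part of the patch): the lcdc commit path above, like
the dtv and mddi paths, drains a two-slot pipe queue: *_pipe_queue() clones
pipes into vlist[update_ndx] under update_lock, and *_pipe_commit() flips the
index with a mask so queuing can continue while the drained slot is
committed. A self-contained illustration with hypothetical types, not the
driver's actual structs:

	/* illustrative two-slot (ping-pong) update queue */
	struct demo_vlist {
		int update_cnt;			/* pipes queued in this slot */
	};

	struct demo_ctrl {
		int update_ndx;			/* slot currently taking writers */
		struct demo_vlist vlist[2];
	};

	static int demo_commit(struct demo_ctrl *c)
	{
		struct demo_vlist *vp = &c->vlist[c->update_ndx];

		if (vp->update_cnt == 0)
			return 0;		/* nothing queued, skip kickoff */

		c->update_ndx++;
		c->update_ndx &= 0x01;		/* writers land in the other slot */
		vp->update_cnt = 0;		/* reset the slot being drained */
		return 1;
	}
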
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
index 717bba9..ca84eca 100644
--- a/drivers/video/msm/mdp4_overlay_mddi.c
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2012, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,38 +17,680 @@
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/hrtimer.h>
 #include <linux/delay.h>
-#include <mach/hardware.h>
 #include <linux/io.h>
-
-#include <asm/system.h>
-#include <asm/mach-types.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
-
 #include <linux/fb.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/hardware.h>
 
 #include "mdp.h"
 #include "msm_fb.h"
 #include "mdp4.h"
 
-static struct mdp4_overlay_pipe *mddi_pipe;
-static struct msm_fb_data_type *mddi_mfd;
-static int busy_wait_cnt;
+static int mddi_state;
+
+#define TOUT_PERIOD	HZ	/* 1 second */
+#define MS_100		(HZ/10)	/* 100 ms */
 
 static int vsync_start_y_adjust = 4;
 
-static int dmap_vsync_enable;
+#define MAX_CONTROLLER	1
+#define VSYNC_EXPIRE_TICK 8
 
-void mdp_dmap_vsync_set(int enable)
+static struct vsycn_ctrl {
+	struct device *dev;
+	int inited;
+	int update_ndx;
+	int expire_tick;
+	int blt_wait;
+	u32 ov_koff;
+	u32 ov_done;
+	u32 dmap_koff;
+	u32 dmap_done;
+	uint32 rdptr_intr_tot;
+	uint32 rdptr_sirq_tot;
+	atomic_t suspend;
+	int wait_vsync_cnt;
+	int blt_change;
+	int blt_free;
+	int blt_end;
+	int uevent;
+	struct mutex update_lock;
+	struct completion ov_comp;
+	struct completion dmap_comp;
+	struct completion vsync_comp;
+	spinlock_t spin_lock;
+	struct msm_fb_data_type *mfd;
+	struct mdp4_overlay_pipe *base_pipe;
+	struct vsync_update vlist[2];
+	int vsync_enabled;
+	int clk_enabled;
+	int clk_control;
+	int new_update;
+	ktime_t vsync_time;
+	struct work_struct vsync_work;
+	struct work_struct clk_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+static void vsync_irq_enable(int intr, int term)
 {
-	dmap_vsync_enable = enable;
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+	mdp_intr_mask |= intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_enable_irq(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 }
 
-int mdp_dmap_vsync_get(void)
+static void vsync_irq_disable(int intr, int term)
 {
-	return dmap_vsync_enable;
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+	mdp_intr_mask &= ~intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_disable_irq_nosync(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static void mdp4_mddi_blt_ov_update(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr;
+	int bpp;
+	char *overlay_base;
+
+	if (pipe->ov_blt_addr == 0)
+		return;
+
+	bpp = 3; /* overlay output is RGB888 */
+	off = 0;
+	if (pipe->ov_cnt & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr = pipe->ov_blt_addr + off;
+	/* overlay 0 */
+	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+	outpdw(overlay_base + 0x000c, addr);
+	outpdw(overlay_base + 0x001c, addr);
+}
+
+static void mdp4_mddi_blt_dmap_update(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr;
+	int bpp;
+
+	if (pipe->ov_blt_addr == 0)
+		return;
+
+	bpp = 3; /* overlay output is RGB888 */
+	off = 0;
+	if (pipe->dmap_cnt & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr = pipe->dma_blt_addr + off;
+
+	/* dmap */
+	MDP_OUTP(MDP_BASE + 0x90008, addr);
+}
+
+static void mdp4_mddi_wait4dmap(int cndx);
+static void mdp4_mddi_wait4ov(int cndx);
+
+static void mdp4_mddi_do_blt(struct msm_fb_data_type *mfd, int enable)
+{
+	unsigned long flags;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int need_wait = 0;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
+
+	if (mfd->ov0_wb_buf->write_addr == 0) {
+		pr_err("%s: no blt_base assigned\n", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (enable && pipe->ov_blt_addr == 0) {
+		vctrl->blt_change++;
+		if (vctrl->dmap_koff != vctrl->dmap_done) {
+			INIT_COMPLETION(vctrl->dmap_comp);
+			need_wait = 1;
+		}
+	} else if (enable == 0 && pipe->ov_blt_addr) {
+		vctrl->blt_change++;
+		if (vctrl->ov_koff != vctrl->dmap_done) {
+			INIT_COMPLETION(vctrl->dmap_comp);
+			need_wait = 1;
+		}
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (need_wait)
+		mdp4_mddi_wait4dmap(0);
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (enable && pipe->ov_blt_addr == 0) {
+		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+		pipe->ov_cnt = 0;
+		pipe->dmap_cnt = 0;
+		vctrl->ov_koff = vctrl->dmap_koff;
+		vctrl->ov_done = vctrl->dmap_done;
+		vctrl->blt_free = 0;
+		vctrl->blt_wait = 0;
+		vctrl->blt_end = 0;
+		mdp4_stat.blt_mddi++;
+	} else if (enable == 0 && pipe->ov_blt_addr) {
+		pipe->ov_blt_addr = 0;
+		pipe->dma_blt_addr =  0;
+		vctrl->blt_end = 1;
+		vctrl->blt_free = 4;	/* 4 commits to free wb buf */
+	}
+
+	pr_debug("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
+		vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
+
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+}
+
+/*
+ * mdp4_mddi_pipe_queue:
+ * called from thread context
+ */
+void mdp4_mddi_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pp;
+	int undx;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+
+	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx starts from 1 */
+
+	pr_debug("%s: vndx=%d pipe_ndx=%d expire=%x pid=%d\n", __func__,
+		undx, pipe->pipe_ndx, vctrl->expire_tick, current->pid);
+
+	*pp = *pipe;	/* clone it */
+	vp->update_cnt++;
+
+	mutex_unlock(&vctrl->update_lock);
+	mdp4_stat.overlay_play[pipe->mixer_num]++;
+}
+
+static void mdp4_mddi_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+
+int mdp4_mddi_pipe_commit(void)
+{
+	int  i, undx;
+	int mixer = 0;
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pipe;
+	struct mdp4_overlay_pipe *real_pipe;
+	unsigned long flags;
+	int need_dmap_wait = 0;
+	int need_ov_wait = 0;
+	int cnt = 0;
+
+	vctrl = &vsync_ctrl_db[0];
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+	pipe = vctrl->base_pipe;
+	mixer = pipe->mixer_num;
+
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return cnt;
+	}
+
+	vctrl->update_ndx++;
+	vctrl->update_ndx &= 0x01;
+	vp->update_cnt = 0;     /* reset */
+	if (vctrl->blt_free) {
+		vctrl->blt_free--;
+		if (vctrl->blt_free == 0)
+			mdp4_free_writeback_buf(vctrl->mfd, mixer);
+	}
+	mutex_unlock(&vctrl->update_lock);
+
+	/* free previous committed iommu back to pool */
+	mdp4_overlay_iommu_unmap_freelist(mixer);
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (pipe->ov_blt_addr) {
+		/* Blt */
+		if (vctrl->blt_wait)
+			need_dmap_wait = 1;
+		if (vctrl->ov_koff != vctrl->ov_done) {
+			INIT_COMPLETION(vctrl->ov_comp);
+			need_ov_wait = 1;
+		}
+	} else {
+		/* direct out */
+		if (vctrl->dmap_koff != vctrl->dmap_done) {
+			INIT_COMPLETION(vctrl->dmap_comp);
+			pr_debug("%s: wait, ok=%d od=%d dk=%d dd=%d cpu=%d\n",
+			 __func__, vctrl->ov_koff, vctrl->ov_done,
+			vctrl->dmap_koff, vctrl->dmap_done, smp_processor_id());
+			need_dmap_wait = 1;
+		}
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (need_dmap_wait) {
+		pr_debug("%s: wait4dmap\n", __func__);
+		mdp4_mddi_wait4dmap(0);
+	}
+
+	if (need_ov_wait) {
+		pr_debug("%s: wait4ov\n", __func__);
+		mdp4_mddi_wait4ov(0);
+	}
+
+	if (pipe->ov_blt_addr) {
+		if (vctrl->blt_end) {
+			vctrl->blt_end = 0;
+			pipe->ov_blt_addr = 0;
+			pipe->dma_blt_addr =  0;
+		}
+	}
+
+	if (vctrl->blt_change) {
+		mdp4_overlayproc_cfg(pipe);
+		mdp4_overlay_dmap_xy(pipe);
+		vctrl->blt_change = 0;
+	}
+
+	pipe = vp->plist;
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+		if (pipe->pipe_used) {
+			cnt++;
+			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
+			if (real_pipe && real_pipe->pipe_used) {
+				/* pipe not unset */
+				mdp4_overlay_vsync_commit(pipe);
+			}
+			/* free previous iommu to freelist
+			 * which will be freed at next
+			 * pipe_commit
+			 */
+			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+			pipe->pipe_used = 0; /* clear */
+		}
+	}
+
+	mdp4_mixer_stage_commit(mixer);
+
+	pipe = vctrl->base_pipe;
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (pipe->ov_blt_addr) {
+		mdp4_mddi_blt_ov_update(pipe);
+		pipe->ov_cnt++;
+		vctrl->ov_koff++;
+		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	} else {
+		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+		vctrl->dmap_koff++;
+	}
+	pr_debug("%s: kickoff\n", __func__);
+	/* kickoff overlay engine */
+	mdp4_stat.kickoff_ov0++;
+	outpdw(MDP_BASE + 0x0004, 0);
+	mb(); /* make sure kickoff executed */
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	mdp4_stat.overlay_commit[pipe->mixer_num]++;
+
+	return cnt;
+}
+
+void mdp4_mddi_vsync_ctrl(struct fb_info *info, int enable)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct vsycn_ctrl *vctrl;
+	unsigned long flags;
+	int clk_set_on = 0;
+	int cndx = 0;
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	pr_debug("%s: clk_enabled=%d vsync_enabled=%d req=%d\n", __func__,
+		vctrl->clk_enabled, vctrl->vsync_enabled, enable);
+
+	mutex_lock(&vctrl->update_lock);
+
+	if (vctrl->vsync_enabled == enable) {
+		mutex_unlock(&vctrl->update_lock);
+		return;
+	}
+
+	vctrl->vsync_enabled = enable;
+
+	if (enable) {
+		if (vctrl->clk_enabled == 0) {
+			pr_debug("%s: SET_CLK_ON\n", __func__);
+			mdp_clk_ctrl(1);
+			vctrl->clk_enabled = 1;
+			clk_set_on = 1;
+		}
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		vctrl->clk_control = 0;
+		vctrl->expire_tick = 0;
+		vctrl->uevent = 1;
+		vctrl->new_update = 1;
+		if (clk_set_on) {
+			vsync_irq_enable(INTR_PRIMARY_RDPTR,
+						MDP_PRIM_RDPTR_TERM);
+		}
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+		mdp4_overlay_update_mddi(mfd);
+	} else {
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		vctrl->clk_control = 1;
+		vctrl->uevent = 0;
+		if (vctrl->clk_enabled)
+			vctrl->expire_tick = VSYNC_EXPIRE_TICK;
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+	}
+	mutex_unlock(&vctrl->update_lock);
+}
+
+void mdp4_mddi_wait4vsync(int cndx, long long *vtime)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (atomic_read(&vctrl->suspend) > 0) {
+		*vtime = -1;
+		return;
+	}
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->wait_vsync_cnt == 0)
+		INIT_COMPLETION(vctrl->vsync_comp);
+	vctrl->wait_vsync_cnt++;
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	wait_for_completion(&vctrl->vsync_comp);
+	mdp4_stat.wait4vsync0++;
+
+	*vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+static void mdp4_mddi_wait4dmap(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->dmap_comp);
+}
+
+static void mdp4_mddi_wait4ov(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->ov_comp);
+}
+
+/*
+ * primary_rdptr_isr:
+ * called from interrupt context
+ */
+static void primary_rdptr_isr(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pr_debug("%s: ISR, cpu=%d\n", __func__, smp_processor_id());
+	vctrl->rdptr_intr_tot++;
+	vctrl->vsync_time = ktime_get();
+
+	spin_lock(&vctrl->spin_lock);
+
+	if (vctrl->uevent)
+		schedule_work(&vctrl->vsync_work);
+
+	if (vctrl->wait_vsync_cnt) {
+		complete(&vctrl->vsync_comp);
+		vctrl->wait_vsync_cnt = 0;
+	}
+
+	if (vctrl->expire_tick) {
+		vctrl->expire_tick--;
+		if (vctrl->expire_tick == 0)
+			schedule_work(&vctrl->clk_work);
+	}
+	spin_unlock(&vctrl->spin_lock);
+}
+
+void mdp4_dmap_done_mddi(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int diff;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	 /* blt enabled */
+	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	vctrl->dmap_done++;
+	diff = vctrl->ov_done - vctrl->dmap_done;
+	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
+		__func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff,
+		vctrl->dmap_done, smp_processor_id());
+	complete_all(&vctrl->dmap_comp);
+	if (diff <= 0) {
+		if (vctrl->blt_wait)
+			vctrl->blt_wait = 0;
+		spin_unlock(&vctrl->spin_lock);
+		return;
+	}
+
+	/* kick dmap */
+	mdp4_mddi_blt_dmap_update(pipe);
+	pipe->dmap_cnt++;
+	mdp4_stat.kickoff_dmap++;
+	vctrl->dmap_koff++;
+	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
+	mb(); /* make sure kickoff executed */
+	spin_unlock(&vctrl->spin_lock);
+}
+
+/*
+ * mdp4_overlay0_done_mddi: called from isr
+ */
+void mdp4_overlay0_done_mddi(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int diff;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	vctrl->ov_done++;
+	complete_all(&vctrl->ov_comp);
+	diff = vctrl->ov_done - vctrl->dmap_done;
+
+	pr_debug("%s: ov_koff=%d ov_done=%d dmap_koff=%d dmap_done=%d cpu=%d\n",
+		__func__, vctrl->ov_koff, vctrl->ov_done, vctrl->dmap_koff,
+		vctrl->dmap_done, smp_processor_id());
+
+	if (pipe->ov_blt_addr == 0) {
+		/* blt disabled */
+		spin_unlock(&vctrl->spin_lock);
+		return;
+	}
+
+	if (diff > 1) {
+		/*
+		 * two overlay_done and none dmap_done yet
+		 * let dmap_done kickoff dmap
+		 * and put pipe_commit to wait
+		 */
+		vctrl->blt_wait = 1;
+		pr_debug("%s: blt_wait set\n", __func__);
+		spin_unlock(&vctrl->spin_lock);
+		return;
+	}
+	mdp4_mddi_blt_dmap_update(pipe);
+	pipe->dmap_cnt++;
+	mdp4_stat.kickoff_dmap++;
+	vctrl->dmap_koff++;
+	vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
+	mb(); /* make sure kickoff executed */
+	spin_unlock(&vctrl->spin_lock);
+}
+
+static void clk_ctrl_work(struct work_struct *work)
+{
+	struct vsycn_ctrl *vctrl =
+		container_of(work, typeof(*vctrl), clk_work);
+	unsigned long flags;
+
+	mutex_lock(&vctrl->update_lock);
+	if (vctrl->clk_control && vctrl->clk_enabled) {
+		pr_debug("%s: SET_CLK_OFF\n", __func__);
+		mdp_clk_ctrl(0);
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+		vctrl->clk_enabled = 0;
+		vctrl->clk_control = 0;
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+	}
+	mutex_unlock(&vctrl->update_lock);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+	struct vsycn_ctrl *vctrl =
+		container_of(work, typeof(*vctrl), vsync_work);
+	char buf[64];
+	char *envp[2];
+
+	snprintf(buf, sizeof(buf), "VSYNC=%llu",
+			ktime_to_ns(vctrl->vsync_time));
+	envp[0] = buf;
+	envp[1] = NULL;
+	kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+
+void mdp4_mddi_rdptr_init(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->inited)
+		return;
+
+	vctrl->inited = 1;
+	vctrl->update_ndx = 0;
+	mutex_init(&vctrl->update_lock);
+	init_completion(&vctrl->ov_comp);
+	init_completion(&vctrl->dmap_comp);
+	init_completion(&vctrl->vsync_comp);
+	spin_lock_init(&vctrl->spin_lock);
+	INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+	INIT_WORK(&vctrl->clk_work, clk_ctrl_work);
+}
+
+void mdp4_primary_rdptr(void)
+{
+	primary_rdptr_isr(0);
+}
+
+void mdp4_overlay_mddi_state_set(int state)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	mddi_state = state;
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+int mdp4_overlay_mddi_state_get(void)
+{
+	return mddi_state;
+}
+
+static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+{
+	/*
+	 * The adreno GPU hardware requires that the pitch be aligned to
+	 * 32 pixels for color buffers, so for the cases where the GPU
+	 * is writing directly to fb0, the framebuffer pitch
+	 * also needs to be 32 pixel aligned
+	 */
+
+	if (fb_index == 0)
+		return ALIGN(xres, 32) * bpp;
+	else
+		return xres * bpp;
 }
 
 void mdp4_mddi_vsync_enable(struct msm_fb_data_type *mfd,
@@ -61,13 +703,6 @@
 	if ((mfd->use_mdp_vsync) && (mfd->ibuf.vsync_enable) &&
 		(mfd->panel_info.lcd.vsync_enable)) {
 
-		if (mdp_hw_revision < MDP4_REVISION_V2_1) {
-			/* need dmas dmap switch */
-			if (which == 0 && dmap_vsync_enable == 0 &&
-				mfd->panel_info.lcd.rev < 2) /* dma_p */
-				return;
-		}
-
 		if (vsync_start_y_adjust <= pipe->dst_y)
 			start_y = pipe->dst_y - vsync_start_y_adjust;
 		else
@@ -88,633 +723,337 @@
 	}
 }
 
-#define WHOLESCREEN
+void mdp4_mddi_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
 
-void mdp4_overlay_update_lcd(struct msm_fb_data_type *mfd)
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->base_pipe = pipe;
+}
+
+static void mdp4_overlay_setup_pipe_addr(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe)
 {
 	MDPIBUF *iBuf = &mfd->ibuf;
+	struct fb_info *fbi;
+	int bpp;
 	uint8 *src;
+
+	/* whole screen for base layer */
+	src = (uint8 *) iBuf->buf;
+	fbi = mfd->fbi;
+
+	if (pipe->is_3d) {
+		bpp = fbi->var.bits_per_pixel / 8;
+		pipe->src_height = pipe->src_height_3d;
+		pipe->src_width = pipe->src_width_3d;
+		pipe->src_h = pipe->src_height_3d;
+		pipe->src_w = pipe->src_width_3d;
+		pipe->dst_h = pipe->src_height_3d;
+		pipe->dst_w = pipe->src_width_3d;
+		pipe->srcp0_ystride = msm_fb_line_length(0,
+						pipe->src_width, bpp);
+	} else {
+		 /* 2D */
+		pipe->src_height = fbi->var.yres;
+		pipe->src_width = fbi->var.xres;
+		pipe->src_h = fbi->var.yres;
+		pipe->src_w = fbi->var.xres;
+		pipe->dst_h = fbi->var.yres;
+		pipe->dst_w = fbi->var.xres;
+		pipe->srcp0_ystride = fbi->fix.line_length;
+	}
+	pipe->src_y = 0;
+	pipe->src_x = 0;
+	pipe->dst_y = 0;
+	pipe->dst_x = 0;
+	pipe->srcp0_addr = (uint32)src;
+}
+
+void mdp4_overlay_update_mddi(struct msm_fb_data_type *mfd)
+{
 	int ptype;
 	uint32 mddi_ld_param;
 	uint16 mddi_vdo_packet_reg;
 	struct mdp4_overlay_pipe *pipe;
+	uint32	data;
 	int ret;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 
 	if (mfd->key != MFD_KEY)
 		return;
 
-	mddi_mfd = mfd;		/* keep it */
+	vctrl = &vsync_ctrl_db[cndx];
 
-	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
-	if (mddi_pipe == NULL) {
+	if (vctrl->base_pipe == NULL) {
 		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+
 		if (ptype < 0)
-			printk(KERN_INFO "%s: format2type failed\n", __func__);
+			pr_err("%s: format2type failed\n", __func__);
+
 		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
-		if (pipe == NULL)
-			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
+		if (pipe == NULL) {
+			pr_err("%s: pipe_alloc failed\n", __func__);
+			return;
+		}
 		pipe->pipe_used++;
+		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
 		pipe->mixer_num  = MDP4_MIXER0;
 		pipe->src_format = mfd->fb_imgType;
 		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_MDDI);
 		ret = mdp4_overlay_format2pipe(pipe);
 		if (ret < 0)
-			printk(KERN_INFO "%s: format2type failed\n", __func__);
+			pr_err("%s: format2pipe failed\n", __func__);
 
-		mddi_pipe = pipe; /* keep it */
-		mddi_ld_param = 0;
-		mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
-
-		if (mdp_hw_revision == MDP4_REVISION_V2_1) {
-			uint32	data;
-
-			data = inpdw(MDP_BASE + 0x0028);
-			data &= ~0x0300;	/* bit 8, 9, MASTER4 */
-			if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
-				data |= 0x0200;
-			else
-				data |= 0x0100;
-
-			MDP_OUTP(MDP_BASE + 0x00028, data);
-		}
-
-		if (mfd->panel_info.type == MDDI_PANEL) {
-			if (mfd->panel_info.pdest == DISPLAY_1)
-				mddi_ld_param = 0;
-			else
-				mddi_ld_param = 1;
-		} else {
-			mddi_ld_param = 2;
-		}
-
-		MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param);
-
-		if (mfd->panel_info.bpp == 24)
-			MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
-		else if (mfd->panel_info.bpp == 16)
-			MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
-		else
-			MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
-
-		MDP_OUTP(MDP_BASE + 0x00098, 0x01);
+		vctrl->base_pipe = pipe; /* keep it */
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
 		pipe->ov_blt_addr = 0;
 		pipe->dma_blt_addr = 0;
 	} else {
-		pipe = mddi_pipe;
+		pipe = vctrl->base_pipe;
 	}
 
-	/* 0 for dma_p, client_id = 0 */
-	MDP_OUTP(MDP_BASE + 0x00090, 0);
+	MDP_OUTP(MDP_BASE + 0x021c, 10); /* read pointer */
 
+	mddi_ld_param = 0;
+	mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
 
-	src = (uint8 *) iBuf->buf;
+	if (mdp_hw_revision == MDP4_REVISION_V2_1) {
+		data = inpdw(MDP_BASE + 0x0028);
+		data &= ~0x0300;	/* bit 8, 9, MASTER4 */
+		if (mfd->fbi->var.xres == 540) /* qHD, 540x960 */
+			data |= 0x0200;
+		else
+			data |= 0x0100;
 
-#ifdef WHOLESCREEN
-
-	{
-		struct fb_info *fbi;
-
-		fbi = mfd->fbi;
-		pipe->src_height = fbi->var.yres;
-		pipe->src_width = fbi->var.xres;
-		pipe->src_h = fbi->var.yres;
-		pipe->src_w = fbi->var.xres;
-		pipe->src_y = 0;
-		pipe->src_x = 0;
-		pipe->dst_h = fbi->var.yres;
-		pipe->dst_w = fbi->var.xres;
-		pipe->dst_y = 0;
-		pipe->dst_x = 0;
-		pipe->srcp0_addr = (uint32)src;
-		pipe->srcp0_ystride = fbi->fix.line_length;
+		MDP_OUTP(MDP_BASE + 0x00028, data);
 	}
 
-#else
-	if (mdp4_overlay_active(MDP4_MIXER0)) {
-		struct fb_info *fbi;
-
-		fbi = mfd->fbi;
-		pipe->src_height = fbi->var.yres;
-		pipe->src_width = fbi->var.xres;
-		pipe->src_h = fbi->var.yres;
-		pipe->src_w = fbi->var.xres;
-		pipe->src_y = 0;
-		pipe->src_x = 0;
-		pipe->dst_h = fbi->var.yres;
-		pipe->dst_w = fbi->var.xres;
-		pipe->dst_y = 0;
-		pipe->dst_x = 0;
-		pipe->srcp0_addr = (uint32) src;
-		pipe->srcp0_ystride = fbi->fix.line_length;
+	if (mfd->panel_info.type == MDDI_PANEL) {
+		if (mfd->panel_info.pdest == DISPLAY_1)
+			mddi_ld_param = 0;
+		else
+			mddi_ld_param = 1;
 	} else {
-		/* starting input address */
-		src += (iBuf->dma_x + iBuf->dma_y * iBuf->ibuf_width)
-					* iBuf->bpp;
-
-		pipe->src_height = iBuf->dma_h;
-		pipe->src_width = iBuf->dma_w;
-		pipe->src_h = iBuf->dma_h;
-		pipe->src_w = iBuf->dma_w;
-		pipe->src_y = 0;
-		pipe->src_x = 0;
-		pipe->dst_h = iBuf->dma_h;
-		pipe->dst_w = iBuf->dma_w;
-		pipe->dst_y = iBuf->dma_y;
-		pipe->dst_x = iBuf->dma_x;
-		pipe->srcp0_addr = (uint32) src;
-		pipe->srcp0_ystride = iBuf->ibuf_width * iBuf->bpp;
+		mddi_ld_param = 2;
 	}
-#endif
 
-	pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
+	MDP_OUTP(MDP_BASE + 0x00090, mddi_ld_param);
+
+	if (mfd->panel_info.bpp == 24)
+		MDP_OUTP(MDP_BASE + 0x00094,
+		 (MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
+	else if (mfd->panel_info.bpp == 16)
+		MDP_OUTP(MDP_BASE + 0x00094,
+		 (MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
+	else
+		MDP_OUTP(MDP_BASE + 0x00094,
+		 (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
+
+	MDP_OUTP(MDP_BASE + 0x00098, 0x01);
+
+
+	mdp4_overlay_setup_pipe_addr(mfd, pipe);
 
 	mdp4_overlay_rgb_setup(pipe);
 
-	mdp4_mixer_stage_up(pipe, 1);
+	mdp4_overlay_reg_flush(pipe, 1);
+
+	mdp4_mixer_stage_up(pipe, 0);
 
 	mdp4_overlayproc_cfg(pipe);
 
 	mdp4_overlay_dmap_xy(pipe);
 
 	mdp4_overlay_dmap_cfg(mfd, 0);
+
 	mdp4_mixer_stage_commit(pipe->mixer_num);
-	mdp4_mddi_vsync_enable(mfd, pipe, 0);
 
-	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	wmb();
 }
 
-int mdp4_mddi_overlay_blt_start(struct msm_fb_data_type *mfd)
+void mdp4_mddi_blt_start(struct msm_fb_data_type *mfd)
 {
-	unsigned long flag;
-
-	pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
-		__func__, mddi_pipe->blt_end,
-		(int)mddi_pipe->ov_blt_addr, current->pid);
-
-	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
-
-	if (mfd->ov0_wb_buf->write_addr == 0) {
-		pr_info("%s: no blt_base assigned\n", __func__);
-		return -EBUSY;
-	}
-
-	if (mddi_pipe->ov_blt_addr == 0) {
-		mdp4_mddi_dma_busy_wait(mfd);
-		spin_lock_irqsave(&mdp_spin_lock, flag);
-		mddi_pipe->blt_end = 0;
-		mddi_pipe->blt_cnt = 0;
-		mddi_pipe->ov_cnt = 0;
-		mddi_pipe->dmap_cnt = 0;
-		mddi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
-		mddi_pipe->dma_blt_addr = mfd->ov0_wb_buf->write_addr;
-		mdp4_stat.blt_mddi++;
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	return 0;
+	mdp4_mddi_do_blt(mfd, 1);
 }
 
-	return -EBUSY;
-}
-
-int mdp4_mddi_overlay_blt_stop(struct msm_fb_data_type *mfd)
+void mdp4_mddi_blt_stop(struct msm_fb_data_type *mfd)
 {
-	unsigned long flag;
-
-	pr_debug("%s: blt_end=%d blt_addr=%x\n",
-		 __func__, mddi_pipe->blt_end, (int)mddi_pipe->ov_blt_addr);
-
-	if ((mddi_pipe->blt_end == 0) && mddi_pipe->ov_blt_addr) {
-		spin_lock_irqsave(&mdp_spin_lock, flag);
-		mddi_pipe->blt_end = 1;	/* mark as end */
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
-		return 0;
-	}
-
-	return -EBUSY;
-}
-
-int mdp4_mddi_overlay_blt_offset(struct msm_fb_data_type *mfd,
-					struct msmfb_overlay_blt *req)
-{
-	req->offset = 0;
-	req->width = mddi_pipe->src_width;
-	req->height = mddi_pipe->src_height;
-	req->bpp = mddi_pipe->bpp;
-
-	return sizeof(*req);
+	mdp4_mddi_do_blt(mfd, 0);
 }
 
 void mdp4_mddi_overlay_blt(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req)
 {
-	if (req->enable)
-		mdp4_mddi_overlay_blt_start(mfd);
-	else if (req->enable == 0)
-		mdp4_mddi_overlay_blt_stop(mfd);
-
+	mdp4_mddi_do_blt(mfd, req->enable);
 }
 
-void mdp4_blt_xy_update(struct mdp4_overlay_pipe *pipe)
+int mdp4_mddi_on(struct platform_device *pdev)
 {
-	uint32 off, addr, addr2;
-	int bpp;
-	char *overlay_base;
+	int ret = 0;
+	int cndx = 0;
+	struct msm_fb_data_type *mfd;
+	struct vsycn_ctrl *vctrl;
 
-	if (pipe->ov_blt_addr == 0)
-		return;
+	pr_debug("%s+:\n", __func__);
 
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
-#ifdef BLT_RGB565
-	bpp = 2; /* overlay ouput is RGB565 */
-#else
-	bpp = 3; /* overlay ouput is RGB888 */
-#endif
-	off = 0;
-	if (pipe->dmap_cnt & 0x01)
-		off = pipe->src_height * pipe->src_width * bpp;
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->mfd = mfd;
+	vctrl->dev = mfd->fbi->dev;
 
-	addr = pipe->ov_blt_addr + off;
+	mdp_clk_ctrl(1);
+	mdp4_overlay_update_mddi(mfd);
+	mdp_clk_ctrl(0);
 
-	/* dmap */
-	MDP_OUTP(MDP_BASE + 0x90008, addr);
+	mdp4_iommu_attach();
 
-	off = 0;
-	if (pipe->ov_cnt & 0x01)
-		off = pipe->src_height * pipe->src_width * bpp;
-	addr2 = pipe->ov_blt_addr + off;
-	/* overlay 0 */
-	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
-	outpdw(overlay_base + 0x000c, addr2);
-	outpdw(overlay_base + 0x001c, addr2);
+	atomic_set(&vctrl->suspend, 0);
+	pr_debug("%s-:\n", __func__);
+
+	return ret;
 }
 
-void mdp4_primary_rdptr(void)
+int mdp4_mddi_off(struct platform_device *pdev)
 {
-}
+	int ret = 0;
+	int cndx = 0;
+	struct msm_fb_data_type *mfd;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
-/*
- * mdp4_dmap_done_mddi: called from isr
- */
-void mdp4_dma_p_done_mddi(struct mdp_dma_data *dma)
-{
-	int diff;
+	pr_debug("%s+:\n", __func__);
 
-	mddi_pipe->dmap_cnt++;
-	diff = mddi_pipe->ov_cnt - mddi_pipe->dmap_cnt;
-	pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
-			__func__, mddi_pipe->ov_cnt, mddi_pipe->dmap_cnt);
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
-	if (diff <= 0) {
-		spin_lock(&mdp_spin_lock);
-		dma->dmap_busy = FALSE;
-		complete(&dma->dmap_comp);
-		spin_unlock(&mdp_spin_lock);
-
-		if (mddi_pipe->blt_end) {
-			mddi_pipe->blt_end = 0;
-			mddi_pipe->ov_blt_addr = 0;
-			mddi_pipe->dma_blt_addr = 0;
-			pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n", __func__,
-				mddi_pipe->ov_cnt, mddi_pipe->dmap_cnt);
-			mdp_intr_mask &= ~INTR_DMA_P_DONE;
-			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		}
-
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-		mdp_disable_irq_nosync(MDP_DMA2_TERM);  /* disable intr */
-		return;
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+	if (pipe == NULL) {
+		pr_err("%s: NO base pipe\n", __func__);
+		return ret;
 	}
 
-	spin_lock(&mdp_spin_lock);
-	dma->busy = FALSE;
-	spin_unlock(&mdp_spin_lock);
-	complete(&dma->comp);
-	if (busy_wait_cnt)
-		busy_wait_cnt--;
+	atomic_set(&vctrl->suspend, 1);
 
-	pr_debug("%s: kickoff dmap\n", __func__);
+	/* sanity check, free pipes besides base layer */
+	mdp4_overlay_unset_mixer(pipe->mixer_num);
+	mdp4_mixer_stage_down(pipe, 1);
+	mdp4_overlay_pipe_free(pipe);
+	vctrl->base_pipe = NULL;
 
-	mdp4_blt_xy_update(mddi_pipe);
-	/* kick off dmap */
-	outpdw(MDP_BASE + 0x000c, 0x0);
-	mdp4_stat.kickoff_dmap++;
-	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-}
-
-/*
- * mdp4_overlay0_done_mddi: called from isr
- */
-void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma)
-{
-	int diff;
-
-	if (mddi_pipe->ov_blt_addr == 0) {
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-		spin_lock(&mdp_spin_lock);
-		dma->busy = FALSE;
-		spin_unlock(&mdp_spin_lock);
-		complete(&dma->comp);
-
-		if (busy_wait_cnt)
-			busy_wait_cnt--;
-		mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-
-		return;
+	if (vctrl->clk_enabled) {
+		/*
+		 * in case of suspend, the vsync-off request is not
+		 * received from the framework, which leaves the clock on;
+		 * it needs to be turned off here
+		 */
+		mdp_clk_ctrl(0);
 	}
 
-	/* blt enabled */
-	if (mddi_pipe->blt_end == 0)
-		mddi_pipe->ov_cnt++;
+	vctrl->clk_enabled = 0;
+	vctrl->vsync_enabled = 0;
+	vctrl->clk_control = 0;
+	vctrl->expire_tick = 0;
+	vctrl->uevent = 0;
 
-	pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
-			__func__, mddi_pipe->ov_cnt, mddi_pipe->dmap_cnt);
+	vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
 
-	if (mddi_pipe->blt_cnt == 0) {
-		/* first kickoff since blt enabled */
-		mdp_intr_mask |= INTR_DMA_P_DONE;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-	}
+	pr_debug("%s-:\n", __func__);
 
-	mddi_pipe->blt_cnt++;
-
-	diff = mddi_pipe->ov_cnt - mddi_pipe->dmap_cnt;
-	if (diff >= 2) {
-		mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-		return;
-	}
-
-	spin_lock(&mdp_spin_lock);
-	dma->busy = FALSE;
-	dma->dmap_busy = TRUE;
-	spin_unlock(&mdp_spin_lock);
-	complete(&dma->comp);
-
-	if (busy_wait_cnt)
-		busy_wait_cnt--;
-
-	pr_debug("%s: kickoff dmap\n", __func__);
-
-	mdp4_blt_xy_update(mddi_pipe);
-	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
-	/* kick off dmap */
-	outpdw(MDP_BASE + 0x000c, 0x0);
-	mdp4_stat.kickoff_dmap++;
-	mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-}
-
-void mdp4_mddi_overlay_restore(void)
-{
-	if (mddi_mfd == NULL)
-		return;
-
-	pr_debug("%s: resotre, pid=%d\n", __func__, current->pid);
-
-	if (mddi_mfd->panel_power_on == 0)
-		return;
-	if (mddi_mfd && mddi_pipe) {
-		mdp4_mddi_dma_busy_wait(mddi_mfd);
-		mdp4_overlay_update_lcd(mddi_mfd);
-
-		if (mddi_pipe->ov_blt_addr)
-			mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
-		mdp4_mddi_overlay_kickoff(mddi_mfd, mddi_pipe);
-		mddi_mfd->dma_update_flag = 1;
-	}
-	if (mdp_hw_revision < MDP4_REVISION_V2_1) /* need dmas dmap switch */
-		mdp4_mddi_overlay_dmas_restore();
-}
-
-void mdp4_mddi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-	int need_wait = 0;
-
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (mfd->dma->dmap_busy == TRUE) {
-		INIT_COMPLETION(mfd->dma->dmap_comp);
-		need_wait++;
-	}
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-	if (need_wait) {
-		/* wait until DMA finishes the current job */
-		wait_for_completion(&mfd->dma->dmap_comp);
-	}
-}
-
-/*
- * mdp4_mddi_cmd_dma_busy_wait: check mddi link activity
- * mddi link is a shared resource and it can only be used
- * while it is in idle state.
- * ov_mutex need to be acquired before call this function.
- */
-void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-	int need_wait = 0;
-
-	pr_debug("%s: START, pid=%d\n", __func__, current->pid);
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (mfd->dma->busy == TRUE) {
-		if (busy_wait_cnt == 0)
-			INIT_COMPLETION(mfd->dma->comp);
-		busy_wait_cnt++;
-		need_wait++;
-	}
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-
-	if (need_wait) {
-		/* wait until DMA finishes the current job */
-		pr_debug("%s: PENDING, pid=%d\n", __func__, current->pid);
-		wait_for_completion(&mfd->dma->comp);
-	}
-	pr_debug("%s: DONE, pid=%d\n", __func__, current->pid);
-}
-
-void mdp4_mddi_kickoff_video(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
 	/*
-	 * a video kickoff may happen before UI kickoff after
-	 * blt enabled. mdp4_overlay_update_lcd() need
-	 * to be called before kickoff.
-	 * vice versa for blt disabled.
+	 * footswitch off
+	 * this will cause all mdp registers
+	 * to be reset to their defaults
+	 * once the footswitch comes back on
 	 */
-	if (mddi_pipe->ov_blt_addr && mddi_pipe->blt_cnt == 0)
-		mdp4_overlay_update_lcd(mfd); /* first time */
-	else if (mddi_pipe->ov_blt_addr == 0  && mddi_pipe->blt_cnt) {
-		mdp4_overlay_update_lcd(mfd); /* last time */
-		mddi_pipe->blt_cnt = 0;
-	}
 
-	pr_debug("%s: blt_addr=%d blt_cnt=%d\n",
-		__func__, (int)mddi_pipe->ov_blt_addr, mddi_pipe->blt_cnt);
-
-	if (mddi_pipe->ov_blt_addr)
-		mdp4_mddi_blt_dmap_busy_wait(mddi_mfd);
-	mdp4_mddi_overlay_kickoff(mfd, pipe);
+	return ret;
 }
 
-void mdp4_mddi_kickoff_ui(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
+void mdp_mddi_overlay_suspend(struct msm_fb_data_type *mfd)
 {
-	pr_debug("%s: pid=%d\n", __func__, current->pid);
-	mdp4_mddi_overlay_kickoff(mfd, pipe);
-}
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+	/* disengage rgb0 from mixer0 */
+	if (pipe) {
+		if (mfd->ref_cnt == 0) {
+			/* adb stop */
+			if (pipe->pipe_type == OVERLAY_TYPE_BF)
+				mdp4_overlay_borderfill_stage_down(pipe);
 
-void mdp4_mddi_overlay_kickoff(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
-	unsigned long flag;
-
-	mdp_enable_irq(MDP_OVERLAY0_TERM);
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	mfd->dma->busy = TRUE;
-	if (mddi_pipe->ov_blt_addr)
-		mfd->dma->dmap_busy = TRUE;
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	/* start OVERLAY pipe */
-	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
-	mdp4_stat.kickoff_ov0++;
-}
-
-void mdp4_dma_s_update_lcd(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
-	MDPIBUF *iBuf = &mfd->ibuf;
-	uint32 outBpp = iBuf->bpp;
-	uint16 mddi_vdo_packet_reg;
-	uint32 dma_s_cfg_reg;
-
-	dma_s_cfg_reg = 0;
-
-	if (mfd->fb_imgType == MDP_RGBA_8888)
-		dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR; /* on purpose */
-	else if (mfd->fb_imgType == MDP_BGR_565)
-		dma_s_cfg_reg |= DMA_PACK_PATTERN_BGR;
-	else
-		dma_s_cfg_reg |= DMA_PACK_PATTERN_RGB;
-
-	if (outBpp == 4)
-		dma_s_cfg_reg |= (1 << 26); /* xRGB8888 */
-	else if (outBpp == 2)
-		dma_s_cfg_reg |= DMA_IBUF_FORMAT_RGB565;
-
-	dma_s_cfg_reg |= DMA_DITHER_EN;
-
-	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	/* PIXELSIZE */
-	MDP_OUTP(MDP_BASE + 0xa0004, (pipe->dst_h << 16 | pipe->dst_w));
-	MDP_OUTP(MDP_BASE + 0xa0008, pipe->srcp0_addr);	/* ibuf address */
-	MDP_OUTP(MDP_BASE + 0xa000c, pipe->srcp0_ystride);/* ystride */
-
-	if (mfd->panel_info.bpp == 24) {
-		dma_s_cfg_reg |= DMA_DSTC0G_8BITS |	/* 666 18BPP */
-		    DMA_DSTC1B_8BITS | DMA_DSTC2R_8BITS;
-	} else if (mfd->panel_info.bpp == 18) {
-		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 666 18BPP */
-		    DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
-	} else {
-		dma_s_cfg_reg |= DMA_DSTC0G_6BITS |	/* 565 16BPP */
-		    DMA_DSTC1B_5BITS | DMA_DSTC2R_5BITS;
-	}
-
-	MDP_OUTP(MDP_BASE + 0xa0010, (pipe->dst_y << 16) | pipe->dst_x);
-
-	/* 1 for dma_s, client_id = 0 */
-	MDP_OUTP(MDP_BASE + 0x00090, 1);
-
-	mddi_vdo_packet_reg = mfd->panel_info.mddi.vdopkt;
-
-	if (mfd->panel_info.bpp == 24)
-		MDP_OUTP(MDP_BASE + 0x00094,
-			(MDDI_VDO_PACKET_DESC_24 << 16) | mddi_vdo_packet_reg);
-	else if (mfd->panel_info.bpp == 16)
-		MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC_16 << 16) | mddi_vdo_packet_reg);
-	else
-		MDP_OUTP(MDP_BASE + 0x00094,
-			 (MDDI_VDO_PACKET_DESC << 16) | mddi_vdo_packet_reg);
-
-	MDP_OUTP(MDP_BASE + 0x00098, 0x01);
-
-	MDP_OUTP(MDP_BASE + 0xa0000, dma_s_cfg_reg);
-
-	mdp4_mddi_vsync_enable(mfd, pipe, 1);
-
-	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-}
-
-void mdp4_mddi_dma_s_kickoff(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
-	mdp_enable_irq(MDP_DMA_S_TERM);
-
-	if (mddi_pipe->ov_blt_addr == 0)
-		mfd->dma->busy = TRUE;
-
-	mfd->ibuf_flushed = TRUE;
-	/* start dma_s pipe */
-	mdp_pipe_kickoff(MDP_DMA_S_TERM, mfd);
-	mdp4_stat.kickoff_dmas++;
-
-	/* wait until DMA finishes the current job */
-	wait_for_completion(&mfd->dma->comp);
-	mdp_disable_irq(MDP_DMA_S_TERM);
-}
-
-void mdp4_mddi_overlay_dmas_restore(void)
-{
-	/* mutex held by caller */
-	if (mddi_mfd && mddi_pipe) {
-		mdp4_mddi_dma_busy_wait(mddi_mfd);
-		mdp4_dma_s_update_lcd(mddi_mfd, mddi_pipe);
-		mdp4_mddi_dma_s_kickoff(mddi_mfd, mddi_pipe);
-		mddi_mfd->dma_update_flag = 1;
+			/* pipe == rgb1 */
+			mdp4_overlay_unset_mixer(pipe->mixer_num);
+			vctrl->base_pipe = NULL;
+		} else {
+			mdp4_mixer_stage_down(pipe, 1);
+			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
+		}
 	}
 }
 
 void mdp4_mddi_overlay(struct msm_fb_data_type *mfd)
 {
-	mutex_lock(&mfd->dma->ov_mutex);
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+	long long xx;
 
-	if (mfd && mfd->panel_power_on) {
-		mdp4_mddi_dma_busy_wait(mfd);
+	vctrl = &vsync_ctrl_db[cndx];
 
-		if (mddi_pipe && mddi_pipe->ov_blt_addr)
-			mdp4_mddi_blt_dmap_busy_wait(mfd);
-		mdp4_overlay_mdp_perf_upd(mfd, 0);
-		mdp4_overlay_update_lcd(mfd);
+	if (!mfd->panel_power_on)
+		return;
 
-		mdp4_overlay_mdp_perf_upd(mfd, 1);
-		if (mdp_hw_revision < MDP4_REVISION_V2_1) {
-			/* dmas dmap switch */
-			if (mdp4_overlay_mixer_play(mddi_pipe->mixer_num)
-						== 0) {
-				mdp4_dma_s_update_lcd(mfd, mddi_pipe);
-				mdp4_mddi_dma_s_kickoff(mfd, mddi_pipe);
-			} else
-				mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
-		} else	/* no dams dmap switch  */
-			mdp4_mddi_kickoff_ui(mfd, mddi_pipe);
-
-	/* signal if pan function is waiting for the update completion */
-		if (mfd->pan_waiting) {
-			mfd->pan_waiting = FALSE;
-			complete(&mfd->pan_comp);
-		}
+	pipe = vctrl->base_pipe;
+	if (pipe == NULL) {
+		pr_err("%s: NO base pipe\n", __func__);
+		return;
 	}
+
+	mutex_lock(&vctrl->update_lock);
+	if (!vctrl->clk_enabled) {
+		pr_err("%s: mdp clocks disabled\n", __func__);
+		mutex_unlock(&vctrl->update_lock);
+		return;
+	}
+
+	mutex_unlock(&vctrl->update_lock);
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->expire_tick) {
+		/*
+		 * in the middle of shutting clocks down
+		 * delay to allow pan display to go through
+		 */
+		vctrl->expire_tick = VSYNC_EXPIRE_TICK;
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
+		mdp4_mddi_vsync_enable(mfd, pipe, 0);
+		mdp4_overlay_setup_pipe_addr(mfd, pipe);
+		mdp4_mddi_pipe_queue(0, pipe);
+	}
+
+	mdp4_overlay_mdp_perf_upd(mfd, 1);
+
+	mutex_lock(&mfd->dma->ov_mutex);
+	mdp4_mddi_pipe_commit();
 	mutex_unlock(&mfd->dma->ov_mutex);
+	mdp4_mddi_wait4vsync(0, &xx);
+
+	mdp4_overlay_mdp_perf_upd(mfd, 0);
 }
 
 int mdp4_mddi_overlay_cursor(struct fb_info *info, struct fb_cursor *cursor)
@@ -722,7 +1061,6 @@
 	struct msm_fb_data_type *mfd = info->par;
 	mutex_lock(&mfd->dma->ov_mutex);
 	if (mfd && mfd->panel_power_on) {
-		mdp4_mddi_dma_busy_wait(mfd);
 		mdp_hw_cursor_update(info, cursor);
 	}
 	mutex_unlock(&mfd->dma->ov_mutex);
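
Sketch (editorial, not part of the patch): mdp4_mddi_blt_ov_update() and
mdp4_mddi_blt_dmap_update() above ping-pong between two writeback frames by
toggling an offset on the low bit of a frame counter. A minimal illustration
of that addressing, assuming RGB888 output (bpp = 3) as the code does:

	/* illustrative blt double-buffer addressing */
	#include <stdint.h>

	static uint32_t blt_frame_addr(uint32_t blt_base, uint32_t width,
				       uint32_t height, uint32_t frame_cnt)
	{
		uint32_t off = 0;

		if (frame_cnt & 0x01)			/* odd frame: buffer 1 */
			off = height * width * 3;	/* one RGB888 frame */

		return blt_base + off;			/* even frame: buffer 0 */
	}
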
diff --git a/drivers/video/msm/mdp4_overlay_writeback.c b/drivers/video/msm/mdp4_overlay_writeback.c
index fef1259..7caf0ad 100644
--- a/drivers/video/msm/mdp4_overlay_writeback.c
+++ b/drivers/video/msm/mdp4_overlay_writeback.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -61,6 +61,7 @@
 	struct msm_fb_data_type *mfd;
 	struct mdp4_overlay_pipe *base_pipe;
 	struct vsync_update vlist[2];
+	struct work_struct clk_work;
 } vsync_ctrl_db[MAX_CONTROLLER];
 
 static void vsync_irq_enable(int intr, int term)
@@ -90,7 +91,7 @@
 static int mdp4_overlay_writeback_update(struct msm_fb_data_type *mfd);
 static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
 		struct msmfb_writeback_data_list *node);
-static int mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
+static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
 		struct msmfb_writeback_data_list **wfdnode);
 
 int mdp4_overlay_writeback_on(struct platform_device *pdev)
@@ -165,15 +166,9 @@
 		(0x0 & 0xFFF));         /* 12-bit R */
 
 	mdp_clk_ctrl(0);
-
-	atomic_set(&vctrl->suspend, 0);
-
 	return ret;
 }
 
-static void mdp4_wfd_wait4ov(int cndx);
-static void mdp4_writeback_pipe_clean(struct vsync_update *vp);
-
 int mdp4_overlay_writeback_off(struct platform_device *pdev)
 {
 	int cndx = 0;
@@ -190,23 +185,17 @@
 
 	vctrl = &vsync_ctrl_db[cndx];
 	pipe = vctrl->base_pipe;
-
-	atomic_set(&vctrl->suspend, 1);
-
 	if (pipe == NULL) {
 		pr_err("%s: NO base pipe\n", __func__);
 		return ret;
 	}
 
-	complete(&vctrl->ov_comp);
-	msleep(20);
-	mdp_clk_ctrl(1);
 	/* sanity check, free pipes besides base layer */
 	mdp4_overlay_unset_mixer(pipe->mixer_num);
 	mdp4_mixer_stage_down(pipe, 1);
 	mdp4_overlay_pipe_free(pipe);
 	vctrl->base_pipe = NULL;
-	mdp_clk_ctrl(0);
+
 	undx =  vctrl->update_ndx;
 	vp = &vctrl->vlist[undx];
 	if (vp->update_cnt) {
@@ -214,8 +203,7 @@
 		 * pipe's iommu will be freed at next overlay play
 		 * and iommu_drop statistic will be increased by one
 		 */
-		pr_warn("%s: update_cnt=%d\n", __func__, vp->update_cnt);
-		mdp4_writeback_pipe_clean(vp);
+		vp->update_cnt = 0;     /* empty queue */
 	}
 
 	ret = panel_next_off(pdev);
@@ -224,7 +212,6 @@
 	/* MDP_LAYERMIXER_WB_MUX_SEL to restore to default cfg*/
 	outpdw(MDP_BASE + 0x100F4, 0x0);
 	mdp_clk_ctrl(0);
-
 	pr_debug("%s-:\n", __func__);
 	return ret;
 }
@@ -334,20 +321,7 @@
 	mdp4_stat.overlay_play[pipe->mixer_num]++;
 }
 
-static void mdp4_writeback_pipe_clean(struct vsync_update *vp)
-{
-	struct mdp4_overlay_pipe *pipe;
-	int i;
-
-	pipe = vp->plist;
-	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
-		if (pipe->pipe_used) {
-			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
-			pipe->pipe_used = 0; /* clear */
-		}
-	}
-	vp->update_cnt = 0;     /* empty queue */
-}
+static void mdp4_wfd_wait4ov(int cndx);
 
 int mdp4_wfd_pipe_commit(struct msm_fb_data_type *mfd,
 			int cndx, int wait)
@@ -361,7 +335,6 @@
 	unsigned long flags;
 	int cnt = 0;
 	struct msmfb_writeback_data_list *node = NULL;
-	int rc = 0;
 
 	vctrl = &vsync_ctrl_db[cndx];
 
@@ -371,33 +344,21 @@
 	pipe = vctrl->base_pipe;
 	mixer = pipe->mixer_num;
 
-	/*
-	 * allow stage_commit without pipes queued
-	 * (vp->update_cnt == 0) to unstage pipes after
-	 * overlay_unset
-	 */
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return cnt;
+	}
 
 	vctrl->update_ndx++;
 	vctrl->update_ndx &= 0x01;
 	vp->update_cnt = 0;     /* reset */
 	mutex_unlock(&vctrl->update_lock);
 
-	rc = mdp4_wfd_dequeue_update(mfd, &node);
-	if (rc != 0) {
-		pr_err("%s: mdp4_wfd_dequeue_update failed !! mfd=%x\n",
-			__func__, (int)mfd);
-		pipe = vp->plist;
-		for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
-			pipe->pipe_used = 0;
-			pr_info("%s: dequeue update failed, unsetting pipes\n",
-				__func__);
-		}
-		return cnt;
-	}
+	mdp4_wfd_dequeue_update(mfd, &node);
+
 	/* free previous committed iommu back to pool */
 	mdp4_overlay_iommu_unmap_freelist(mixer);
 
-	mdp_clk_ctrl(1);
 	pipe = vp->plist;
 	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
 		if (pipe->pipe_used) {
@@ -416,6 +377,8 @@
 		}
 	}
 
+	mdp_clk_ctrl(1);
+
 	mdp4_mixer_stage_commit(mixer);
 
 	pipe = vctrl->base_pipe;
@@ -440,6 +403,13 @@
 	return cnt;
 }
 
+static void clk_ctrl_work(struct work_struct *work)
+{
+	struct vsycn_ctrl *vctrl =
+		container_of(work, typeof(*vctrl), clk_work);
+	mdp_clk_ctrl(0);
+}
+
 void mdp4_wfd_init(int cndx)
 {
 	struct vsycn_ctrl *vctrl;
@@ -457,8 +427,8 @@
 	vctrl->update_ndx = 0;
 	mutex_init(&vctrl->update_lock);
 	init_completion(&vctrl->ov_comp);
-	atomic_set(&vctrl->suspend, 1);
 	spin_lock_init(&vctrl->spin_lock);
+	INIT_WORK(&vctrl->clk_work, clk_ctrl_work);
 }
 
 static void mdp4_wfd_wait4ov(int cndx)
@@ -492,7 +462,7 @@
 	vsync_irq_disable(INTR_OVERLAY2_DONE, MDP_OVERLAY2_TERM);
 	vctrl->ov_done++;
 	complete(&vctrl->ov_comp);
-
+	schedule_work(&vctrl->clk_work);
 	pr_debug("%s ovdone interrupt\n", __func__);
 	spin_unlock(&vctrl->spin_lock);
 }
@@ -517,16 +487,11 @@
 
 	mdp4_overlay_mdp_perf_upd(mfd, 1);
 
-	mdp_clk_ctrl(1);
-
 	mdp4_wfd_pipe_commit(mfd, 0, 1);
 
 	mdp4_overlay_mdp_perf_upd(mfd, 0);
 
-	mdp_clk_ctrl(0);
-
 	mutex_unlock(&mfd->dma->ov_mutex);
-
 }
 
 static int mdp4_overlay_writeback_register_buffer(
@@ -778,7 +743,7 @@
 	return rc;
 }
 
-static int mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
+static void mdp4_wfd_dequeue_update(struct msm_fb_data_type *mfd,
 			struct msmfb_writeback_data_list **wfdnode)
 {
 	struct vsycn_ctrl *vctrl;
@@ -786,7 +751,7 @@
 	struct msmfb_writeback_data_list *node = NULL;
 
 	if (mfd && !mfd->panel_power_on)
-		return -EPERM;
+		return;
 
 	pr_debug("%s:+ mfd=%x\n", __func__, (int)mfd);
 
@@ -813,18 +778,8 @@
 	if (!pipe->ov_blt_addr) {
 		pr_err("%s: no writeback buffer 0x%x, %p\n", __func__,
 			(unsigned int)pipe->ov_blt_addr, node);
-
-		if (node) {
-			mutex_lock(&mfd->writeback_mutex);
-			list_add_tail(&node->active_entry,
-				&mfd->writeback_free_queue);
-			node->state = IN_FREE_QUEUE;
-			mfd->writeback_active_cnt--;
-			mutex_unlock(&mfd->writeback_mutex);
-		}
-
 		mutex_unlock(&mfd->unregister_mutex);
-		return -EINVAL;
+		return;
 	}
 
 	mdp4_overlay_writeback_update(mfd);
@@ -832,7 +787,6 @@
 	*wfdnode = node;
 
 	mutex_unlock(&mfd->unregister_mutex);
-	return 0;
 }
 
 static void mdp4_wfd_queue_wakeup(struct msm_fb_data_type *mfd,
@@ -853,23 +807,3 @@
 	mutex_unlock(&mfd->writeback_mutex);
 	wake_up(&mfd->wait_q);
 }
-
-int mdp4_writeback_set_mirroring_hint(struct fb_info *info, int hint)
-{
-	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
-	if (mfd->panel.type != WRITEBACK_PANEL)
-		return -ENOTSUPP;
-
-	switch (hint) {
-	case MDP_WRITEBACK_MIRROR_ON:
-	case MDP_WRITEBACK_MIRROR_PAUSE:
-	case MDP_WRITEBACK_MIRROR_RESUME:
-	case MDP_WRITEBACK_MIRROR_OFF:
-		pr_info("wfd state switched to %d\n", hint);
-		switch_set_state(&mfd->writeback_sdev, hint);
-		return 0;
-	default:
-		return -EINVAL;
-	}
-}
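
The writeback changes above move the MDP clock-off out of the overlay-done ISR: the interrupt handler now only completes ov_comp and schedules clk_work, and the clock vote is dropped later in process context, where sleeping is legal. A minimal sketch of the pattern, using hypothetical wb_ctrl names rather than the driver's vsycn_ctrl (mdp_clk_ctrl is the driver's own clock helper):

#include <linux/workqueue.h>

struct wb_ctrl {
	struct work_struct clk_work;
};

/* runs in process context, so it may sleep while dropping the vote */
static void wb_clk_off_work(struct work_struct *work)
{
	struct wb_ctrl *wc = container_of(work, struct wb_ctrl, clk_work);

	(void)wc;		/* per-controller state, if needed */
	mdp_clk_ctrl(0);	/* drop the MDP core clock vote */
}

static void wb_ctrl_init(struct wb_ctrl *wc)
{
	INIT_WORK(&wc->clk_work, wb_clk_off_work);
}

/* from the overlay-done ISR, still holding the spinlock: */
static void wb_overlay_done(struct wb_ctrl *wc)
{
	schedule_work(&wc->clk_work);	/* defer the clock-off */
}
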
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index f8fd8eb..f8b7f2f 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -392,6 +392,9 @@
 	/* MDP cmd block enable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 
+	mdp_bus_scale_update_request
+		(MDP_BUS_SCALE_INIT, MDP_BUS_SCALE_INIT);
+
 #ifdef MDP4_ERROR
 	/*
 	 * Issuing a software reset on DMA_P will cause the DMA_P dma engine to stall
@@ -442,8 +445,6 @@
 
 	/* max read pending cmd config */
 	outpdw(MDP_BASE + 0x004c, 0x02222);	/* 3 pending requests */
-	outpdw(MDP_BASE + 0x0400, 0x7FF);
-	outpdw(MDP_BASE + 0x0404, 0x30050);
 
 #ifndef CONFIG_FB_MSM_OVERLAY
 	/* both REFRESH_MODE and DIRECT_OUT are ignored at BLT mode */
@@ -555,7 +556,7 @@
 			mdp4_dmap_done_dsi_cmd(0);
 #else
 		else { /* MDDI */
-			mdp4_dma_p_done_mddi(dma);
+			mdp4_dmap_done_mddi(0);
 			mdp_pipe_ctrl(MDP_DMA2_BLOCK,
 				MDP_BLOCK_POWER_OFF, TRUE);
 			complete(&dma->comp);
@@ -606,7 +607,7 @@
 				mdp4_overlay0_done_dsi_cmd(0);
 #else
 			if (panel & MDP4_PANEL_MDDI)
-				mdp4_overlay0_done_mddi(dma);
+				mdp4_overlay0_done_mddi(0);
 #endif
 		}
 		mdp_hw_cursor_done();
@@ -628,10 +629,12 @@
 		if (panel & MDP4_PANEL_ATV)
 			mdp4_overlay1_done_atv();
 #endif
+		mdp_hw_cursor_done();
 	}
 #if defined(CONFIG_FB_MSM_WRITEBACK_MSM_PANEL)
 	if (isr & INTR_OVERLAY2_DONE) {
 		mdp4_stat.intr_overlay2++;
+		mdp_pipe_ctrl(MDP_OVERLAY2_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 		/* disable DTV interrupt */
 		if (panel & MDP4_PANEL_WRITEBACK)
 			mdp4_overlay2_done_wfd(&dma_wb_data);
@@ -2311,7 +2314,7 @@
 					pr_err("ion_map_iommu() read failed\n");
 					return -ENOMEM;
 				}
-				if (mfd->mem_hid & ION_SECURE) {
+				if (mfd->mem_hid & ION_FLAG_SECURE) {
 					if (ion_phys(mfd->iclient, buf->ihdl,
 						&addr, (size_t *)&len)) {
 						pr_err("%s:%d: ion_phys map failed\n",
@@ -2374,7 +2377,7 @@
 	if (!IS_ERR_OR_NULL(mfd->iclient)) {
 		if (!IS_ERR_OR_NULL(buf->ihdl)) {
 			if (mdp_iommu_split_domain) {
-				if (!(mfd->mem_hid & ION_SECURE))
+				if (!(mfd->mem_hid & ION_FLAG_SECURE))
 					ion_unmap_iommu(mfd->iclient, buf->ihdl,
 						DISPLAY_WRITE_DOMAIN, GEN_POOL);
 				ion_unmap_iommu(mfd->iclient, buf->ihdl,
@@ -3078,3 +3081,116 @@
 error:
 	return ret;
 }
+
+static int is_valid_calib_addr(void *addr)
+{
+	int ret = 0;
+	unsigned int ptr;
+
+	ptr = (unsigned int) addr;
+
+	if (mdp_rev >= MDP_REV_30 && mdp_rev < MDP_REV_40) {
+		/* if request is outside the MDP reg-map or is not aligned 4 */
+		if (ptr == 0x0 || ptr > 0xF0600 || ptr % 0x4)
+			goto end;
+
+		if (ptr >= 0x90000 && ptr < 0x94000) {
+			if (ptr == 0x90000 || ptr == 0x90070)
+				ret = 1;
+			else if (ptr >= 0x93400 && ptr <= 0x93420)
+				ret = 1;
+			else if (ptr >= 0x93500 && ptr <= 0x93508)
+				ret = 1;
+			else if (ptr >= 0x93580 && ptr <= 0x93588)
+				ret = 1;
+			else if (ptr >= 0x93600 && ptr <= 0x93614)
+				ret = 1;
+			else if (ptr >= 0x93680 && ptr <= 0x93694)
+				ret = 1;
+			else if (ptr >= 0x93800 && ptr <= 0x93BFC)
+				ret = 1;
+		}
+	} else if (mdp_rev >= MDP_REV_40 && mdp_rev <= MDP_REV_44) {
+		/* if request is outside the MDP reg-map or is not aligned 4 */
+		if (ptr > 0xF0600 || ptr % 0x4)
+			goto end;
+
+		if (ptr < 0x90000) {
+			if (ptr == 0x0 || ptr == 0x4 || ptr == 0x28200 ||
+								ptr == 0x28204)
+				ret = 1;
+		} else if (ptr < 0x95000) {
+			if (ptr == 0x90000 || ptr == 0x90070)
+				ret = 1;
+			else if (ptr >= 0x93400 && ptr <= 0x93420)
+				ret = 1;
+			else if (ptr >= 0x93500 && ptr <= 0x93508)
+				ret = 1;
+			else if (ptr >= 0x93580 && ptr <= 0x93588)
+				ret = 1;
+			else if (ptr >= 0x93600 && ptr <= 0x93614)
+				ret = 1;
+			else if (ptr >= 0x93680 && ptr <= 0x93694)
+				ret = 1;
+			else if (ptr >= 0x94800 && ptr <= 0x94BFC)
+				ret = 1;
+		} else if (ptr < 0x9A000) {
+			if (ptr >= 0x98800 && ptr <= 0x9883C)
+				ret = 1;
+			else if (ptr >= 0x98880 && ptr <= 0x988AC)
+				ret = 1;
+			else if (ptr >= 0x98900 && ptr <= 0x9893C)
+				ret = 1;
+			else if (ptr >= 0x98980 && ptr <= 0x989BC)
+				ret = 1;
+			else if (ptr >= 0x98A00 && ptr <= 0x98A3C)
+				ret = 1;
+			else if (ptr >= 0x98A80 && ptr <= 0x98ABC)
+				ret = 1;
+			else if (ptr >= 0x99000 && ptr <= 0x993FC)
+				ret = 1;
+			else if (ptr >= 0x99800 && ptr <= 0x99BFC)
+				ret = 1;
+		} else if (ptr >= 0x9A000 && ptr <= 0x9a08c) {
+			ret = 1;
+		}
+	}
+end:
+	return ret;
+}
+
+int mdp4_calib_config(struct mdp_calib_config_data *cfg)
+{
+	int ret = -1;
+	void *ptr = (void *) cfg->addr;
+
+	if (is_valid_calib_addr(ptr))
+		ret = 0;
+	else
+		return ret;
+
+	ptr = (void *)(((unsigned int) ptr) + MDP_BASE);
+	mdp_clk_ctrl(1);
+	if (cfg->ops & MDP_PP_OPS_READ) {
+		cfg->data = inpdw(ptr);
+		ret = 1;
+	} else if (cfg->ops & MDP_PP_OPS_WRITE) {
+		outpdw(ptr, cfg->data);
+	}
+	mdp_clk_ctrl(0);
+	return ret;
+}
+
+u32 mdp4_get_mixer_num(u32 panel_type)
+{
+	u32 mixer_num;
+	if ((panel_type == TV_PANEL) ||
+			(panel_type == DTV_PANEL))
+		mixer_num = MDP4_MIXER1;
+	else if (panel_type == WRITEBACK_PANEL) {
+		mixer_num = MDP4_MIXER2;
+	} else {
+		mixer_num = MDP4_MIXER0;
+	}
+	return mixer_num;
+}
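
is_valid_calib_addr() above whitelists the MDP register windows that userspace may touch through mdp4_calib_config(); everything else is rejected before any register access happens. The open-coded range checks can equivalently be kept in a table, which is easier to audit when windows change between MDP revisions. A sketch under that assumption (the table lists only a few of the windows from the function above):

#include <linux/kernel.h>

struct reg_range {
	unsigned int start;	/* inclusive */
	unsigned int end;	/* inclusive */
};

static const struct reg_range mdp4_calib_ranges[] = {
	{ 0x90000, 0x90000 },
	{ 0x90070, 0x90070 },
	{ 0x93400, 0x93420 },
	{ 0x94800, 0x94BFC },
	/* ... remaining windows from the checks above ... */
};

static int addr_in_ranges(unsigned int ptr)
{
	int i;

	/* outside the MDP reg-map, or not 4-byte aligned */
	if (ptr > 0xF0600 || ptr % 4)
		return 0;

	for (i = 0; i < ARRAY_SIZE(mdp4_calib_ranges); i++)
		if (ptr >= mdp4_calib_ranges[i].start &&
		    ptr <= mdp4_calib_ranges[i].end)
			return 1;
	return 0;
}
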
diff --git a/drivers/video/msm/mdp4_wfd_writeback.c b/drivers/video/msm/mdp4_wfd_writeback.c
index 7876f9a..d96fc7d 100644
--- a/drivers/video/msm/mdp4_wfd_writeback.c
+++ b/drivers/video/msm/mdp4_wfd_writeback.c
@@ -75,13 +75,6 @@
 
 	platform_set_drvdata(mdp_dev, mfd);
 
-	mfd->writeback_sdev.name = "wfd";
-	rc = switch_dev_register(&mfd->writeback_sdev);
-	if (rc) {
-		pr_err("Failed to setup switch dev for writeback panel");
-		return rc;
-	}
-
 	rc = platform_device_add(mdp_dev);
 	if (rc) {
 		WRITEBACK_MSG_ERR("failed to add device");
@@ -91,16 +84,8 @@
 	return rc;
 }
 
-static int writeback_remove(struct platform_device *pdev)
-{
-	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
-	switch_dev_unregister(&mfd->writeback_sdev);
-	return 0;
-}
-
 static struct platform_driver writeback_driver = {
 	.probe = writeback_probe,
-	.remove = writeback_remove,
 	.driver = {
 		.name = "writeback",
 	},
diff --git a/drivers/video/msm/mdp4_wfd_writeback_panel.c b/drivers/video/msm/mdp4_wfd_writeback_panel.c
index b9cab5f..c3d0431 100644
--- a/drivers/video/msm/mdp4_wfd_writeback_panel.c
+++ b/drivers/video/msm/mdp4_wfd_writeback_panel.c
@@ -80,4 +80,4 @@
 	return rc;
 }
 
-late_initcall(writeback_panel_init);
+module_init(writeback_panel_init);
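
The one-line change above promotes writeback_panel_init from late_initcall() to module_init(). When the driver is built in, both expand to initcalls, just at different levels: module_init() registers at device level (6), while late_initcall() runs at level 7, after every device-level initcall has finished. A sketch of the relative ordering (hypothetical functions):

/* built-in: runs at initcall level 6, alongside other device drivers */
static int __init panel_init_early(void)
{
	return 0;
}
module_init(panel_init_early);

/* built-in: runs at level 7, strictly after all device-level initcalls */
static int __init panel_init_late(void)
{
	return 0;
}
late_initcall(panel_init_late);
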
diff --git a/drivers/video/msm/mdp_cursor.c b/drivers/video/msm/mdp_cursor.c
index 93ff388..a3920af 100644
--- a/drivers/video/msm/mdp_cursor.c
+++ b/drivers/video/msm/mdp_cursor.c
@@ -52,7 +52,11 @@
 
 	/* disable vsync */
 	spin_lock_irqsave(&mdp_spin_lock, flag);
-	mdp_disable_irq(MDP_OVERLAY0_TERM);
+	if (hdmi_prim_display)
+		mdp_disable_irq(MDP_OVERLAY1_TERM);
+	else
+		mdp_disable_irq(MDP_OVERLAY0_TERM);
+
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 }
 
@@ -78,29 +82,37 @@
 	 *
 	 * Moving this code out of the ISR will cause the MDP to underrun!
 	 */
+	uint32_t base = 0;
+
+	if (hdmi_prim_display)
+		base = ((uint32_t)(MDP_BASE + 0xB0000));
+	else
+		base = ((uint32_t)(MDP_BASE + 0x90000));
+
+
 	spin_lock(&mdp_spin_lock);
 	if (sync_disabled) {
 		spin_unlock(&mdp_spin_lock);
 		return;
 	}
 
-	MDP_OUTP(MDP_BASE + 0x90044, (height << 16) | width);
-	MDP_OUTP(MDP_BASE + 0x90048, cursor_buf_phys);
+	MDP_OUTP(base + 0x44, (height << 16) | width);
+	MDP_OUTP(base + 0x48, cursor_buf_phys);
 
-	MDP_OUTP(MDP_BASE + 0x90060,
+	MDP_OUTP(base + 0x60,
 		 (transp_en << 3) | (calpha_en << 1) |
-		 (inp32(MDP_BASE + 0x90060) & 0x1));
+		 (inp32(base + 0x60) & 0x1));
 
-	MDP_OUTP(MDP_BASE + 0x90064, (alpha << 24));
-	MDP_OUTP(MDP_BASE + 0x90068, (0xffffff & bg_color));
-	MDP_OUTP(MDP_BASE + 0x9006C, (0xffffff & bg_color));
+	MDP_OUTP(base + 0x64, (alpha << 24));
+	MDP_OUTP(base + 0x68, (0xffffff & bg_color));
+	MDP_OUTP(base + 0x6C, (0xffffff & bg_color));
 
 	/* enable/disable the cursor as per the last request */
-	if (cursor_enabled && !(inp32(MDP_BASE + 0x90060) & (0x1)))
-		MDP_OUTP(MDP_BASE + 0x90060, inp32(MDP_BASE + 0x90060) | 0x1);
-	else if (!cursor_enabled && (inp32(MDP_BASE + 0x90060) & (0x1)))
-		MDP_OUTP(MDP_BASE + 0x90060,
-					inp32(MDP_BASE + 0x90060) & (~0x1));
+	if (cursor_enabled && !(inp32(base + 0x60) & (0x1)))
+		MDP_OUTP(base + 0x60, inp32(base + 0x60) | 0x1);
+	else if (!cursor_enabled && (inp32(base + 0x60) & (0x1)))
+		MDP_OUTP(base + 0x60,
+					inp32(base + 0x60) & (~0x1));
 
 	/* enqueue the task to disable MDP interrupts */
 	queue_work(mdp_cursor_ctrl_wq, &mdp_cursor_ctrl_worker);
@@ -119,17 +131,26 @@
 	if (sync_disabled) {
 
 		/* cancel pending task to disable MDP interrupts */
-		if (work_pending(&mdp_cursor_ctrl_worker))
+		if (work_pending(&mdp_cursor_ctrl_worker)) {
 			cancel_work_sync(&mdp_cursor_ctrl_worker);
-		else
+		} else {
 			/* enable irq */
-			mdp_enable_irq(MDP_OVERLAY0_TERM);
+			if (hdmi_prim_display)
+				mdp_enable_irq(MDP_OVERLAY1_TERM);
+			else
+				mdp_enable_irq(MDP_OVERLAY0_TERM);
+		}
 
 		sync_disabled = 0;
 
 		/* enable vsync intr */
-		outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
-		mdp_intr_mask |= INTR_OVERLAY0_DONE;
+		if (hdmi_prim_display) {
+			outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
+			mdp_intr_mask |= INTR_OVERLAY1_DONE;
+		} else {
+			outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
+			mdp_intr_mask |= INTR_OVERLAY0_DONE;
+		}
 		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
 	}
 }
@@ -140,14 +161,20 @@
 	struct fb_image *img = &cursor->image;
 	unsigned long flag;
 	int sync_needed = 0, ret = 0;
+	uint32_t base = 0;
 
 	if ((img->width > MDP_CURSOR_WIDTH) ||
 	    (img->height > MDP_CURSOR_HEIGHT) ||
 	    (img->depth != 32))
 		return -EINVAL;
 
+	if (hdmi_prim_display)
+		base = ((uint32_t)(MDP_BASE + 0xB0000));
+	else
+		base = ((uint32_t)(MDP_BASE + 0x90000));
+
 	if (cursor->set & FB_CUR_SETPOS)
-		MDP_OUTP(MDP_BASE + 0x9004c, (img->dy << 16) | img->dx);
+		MDP_OUTP(base + 0x4c, (img->dy << 16) | img->dx);
 
 	if (cursor->set & FB_CUR_SETIMAGE) {
 		ret = copy_from_user(mfd->cursor_buf, img->data,
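
The cursor hunks above replace hard-coded MDP_BASE + 0x900xx register addresses with a base chosen per display: HDMI-as-primary drives the OVERLAY1 block at 0xB0000, otherwise the OVERLAY0 block at 0x90000, and every cursor register becomes an offset from that base. A small helper captures the selection (helper names hypothetical; hdmi_prim_display, MDP_BASE and MDP_OUTP come from the driver):

static uint32_t mdp_cursor_base(void)
{
	/* HDMI primary display uses the OVERLAY1 register block */
	if (hdmi_prim_display)
		return (uint32_t)(MDP_BASE + 0xB0000);

	return (uint32_t)(MDP_BASE + 0x90000);
}

/* e.g. the position register sits at offset 0x4c from the block base */
static void mdp_cursor_set_pos(uint32_t dx, uint32_t dy)
{
	MDP_OUTP(mdp_cursor_base() + 0x4c, (dy << 16) | dx);
}
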
diff --git a/drivers/video/msm/mdp_debugfs.c b/drivers/video/msm/mdp_debugfs.c
index 68727f4..d3e0c8d 100644
--- a/drivers/video/msm/mdp_debugfs.c
+++ b/drivers/video/msm/mdp_debugfs.c
@@ -719,84 +719,6 @@
 	.write = pmdh_reg_write,
 };
 
-
-
-#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
-static int vsync_reg_open(struct inode *inode, struct file *file)
-{
-	/* non-seekable */
-	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
-	return 0;
-}
-
-static int vsync_reg_release(struct inode *inode, struct file *file)
-{
-	return 0;
-}
-
-static ssize_t vsync_reg_write(
-	struct file *file,
-	const char __user *buff,
-	size_t count,
-	loff_t *ppos)
-{
-	uint32 enable;
-	int cnt;
-
-	if (count >= sizeof(debug_buf))
-		return -EFAULT;
-
-	if (copy_from_user(debug_buf, buff, count))
-		return -EFAULT;
-
-	debug_buf[count] = 0;	/* end of string */
-
-	cnt = sscanf(debug_buf, "%x", &enable);
-
-	mdp_dmap_vsync_set(enable);
-
-	return count;
-}
-
-static ssize_t vsync_reg_read(
-	struct file *file,
-	char __user *buff,
-	size_t count,
-	loff_t *ppos)
-{
-	char *bp;
-	int len = 0;
-	int tot = 0;
-	int dlen;
-
-	if (*ppos)
-		return 0;	/* the end */
-
-	bp = debug_buf;
-	dlen = sizeof(debug_buf);
-	len = snprintf(bp, dlen, "%x\n", mdp_dmap_vsync_get());
-	tot += len;
-	bp += len;
-	*bp = 0;
-	tot++;
-
-	if (copy_to_user(buff, debug_buf, tot))
-		return -EFAULT;
-
-	*ppos += tot;	/* increase offset */
-
-	return tot;
-}
-
-
-static const struct file_operations vsync_fops = {
-	.open = vsync_reg_open,
-	.release = vsync_reg_release,
-	.read = vsync_reg_read,
-	.write = vsync_reg_write,
-};
-#endif
-
 static ssize_t emdh_reg_write(
 	struct file *file,
 	const char __user *buff,
@@ -1107,6 +1029,135 @@
 	.write = dbg_reg_write,
 };
 
+u32 dbg_force_ov0_blt;
+u32 dbg_force_ov1_blt;
+
+static ssize_t dbg_force_ov0_blt_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos) {
+	int len;
+
+	if (*ppos)
+		return 0;
+
+	len = snprintf(debug_buf, sizeof(debug_buf),
+		       "%d\n", dbg_force_ov0_blt);
+
+	if (len < 0)
+		return 0;
+
+	if (copy_to_user(buff, debug_buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+
+	return len;
+}
+
+static ssize_t dbg_force_ov0_blt_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	u32 cnt;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x", &dbg_force_ov0_blt);
+
+	pr_info("%s: dbg_force_ov0_blt = %x\n",
+		__func__, dbg_force_ov0_blt);
+
+	if ((dbg_force_ov0_blt & 0x0f) > 2)
+		pr_err("%s: invalid dbg_force_ov0_blt = %d\n",
+			__func__, dbg_force_ov0_blt);
+
+	if ((dbg_force_ov0_blt >> 4) > 2)
+		pr_err("%s: invalid dbg_force_ov0_blt = %d\n",
+			__func__, dbg_force_ov0_blt);
+
+	return count;
+}
+
+static const struct file_operations dbg_force_ov0_blt_fops = {
+	.open = dbg_open,
+	.release = dbg_release,
+	.read = dbg_force_ov0_blt_read,
+	.write = dbg_force_ov0_blt_write,
+};
+
+static ssize_t dbg_force_ov1_blt_read(
+	struct file *file,
+	char __user *buff,
+	size_t count,
+	loff_t *ppos) {
+	int len;
+
+	if (*ppos)
+		return 0;
+
+	len = snprintf(debug_buf, sizeof(debug_buf),
+		       "%x\n", dbg_force_ov1_blt);
+
+	if (len < 0)
+		return 0;
+
+	if (copy_to_user(buff, debug_buf, len))
+		return -EFAULT;
+
+	*ppos += len;
+
+	return len;
+}
+
+static ssize_t dbg_force_ov1_blt_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	u32 cnt;
+
+	if (count >= sizeof(debug_buf))
+		return -EFAULT;
+
+	if (copy_from_user(debug_buf, buff, count))
+		return -EFAULT;
+
+	debug_buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(debug_buf, "%x", &dbg_force_ov1_blt);
+
+	pr_info("%s: dbg_force_ov1_blt = %x\n",
+		__func__, dbg_force_ov1_blt);
+
+	if ((dbg_force_ov1_blt & 0x0f) > 2)
+		pr_err("%s: invalid dbg_force_ov1_blt = %x\n",
+			__func__, dbg_force_ov1_blt);
+
+	if ((dbg_force_ov1_blt >> 4) > 2)
+		pr_err("%s: invalid dbg_force_ov1_blt = %d\n",
+			__func__, dbg_force_ov1_blt);
+
+	return count;
+}
+
+static const struct file_operations dbg_force_ov1_blt_fops = {
+	.open = dbg_open,
+	.release = dbg_release,
+	.read = dbg_force_ov1_blt_read,
+	.write = dbg_force_ov1_blt_write,
+};
+
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
 static uint32 hdmi_offset;
 static uint32 hdmi_count;
@@ -1327,6 +1378,22 @@
 	}
 #endif
 
+	if (debugfs_create_file("force_ov0_blt", 0644, dent, 0,
+				&dbg_force_ov0_blt_fops)
+			== NULL) {
+		pr_err("%s(%d): debugfs_create_file: debug fail\n",
+			__FILE__, __LINE__);
+		return -EFAULT;
+	}
+
+	if (debugfs_create_file("force_ov1_blt", 0644, dent, 0,
+				&dbg_force_ov1_blt_fops)
+			== NULL) {
+		pr_err("%s(%d): debugfs_create_file: debug fail\n",
+			__FILE__, __LINE__);
+		return -EFAULT;
+	}
+
 	dent = debugfs_create_dir("mddi", NULL);
 
 	if (IS_ERR(dent)) {
@@ -1342,15 +1409,6 @@
 		return -1;
 	}
 
-#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
-	if (debugfs_create_file("vsync", 0644, dent, 0, &vsync_fops)
-			== NULL) {
-		printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
-			__FILE__, __LINE__);
-		return -1;
-	}
-#endif
-
 	dent = debugfs_create_dir("emdh", NULL);
 
 	if (IS_ERR(dent)) {
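
force_ov0_blt and force_ov1_blt above hand-roll their fops so writes can be parsed as hex and range-checked with pr_err warnings. When a debugfs knob needs no validation, the stock helper is much shorter; a minimal sketch using debugfs_create_u32() (directory and variable names illustrative, values read back in decimal):

#include <linux/module.h>
#include <linux/debugfs.h>

static u32 dbg_force_blt;

static int __init dbg_blt_init(void)
{
	struct dentry *dent = debugfs_create_dir("mdp", NULL);

	if (IS_ERR_OR_NULL(dent))
		return -ENOMEM;

	/* exposes the u32 directly; no custom read/write fops needed */
	debugfs_create_u32("force_blt", 0644, dent, &dbg_force_blt);
	return 0;
}
late_initcall(dbg_blt_init);
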
diff --git a/drivers/video/msm/mdp_dma.c b/drivers/video/msm/mdp_dma.c
index ffb3257..842f96b 100644
--- a/drivers/video/msm/mdp_dma.c
+++ b/drivers/video/msm/mdp_dma.c
@@ -482,10 +482,22 @@
 #endif
 {
 	unsigned long flag;
+	static int first_vsync;
+	int need_wait = 0;
 
 	down(&mfd->dma->mutex);
-	if ((mfd) && (!mfd->dma->busy) && (mfd->panel_power_on)) {
+	if ((mfd) && (mfd->panel_power_on)) {
 		down(&mfd->sem);
+		spin_lock_irqsave(&mdp_spin_lock, flag);
+		if (mfd->dma->busy == TRUE)
+			need_wait++;
+		spin_unlock_irqrestore(&mdp_spin_lock, flag);
+
+		if (need_wait)
+			wait_for_completion_killable(&mfd->dma->comp);
+
+		/* schedule DMA to start */
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		mfd->ibuf_flushed = TRUE;
 		mdp_dma2_update_lcd(mfd);
 
@@ -493,15 +505,31 @@
 		mdp_enable_irq(MDP_DMA2_TERM);
 		mfd->dma->busy = TRUE;
 		INIT_COMPLETION(mfd->dma->comp);
-
+		INIT_COMPLETION(vsync_cntrl.vsync_comp);
+		if (!vsync_cntrl.vsync_irq_enabled &&
+				vsync_cntrl.disabled_clocks) {
+			MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
+			outp32(MDP_INTR_CLEAR, MDP_PRIM_RDPTR);
+			mdp_intr_mask |= MDP_PRIM_RDPTR;
+			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+			mdp_enable_irq(MDP_VSYNC_TERM);
+			vsync_cntrl.vsync_dma_enabled = 1;
+		}
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
 		/* schedule DMA to start */
 		mdp_dma_schedule(mfd, MDP_DMA2_TERM);
 		up(&mfd->sem);
 
-		/* wait until DMA finishes the current job */
-		wait_for_completion_killable(&mfd->dma->comp);
-		mdp_disable_irq(MDP_DMA2_TERM);
+		/* wait until Vsync finishes the current job */
+		if (first_vsync) {
+			if (!wait_for_completion_killable_timeout
+					(&vsync_cntrl.vsync_comp, HZ/10))
+				pr_err("Timedout DMA %s %d", __func__,
+								__LINE__);
+		} else {
+			first_vsync = 1;
+		}
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
 	/* signal if pan function is waiting for the update completion */
 		if (mfd->pan_waiting) {
@@ -524,21 +552,25 @@
 		INIT_COMPLETION(vsync_cntrl.vsync_wait);
 
 	vsync_cntrl.vsync_irq_enabled = enable;
-	if (!enable)
-		vsync_cntrl.disabled_clocks = 0;
 	disabled_clocks = vsync_cntrl.disabled_clocks;
 	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 
-	if (enable && disabled_clocks) {
+	if (enable && disabled_clocks)
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	if (enable && vsync_cntrl.disabled_clocks &&
+			!vsync_cntrl.vsync_dma_enabled) {
 		MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
-		spin_lock_irqsave(&mdp_spin_lock, flag);
 		outp32(MDP_INTR_CLEAR, MDP_PRIM_RDPTR);
 		mdp_intr_mask |= MDP_PRIM_RDPTR;
 		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
 		mdp_enable_irq(MDP_VSYNC_TERM);
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
+		vsync_cntrl.disabled_clocks = 0;
+	} else if (enable && vsync_cntrl.disabled_clocks) {
+		vsync_cntrl.disabled_clocks = 0;
 	}
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 	if (vsync_cntrl.vsync_irq_enabled &&
 		atomic_read(&vsync_cntrl.suspend) == 0)
 		atomic_set(&vsync_cntrl.vsync_resume, 1);
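
The mdp_dma.c hunk changes the pan path from blocking on the DMA-done completion to a bounded wait on the vsync read-pointer completion, so a missed vsync stalls the caller for at most ~100 ms instead of forever (and the very first frame skips the wait, since no vsync has been armed yet). The core wait pattern, as a sketch:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static DECLARE_COMPLETION(vsync_comp);	/* completed from the vsync ISR */

static void wait_one_vsync(void)
{
	/* re-arm before kicking the hardware, never after */
	INIT_COMPLETION(vsync_comp);

	/* ... start DMA / enable the read-pointer interrupt here ... */

	/* returns 0 on timeout; HZ/10 bounds the wait to ~100 ms */
	if (!wait_for_completion_killable_timeout(&vsync_comp, HZ / 10))
		pr_err("timed out waiting for vsync\n");
}
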
diff --git a/drivers/video/msm/mdp_dma_dsi_video.c b/drivers/video/msm/mdp_dma_dsi_video.c
index 89657d9..b349213 100644
--- a/drivers/video/msm/mdp_dma_dsi_video.c
+++ b/drivers/video/msm/mdp_dma_dsi_video.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -242,7 +241,7 @@
 		/*Turning on DMA_P block*/
 		mdp_pipe_ctrl(MDP_DMA2_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	}
-	mdp_histogram_ctrl_all(TRUE);
+
 	/* MDP cmd block disable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
@@ -252,7 +251,6 @@
 int mdp_dsi_video_off(struct platform_device *pdev)
 {
 	int ret = 0;
-	mdp_histogram_ctrl_all(FALSE);
 	/* MDP cmd block enable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
diff --git a/drivers/video/msm/mdp_dma_lcdc.c b/drivers/video/msm/mdp_dma_lcdc.c
index 668ce53..04d8b01 100644
--- a/drivers/video/msm/mdp_dma_lcdc.c
+++ b/drivers/video/msm/mdp_dma_lcdc.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (c) 2008-2009, 2012-2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2009, 2012 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -309,11 +308,9 @@
 		MDP_OUTP(MDP_BASE + timer_base, 1);
 		mdp_pipe_ctrl(block, MDP_BLOCK_POWER_ON, FALSE);
 	}
-	mdp_histogram_ctrl_all(TRUE);
 	/* MDP cmd block disable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
-
 	return ret;
 }
 
@@ -332,7 +329,6 @@
 		timer_base = DTV_BASE;
 	}
 #endif
-	mdp_histogram_ctrl_all(FALSE);
 
 	down(&mfd->dma->mutex);
 	/* MDP cmd block enable */
diff --git a/drivers/video/msm/mdp_hw_init.c b/drivers/video/msm/mdp_hw_init.c
index 3818ed4..ad2789b 100644
--- a/drivers/video/msm/mdp_hw_init.c
+++ b/drivers/video/msm/mdp_hw_init.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2009, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2009, 2012 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -584,7 +584,7 @@
 
 #define   IRQ_EN_1__MDP_IRQ___M    0x00000800
 
-void mdp_hw_init(void)
+void mdp_hw_init(int splash)
 {
 	int i;
 
@@ -632,7 +632,8 @@
 	MDP_OUTP(MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4, 0);
 
 #ifndef CONFIG_FB_MSM_MDP22
-	MDP_OUTP(MDP_BASE + 0xE0000, 0);
+	if (!splash)
+		MDP_OUTP(MDP_BASE + 0xE0000, 0);
 	MDP_OUTP(MDP_BASE + 0x100, 0xffffffff);
 	MDP_OUTP(MDP_BASE + 0x90070, 0);
 #endif
diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c
index 71551b9..2e795fd 100644
--- a/drivers/video/msm/mdp_ppp.c
+++ b/drivers/video/msm/mdp_ppp.c
@@ -22,7 +22,6 @@
 #include <linux/fb.h>
 #include <linux/msm_mdp.h>
 #include <linux/file.h>
-#include <linux/android_pmem.h>
 #include <linux/major.h>
 
 #include "linux/proc_fs.h"
@@ -548,40 +547,8 @@
 	  format == MDP_Y_CRCB_H2V2) ?  2 : (format == MDP_Y_CBCR_H2V1 || \
 	  format == MDP_Y_CRCB_H2V1) ?  1 : 1)
 
-#ifdef CONFIG_ANDROID_PMEM
-static void get_len(struct mdp_img *img, struct mdp_rect *rect, uint32_t bpp,
-			uint32_t *len0, uint32_t *len1)
-{
-	*len0 = IMG_LEN(rect->h, img->width, rect->w, bpp);
-	if (IS_PSEUDOPLNR(img->format))
-		*len1 = *len0/Y_TO_CRCB_RATIO(img->format);
-	else
-		*len1 = 0;
-}
-
-static void flush_imgs(struct mdp_blit_req *req, int src_bpp, int dst_bpp,
-			struct file *p_src_file, struct file *p_dst_file)
-{
-	uint32_t src0_len, src1_len;
-
-	if (!(req->flags & MDP_BLIT_NON_CACHED)) {
-		/* flush src images to memory before dma to mdp */
-		get_len(&req->src, &req->src_rect, src_bpp,
-		&src0_len, &src1_len);
-
-		flush_pmem_file(p_src_file,
-		req->src.offset, src0_len);
-
-		if (IS_PSEUDOPLNR(req->src.format))
-			flush_pmem_file(p_src_file,
-				req->src.offset + src0_len, src1_len);
-	}
-
-}
-#else
 static void flush_imgs(struct mdp_blit_req *req, int src_bpp, int dst_bpp,
 			struct file *p_src_file, struct file *p_dst_file) { }
-#endif
 
 static void mdp_start_ppp(struct msm_fb_data_type *mfd, MDPIBUF *iBuf,
 struct mdp_blit_req *req, struct file *p_src_file, struct file *p_dst_file)
@@ -1286,9 +1253,6 @@
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 #endif
-#ifdef CONFIG_ANDROID_PMEM
-	unsigned long vstart;
-#endif
 
 	if (req->flags & MDP_MEMORY_ID_TYPE_FB) {
 		file = fget_light(img->memory_id, &put_needed);
@@ -1321,21 +1285,10 @@
 			return -EINVAL;
 #endif
 
-#ifdef CONFIG_ANDROID_PMEM
-	if (!get_pmem_file(img->memory_id, start, &vstart, len, srcp_file))
-		return ret;
-	else
-		return -EINVAL;
-#endif
 }
 
 void put_img(struct file *p_src_file, struct ion_handle *p_ihdl)
 {
-#ifdef CONFIG_ANDROID_PMEM
-	if (p_src_file)
-		put_pmem_file(p_src_file);
-#endif
-
 #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
 	if (!IS_ERR_OR_NULL(p_ihdl))
 		ion_free(ppp_display_iclient, p_ihdl);
@@ -1401,6 +1354,9 @@
 
 	iBuf.mdpImg.mdpOp = MDPOP_NOP;
 
+	if (req->flags & MDP_IS_FG)
+		iBuf.mdpImg.mdpOp |= MDPOP_LAYER_IS_FG;
+
 	/* blending check */
 	if (req->transp_mask != MDP_TRANSP_NOP) {
 		iBuf.mdpImg.mdpOp |= MDPOP_TRANSP;
@@ -1698,12 +1654,19 @@
 	return 0;
 }
 
-int mdp_ppp_v4l2_overlay_play(struct fb_info *info,
+int mdp_ppp_v4l2_overlay_play(struct fb_info *info, bool bUserPtr,
 	unsigned long srcp0_addr, unsigned long srcp0_size,
 	unsigned long srcp1_addr, unsigned long srcp1_size)
 {
 	int ret;
+	unsigned long srcp0_start = 0, srcp1_start = 0;
+	unsigned long srcp0_len = 0, srcp1_len = 0;
 
+	struct ion_handle *srcp0_ihdl = NULL;
+	struct ion_handle *srcp1_ihdl = NULL;
+	struct msm_fb_data_type *mfd = info->par;
+
+	ppp_display_iclient = mfd->iclient;
 	if (!mdp_overlay_req_set) {
 		pr_err("mdp_ppp:v4l2:No overlay set, ignore play req\n");
 		return -EINVAL;
@@ -1712,6 +1675,31 @@
 	overlay_req.dst.width = info->var.xres;
 	overlay_req.dst.height = info->var.yres;
 
+	if (bUserPtr) {
+		overlay_req.src.memory_id = srcp0_addr;
+		get_img(&overlay_req.src, &overlay_req, info, &srcp0_start,
+					&srcp0_len, NULL, &srcp0_ihdl);
+		if (srcp0_len == 0) {
+			pr_err("%s: could not retrieve source image0"
+						, __func__);
+			return -EINVAL;
+		}
+		srcp0_addr = srcp0_start + srcp0_size;
+		srcp0_size = srcp0_len;
+
+		if (srcp1_addr) {
+			overlay_req.src.memory_id = srcp1_addr;
+			get_img(&overlay_req.src, &overlay_req, info,
+				&srcp1_start, &srcp1_len, NULL, &srcp1_ihdl);
+			if (srcp1_len == 0) {
+				pr_err("%s: could not retrieve source image1"
+					, __func__);
+				return -EINVAL;
+			}
+			srcp1_addr = srcp1_start + srcp1_size;
+			srcp1_size = srcp1_len;
+		}
+	}
 	ret = mdp_ppp_blit_addr(info, &overlay_req,
 		srcp0_addr, srcp0_size, srcp1_addr, srcp1_size,
 		info->fix.smem_start, info->fix.smem_len, NULL, NULL,
diff --git a/drivers/video/msm/mdp_ppp_v20.c b/drivers/video/msm/mdp_ppp_v20.c
index 4062d3a..50164fd 100644
--- a/drivers/video/msm/mdp_ppp_v20.c
+++ b/drivers/video/msm/mdp_ppp_v20.c
@@ -2467,7 +2467,8 @@
 			bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
 				PPP_BLEND_BG_ALPHA_REVERSE;
 
-			if (perPixelAlpha) {
+			if ((perPixelAlpha) && !(iBuf->mdpImg.mdpOp &
+							MDPOP_LAYER_IS_FG)) {
 				bg_alpha |= PPP_BLEND_BG_SRCPIXEL_ALPHA;
 			} else {
 				bg_alpha |= PPP_BLEND_BG_CONSTANT_ALPHA;
@@ -2478,7 +2479,12 @@
 			if (iBuf->mdpImg.mdpOp & MDPOP_TRANSP)
 				*pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP;
 		} else if (perPixelAlpha) {
-				*pppop_reg_ptr |= PPP_OP_ROT_ON |
+				if (iBuf->mdpImg.mdpOp & MDPOP_LAYER_IS_FG)
+					*pppop_reg_ptr |= PPP_OP_ROT_ON |
+						  PPP_OP_BLEND_ON |
+						  PPP_OP_BLEND_CONSTANT_ALPHA;
+				else
+					*pppop_reg_ptr |= PPP_OP_ROT_ON |
 						  PPP_OP_BLEND_ON |
 						  PPP_OP_BLEND_SRCPIXEL_ALPHA;
 				outpdw(MDP_BASE + 0x70010, 0);
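
The mdp_ppp_v20.c hunks make MDPOP_LAYER_IS_FG override per-pixel alpha: a layer flagged as foreground blends with constant alpha even when its pixel format carries an alpha channel, in both the background-alpha path and the rotator path. Condensed into one helper (not how the driver factors it, but the same decision):

static u32 ppp_blend_flags(u32 mdp_op, int per_pixel_alpha)
{
	u32 op = PPP_OP_ROT_ON | PPP_OP_BLEND_ON;

	/* a foreground layer ignores its own per-pixel alpha */
	if (per_pixel_alpha && !(mdp_op & MDPOP_LAYER_IS_FG))
		op |= PPP_OP_BLEND_SRCPIXEL_ALPHA;
	else
		op |= PPP_OP_BLEND_CONSTANT_ALPHA;

	return op;
}
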
diff --git a/drivers/video/msm/mdss/Kconfig b/drivers/video/msm/mdss/Kconfig
index 30351a3..7682a49 100644
--- a/drivers/video/msm/mdss/Kconfig
+++ b/drivers/video/msm/mdss/Kconfig
@@ -3,3 +3,20 @@
 	---help---
 	The MDSS Writeback Panel provides support for routing the output of
 	MDSS frame buffer driver and MDP processing to memory.
+
+config FB_MSM_MDSS_HDMI_PANEL
+	depends on FB_MSM_MDSS
+	bool "MDSS HDMI Tx Panel"
+	default n
+	---help---
+	The MDSS HDMI Panel provides support for transmitting TMDS signals of
+	MDSS frame buffer data to connected HDMI-compliant TVs, monitors, etc.
+
+config FB_MSM_MDSS_HDMI_MHL_SII8334
+	depends on FB_MSM_MDSS_HDMI_PANEL
+	bool "MHL SII8334 support"
+	default n
+	---help---
+	  Support for HDMI to MHL conversion.
+	  MHL (Mobile High-Definition Link) technology
+	  uses a USB connector to output HDMI content.
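
Enabling the new panels is a defconfig change; the MHL bridge option only appears once the HDMI panel is selected. A fragment, illustrative only:

CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334=y
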
diff --git a/drivers/video/msm/mdss/Makefile b/drivers/video/msm/mdss/Makefile
index b6294f4..2c58e49 100644
--- a/drivers/video/msm/mdss/Makefile
+++ b/drivers/video/msm/mdss/Makefile
@@ -1,16 +1,41 @@
+mdss-mdp3-objs = mdp3.o mdp3_dma.o mdp3_ctrl.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp3.o
+
 mdss-mdp-objs := mdss_mdp.o mdss_mdp_ctl.o mdss_mdp_pipe.o mdss_mdp_util.o
 mdss-mdp-objs += mdss_mdp_pp.o
 mdss-mdp-objs += mdss_mdp_intf_video.o
+mdss-mdp-objs += mdss_mdp_intf_cmd.o
 mdss-mdp-objs += mdss_mdp_intf_writeback.o
 mdss-mdp-objs += mdss_mdp_rotator.o
 mdss-mdp-objs += mdss_mdp_overlay.o
 mdss-mdp-objs += mdss_mdp_wb.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
-obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
+
+ifeq ($(CONFIG_FB_MSM_MDSS),y)
+obj-$(CONFIG_DEBUG_FS) += mdss_debug.o
+endif
+
+dsi-v2-objs = dsi_v2.o dsi_host_v2.o dsi_io_v2.o dsi_panel_v2.o
+obj-$(CONFIG_FB_MSM_MDSS) += dsi-v2.o
 
 mdss-dsi-objs := mdss_dsi.o mdss_dsi_host.o
 mdss-dsi-objs += mdss_dsi_panel.o
 mdss-dsi-objs += msm_mdss_io_8974.o
 obj-$(CONFIG_FB_MSM_MDSS) += mdss-dsi.o
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_edp.o
+
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_io_util.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_util.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_edid.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_cec.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o
 
 obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
+
+mdss-qpic-objs := mdss_qpic.o mdss_fb.o mdss_qpic_panel.o
+obj-$(CONFIG_FB_MSM_QPIC) += mdss-qpic.o
+obj-$(CONFIG_FB_MSM_QPIC_ILI_QVGA_PANEL) += qpic_panel_ili_qvga.o
+
+obj-$(CONFIG_FB_MSM_MDSS) += mdss_fb.o
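
The Makefile relies on kbuild composite objects: a name-objs list names the parts, and the matching obj-$(CONFIG_...) += name.o line links them into a single object only when the option is set. The qpic addition above follows exactly this shape:

# parts that are linked together into mdss-qpic.o
mdss-qpic-objs := mdss_qpic.o mdss_fb.o mdss_qpic_panel.o
# built (as one object) only when CONFIG_FB_MSM_QPIC=y
obj-$(CONFIG_FB_MSM_QPIC) += mdss-qpic.o
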
diff --git a/drivers/video/msm/mdss/dsi_host_v2.c b/drivers/video/msm/mdss/dsi_host_v2.c
new file mode 100644
index 0000000..453cbaa
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_host_v2.c
@@ -0,0 +1,1034 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include "dsi_v2.h"
+#include "dsi_io_v2.h"
+#include "dsi_host_v2.h"
+
+#define DSI_POLL_SLEEP_US 1000
+#define DSI_POLL_TIMEOUT_US 16000
+#define DSI_ESC_CLK_RATE 19200000
+
+struct dsi_host_v2_private {
+	struct completion dma_comp;
+	int irq_enabled;
+	spinlock_t irq_lock;
+	spinlock_t mdp_lock;
+	int mdp_busy;
+	int irq_no;
+	unsigned char *dsi_base;
+	struct device dis_dev;
+};
+
+static struct dsi_host_v2_private *dsi_host_private;
+
+int msm_dsi_init(void)
+{
+	if (!dsi_host_private) {
+		dsi_host_private = kzalloc(sizeof(struct dsi_host_v2_private),
+					GFP_KERNEL);
+		if (!dsi_host_private) {
+			pr_err("fail to alloc dsi host private data\n");
+			return -ENOMEM;
+		}
+	}
+
+	init_completion(&dsi_host_private->dma_comp);
+	spin_lock_init(&dsi_host_private->irq_lock);
+	spin_lock_init(&dsi_host_private->mdp_lock);
+	return 0;
+}
+
+void msm_dsi_deinit(void)
+{
+	kfree(dsi_host_private);
+	dsi_host_private = NULL;
+}
+
+void msm_dsi_ack_err_status(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_ACK_ERR_STATUS);
+
+	if (status) {
+		MIPI_OUTP(ctrl_base + DSI_ACK_ERR_STATUS, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_timeout_status(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_TIMEOUT_STATUS);
+	if (status & 0x0111) {
+		MIPI_OUTP(ctrl_base + DSI_TIMEOUT_STATUS, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_dln0_phy_err(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_DLN0_PHY_ERR);
+
+	if (status & 0x011111) {
+		MIPI_OUTP(ctrl_base + DSI_DLN0_PHY_ERR, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_fifo_status(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_FIFO_STATUS);
+
+	if (status & 0x44444489) {
+		MIPI_OUTP(ctrl_base + DSI_FIFO_STATUS, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_status(unsigned char *ctrl_base)
+{
+	u32 status;
+
+	status = MIPI_INP(ctrl_base + DSI_STATUS);
+
+	if (status & 0x80000000) {
+		MIPI_OUTP(ctrl_base + DSI_STATUS, status);
+		pr_debug("%s: status=%x\n", __func__, status);
+	}
+}
+
+void msm_dsi_error(unsigned char *ctrl_base)
+{
+	msm_dsi_ack_err_status(ctrl_base);
+	msm_dsi_timeout_status(ctrl_base);
+	msm_dsi_fifo_status(ctrl_base);
+	msm_dsi_status(ctrl_base);
+	msm_dsi_dln0_phy_err(ctrl_base);
+}
+
+void msm_dsi_enable_irq(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dsi_host_private->irq_lock, flags);
+	if (dsi_host_private->irq_enabled) {
+		pr_debug("%s: IRQ aleady enabled\n", __func__);
+		spin_unlock_irqrestore(&dsi_host_private->irq_lock, flags);
+		return;
+	}
+
+	enable_irq(dsi_host_private->irq_no);
+	dsi_host_private->irq_enabled = 1;
+	spin_unlock_irqrestore(&dsi_host_private->irq_lock, flags);
+}
+
+void msm_dsi_disable_irq(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dsi_host_private->irq_lock, flags);
+	if (dsi_host_private->irq_enabled == 0) {
+		pr_debug("%s: IRQ already disabled\n", __func__);
+		spin_unlock_irqrestore(&dsi_host_private->irq_lock, flags);
+		return;
+	}
+	disable_irq(dsi_host_private->irq_no);
+	dsi_host_private->irq_enabled = 0;
+	spin_unlock_irqrestore(&dsi_host_private->irq_lock, flags);
+}
+
+void msm_dsi_disable_irq_nosync(void)
+{
+	spin_lock(&dsi_host_private->irq_lock);
+	if (dsi_host_private->irq_enabled == 0) {
+		pr_debug("%s: IRQ cannot be disabled\n", __func__);
+		spin_unlock(&dsi_host_private->irq_lock);
+		return;
+	}
+	disable_irq_nosync(dsi_host_private->irq_no);
+	dsi_host_private->irq_enabled = 0;
+	spin_unlock(&dsi_host_private->irq_lock);
+}
+
+irqreturn_t msm_dsi_isr(int irq, void *ptr)
+{
+	u32 isr;
+
+	isr = MIPI_INP(dsi_host_private->dsi_base + DSI_INT_CTRL);
+	MIPI_OUTP(dsi_host_private->dsi_base + DSI_INT_CTRL, isr);
+
+	if (isr & DSI_INTR_ERROR)
+		msm_dsi_error(dsi_host_private->dsi_base);
+
+	if (isr & DSI_INTR_CMD_DMA_DONE)
+		complete(&dsi_host_private->dma_comp);
+
+	if (isr & DSI_INTR_CMD_MDP_DONE) {
+		spin_lock(&dsi_host_private->mdp_lock);
+		dsi_host_private->mdp_busy = false;
+		msm_dsi_disable_irq_nosync();
+		spin_unlock(&dsi_host_private->mdp_lock);
+	}
+
+	return IRQ_HANDLED;
+}
+
+int msm_dsi_irq_init(struct device *dev, int irq_no)
+{
+	int ret;
+
+	ret = devm_request_irq(dev, irq_no, msm_dsi_isr,
+				IRQF_DISABLED, "DSI", NULL);
+	if (ret) {
+		pr_err("msm_dsi_irq_init request_irq() failed!\n");
+		return ret;
+	}
+	dsi_host_private->irq_no = irq_no;
+	disable_irq(irq_no);
+	return 0;
+}
+
+void msm_dsi_host_init(struct mipi_panel_info *pinfo)
+{
+	u32 dsi_ctrl, intr_ctrl, data;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	pr_debug("msm_dsi_host_init\n");
+	pinfo->rgb_swap = DSI_RGB_SWAP_RGB;
+
+	if (pinfo->mode == DSI_VIDEO_MODE) {
+		data = 0;
+		if (pinfo->pulse_mode_hsa_he)
+			data |= BIT(28);
+		if (pinfo->hfp_power_stop)
+			data |= BIT(24);
+		if (pinfo->hbp_power_stop)
+			data |= BIT(20);
+		if (pinfo->hsa_power_stop)
+			data |= BIT(16);
+		if (pinfo->eof_bllp_power_stop)
+			data |= BIT(15);
+		if (pinfo->bllp_power_stop)
+			data |= BIT(12);
+		data |= ((pinfo->traffic_mode & 0x03) << 8);
+		data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
+		data |= (pinfo->vc & 0x03);
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_CTRL, data);
+
+		data = 0;
+		data |= ((pinfo->rgb_swap & 0x07) << 12);
+		if (pinfo->b_sel)
+			data |= BIT(8);
+		if (pinfo->g_sel)
+			data |= BIT(4);
+		if (pinfo->r_sel)
+			data |= BIT(0);
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_DATA_CTRL, data);
+	} else if (pinfo->mode == DSI_CMD_MODE) {
+		data = 0;
+		data |= ((pinfo->interleave_max & 0x0f) << 20);
+		data |= ((pinfo->rgb_swap & 0x07) << 16);
+		if (pinfo->b_sel)
+			data |= BIT(12);
+		if (pinfo->g_sel)
+			data |= BIT(8);
+		if (pinfo->r_sel)
+			data |= BIT(4);
+		data |= (pinfo->dst_format & 0x0f); /* 4 bits */
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_CTRL, data);
+
+		/* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
+		data = pinfo->wr_mem_continue & 0x0ff;
+		data <<= 8;
+		data |= (pinfo->wr_mem_start & 0x0ff);
+		if (pinfo->insert_dcs_cmd)
+			data |= BIT(16);
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL,
+				data);
+	} else
+		pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);
+
+	dsi_ctrl = BIT(8) | BIT(2); /* clock enable & cmd mode */
+	intr_ctrl = 0;
+	intr_ctrl = (DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_CMD_MDP_DONE_MASK);
+
+	if (pinfo->crc_check)
+		dsi_ctrl |= BIT(24);
+	if (pinfo->ecc_check)
+		dsi_ctrl |= BIT(20);
+	if (pinfo->data_lane3)
+		dsi_ctrl |= BIT(7);
+	if (pinfo->data_lane2)
+		dsi_ctrl |= BIT(6);
+	if (pinfo->data_lane1)
+		dsi_ctrl |= BIT(5);
+	if (pinfo->data_lane0)
+		dsi_ctrl |= BIT(4);
+
+	/* from frame buffer, low power mode */
+	/* DSI_COMMAND_MODE_DMA_CTRL */
+	MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, 0x14000000);
+
+	data = 0;
+	if (pinfo->te_sel)
+		data |= BIT(31);
+	data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
+	data |= pinfo->dma_trigger;	/* cmd dma trigger */
+	data |= (pinfo->stream & 0x01) << 8;
+	MIPI_OUTP(ctrl_base + DSI_TRIG_CTRL, data);
+
+	/* DSI_LANE_SWAP_CTRL */
+	MIPI_OUTP(ctrl_base + DSI_LANE_SWAP_CTRL, pinfo->dlane_swap);
+
+	/* clock out ctrl */
+	data = pinfo->t_clk_post & 0x3f;	/* 6 bits */
+	data <<= 8;
+	data |= pinfo->t_clk_pre & 0x3f;	/*  6 bits */
+	/* DSI_CLKOUT_TIMING_CTRL */
+	MIPI_OUTP(ctrl_base + DSI_CLKOUT_TIMING_CTRL, data);
+
+	data = 0;
+	if (pinfo->rx_eot_ignore)
+		data |= BIT(4);
+	if (pinfo->tx_eot_append)
+		data |= BIT(0);
+	MIPI_OUTP(ctrl_base + DSI_EOT_PACKET_CTRL, data);
+
+
+	/* allow only ack-err-status  to generate interrupt */
+	/* DSI_ERR_INT_MASK0 */
+	MIPI_OUTP(ctrl_base + DSI_ERR_INT_MASK0, 0x13ff3fe0);
+
+	intr_ctrl |= DSI_INTR_ERROR_MASK;
+	MIPI_OUTP(ctrl_base + DSI_INT_CTRL, intr_ctrl);
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f);
+
+	dsi_ctrl |= BIT(0);	/* enable dsi */
+	MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+
+	wmb();
+}
+
+void msm_dsi_set_tx_power_mode(int mode)
+{
+	u32 data;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	data = MIPI_INP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL);
+
+	if (mode == 0)
+		data &= ~BIT(26);
+	else
+		data |= BIT(26);
+
+	MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, data);
+}
+
+void msm_dsi_sw_reset(void)
+{
+	u32 dsi_ctrl;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	pr_debug("msm_dsi_sw_reset\n");
+
+	dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+	dsi_ctrl &= ~0x01;
+	MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+	wmb();
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0x23f);
+	wmb();
+
+	MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x01);
+	wmb();
+	MIPI_OUTP(ctrl_base + DSI_SOFT_RESET, 0x00);
+	wmb();
+}
+
+void msm_dsi_controller_cfg(int enable)
+{
+	u32 dsi_ctrl, status;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	pr_debug("msm_dsi_controller_cfg\n");
+
+	/* Check for CMD_MODE_DMA_BUSY */
+	if (readl_poll_timeout((ctrl_base + DSI_STATUS),
+				status,
+				((status & 0x02) == 0),
+				DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US))
+		pr_err("%s: DSI status=%x failed\n", __func__, status);
+
+	/* Check for x_HS_FIFO_EMPTY */
+	if (readl_poll_timeout((ctrl_base + DSI_FIFO_STATUS),
+				status,
+				((status & 0x11111000) == 0x11111000),
+				DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US))
+		pr_err("%s: FIFO status=%x failed\n", __func__, status);
+
+	/* Check for VIDEO_MODE_ENGINE_BUSY */
+	if (readl_poll_timeout((ctrl_base + DSI_STATUS),
+				status,
+				((status & 0x08) == 0),
+				DSI_POLL_SLEEP_US, DSI_POLL_TIMEOUT_US)) {
+		pr_err("%s: DSI status=%x\n", __func__, status);
+		pr_err("%s: Doing sw reset\n", __func__);
+		msm_dsi_sw_reset();
+	}
+
+	dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+	if (enable)
+		dsi_ctrl |= 0x01;
+	else
+		dsi_ctrl &= ~0x01;
+
+	MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+	wmb();
+}
+
+void msm_dsi_op_mode_config(int mode, struct mdss_panel_data *pdata)
+{
+	u32 dsi_ctrl, intr_ctrl;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	pr_debug("msm_dsi_op_mode_config\n");
+
+	dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+	/* if video mode is enabled, keep video and cmd mode ON */
+	if (dsi_ctrl & 0x02)
+		dsi_ctrl &= ~0x05;
+	else
+		dsi_ctrl &= ~0x07;
+
+	if (mode == DSI_VIDEO_MODE) {
+		dsi_ctrl |= 0x03;
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK;
+	} else {		/* command mode */
+		dsi_ctrl |= 0x05;
+		if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
+			dsi_ctrl |= 0x02;
+
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
+				DSI_INTR_CMD_MDP_DONE_MASK;
+	}
+
+	pr_debug("%s: dsi_ctrl=%x intr=%x\n", __func__, dsi_ctrl, intr_ctrl);
+
+	MIPI_OUTP(ctrl_base + DSI_INT_CTRL, intr_ctrl);
+	MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+	wmb();
+}
+
+void msm_dsi_cmd_mdp_start(void)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+	msm_dsi_enable_irq();
+	dsi_host_private->mdp_busy = true;
+	spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+}
+
+int msm_dsi_cmd_reg_tx(u32 data)
+{
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	MIPI_OUTP(ctrl_base + DSI_TRIG_CTRL, 0x04);/* sw trigger */
+	MIPI_OUTP(ctrl_base + DSI_CTRL, 0x135);
+	wmb();
+
+	MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_DMA_CTRL, data);
+	wmb();
+	MIPI_OUTP(ctrl_base + DSI_CMD_MODE_DMA_SW_TRIGGER, 0x01);
+	wmb();
+
+	udelay(300); /*per spec*/
+
+	return 0;
+}
+
+int msm_dsi_cmd_dma_tx(struct dsi_buf *tp)
+{
+	int len;
+	unsigned long size, addr;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	len = ALIGN(tp->len, 4);
+	size = ALIGN(tp->len, SZ_4K);
+
+	tp->dmap = dma_map_single(&dsi_host_private->dis_dev, tp->data, size,
+				DMA_TO_DEVICE);
+	if (dma_mapping_error(&dsi_host_private->dis_dev, tp->dmap)) {
+		pr_err("%s: dmap mapp failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	addr = tp->dmap;
+
+	INIT_COMPLETION(dsi_host_private->dma_comp);
+
+	MIPI_OUTP(ctrl_base + DSI_DMA_CMD_OFFSET, addr);
+	MIPI_OUTP(ctrl_base + DSI_DMA_CMD_LENGTH, len);
+	wmb();
+
+	MIPI_OUTP(ctrl_base + DSI_CMD_MODE_DMA_SW_TRIGGER, 0x01);
+	wmb();
+
+	wait_for_completion_interruptible(&dsi_host_private->dma_comp);
+
+	dma_unmap_single(&dsi_host_private->dis_dev, tp->dmap, size,
+			DMA_TO_DEVICE);
+	tp->dmap = 0;
+	return 0;
+}
+
+int msm_dsi_cmd_dma_rx(struct dsi_buf *rp, int rlen)
+{
+	u32 *lp, data;
+	int i, off, cnt;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	lp = (u32 *)rp->data;
+	cnt = rlen;
+	cnt += 3;
+	cnt >>= 2;
+
+	if (cnt > 4)
+		cnt = 4; /* 4 x 32 bits registers only */
+
+	off = DSI_RDBK_DATA0;
+	off += ((cnt - 1) * 4);
+
+	for (i = 0; i < cnt; i++) {
+		data = (u32)MIPI_INP(ctrl_base + off);
+		*lp++ = ntohl(data); /* to host byte order */
+		pr_debug("%s: data = 0x%x and ntohl(data) = 0x%x\n",
+					 __func__, data, ntohl(data));
+		off -= 4;
+		rp->len += sizeof(*lp);
+	}
+
+	return 0;
+}
+
+int msm_dsi_cmds_tx(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
+{
+	struct dsi_cmd_desc *cm;
+	u32 dsi_ctrl, ctrl;
+	int i, video_mode;
+	unsigned long flag;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	/*
+	 * turn on cmd mode;
+	 * for video mode, do not send cmds spanning more than
+	 * one pixel line, since they are only transmitted
+	 * during the BLLP.
+	 */
+	dsi_ctrl = MIPI_INP(ctrl_base + DSI_CTRL);
+	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
+	if (video_mode) {
+		ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
+		MIPI_OUTP(ctrl_base + DSI_CTRL, ctrl);
+	}
+
+	spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+	msm_dsi_enable_irq();
+	dsi_host_private->mdp_busy = true;
+	spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+
+	cm = cmds;
+	dsi_buf_init(tp);
+	for (i = 0; i < cnt; i++) {
+		dsi_buf_init(tp);
+		dsi_cmd_dma_add(tp, cm);
+		msm_dsi_cmd_dma_tx(tp);
+		if (cm->wait)
+			msleep(cm->wait);
+		cm++;
+	}
+
+	spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+	dsi_host_private->mdp_busy = false;
+	msm_dsi_disable_irq();
+	spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+
+	if (video_mode)
+		MIPI_OUTP(ctrl_base + DSI_CTRL, dsi_ctrl);
+	return 0;
+}
+
+/* MDSS_DSI_MRPS, Maximum Return Packet Size */
+static char max_pktsize[2] = {0x00, 0x00}; /* LSB tx first, 10 bytes */
+
+static struct dsi_cmd_desc pkt_size_cmd[] = {
+	{DTYPE_MAX_PKTSIZE, 1, 0, 0, 0,
+		sizeof(max_pktsize), max_pktsize}
+};
+
+/*
+ * A DSI panel replies with MAX_RETURN_PACKET_SIZE bytes of data
+ * plus DCS header, ECC and CRC for a DCS long read response.
+ * mdss_dsi_controller only has 4 x 32-bit registers (16 bytes) to
+ * hold data per transaction.
+ * MDSS_DSI_LEN equals 8;
+ * len should be either 4 or 8.
+ * Any return data longer than MDSS_DSI_LEN needs to be broken
+ * down into multiple transactions.
+ *
+ * ov_mutex needs to be acquired before calling this function.
+ */
+int msm_dsi_cmds_rx(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_buf *rp,
+			struct dsi_cmd_desc *cmds, int rlen)
+{
+	int cnt, len, diff, pkt_size;
+	unsigned long flag;
+	char cmd;
+
+	if (pdata->panel_info.mipi.no_max_pkt_size)
+		rlen = ALIGN(rlen, 4); /* Only support rlen = 4*n */
+
+	len = rlen;
+	diff = 0;
+
+	if (len <= 2) {
+		cnt = 4;	/* short read */
+	} else {
+		if (len > DSI_LEN)
+			len = DSI_LEN;	/* 8 bytes at most */
+
+		len = ALIGN(len, 4); /* len 4 bytes align */
+		diff = len - rlen;
+		/*
+		 * add extra 2 bytes to len so the overall
+		 * packet size is a multiple of 4. This also
+		 * makes sure the 4-byte dcs header locates
+		 * within a 32-bit register after shift-in.
+		 * In the end, len should be either 6 or 10.
+		 */
+		len += 2;
+		cnt = len + 6; /* 4 bytes header + 2 bytes crc */
+	}
+
+	spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+	msm_dsi_enable_irq();
+	dsi_host_private->mdp_busy = true;
+	spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+
+	if (!pdata->panel_info.mipi.no_max_pkt_size) {
+		/* packet size need to be set at every read */
+		pkt_size = len;
+		max_pktsize[0] = pkt_size;
+		dsi_buf_init(tp);
+		dsi_cmd_dma_add(tp, pkt_size_cmd);
+		msm_dsi_cmd_dma_tx(tp);
+		pr_debug("%s: Max packet size sent\n", __func__);
+	}
+
+	dsi_buf_init(tp);
+	dsi_cmd_dma_add(tp, cmds);
+
+	/* transmit read command to client */
+	msm_dsi_cmd_dma_tx(tp);
+	/*
+	 * once cmd_dma_done interrupt received,
+	 * return data from client is ready and stored
+	 * at RDBK_DATA register already
+	 */
+	dsi_buf_init(rp);
+	if (pdata->panel_info.mipi.no_max_pkt_size) {
+		/*
+		 * expect rlen = n * 4
+		 * short alignement for start addr
+		 */
+		rp->data += 2;
+	}
+
+	msm_dsi_cmd_dma_rx(rp, cnt);
+
+	spin_lock_irqsave(&dsi_host_private->mdp_lock, flag);
+	dsi_host_private->mdp_busy = false;
+	msm_dsi_disable_irq();
+	spin_unlock_irqrestore(&dsi_host_private->mdp_lock, flag);
+
+	if (pdata->panel_info.mipi.no_max_pkt_size) {
+		/*
+		 * skip the extra 2 bytes inserted into the
+		 * shift register by the previous rx
+		 * transaction when the shift registers were
+		 * copied to the rx buffer; the rx payload
+		 * starts from a long-aligned addr
+		 */
+		rp->data += 2;
+	}
+
+	cmd = rp->data[0];
+	switch (cmd) {
+	case DTYPE_ACK_ERR_RESP:
+		pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
+		break;
+	case DTYPE_GEN_READ1_RESP:
+	case DTYPE_DCS_READ1_RESP:
+		dsi_short_read1_resp(rp);
+		break;
+	case DTYPE_GEN_READ2_RESP:
+	case DTYPE_DCS_READ2_RESP:
+		dsi_short_read2_resp(rp);
+		break;
+	case DTYPE_GEN_LREAD_RESP:
+	case DTYPE_DCS_LREAD_RESP:
+		dsi_long_read_resp(rp);
+		rp->len -= 2; /* extra 2 bytes added */
+		rp->len -= diff; /* align bytes */
+		break;
+	default:
+		pr_debug("%s: Unknown cmd received\n", __func__);
+		break;
+	}
+
+	return rp->len;
+}
+
+static int msm_dsi_cal_clk_rate(struct mdss_panel_data *pdata,
+				u32 *bitclk_rate,
+				u32 *byteclk_rate,
+				u32 *pclk_rate)
+{
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+	int lanes;
+
+	pinfo = &pdata->panel_info;
+	mipi  = &pdata->panel_info.mipi;
+
+	hbp = pdata->panel_info.lcdc.h_back_porch;
+	hfp = pdata->panel_info.lcdc.h_front_porch;
+	vbp = pdata->panel_info.lcdc.v_back_porch;
+	vfp = pdata->panel_info.lcdc.v_front_porch;
+	hspw = pdata->panel_info.lcdc.h_pulse_width;
+	vspw = pdata->panel_info.lcdc.v_pulse_width;
+	width = pdata->panel_info.xres;
+	height = pdata->panel_info.yres;
+
+	lanes = 0;
+	if (mipi->data_lane0)
+		lanes++;
+	if (mipi->data_lane1)
+		lanes++;
+	if (mipi->data_lane2)
+		lanes++;
+	if (mipi->data_lane3)
+		lanes++;
+	if (lanes == 0)
+		return -EINVAL;
+
+	*bitclk_rate = (width + hbp + hfp + hspw) * (height + vbp + vfp + vspw);
+	*bitclk_rate *= mipi->frame_rate;
+	*bitclk_rate *= pdata->panel_info.bpp;
+	*bitclk_rate /= lanes;
+
+	*byteclk_rate = *bitclk_rate / 8;
+	*pclk_rate = *byteclk_rate * lanes * 8 / pdata->panel_info.bpp;
+
+	pr_debug("bitclk=%u, byteclk=%u, pck_=%u\n",
+		*bitclk_rate, *byteclk_rate, *pclk_rate);
+	return 0;
+}
+
+static int msm_dsi_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	u32 clk_rate;
+	struct mdss_panel_info *pinfo;
+	struct mipi_panel_info *mipi;
+	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
+	u32 ystride, bpp, data;
+	u32 dummy_xres, dummy_yres;
+	u32 bitclk_rate = 0, byteclk_rate = 0, pclk_rate = 0;
+	unsigned char *ctrl_base = dsi_host_private->dsi_base;
+
+	pr_debug("msm_dsi_on\n");
+
+	pinfo = &pdata->panel_info;
+
+	ret = msm_dsi_regulator_enable();
+	if (ret) {
+		pr_err("%s: DSI power on failed\n", __func__);
+		return ret;
+	}
+
+	msm_dsi_ahb_ctrl(1);
+	msm_dsi_phy_sw_reset(dsi_host_private->dsi_base);
+	msm_dsi_phy_init(dsi_host_private->dsi_base, pdata);
+
+	msm_dsi_cal_clk_rate(pdata, &bitclk_rate, &byteclk_rate, &pclk_rate);
+	msm_dsi_clk_set_rate(DSI_ESC_CLK_RATE, byteclk_rate, pclk_rate);
+	msm_dsi_prepare_clocks();
+	msm_dsi_clk_enable();
+
+	clk_rate = pdata->panel_info.clk_rate;
+	clk_rate = min(clk_rate, pdata->panel_info.clk_max);
+
+	hbp = pdata->panel_info.lcdc.h_back_porch;
+	hfp = pdata->panel_info.lcdc.h_front_porch;
+	vbp = pdata->panel_info.lcdc.v_back_porch;
+	vfp = pdata->panel_info.lcdc.v_front_porch;
+	hspw = pdata->panel_info.lcdc.h_pulse_width;
+	vspw = pdata->panel_info.lcdc.v_pulse_width;
+	width = pdata->panel_info.xres;
+	height = pdata->panel_info.yres;
+
+	mipi  = &pdata->panel_info.mipi;
+	if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
+		dummy_xres = pdata->panel_info.lcdc.xres_pad;
+		dummy_yres = pdata->panel_info.lcdc.yres_pad;
+
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_H,
+			((hspw + hbp + width + dummy_xres) << 16 |
+			(hspw + hbp)));
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_ACTIVE_V,
+			((vspw + vbp + height + dummy_yres) << 16 |
+			(vspw + vbp)));
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_TOTAL,
+			(vspw + vbp + height + dummy_yres +
+				vfp - 1) << 16 | (hspw + hbp +
+				width + dummy_xres + hfp - 1));
+
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_HSYNC, (hspw << 16));
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC, 0);
+		MIPI_OUTP(ctrl_base + DSI_VIDEO_MODE_VSYNC_VPOS,
+				(vspw << 16));
+
+	} else {		/* command mode */
+		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB666)
+			bpp = 3;
+		else if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+			bpp = 2;
+		else
+			bpp = 3;	/* Default format set to RGB888 */
+
+		ystride = width * bpp + 1;
+
+		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_CTRL,
+			data);
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_CTRL,
+			data);
+
+		data = height << 16 | width;
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM1_TOTAL,
+			data);
+		MIPI_OUTP(ctrl_base + DSI_COMMAND_MODE_MDP_STREAM0_TOTAL,
+			data);
+	}
+
+	msm_dsi_sw_reset();
+	msm_dsi_host_init(mipi);
+
+	if (mipi->force_clk_lane_hs) {
+		u32 tmp;
+
+		tmp = MIPI_INP(ctrl_base + DSI_LANE_CTRL);
+		tmp |= (1<<28);
+		MIPI_OUTP(ctrl_base + DSI_LANE_CTRL, tmp);
+		wmb();
+	}
+
+	msm_dsi_op_mode_config(mipi->mode, pdata);
+
+	return ret;
+}
+
+static int msm_dsi_off(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+
+	pr_debug("msm_dsi_off\n");
+	msm_dsi_clk_set_rate(0, 0, 0);
+	msm_dsi_clk_disable();
+	msm_dsi_unprepare_clocks();
+
+	/* disable DSI controller */
+	msm_dsi_controller_cfg(0);
+	msm_dsi_ahb_ctrl(0);
+
+	ret = msm_dsi_regulator_disable();
+	if (ret) {
+		pr_err("%s: Panel power off failed\n", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int __devinit msm_dsi_probe(struct platform_device *pdev)
+{
+	struct dsi_interface intf;
+	int rc = 0;
+
+	pr_debug("%s\n", __func__);
+
+	rc = msm_dsi_init();
+	if (rc)
+		return rc;
+
+	if (pdev->dev.of_node) {
+		struct resource *mdss_dsi_mres;
+		pdev->id = 0;
+		mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (!mdss_dsi_mres) {
+			pr_err("%s:%d unable to get the MDSS reg resources",
+				__func__, __LINE__);
+			return -ENOMEM;
+		} else {
+			dsi_host_private->dsi_base = ioremap(
+						mdss_dsi_mres->start,
+						resource_size(mdss_dsi_mres));
+			if (!dsi_host_private->dsi_base) {
+				pr_err("%s:%d unable to remap dsi resources",
+					__func__, __LINE__);
+				return -ENOMEM;
+			}
+		}
+
+		mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+		if (!mdss_dsi_mres || mdss_dsi_mres->start == 0) {
+			pr_err("%s:%d unable to get the MDSS irq resources",
+				__func__, __LINE__);
+			rc = -ENODEV;
+			goto dsi_probe_error;
+		} else {
+			rc = msm_dsi_irq_init(&pdev->dev, mdss_dsi_mres->start);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s: failed to init irq, rc=%d\n",
+							__func__, rc);
+				goto dsi_probe_error;
+			}
+		}
+
+		rc = msm_dsi_io_init(pdev);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: failed to init DSI IO, rc=%d\n",
+							__func__, rc);
+			goto dsi_probe_error;
+		}
+
+		rc = of_platform_populate(pdev->dev.of_node,
+					NULL, NULL, &pdev->dev);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: failed to add child nodes, rc=%d\n",
+							__func__, rc);
+			goto dsi_probe_error;
+		}
+
+	}
+
+	dsi_host_private->dis_dev = pdev->dev;
+	intf.on = msm_dsi_on;
+	intf.off = msm_dsi_off;
+	intf.op_mode_config = msm_dsi_op_mode_config;
+	intf.tx = msm_dsi_cmds_tx;
+	intf.rx = msm_dsi_cmds_rx;
+	intf.index = 0;
+	intf.private = NULL;
+	dsi_register_interface(&intf);
+	pr_debug("%s success\n", __func__);
+	return 0;
+dsi_probe_error:
+	if (dsi_host_private->dsi_base) {
+		iounmap(dsi_host_private->dsi_base);
+		dsi_host_private->dsi_base = NULL;
+	}
+	msm_dsi_io_deinit();
+	msm_dsi_deinit();
+	return rc;
+}
+
+static int __devexit msm_dsi_remove(struct platform_device *pdev)
+{
+	msm_dsi_disable_irq();
+	msm_dsi_io_deinit();
+	iounmap(dsi_host_private->dsi_base);
+	dsi_host_private->dsi_base = NULL;
+	msm_dsi_deinit();
+	return 0;
+}
+
+static const struct of_device_id msm_dsi_v2_dt_match[] = {
+	{.compatible = "qcom,msm-dsi-v2"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_dsi_v2_dt_match);
+
+static struct platform_driver msm_dsi_v2_driver = {
+	.probe = msm_dsi_probe,
+	.remove = __devexit_p(msm_dsi_remove),
+	.shutdown = NULL,
+	.driver = {
+		.name = "msm_dsi_v2",
+		.of_match_table = msm_dsi_v2_dt_match,
+	},
+};
+
+static int msm_dsi_v2_register_driver(void)
+{
+	return platform_driver_register(&msm_dsi_v2_driver);
+}
+
+static int __init msm_dsi_v2_driver_init(void)
+{
+	int ret;
+
+	ret = msm_dsi_v2_register_driver();
+	if (ret) {
+		pr_err("msm_dsi_v2_register_driver() failed!\n");
+		return ret;
+	}
+
+	return ret;
+}
+module_init(msm_dsi_v2_driver_init);
+
+static void __exit msm_dsi_v2_driver_cleanup(void)
+{
+	platform_driver_unregister(&msm_dsi_v2_driver);
+}
+module_exit(msm_dsi_v2_driver_cleanup);
diff --git a/drivers/video/msm/mdss/dsi_host_v2.h b/drivers/video/msm/mdss/dsi_host_v2.h
new file mode 100644
index 0000000..cec9774
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_host_v2.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef DSI_HOST_V2_H
+#define DSI_HOST_V2_H
+
+#include <linux/bitops.h>
+
+#define DSI_INTR_ERROR_MASK			BIT(25)
+#define DSI_INTR_ERROR				BIT(24)
+#define DSI_INTR_VIDEO_DONE_MASK		BIT(17)
+#define DSI_INTR_VIDEO_DONE			BIT(16)
+#define DSI_INTR_CMD_MDP_DONE_MASK		BIT(9)
+#define DSI_INTR_CMD_MDP_DONE			BIT(8)
+#define DSI_INTR_CMD_DMA_DONE_MASK		BIT(1)
+#define DSI_INTR_CMD_DMA_DONE			BIT(0)
+
+#define DSI_CTRL				0x0000
+#define DSI_STATUS				0x0004
+#define DSI_FIFO_STATUS			0x0008
+#define DSI_VIDEO_MODE_CTRL			0x000C
+#define DSI_VIDEO_MODE_DATA_CTRL		0x001C
+#define DSI_VIDEO_MODE_ACTIVE_H			0x0020
+#define DSI_VIDEO_MODE_ACTIVE_V		0x0024
+#define DSI_VIDEO_MODE_TOTAL		0x0028
+#define DSI_VIDEO_MODE_HSYNC			0x002C
+#define DSI_VIDEO_MODE_VSYNC			0x0030
+#define DSI_VIDEO_MODE_VSYNC_VPOS		0x0034
+#define DSI_COMMAND_MODE_DMA_CTRL		0x0038
+#define DSI_COMMAND_MODE_MDP_CTRL		0x003C
+#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL	0x0040
+#define DSI_DMA_CMD_OFFSET			0x0044
+#define DSI_DMA_CMD_LENGTH			0x0048
+#define DSI_DMA_FIFO_CTRL			0x004C
+#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL	0x0054
+#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL	0x0058
+#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL	0x005C
+#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL	0x0060
+#define DSI_ACK_ERR_STATUS			0x0064
+#define DSI_RDBK_DATA0				0x0068
+#define DSI_RDBK_DATA1				0x006C
+#define DSI_RDBK_DATA2				0x0070
+#define DSI_RDBK_DATA3				0x0074
+#define DSI_RDBK_DATATYPE0			0x0078
+#define DSI_RDBK_DATATYPE1			0x007C
+#define DSI_TRIG_CTRL				0x0080
+#define DSI_EXT_MUX				0x0084
+#define DSI_EXT_TE_PULSE_DETECT_CTRL		0x0088
+#define DSI_CMD_MODE_DMA_SW_TRIGGER		0x008C
+#define DSI_CMD_MODE_MDP_SW_TRIGGER		0x0090
+#define DSI_CMD_MODE_BTA_SW_TRIGGER		0x0094
+#define DSI_RESET_SW_TRIGGER			0x0098
+#define DSI_LANE_CTRL				0x00A8
+#define DSI_LANE_SWAP_CTRL			0x00AC
+#define DSI_DLN0_PHY_ERR			0x00B0
+#define DSI_TIMEOUT_STATUS			0x00BC
+#define DSI_CLKOUT_TIMING_CTRL			0x00C0
+#define DSI_EOT_PACKET				0x00C4
+#define DSI_EOT_PACKET_CTRL			0x00C8
+#define DSI_ERR_INT_MASK0			0x0108
+#define DSI_INT_CTRL				0x010c
+#define DSI_SOFT_RESET				0x0114
+#define DSI_CLK_CTRL				0x0118
+#define DSI_CLK_STATUS				0x011C
+#define DSI_PHY_SW_RESET			0x0128
+#define DSI_COMMAND_MODE_MDP_IDLE_CTRL		0x0190
+#define DSI_VERSION				0x01F0
+
+#define DSI_DSIPHY_PLL_CTRL_0			0x0200
+#define DSI_DSIPHY_PLL_CTRL_1			0x0204
+#define DSI_DSIPHY_PLL_CTRL_2			0x0208
+#define DSI_DSIPHY_PLL_CTRL_3			0x020C
+#define DSI_DSIPHY_PLL_CTRL_4			0x0210
+#define DSI_DSIPHY_PLL_CTRL_5			0x0214
+#define DSI_DSIPHY_PLL_CTRL_6			0x0218
+#define DSI_DSIPHY_PLL_CTRL_7			0x021C
+#define DSI_DSIPHY_PLL_CTRL_8			0x0220
+#define DSI_DSIPHY_PLL_CTRL_9			0x0224
+#define DSI_DSIPHY_PLL_CTRL_10			0x0228
+#define DSI_DSIPHY_PLL_CTRL_11			0x022C
+#define DSI_DSIPHY_PLL_CTRL_12			0x0230
+#define DSI_DSIPHY_PLL_CTRL_13			0x0234
+#define DSI_DSIPHY_PLL_CTRL_14			0x0238
+#define DSI_DSIPHY_PLL_CTRL_15			0x023C
+#define DSI_DSIPHY_PLL_CTRL_16			0x0240
+#define DSI_DSIPHY_PLL_CTRL_17			0x0244
+#define DSI_DSIPHY_PLL_CTRL_18			0x0248
+#define DSI_DSIPHY_PLL_CTRL_19			0x024C
+#define DSI_DSIPHY_ANA_CTRL0			0x0260
+#define DSI_DSIPHY_ANA_CTRL1			0x0264
+#define DSI_DSIPHY_ANA_CTRL2			0x0268
+#define DSI_DSIPHY_ANA_CTRL3			0x026C
+#define DSI_DSIPHY_ANA_CTRL4			0x0270
+#define DSI_DSIPHY_ANA_CTRL5			0x0274
+#define DSI_DSIPHY_ANA_CTRL6			0x0278
+#define DSI_DSIPHY_ANA_CTRL7			0x027C
+#define DSI_DSIPHY_PLL_RDY			0x0280
+#define DSI_DSIPHY_PLL_ANA_STATUS0		0x0294
+#define DSI_DSIPHY_PLL_ANA_STATUS1		0x0298
+#define DSI_DSIPHY_PLL_ANA_STATUS2		0x029C
+#define DSI_DSIPHY_LN0_CFG0			0x0300
+#define DSI_DSIPHY_LN0_CFG1			0x0304
+#define DSI_DSIPHY_LN0_CFG2			0x0308
+#define DSI_DSIPHY_LN1_CFG0			0x0340
+#define DSI_DSIPHY_LN1_CFG1			0x0344
+#define DSI_DSIPHY_LN1_CFG2			0x0348
+#define DSI_DSIPHY_LN2_CFG0			0x0380
+#define DSI_DSIPHY_LN2_CFG1			0x0384
+#define DSI_DSIPHY_LN2_CFG2			0x0388
+#define DSI_DSIPHY_LN3_CFG0			0x03C0
+#define DSI_DSIPHY_LN3_CFG1			0x03C4
+#define DSI_DSIPHY_LN3_CFG2			0x03C8
+#define DSI_DSIPHY_LNCK_CFG0			0x0400
+#define DSI_DSIPHY_LNCK_CFG1			0x0404
+#define DSI_DSIPHY_LNCK_CFG2			0x0408
+#define DSI_DSIPHY_TIMING_CTRL_0		0x0440
+#define DSI_DSIPHY_TIMING_CTRL_1		0x0444
+#define DSI_DSIPHY_TIMING_CTRL_2		0x0448
+#define DSI_DSIPHY_TIMING_CTRL_3		0x044C
+#define DSI_DSIPHY_TIMING_CTRL_4		0x0450
+#define DSI_DSIPHY_TIMING_CTRL_5		0x0454
+#define DSI_DSIPHY_TIMING_CTRL_6		0x0458
+#define DSI_DSIPHY_TIMING_CTRL_7		0x045C
+#define DSI_DSIPHY_TIMING_CTRL_8		0x0460
+#define DSI_DSIPHY_TIMING_CTRL_9		0x0464
+#define DSI_DSIPHY_TIMING_CTRL_10		0x0468
+#define DSI_DSIPHY_TIMING_CTRL_11		0x046C
+#define DSI_DSIPHY_CTRL_0			0x0470
+#define DSI_DSIPHY_CTRL_1			0x0474
+#define DSI_DSIPHY_CTRL_2			0x0478
+#define DSI_DSIPHY_CTRL_3			0x047C
+#define DSI_DSIPHY_STRENGTH_CTRL_0		0x0480
+#define DSI_DSIPHY_STRENGTH_CTRL_1		0x0484
+#define DSI_DSIPHY_STRENGTH_CTRL_2		0x0488
+#define DSI_DSIPHY_LDO_CNTRL			0x04B0
+#define DSI_DSIPHY_REGULATOR_CTRL_0		0x0500
+#define DSI_DSIPHY_REGULATOR_CTRL_1		0x0504
+#define DSI_DSIPHY_REGULATOR_CTRL_2		0x0508
+#define DSI_DSIPHY_REGULATOR_CTRL_3		0x050C
+#define DSI_DSIPHY_REGULATOR_CTRL_4		0x0510
+#define DSI_DSIPHY_REGULATOR_TEST		0x0514
+#define DSI_DSIPHY_REGULATOR_CAL_PWR_CFG	0x0518
+#define DSI_DSIPHY_CAL_HW_TRIGGER		0x0528
+#define DSI_DSIPHY_CAL_SW_CFG0			0x052C
+#define DSI_DSIPHY_CAL_SW_CFG1			0x0530
+#define DSI_DSIPHY_CAL_SW_CFG2			0x0534
+#define DSI_DSIPHY_CAL_HW_CFG0			0x0538
+#define DSI_DSIPHY_CAL_HW_CFG1			0x053C
+#define DSI_DSIPHY_CAL_HW_CFG2			0x0540
+#define DSI_DSIPHY_CAL_HW_CFG3			0x0544
+#define DSI_DSIPHY_CAL_HW_CFG4			0x0548
+#define DSI_DSIPHY_REGULATOR_CAL_STATUS0	0x0550
+#define DSI_DSIPHY_BIST_CTRL0			0x048C
+#define DSI_DSIPHY_BIST_CTRL1			0x0490
+#define DSI_DSIPHY_BIST_CTRL2			0x0494
+#define DSI_DSIPHY_BIST_CTRL3			0x0498
+#define DSI_DSIPHY_BIST_CTRL4			0x049C
+#define DSI_DSIPHY_BIST_CTRL5			0x04A0
+
+#endif /* DSI_HOST_V2_H */
diff --git a/drivers/video/msm/mdss/dsi_io_v2.c b/drivers/video/msm/mdss/dsi_io_v2.c
new file mode 100644
index 0000000..0486c4c
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_io_v2.c
@@ -0,0 +1,426 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+
+#include <mach/clk.h>
+
+#include "dsi_v2.h"
+#include "dsi_io_v2.h"
+#include "dsi_host_v2.h"
+
+struct msm_dsi_io_private {
+	struct regulator *vdda_vreg;
+	struct clk *dsi_byte_clk;
+	struct clk *dsi_esc_clk;
+	struct clk *dsi_pixel_clk;
+	struct clk *dsi_ahb_clk;
+	int msm_dsi_clk_on;
+	int msm_dsi_ahb_clk_on;
+};
+
+static struct msm_dsi_io_private *dsi_io_private;
+
+#define DSI_VDDA_VOLTAGE 1200000
+
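+/*
+ * Gate the DSI AHB (interface) clock. The clock is prepared once in
+ * msm_dsi_clk_init(), so only clk_enable()/clk_disable() are needed here;
+ * the flag guards against unbalanced calls rather than acting as a
+ * reference count.
+ */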
+void msm_dsi_ahb_ctrl(int enable)
+{
+	if (enable) {
+		if (dsi_io_private->msm_dsi_ahb_clk_on) {
+			pr_debug("ahb clks already ON\n");
+			return;
+		}
+		clk_enable(dsi_io_private->dsi_ahb_clk);
+		dsi_io_private->msm_dsi_ahb_clk_on = 1;
+	} else {
+		if (dsi_io_private->msm_dsi_ahb_clk_on == 0) {
+			pr_debug("ahb clk already OFF\n");
+			return;
+		}
+		clk_disable(dsi_io_private->dsi_ahb_clk);
+		dsi_io_private->msm_dsi_ahb_clk_on = 0;
+	}
+}
+
+int msm_dsi_io_init(struct platform_device *dev)
+{
+	int rc;
+
+	if (!dsi_io_private) {
+		dsi_io_private = kzalloc(sizeof(struct msm_dsi_io_private),
+					GFP_KERNEL);
+		if (!dsi_io_private) {
+			pr_err("fail to alloc dsi io private data structure\n");
+			return -ENOMEM;
+		}
+	}
+
+	rc = msm_dsi_clk_init(dev);
+	if (rc) {
+		pr_err("fail to initialize DSI clock\n");
+		return rc;
+	}
+
+	rc = msm_dsi_regulator_init(dev);
+	if (rc) {
+		pr_err("fail to initialize DSI regulator\n");
+		return rc;
+	}
+	return 0;
+}
+
+void msm_dsi_io_deinit(void)
+{
+	if (dsi_io_private) {
+		msm_dsi_clk_deinit();
+		msm_dsi_regulator_deinit();
+		kfree(dsi_io_private);
+		dsi_io_private = NULL;
+	}
+}
+
+int msm_dsi_clk_init(struct platform_device *dev)
+{
+	int rc = 0;
+
+	dsi_io_private->dsi_byte_clk = clk_get(&dev->dev, "byte_clk");
+	if (IS_ERR(dsi_io_private->dsi_byte_clk)) {
+		pr_err("can't find dsi byte_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_byte_clk);
+		dsi_io_private->dsi_byte_clk = NULL;
+		return rc;
+	}
+
+	dsi_io_private->dsi_esc_clk = clk_get(&dev->dev, "esc_clk");
+	if (IS_ERR(dsi_io_private->dsi_esc_clk)) {
+		pr_err("can't find dsi esc_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_esc_clk);
+		dsi_io_private->dsi_esc_clk = NULL;
+		return rc;
+	}
+
+	dsi_io_private->dsi_pixel_clk = clk_get(&dev->dev, "pixel_clk");
+	if (IS_ERR(dsi_io_private->dsi_pixel_clk)) {
+		pr_err("can't find dsi pixel_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_pixel_clk);
+		dsi_io_private->dsi_pixel_clk = NULL;
+		return rc;
+	}
+
+	dsi_io_private->dsi_ahb_clk = clk_get(&dev->dev, "iface_clk");
+	if (IS_ERR(dsi_io_private->dsi_ahb_clk)) {
+		pr_err("can't find dsi iface_clk\n");
+		rc = PTR_ERR(dsi_io_private->dsi_ahb_clk);
+		dsi_io_private->dsi_ahb_clk = NULL;
+		return rc;
+	}
+	clk_prepare(dsi_io_private->dsi_ahb_clk);
+
+	return 0;
+}
+
+void msm_dsi_clk_deinit(void)
+{
+	if (dsi_io_private->dsi_byte_clk) {
+		clk_put(dsi_io_private->dsi_byte_clk);
+		dsi_io_private->dsi_byte_clk = NULL;
+	}
+	if (dsi_io_private->dsi_esc_clk) {
+		clk_put(dsi_io_private->dsi_esc_clk);
+		dsi_io_private->dsi_esc_clk = NULL;
+	}
+	if (dsi_io_private->dsi_pixel_clk) {
+		clk_put(dsi_io_private->dsi_pixel_clk);
+		dsi_io_private->dsi_pixel_clk = NULL;
+	}
+	if (dsi_io_private->dsi_ahb_clk) {
+		clk_unprepare(dsi_io_private->dsi_ahb_clk);
+		clk_put(dsi_io_private->dsi_ahb_clk);
+		dsi_io_private->dsi_ahb_clk = NULL;
+	}
+}
+
+int msm_dsi_prepare_clocks(void)
+{
+	clk_prepare(dsi_io_private->dsi_byte_clk);
+	clk_prepare(dsi_io_private->dsi_esc_clk);
+	clk_prepare(dsi_io_private->dsi_pixel_clk);
+	return 0;
+}
+
+int msm_dsi_unprepare_clocks(void)
+{
+	clk_unprepare(dsi_io_private->dsi_esc_clk);
+	clk_unprepare(dsi_io_private->dsi_byte_clk);
+	clk_unprepare(dsi_io_private->dsi_pixel_clk);
+	return 0;
+}
+
+int msm_dsi_clk_set_rate(unsigned long esc_rate, unsigned long byte_rate,
+			unsigned long pixel_rate)
+{
+	int rc;
+
+	rc = clk_set_rate(dsi_io_private->dsi_esc_clk, esc_rate);
+	if (rc) {
+		pr_err("dsi_esc_clk - clk_set_rate failed =%d\n", rc);
+		return rc;
+	}
+
+	rc = clk_set_rate(dsi_io_private->dsi_byte_clk, byte_rate);
+	if (rc) {
+		pr_err("dsi_byte_clk - clk_set_rate failed = %d\n", rc);
+		return rc;
+	}
+
+	rc = clk_set_rate(dsi_io_private->dsi_pixel_clk, pixel_rate);
+	if (rc) {
+		pr_err("dsi_pixel_clk - clk_set_rate failed = %d\n", rc);
+		return rc;
+	}
+	return 0;
+}
+
+int  msm_dsi_clk_enable(void)
+{
+	if (dsi_io_private->msm_dsi_clk_on) {
+		pr_debug("dsi_clks on already\n");
+		return 0;
+	}
+
+	clk_enable(dsi_io_private->dsi_esc_clk);
+	clk_enable(dsi_io_private->dsi_byte_clk);
+	clk_enable(dsi_io_private->dsi_pixel_clk);
+
+	dsi_io_private->msm_dsi_clk_on = 1;
+	return 0;
+}
+
+int msm_dsi_clk_disable(void)
+{
+	if (dsi_io_private->msm_dsi_clk_on == 0) {
+		pr_debug("mdss_dsi_clks already OFF\n");
+		return 0;
+	}
+
+	clk_disable(dsi_io_private->dsi_byte_clk);
+	clk_disable(dsi_io_private->dsi_esc_clk);
+	clk_disable(dsi_io_private->dsi_pixel_clk);
+
+	dsi_io_private->msm_dsi_clk_on = 0;
+	return 0;
+}
+
+int msm_dsi_regulator_init(struct platform_device *dev)
+{
+	int ret = 0;
+
+	dsi_io_private->vdda_vreg = devm_regulator_get(&dev->dev, "vdda");
+	if (IS_ERR(dsi_io_private->vdda_vreg)) {
+		ret = PTR_ERR(dsi_io_private->vdda_vreg);
+		pr_err("could not get vdda 8110_l4, ret=%d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(dsi_io_private->vdda_vreg, DSI_VDDA_VOLTAGE,
+					DSI_VDDA_VOLTAGE);
+	if (ret)
+		pr_err("vdda_vreg set_voltage failed, ret=%d\n", ret);
+
+	return ret;
+}
+
+void msm_dsi_regulator_deinit(void)
+{
+	if (dsi_io_private->vdda_vreg) {
+		devm_regulator_put(dsi_io_private->vdda_vreg);
+		dsi_io_private->vdda_vreg = NULL;
+	}
+}
+
+int msm_dsi_regulator_enable(void)
+{
+	int ret;
+
+	ret = regulator_enable(dsi_io_private->vdda_vreg);
+	if (ret) {
+		pr_err("%s: Failed to enable regulator.\n", __func__);
+		return ret;
+	}
+	msleep(20); /*per DSI controller spec*/
+	return ret;
+}
+
+int msm_dsi_regulator_disable(void)
+{
+	int ret;
+
+	ret = regulator_disable(dsi_io_private->vdda_vreg);
+	if (ret) {
+		pr_err("%s: Failed to disable regulator.\n", __func__);
+		return ret;
+	}
+	wmb();
+	msleep(20); /*per DSI controller spec*/
+
+	return ret;
+}
+
+static void msm_dsi_phy_strength_init(unsigned char *ctrl_base,
+					struct mdss_dsi_phy_ctrl *pd)
+{
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_0, pd->strength[0]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_STRENGTH_CTRL_2, pd->strength[1]);
+}
+
+static void msm_dsi_phy_ctrl_init(unsigned char *ctrl_base,
+				struct mdss_panel_data *pdata)
+{
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x5f);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_3, 0x10);
+}
+
+static void msm_dsi_phy_regulator_init(unsigned char *ctrl_base,
+					struct mdss_dsi_phy_ctrl *pd)
+{
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_LDO_CNTRL, 0x04);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, pd->regulator[0]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_1, pd->regulator[1]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_2, pd->regulator[2]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_3, pd->regulator[3]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_4, pd->regulator[4]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_PWR_CFG,
+			pd->regulator[5]);
+}
+
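+/*
+ * Kick the PHY regulator calibration via a software trigger, then poll
+ * the CAL_STATUS0 busy bit (bit 4) until it clears or term_cnt reads
+ * have elapsed.
+ */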
+static int msm_dsi_phy_calibration(unsigned char *ctrl_base)
+{
+	int i = 0, term_cnt = 5000, ret = 0, cal_busy;
+
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_SW_CFG2, 0x0);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG1, 0x5a);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG3, 0x10);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG4, 0x01);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_CFG0, 0x01);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x01);
+	usleep_range(5000, 5000); /*per DSI controller spec*/
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_CAL_HW_TRIGGER, 0x00);
+
+	cal_busy = MIPI_INP(ctrl_base + DSI_DSIPHY_REGULATOR_CAL_STATUS0);
+	while (cal_busy & 0x10) {
+		i++;
+		if (i > term_cnt) {
+			ret = -EINVAL;
+			pr_err("msm_dsi_phy_calibration error\n");
+			break;
+		}
+		cal_busy = MIPI_INP(ctrl_base +
+					DSI_DSIPHY_REGULATOR_CAL_STATUS0);
+	}
+
+	return ret;
+}
+
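+/*
+ * Program the per-lane PHY settings. Each data lane (and the clock lane)
+ * occupies a 0x40 register block starting at 0x0300; the six values per
+ * lane land at offsets 0x00, 0x04, 0x08, 0x0c, 0x14 and 0x18 (offset
+ * 0x10 is skipped).
+ */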
+static void msm_dsi_phy_lane_init(unsigned char *ctrl_base,
+			struct mdss_dsi_phy_ctrl *pd)
+{
+	int ln, index;
+
+	/*CFG0, CFG1, CFG2, TEST_DATAPATH, TEST_STR0, TEST_STR1*/
+	for (ln = 0; ln < 5; ln++) {
+		unsigned char *off = ctrl_base + 0x0300 + (ln * 0x40);
+		index = ln * 6;
+		MIPI_OUTP(off, pd->laneCfg[index]);
+		MIPI_OUTP(off + 4, pd->laneCfg[index + 1]);
+		MIPI_OUTP(off + 8, pd->laneCfg[index + 2]);
+		MIPI_OUTP(off + 12, pd->laneCfg[index + 3]);
+		MIPI_OUTP(off + 20, pd->laneCfg[index + 4]);
+		MIPI_OUTP(off + 24, pd->laneCfg[index + 5]);
+	}
+	wmb();
+}
+
+static void msm_dsi_phy_timing_init(unsigned char *ctrl_base,
+			struct mdss_dsi_phy_ctrl *pd)
+{
+	int i, off = DSI_DSIPHY_TIMING_CTRL_0;
+	for (i = 0; i < 12; i++) {
+		MIPI_OUTP(ctrl_base + off, pd->timing[i]);
+		off += 4;
+	}
+	wmb();
+}
+
+static void msm_dsi_phy_bist_init(unsigned char *ctrl_base,
+			struct mdss_dsi_phy_ctrl *pd)
+{
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, pd->bistCtrl[4]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL1, pd->bistCtrl[1]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL0, pd->bistCtrl[0]);
+	MIPI_OUTP(ctrl_base + DSI_DSIPHY_BIST_CTRL4, 0);
+	wmb();
+}
+
+int msm_dsi_phy_init(unsigned char *ctrl_base,
+			struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+
+	pd = pdata->panel_info.mipi.dsi_phy_db;
+
+	msm_dsi_phy_strength_init(ctrl_base, pd);
+
+	msm_dsi_phy_ctrl_init(ctrl_base, pdata);
+
+	msm_dsi_phy_regulator_init(ctrl_base, pd);
+
+	msm_dsi_phy_calibration(ctrl_base);
+
+	msm_dsi_phy_lane_init(ctrl_base, pd);
+
+	msm_dsi_phy_timing_init(ctrl_base, pd);
+
+	msm_dsi_phy_bist_init(ctrl_base, pd);
+
+	return 0;
+}
+
+void msm_dsi_phy_sw_reset(unsigned char *ctrl_base)
+{
+	/* start phy sw reset */
+	MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0001);
+	udelay(1000); /*per DSI controller spec*/
+	wmb();
+	/* end phy sw reset */
+	MIPI_OUTP(ctrl_base + DSI_PHY_SW_RESET, 0x0000);
+	udelay(100); /*per DSI controller spec*/
+	wmb();
+}
+
+void msm_dsi_phy_enable(unsigned char *ctrl_base, int on)
+{
+	if (on) {
+		MIPI_OUTP(ctrl_base + DSI_DSIPHY_PLL_CTRL_5, 0x050);
+	} else {
+		MIPI_OUTP(ctrl_base + DSI_DSIPHY_PLL_CTRL_5, 0x05f);
+		MIPI_OUTP(ctrl_base + DSI_DSIPHY_REGULATOR_CTRL_0, 0x02);
+		MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_0, 0x00);
+		MIPI_OUTP(ctrl_base + DSI_DSIPHY_CTRL_1, 0x7f);
+		MIPI_OUTP(ctrl_base + DSI_CLK_CTRL, 0);
+	}
+}
diff --git a/drivers/video/msm/mdss/dsi_io_v2.h b/drivers/video/msm/mdss/dsi_io_v2.h
new file mode 100644
index 0000000..25ecd7f
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_io_v2.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef DSI_IO_V2_H
+#define DSI_IO_V2_H
+
+#include "mdss_panel.h"
+
+void msm_dsi_ahb_ctrl(int enable);
+
+int msm_dsi_io_init(struct platform_device *dev);
+
+void msm_dsi_io_deinit(void);
+
+int msm_dsi_clk_init(struct platform_device *dev);
+
+void msm_dsi_clk_deinit(void);
+
+int msm_dsi_prepare_clocks(void);
+
+int msm_dsi_unprepare_clocks(void);
+
+int msm_dsi_clk_set_rate(unsigned long esc_rate, unsigned long byte_rate,
+			unsigned long pixel_rate);
+
+int msm_dsi_clk_enable(void);
+
+int msm_dsi_clk_disable(void);
+
+int msm_dsi_regulator_init(struct platform_device *dev);
+
+void msm_dsi_regulator_deinit(void);
+
+int msm_dsi_regulator_enable(void);
+
+int msm_dsi_regulator_disable(void);
+
+int msm_dsi_phy_init(unsigned char *ctrl_base,
+			struct mdss_panel_data *pdata);
+
+void msm_dsi_phy_sw_reset(unsigned char *ctrl_base);
+
+#endif /* DSI_IO_V2_H */
diff --git a/drivers/video/msm/mdss/dsi_panel_v2.c b/drivers/video/msm/mdss/dsi_panel_v2.c
new file mode 100644
index 0000000..6686de3
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_panel_v2.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/qpnp/pin.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/regulator/consumer.h>
+
+#include "dsi_v2.h"
+
+#define DT_CMD_HDR 6
+
+struct dsi_panel_private {
+	struct dsi_buf dsi_panel_tx_buf;
+	struct dsi_buf dsi_panel_rx_buf;
+
+	int rst_gpio;
+	int disp_en_gpio;
+	char bl_ctrl;
+
+	struct regulator *vddio_vreg;
+	struct regulator *vdda_vreg;
+
+	struct dsi_panel_cmds_list *on_cmds_list;
+	struct dsi_panel_cmds_list *off_cmds_list;
+	struct mdss_dsi_phy_ctrl phy_params;
+};
+
+static struct dsi_panel_private *panel_private;
+
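+/*
+ * Backlight is driven through the "bkl-trigger" LED trigger when the
+ * panel device tree selects WLED control ("qcom,mdss-pan-bl-ctrl").
+ */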
+DEFINE_LED_TRIGGER(bl_led_trigger);
+
+int dsi_panel_init(void)
+{
+	int rc;
+
+	if (!panel_private) {
+		panel_private = kzalloc(sizeof(struct dsi_panel_private),
+					GFP_KERNEL);
+		if (!panel_private) {
+			pr_err("fail to alloc dsi panel private data\n");
+			return -ENOMEM;
+		}
+	}
+
+	rc = dsi_buf_alloc(&panel_private->dsi_panel_tx_buf,
+				ALIGN(DSI_BUF_SIZE,
+				SZ_4K));
+	if (rc)
+		return rc;
+
+	rc = dsi_buf_alloc(&panel_private->dsi_panel_rx_buf,
+				ALIGN(DSI_BUF_SIZE,
+				SZ_4K));
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+void dsi_panel_deinit(void)
+{
+	if (!panel_private)
+		return;
+
+	kfree(panel_private->dsi_panel_tx_buf.start);
+	kfree(panel_private->dsi_panel_rx_buf.start);
+
+	if (panel_private->vddio_vreg)
+		devm_regulator_put(panel_private->vddio_vreg);
+
+	if (panel_private->vdda_vreg)
+		devm_regulator_put(panel_private->vdda_vreg);
+
+	kfree(panel_private);
+	panel_private = NULL;
+}
+
+void dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
+{
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (!gpio_is_valid(panel_private->disp_en_gpio)) {
+		pr_debug("%s:%d, enable line not configured\n",
+			   __func__, __LINE__);
+	}
+
+	if (!gpio_is_valid(panel_private->rst_gpio)) {
+		pr_debug("%s:%d, reset line not configured\n",
+			   __func__, __LINE__);
+		return;
+	}
+
+	pr_debug("%s: enable = %d\n", __func__, enable);
+
+	if (enable) {
+		gpio_set_value(panel_private->rst_gpio, 1);
+		/*
+		 * These delay values were determined experimentally; they
+		 * should eventually be moved to the device tree.
+		 */
+		msleep(20);
+		gpio_set_value(panel_private->rst_gpio, 0);
+		udelay(200);
+		gpio_set_value(panel_private->rst_gpio, 1);
+		msleep(20);
+		if (gpio_is_valid(panel_private->disp_en_gpio))
+			gpio_set_value(panel_private->disp_en_gpio, 1);
+	} else {
+		gpio_set_value(panel_private->rst_gpio, 0);
+		if (gpio_is_valid(panel_private->disp_en_gpio))
+			gpio_set_value(panel_private->disp_en_gpio, 0);
+	}
+}
+
+static void dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
+				u32 bl_level)
+{
+	if (panel_private->bl_ctrl) {
+		switch (panel_private->bl_ctrl) {
+		case BL_WLED:
+			led_trigger_event(bl_led_trigger, bl_level);
+			break;
+
+		default:
+			pr_err("%s: Unknown bl_ctrl configuration\n",
+				__func__);
+			break;
+		}
+	} else
+		pr_err("%s:%d, bl_ctrl not configured", __func__, __LINE__);
+}
+
+static int dsi_panel_on(struct mdss_panel_data *pdata)
+{
+	struct mipi_panel_info *mipi;
+
+	mipi  = &pdata->panel_info.mipi;
+
+	pr_debug("%s:%d, debug info (mode) : %d\n", __func__, __LINE__,
+		 mipi->mode);
+
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		dsi_cmds_tx_v2(pdata, &panel_private->dsi_panel_tx_buf,
+				panel_private->on_cmds_list->buf,
+				panel_private->on_cmds_list->size);
+	} else {
+		pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dsi_panel_off(struct mdss_panel_data *pdata)
+{
+	struct mipi_panel_info *mipi;
+	mipi  = &pdata->panel_info.mipi;
+
+	pr_debug("%s:%d, debug info\n", __func__, __LINE__);
+
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		dsi_cmds_tx_v2(pdata, &panel_private->dsi_panel_tx_buf,
+				panel_private->off_cmds_list->buf,
+				panel_private->off_cmds_list->size);
+	} else {
+		pr_debug("%s:%d, CMD mode not supported", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dsi_panel_parse_gpio(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	panel_private->disp_en_gpio = of_get_named_gpio(np,
+						"qcom,enable-gpio", 0);
+	panel_private->rst_gpio = of_get_named_gpio(np, "qcom,rst-gpio", 0);
+	return 0;
+}
+
+static int dsi_panel_parse_regulator(struct platform_device *pdev)
+{
+	panel_private->vddio_vreg = devm_regulator_get(&pdev->dev, "vddio");
+	panel_private->vdda_vreg = devm_regulator_get(&pdev->dev, "vdda");
+	return 0;
+}
+
+static int dsi_panel_parse_timing(struct platform_device *pdev,
+				struct dsi_panel_common_pdata *panel_data)
+{
+	struct device_node *np = pdev->dev.of_node;
+	u32 res[6], tmp;
+	int rc;
+
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
+	if (rc) {
+		pr_err("%s:%d, panel resolution not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	panel_data->panel_info.xres = (!rc ? res[0] : 480);
+	panel_data->panel_info.yres = (!rc ? res[1] : 800);
+
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-active-res", res, 2);
+	if (rc == 0) {
+		panel_data->panel_info.lcdc.xres_pad =
+			panel_data->panel_info.xres - res[0];
+		panel_data->panel_info.lcdc.yres_pad =
+			panel_data->panel_info.yres - res[1];
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
+	if (rc) {
+		pr_err("%s:%d, panel bpp not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	panel_data->panel_info.bpp = (!rc ? tmp : 24);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-porch-values", res, 6);
+	if (rc) {
+		pr_err("%s:%d, panel porch not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+
+	panel_data->panel_info.lcdc.h_back_porch = (!rc ? res[0] : 6);
+	panel_data->panel_info.lcdc.h_pulse_width = (!rc ? res[1] : 2);
+	panel_data->panel_info.lcdc.h_front_porch = (!rc ? res[2] : 6);
+	panel_data->panel_info.lcdc.v_back_porch = (!rc ? res[3] : 6);
+	panel_data->panel_info.lcdc.v_pulse_width = (!rc ? res[4] : 2);
+	panel_data->panel_info.lcdc.v_front_porch = (!rc ? res[5] : 6);
+
+	return 0;
+}
+
+static int dsi_panel_parse_phy(struct platform_device *pdev,
+				struct dsi_panel_common_pdata *panel_data)
+{
+	struct device_node *np = pdev->dev.of_node;
+	u32 res[6], tmp;
+	int i, len, rc;
+	const char *data;
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mode", &tmp);
+	panel_data->panel_info.mipi.mode = (!rc ? tmp : DSI_VIDEO_MODE);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-dsi-h-pulse-mode", &tmp);
+	panel_data->panel_info.mipi.pulse_mode_hsa_he = (!rc ? tmp : false);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-dsi-h-power-stop", res, 3);
+	panel_data->panel_info.mipi.hbp_power_stop = (!rc ? res[0] : false);
+	panel_data->panel_info.mipi.hsa_power_stop = (!rc ? res[1] : false);
+	panel_data->panel_info.mipi.hfp_power_stop = (!rc ? res[2] : false);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-dsi-bllp-power-stop", res, 2);
+	panel_data->panel_info.mipi.bllp_power_stop =
+					(!rc ? res[0] : false);
+	panel_data->panel_info.mipi.eof_bllp_power_stop =
+					(!rc ? res[1] : false);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-dsi-traffic-mode", &tmp);
+	panel_data->panel_info.mipi.traffic_mode =
+			(!rc ? tmp : DSI_NON_BURST_SYNCH_PULSE);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-dsi-dst-format", &tmp);
+	panel_data->panel_info.mipi.dst_format =
+			(!rc ? tmp : DSI_VIDEO_DST_FORMAT_RGB888);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-vc", &tmp);
+	panel_data->panel_info.mipi.vc = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-rgb-swap", &tmp);
+	panel_data->panel_info.mipi.rgb_swap = (!rc ? tmp : DSI_RGB_SWAP_RGB);
+
+	rc = of_property_read_u32_array(np,
+		"qcom,mdss-pan-dsi-data-lanes", res, 4);
+	panel_data->panel_info.mipi.data_lane0 = (!rc ? res[0] : true);
+	panel_data->panel_info.mipi.data_lane1 = (!rc ? res[1] : false);
+	panel_data->panel_info.mipi.data_lane2 = (!rc ? res[2] : false);
+	panel_data->panel_info.mipi.data_lane3 = (!rc ? res[3] : false);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dlane-swap", &tmp);
+	panel_data->panel_info.mipi.dlane_swap = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-t-clk", res, 2);
+	panel_data->panel_info.mipi.t_clk_pre = (!rc ? res[0] : 0x24);
+	panel_data->panel_info.mipi.t_clk_post = (!rc ? res[1] : 0x03);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-stream", &tmp);
+	panel_data->panel_info.mipi.stream = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mdp-tr", &tmp);
+	panel_data->panel_info.mipi.mdp_trigger =
+			(!rc ? tmp : DSI_CMD_TRIGGER_SW);
+	if (panel_data->panel_info.mipi.mdp_trigger > 6) {
+		pr_err("%s:%d, Invalid mdp trigger. Forcing to sw trigger",
+						 __func__, __LINE__);
+		panel_data->panel_info.mipi.mdp_trigger =
+					DSI_CMD_TRIGGER_SW;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dma-tr", &tmp);
+	panel_data->panel_info.mipi.dma_trigger =
+			(!rc ? tmp : DSI_CMD_TRIGGER_SW);
+	if (panel_data->panel_info.mipi.dma_trigger > 6) {
+		pr_err("%s:%d, Invalid dma trigger. Forcing to sw trigger",
+						 __func__, __LINE__);
+		panel_data->panel_info.mipi.dma_trigger =
+					DSI_CMD_TRIGGER_SW;
+	}
+
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-frame-rate", &tmp);
+	panel_data->panel_info.mipi.frame_rate = (!rc ? tmp : 60);
+
+	data = of_get_property(np, "qcom,panel-phy-regulatorSettings", &len);
+	if ((!data) || (len != 6)) {
+		pr_err("%s:%d, Unable to read Phy regulator settings",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	for (i = 0; i < len; i++)
+		panel_private->phy_params.regulator[i] = data[i];
+
+	data = of_get_property(np, "qcom,panel-phy-timingSettings", &len);
+	if ((!data) || (len != 12)) {
+		pr_err("%s:%d, Unable to read Phy timing settings",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	for (i = 0; i < len; i++)
+		panel_private->phy_params.timing[i] = data[i];
+
+	data = of_get_property(np, "qcom,panel-phy-strengthCtrl", &len);
+	if ((!data) || (len != 2)) {
+		pr_err("%s:%d, Unable to read Phy Strength ctrl settings",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	panel_private->phy_params.strength[0] = data[0];
+	panel_private->phy_params.strength[1] = data[1];
+
+	data = of_get_property(np, "qcom,panel-phy-bistCtrl", &len);
+	if ((!data) || (len != 6)) {
+		pr_err("%s:%d, Unable to read Phy Bist Ctrl settings",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	for (i = 0; i < len; i++)
+		panel_private->phy_params.bistCtrl[i] = data[i];
+
+	data = of_get_property(np, "qcom,panel-phy-laneConfig", &len);
+	if ((!data) || (len != 30)) {
+		pr_err("%s:%d, Unable to read Phy lane configure settings",
+			__func__, __LINE__);
+		return -EINVAL;
+	}
+	for (i = 0; i < len; i++)
+		panel_private->phy_params.laneCfg[i] = data[i];
+
+	panel_data->panel_info.mipi.dsi_phy_db = &panel_private->phy_params;
+	return 0;
+}
+
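+/*
+ * Parse the panel on/off command blobs from the device tree. Each entry
+ * is a DT_CMD_HDR (6) byte header -- dtype, last, vc, ack, wait, dlen --
+ * followed by dlen payload bytes. The blob is walked once to count the
+ * entries and a second time to fill a dsi_cmd_desc array whose payload
+ * pointers reference the copied blob.
+ */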
+static int dsi_panel_parse_init_cmds(struct platform_device *pdev,
+				struct dsi_panel_common_pdata *panel_data)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int i, len;
+	int cmd_plen, data_offset;
+	const char *data;
+	const char *on_cmds_state, *off_cmds_state;
+	char *on_cmds = NULL, *off_cmds = NULL;
+	int num_of_on_cmds = 0, num_of_off_cmds = 0;
+
+	data = of_get_property(np, "qcom,panel-on-cmds", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read ON cmds", __func__, __LINE__);
+		goto parse_init_cmds_error;
+	}
+
+	on_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+	if (!on_cmds)
+		goto parse_init_cmds_error;
+
+	memcpy(on_cmds, data, len);
+
+	data_offset = 0;
+	cmd_plen = 0;
+	while ((len - data_offset) >= DT_CMD_HDR) {
+		data_offset += (DT_CMD_HDR - 1);
+		cmd_plen = on_cmds[data_offset++];
+		data_offset += cmd_plen;
+		num_of_on_cmds++;
+	}
+	if (!num_of_on_cmds) {
+		pr_err("%s:%d, No ON cmds specified", __func__, __LINE__);
+		goto parse_init_cmds_error;
+	}
+
+	panel_data->dsi_panel_on_cmds =
+		kzalloc(sizeof(struct dsi_panel_cmds_list), GFP_KERNEL);
+	if (!panel_data->dsi_panel_on_cmds)
+		goto parse_init_cmds_error;
+
+	(panel_data->dsi_panel_on_cmds)->buf =
+		kzalloc((num_of_on_cmds * sizeof(struct dsi_cmd_desc)),
+			GFP_KERNEL);
+	if (!(panel_data->dsi_panel_on_cmds)->buf)
+		goto parse_init_cmds_error;
+
+	data_offset = 0;
+	for (i = 0; i < num_of_on_cmds; i++) {
+		panel_data->dsi_panel_on_cmds->buf[i].dtype =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].last =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].vc =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].ack =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].wait =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].dlen =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].payload =
+						&on_cmds[data_offset];
+		data_offset += (panel_data->dsi_panel_on_cmds->buf[i].dlen);
+	}
+
+	if (data_offset != len) {
+		pr_err("%s:%d, Incorrect ON command entries",
+						__func__, __LINE__);
+		goto parse_init_cmds_error;
+	}
+
+	(panel_data->dsi_panel_on_cmds)->size = num_of_on_cmds;
+
+	on_cmds_state = of_get_property(pdev->dev.of_node,
+					"qcom,on-cmds-dsi-state", NULL);
+	if (on_cmds_state && !strncmp(on_cmds_state, "DSI_LP_MODE", 11)) {
+		(panel_data->dsi_panel_on_cmds)->ctrl_state = DSI_LP_MODE;
+	} else if (on_cmds_state &&
+			!strncmp(on_cmds_state, "DSI_HS_MODE", 11)) {
+		(panel_data->dsi_panel_on_cmds)->ctrl_state = DSI_HS_MODE;
+	} else {
+		pr_debug("%s: ON cmds state not specified. Set Default\n",
+							__func__);
+		(panel_data->dsi_panel_on_cmds)->ctrl_state = DSI_LP_MODE;
+	}
+
+	panel_private->on_cmds_list = panel_data->dsi_panel_on_cmds;
+	data = of_get_property(np, "qcom,panel-off-cmds", &len);
+	if (!data) {
+		pr_err("%s:%d, Unable to read OFF cmds", __func__, __LINE__);
+		goto parse_init_cmds_error;
+	}
+
+	off_cmds = kzalloc(sizeof(char) * len, GFP_KERNEL);
+	if (!off_cmds)
+		goto parse_init_cmds_error;
+
+	memcpy(off_cmds, data, len);
+
+	data_offset = 0;
+	cmd_plen = 0;
+	while ((len - data_offset) >= DT_CMD_HDR) {
+		data_offset += (DT_CMD_HDR - 1);
+		cmd_plen = off_cmds[data_offset++];
+		data_offset += cmd_plen;
+		num_of_off_cmds++;
+	}
+	if (!num_of_off_cmds) {
+		pr_err("%s:%d, No OFF cmds specified", __func__, __LINE__);
+		goto parse_init_cmds_error;
+	}
+
+	panel_data->dsi_panel_off_cmds =
+		kzalloc(sizeof(struct dsi_panel_cmds_list), GFP_KERNEL);
+	if (!panel_data->dsi_panel_off_cmds)
+		goto parse_init_cmds_error;
+
+	(panel_data->dsi_panel_off_cmds)->buf = kzalloc(num_of_off_cmds
+					* sizeof(struct dsi_cmd_desc),
+						GFP_KERNEL);
+	if (!(panel_data->dsi_panel_off_cmds)->buf)
+		goto parse_init_cmds_error;
+
+	data_offset = 0;
+	for (i = 0; i < num_of_off_cmds; i++) {
+		panel_data->dsi_panel_off_cmds->buf[i].dtype =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].last =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].vc =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].ack =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].wait =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].dlen =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].payload =
+						&off_cmds[data_offset];
+		data_offset += (panel_data->dsi_panel_off_cmds->buf[i].dlen);
+	}
+
+	if (data_offset != len) {
+		pr_err("%s:%d, Incorrect OFF command entries",
+						__func__, __LINE__);
+		goto parse_init_cmds_error;
+	}
+
+	(panel_data->dsi_panel_off_cmds)->size = num_of_off_cmds;
+	off_cmds_state = of_get_property(pdev->dev.of_node,
+				"qcom,off-cmds-dsi-state", NULL);
+	if (off_cmds_state && !strncmp(off_cmds_state, "DSI_LP_MODE", 11)) {
+		(panel_data->dsi_panel_off_cmds)->ctrl_state = DSI_LP_MODE;
+	} else if (off_cmds_state &&
+			!strncmp(off_cmds_state, "DSI_HS_MODE", 11)) {
+		(panel_data->dsi_panel_off_cmds)->ctrl_state = DSI_HS_MODE;
+	} else {
+		pr_debug("%s: OFF cmds state not specified. Set Default\n",
+							__func__);
+		(panel_data->dsi_panel_off_cmds)->ctrl_state = DSI_LP_MODE;
+	}
+
+	panel_private->off_cmds_list = panel_data->dsi_panel_off_cmds;
+	kfree(on_cmds);
+	kfree(off_cmds);
+
+	return 0;
+parse_init_cmds_error:
+	if (panel_data->dsi_panel_on_cmds) {
+		kfree((panel_data->dsi_panel_on_cmds)->buf);
+		kfree(panel_data->dsi_panel_on_cmds);
+		panel_data->dsi_panel_on_cmds = NULL;
+	}
+	if (panel_data->dsi_panel_off_cmds) {
+		kfree((panel_data->dsi_panel_off_cmds)->buf);
+		kfree(panel_data->dsi_panel_off_cmds);
+		panel_data->dsi_panel_off_cmds = NULL;
+	}
+
+	kfree(on_cmds);
+	kfree(off_cmds);
+	return -EINVAL;
+}
+
+static int dsi_panel_parse_backlight(struct platform_device *pdev,
+				struct dsi_panel_common_pdata *panel_data,
+				char *bl_ctrl)
+{
+	int rc;
+	u32 res[6];
+	static const char *bl_ctrl_type;
+
+	bl_ctrl_type = of_get_property(pdev->dev.of_node,
+				  "qcom,mdss-pan-bl-ctrl", NULL);
+	if ((bl_ctrl_type) && (!strncmp(bl_ctrl_type, "bl_ctrl_wled", 12))) {
+		led_trigger_register_simple("bkl-trigger", &bl_led_trigger);
+		pr_debug("%s: SUCCESS-> WLED TRIGGER register\n", __func__);
+		*bl_ctrl = BL_WLED;
+	}
+
+	rc = of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,mdss-pan-bl-levels", res, 2);
+	panel_data->panel_info.bl_min = (!rc ? res[0] : 0);
+	panel_data->panel_info.bl_max = (!rc ? res[1] : 255);
+	return rc;
+}
+
+static int dsi_panel_parse_other(struct platform_device *pdev,
+				struct dsi_panel_common_pdata *panel_data)
+{
+	const char *pdest;
+	u32 tmp;
+	int rc;
+
+	pdest = of_get_property(pdev->dev.of_node,
+				"qcom,mdss-pan-dest", NULL);
+	if (!pdest || strlen(pdest) != 9) {
+		pr_err("%s: Unknown pdest specified\n", __func__);
+		return -EINVAL;
+	}
+	if (!strncmp(pdest, "display_1", 9)) {
+		panel_data->panel_info.pdest = DISPLAY_1;
+	} else if (!strncmp(pdest, "display_2", 9)) {
+		panel_data->panel_info.pdest = DISPLAY_2;
+	} else {
+		pr_debug("%s: pdest not specified. Set Default\n",
+							__func__);
+		panel_data->panel_info.pdest = DISPLAY_1;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-pan-underflow-clr", &tmp);
+	panel_data->panel_info.lcdc.underflow_clr = (!rc ? tmp : 0xff);
+
+	return rc;
+}
+
+static int dsi_panel_parse_dt(struct platform_device *pdev,
+				struct dsi_panel_common_pdata *panel_data,
+				char *bl_ctrl)
+{
+	int rc;
+
+	rc = dsi_panel_parse_gpio(pdev);
+	if (rc) {
+		pr_err("fail to parse panel GPIOs\n");
+		return rc;
+	}
+
+	rc = dsi_panel_parse_regulator(pdev);
+	if (rc) {
+		pr_err("fail to parse panel regulators\n");
+		return rc;
+	}
+
+	rc = dsi_panel_parse_timing(pdev, panel_data);
+	if (rc) {
+		pr_err("fail to parse panel timing\n");
+		return rc;
+	}
+
+	rc = dsi_panel_parse_phy(pdev, panel_data);
+	if (rc) {
+		pr_err("fail to parse DSI PHY settings\n");
+		return rc;
+	}
+
+	rc = dsi_panel_parse_backlight(pdev, panel_data, bl_ctrl);
+	if (rc) {
+		pr_err("fail to parse DSI backlight\n");
+		return rc;
+	}
+
+	rc = dsi_panel_parse_other(pdev, panel_data);
+	if (rc) {
+		pr_err("fail to parse DSI panel destination\n");
+		return rc;
+	}
+
+	rc = dsi_panel_parse_init_cmds(pdev, panel_data);
+	if (rc) {
+		pr_err("fail to parse DSI init commands\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int __devinit dsi_panel_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	static struct dsi_panel_common_pdata vendor_pdata;
+	static const char *panel_name;
+
+	pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id);
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	panel_name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!panel_name)
+		pr_debug("%s:%d, panel name not specified\n",
+						__func__, __LINE__);
+	else
+		pr_debug("%s: Panel Name = %s\n", __func__, panel_name);
+
+	rc = dsi_panel_init();
+	if (rc) {
+		pr_err("dsi_panel_init failed %d\n", rc);
+		goto dsi_panel_probe_error;
+
+	}
+	rc = dsi_panel_parse_dt(pdev, &vendor_pdata, &panel_private->bl_ctrl);
+	if (rc) {
+		pr_err("dsi_panel_parse_dt failed %d\n", rc);
+		goto dsi_panel_probe_error;
+	}
+
+	vendor_pdata.on = dsi_panel_on;
+	vendor_pdata.off = dsi_panel_off;
+	vendor_pdata.bl_fnc = dsi_panel_bl_ctrl;
+
+	rc = dsi_panel_device_register_v2(pdev, &vendor_pdata,
+					panel_private->bl_ctrl);
+
+	if (rc) {
+		pr_err("dsi_panel_device_register_v2 failed %d\n", rc);
+		goto dsi_panel_probe_error;
+	}
+
+	return 0;
+dsi_panel_probe_error:
+	dsi_panel_deinit();
+	return rc;
+}
+
+static int __devexit dsi_panel_remove(struct platform_device *pdev)
+{
+	dsi_panel_deinit();
+	return 0;
+}
+
+
+static const struct of_device_id dsi_panel_match[] = {
+	{.compatible = "qcom,dsi-panel-v2"},
+	{}
+};
+
+static struct platform_driver this_driver = {
+	.probe  = dsi_panel_probe,
+	.remove = __devexit_p(dsi_panel_remove),
+	.driver = {
+		.name = "dsi_v2_panel",
+		.of_match_table = dsi_panel_match,
+	},
+};
+
+static int __init dsi_panel_module_init(void)
+{
+	return platform_driver_register(&this_driver);
+}
+module_init(dsi_panel_module_init);
diff --git a/drivers/video/msm/mdss/dsi_v2.c b/drivers/video/msm/mdss/dsi_v2.c
new file mode 100644
index 0000000..5e46bf5
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_v2.c
@@ -0,0 +1,789 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/iopoll.h>
+#include <linux/of_device.h>
+
+#include "mdss_panel.h"
+#include "dsi_v2.h"
+
+static struct dsi_panel_common_pdata *panel_common_data;
+static struct dsi_interface dsi_intf;
+
+static int dsi_off(struct mdss_panel_data *pdata)
+{
+	int rc = 0;
+	if (!panel_common_data || !pdata)
+		return -ENODEV;
+
+	if (dsi_intf.off)
+		rc = dsi_intf.off(pdata);
+
+	if (rc) {
+		pr_err("mdss_dsi_off DSI failed %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("dsi_off reset\n");
+	if (panel_common_data->off)
+		panel_common_data->off(pdata);
+
+	return rc;
+}
+
+static int dsi_on(struct mdss_panel_data *pdata)
+{
+	int rc = 0;
+
+	pr_debug("dsi_on\n");
+
+	if (!panel_common_data || !pdata)
+		return -ENODEV;
+
+	if (panel_common_data->reset)
+		panel_common_data->reset(1);
+
+	pr_debug("dsi_on DSI controller on\n");
+	if (dsi_intf.on)
+		rc = dsi_intf.on(pdata);
+
+	if (rc) {
+		pr_err("mdss_dsi_on DSI failed %d\n", rc);
+		return rc;
+	}
+
+	pr_debug("dsi_on DSI panel on\n");
+	if (panel_common_data->on)
+		rc = panel_common_data->on(pdata);
+
+	if (rc) {
+		pr_err("mdss_dsi_on panel failed %d\n", rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int dsi_event_handler(struct mdss_panel_data *pdata,
+				int event, void *arg)
+{
+	int rc = 0;
+
+	if (!pdata || !panel_common_data) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -ENODEV;
+	}
+
+	switch (event) {
+	case MDSS_EVENT_PANEL_ON:
+		rc = dsi_on(pdata);
+		break;
+	case MDSS_EVENT_PANEL_OFF:
+		rc = dsi_off(pdata);
+		break;
+	default:
+		pr_debug("%s: unhandled event=%d\n", __func__, event);
+		break;
+	}
+	return rc;
+}
+
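+/*
+ * Resolve the DSI controller platform device through the panel node's
+ * "qcom,dsi-ctrl-phandle" property; a NULL return makes the caller
+ * defer probing until the controller has registered.
+ */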
+static struct platform_device *get_dsi_platform_device(
+			struct platform_device *dev)
+{
+	struct device_node *dsi_ctrl_np;
+	struct platform_device *ctrl_pdev;
+
+	dsi_ctrl_np = of_parse_phandle(dev->dev.of_node,
+					"qcom,dsi-ctrl-phandle", 0);
+
+	if (!dsi_ctrl_np)
+		return NULL;
+
+	ctrl_pdev = of_find_device_by_node(dsi_ctrl_np);
+	if (!ctrl_pdev)
+		return NULL;
+
+	return ctrl_pdev;
+}
+
+int dsi_panel_device_register_v2(struct platform_device *dev,
+				struct dsi_panel_common_pdata *panel_data,
+				char backlight_ctrl)
+{
+	struct mipi_panel_info *mipi;
+	struct platform_device *ctrl_pdev;
+	int rc;
+	u8 lanes = 0, bpp;
+	u32 h_period, v_period;
+	static struct mdss_panel_data dsi_panel_data;
+
+	h_period = ((panel_data->panel_info.lcdc.h_pulse_width)
+			+ (panel_data->panel_info.lcdc.h_back_porch)
+			+ (panel_data->panel_info.xres)
+			+ (panel_data->panel_info.lcdc.h_front_porch));
+
+	v_period = ((panel_data->panel_info.lcdc.v_pulse_width)
+			+ (panel_data->panel_info.lcdc.v_back_porch)
+			+ (panel_data->panel_info.yres)
+			+ (panel_data->panel_info.lcdc.v_front_porch));
+
+	mipi  = &panel_data->panel_info.mipi;
+
+	panel_data->panel_info.type =
+		((mipi->mode == DSI_VIDEO_MODE)
+			? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
+
+	if (mipi->data_lane3)
+		lanes += 1;
+	if (mipi->data_lane2)
+		lanes += 1;
+	if (mipi->data_lane1)
+		lanes += 1;
+	if (mipi->data_lane0)
+		lanes += 1;
+
+
+	if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
+		|| (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB888)
+		|| (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB666_LOOSE))
+		bpp = 3;
+	else if ((mipi->dst_format == DSI_CMD_DST_FORMAT_RGB565)
+		|| (mipi->dst_format == DSI_VIDEO_DST_FORMAT_RGB565))
+		bpp = 2;
+	else
+		bpp = 3; /* Default format set to RGB888 */
+
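+	/*
+	 * If the device tree did not provide a clock rate, derive the bit
+	 * clock from the frame geometry: total pixels per frame times
+	 * frame rate times bits per pixel (bpp * 8), divided across the
+	 * active data lanes.
+	 */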
+	if (panel_data->panel_info.type == MIPI_VIDEO_PANEL &&
+		!panel_data->panel_info.clk_rate) {
+		h_period += panel_data->panel_info.lcdc.xres_pad;
+		v_period += panel_data->panel_info.lcdc.yres_pad;
+
+		if (lanes > 0) {
+			panel_data->panel_info.clk_rate =
+			((h_period * v_period * (mipi->frame_rate) * bpp * 8)
+			   / lanes);
+		} else {
+			pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
+			panel_data->panel_info.clk_rate =
+				(h_period * v_period
+					 * (mipi->frame_rate) * bpp * 8);
+		}
+	}
+
+	ctrl_pdev = get_dsi_platform_device(dev);
+	if (!ctrl_pdev)
+		return -EPROBE_DEFER;
+
+	dsi_panel_data.event_handler = dsi_event_handler;
+
+	dsi_panel_data.panel_info = panel_data->panel_info;
+
+	dsi_panel_data.set_backlight = panel_data->bl_fnc;
+	panel_common_data = panel_data;
+	/*
+	 * register in mdp driver
+	 */
+	rc = mdss_register_panel(ctrl_pdev, &dsi_panel_data);
+	if (rc) {
+		dev_err(&dev->dev, "unable to register MIPI DSI panel\n");
+		return rc;
+	}
+
+	pr_debug("%s: Panel data initialized\n", __func__);
+	return 0;
+}
+
+void dsi_register_interface(struct dsi_interface *intf)
+{
+	dsi_intf = *intf;
+}
+
+int dsi_cmds_tx_v2(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_cmd_desc *cmds,
+			int cnt)
+{
+	int rc = 0;
+
+	if (!dsi_intf.tx)
+		return -EINVAL;
+
+	rc = dsi_intf.tx(pdata, tp, cmds, cnt);
+	return rc;
+}
+
+int dsi_cmds_rx_v2(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_buf *rp,
+			struct dsi_cmd_desc *cmds, int rlen)
+{
+	int rc = 0;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!dsi_intf.rx)
+		return -EINVAL;
+
+	rc = dsi_intf.rx(pdata, tp, rp, cmds, rlen);
+	return rc;
+}
+
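+/*
+ * dsi_buf helpers: dsi_buf_reserve() advances the data pointer to leave
+ * room for a packet header, and dsi_buf_push() backs it up again while
+ * adding the pushed bytes to the accounted length. Packet builders
+ * reserve the host header first, append any payload, then push the
+ * header back in front of it.
+ */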
+static char *dsi_buf_reserve(struct dsi_buf *dp, int len)
+{
+	dp->data += len;
+	return dp->data;
+}
+
+
+static char *dsi_buf_push(struct dsi_buf *dp, int len)
+{
+	dp->data -= len;
+	dp->len += len;
+	return dp->data;
+}
+
+static char *dsi_buf_reserve_hdr(struct dsi_buf *dp, int hlen)
+{
+	dp->hdr = (u32 *)dp->data;
+	return dsi_buf_reserve(dp, hlen);
+}
+
+char *dsi_buf_init(struct dsi_buf *dp)
+{
+	int off;
+
+	dp->data = dp->start;
+	off = (int)dp->data;
+	/* 8 byte align */
+	off &= 0x07;
+	if (off)
+		off = 8 - off;
+	dp->data += off;
+	dp->len = 0;
+	return dp->data;
+}
+
+int dsi_buf_alloc(struct dsi_buf *dp, int size)
+{
+	dp->start = kmalloc(size, GFP_KERNEL);
+	if (dp->start == NULL) {
+		pr_err("%s:%u\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	dp->end = dp->start + size;
+	dp->size = size;
+
+	if ((int)dp->start & 0x07) {
+		pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
+		return -EINVAL;
+	}
+
+	dp->data = dp->start;
+	dp->len = 0;
+	return 0;
+}
+
+/*
+ * mipi dsi generic long write
+ */
+static int dsi_generic_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	char *bp;
+	u32 *hp;
+	int i, len;
+
+	bp = dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/* fill up payload */
+	if (cm->payload) {
+		len = cm->dlen;
+		len += 3;
+		len &= ~0x03; /* round up to a multiple of 4 */
+		for (i = 0; i < cm->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_GEN_LWRITE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi generic short write with 0, 1, or 2 parameters
+ */
+static int dsi_generic_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+	int len;
+
+	if (cm->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_WRITE);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi generic read with 0, 1, or 2 parameters
+ */
+static int dsi_generic_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+	int len;
+
+	if (cm->dlen && cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (cm->dlen > 2) ? 2 : cm->dlen;
+
+	if (len == 1) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ1);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(0);
+	} else if (len == 2) {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ2);
+		*hp |= DSI_HDR_DATA1(cm->payload[0]);
+		*hp |= DSI_HDR_DATA2(cm->payload[1]);
+	} else {
+		*hp |= DSI_HDR_DTYPE(DTYPE_GEN_READ);
+		*hp |= DSI_HDR_DATA1(0);
+		*hp |= DSI_HDR_DATA2(0);
+	}
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs long write
+ */
+static int dsi_dcs_lwrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	char *bp;
+	u32 *hp;
+	int i, len;
+
+	bp = dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+
+	/*
+	 * fill up payload
+	 * dcs command byte (first byte) followed by payload
+	 */
+	if (cm->payload) {
+		len = cm->dlen;
+		len += 3;
+		len &= ~0x03; /* round up to a multiple of 4 */
+		for (i = 0; i < cm->dlen; i++)
+			*bp++ = cm->payload[i];
+
+		/* append 0xff to the end */
+		for (; i < len; i++)
+			*bp++ = 0xff;
+
+		dp->len += len;
+	}
+
+	/* fill up header */
+	hp = dp->hdr;
+	*hp = 0;
+	*hp = DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_LWRITE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 0 parameters
+ */
+static int dsi_dcs_swrite(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+	int len;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->ack)
+		*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	len = (cm->dlen > 1) ? 1 : cm->dlen;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs short write with 1 parameter
+ */
+static int dsi_dcs_swrite1(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	if (cm->dlen < 2 || cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	if (cm->ack)
+		*hp |= DSI_HDR_BTA;
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_WRITE1);
+	*hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */
+	*hp |= DSI_HDR_DATA2(cm->payload[1]); /* parameter */
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len;
+}
+
+/*
+ * mipi dsi dcs read with 0 parameters
+ */
+static int dsi_dcs_read(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	if (cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_BTA;
+	*hp |= DSI_HDR_DTYPE(DTYPE_DCS_READ);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]); /* dcs command byte */
+	*hp |= DSI_HDR_DATA2(0);
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len; /* 4 bytes */
+}
+
+static int dsi_cm_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_ON);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len; /* 4 bytes */
+}
+
+static int dsi_cm_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_CM_OFF);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len; /* 4 bytes */
+}
+
+static int dsi_peripheral_on(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_ON);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len; /* 4 bytes */
+}
+
+static int dsi_peripheral_off(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_PERIPHERAL_OFF);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len; /* 4 bytes */
+}
+
+static int dsi_set_max_pktsize(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	if (cm->dlen < 2 || cm->payload == 0) {
+		pr_err("%s: NO payload error\n", __func__);
+		return 0;
+	}
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_MAX_PKTSIZE);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	*hp |= DSI_HDR_DATA1(cm->payload[0]);
+	*hp |= DSI_HDR_DATA2(cm->payload[1]);
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len; /* 4 bytes */
+}
+
+static int dsi_null_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_NULL_PKT);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len; /* 4 bytes */
+}
+
+static int dsi_blank_pkt(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	u32 *hp;
+
+	dsi_buf_reserve_hdr(dp, DSI_HOST_HDR_SIZE);
+	hp = dp->hdr;
+	*hp = 0;
+	*hp |= DSI_HDR_WC(cm->dlen);
+	*hp |= DSI_HDR_LONG_PKT;
+	*hp |= DSI_HDR_VC(cm->vc);
+	*hp |= DSI_HDR_DTYPE(DTYPE_BLANK_PKT);
+	if (cm->last)
+		*hp |= DSI_HDR_LAST;
+
+	dsi_buf_push(dp, DSI_HOST_HDR_SIZE);
+
+	return dp->len; /* 4 bytes */
+}
+
+/*
+ * prepare cmd buffer to be txed
+ */
+int dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm)
+{
+	int len = 0;
+
+	switch (cm->dtype) {
+	case DTYPE_GEN_WRITE:
+	case DTYPE_GEN_WRITE1:
+	case DTYPE_GEN_WRITE2:
+		len = dsi_generic_swrite(dp, cm);
+		break;
+	case DTYPE_GEN_LWRITE:
+		len = dsi_generic_lwrite(dp, cm);
+		break;
+	case DTYPE_GEN_READ:
+	case DTYPE_GEN_READ1:
+	case DTYPE_GEN_READ2:
+		len = dsi_generic_read(dp, cm);
+		break;
+	case DTYPE_DCS_LWRITE:
+		len = dsi_dcs_lwrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE:
+		len = dsi_dcs_swrite(dp, cm);
+		break;
+	case DTYPE_DCS_WRITE1:
+		len = dsi_dcs_swrite1(dp, cm);
+		break;
+	case DTYPE_DCS_READ:
+		len = dsi_dcs_read(dp, cm);
+		break;
+	case DTYPE_MAX_PKTSIZE:
+		len = dsi_set_max_pktsize(dp, cm);
+		break;
+	case DTYPE_NULL_PKT:
+		len = dsi_null_pkt(dp, cm);
+		break;
+	case DTYPE_BLANK_PKT:
+		len = dsi_blank_pkt(dp, cm);
+		break;
+	case DTYPE_CM_ON:
+		len = dsi_cm_on(dp, cm);
+		break;
+	case DTYPE_CM_OFF:
+		len = dsi_cm_off(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_ON:
+		len = dsi_peripheral_on(dp, cm);
+		break;
+	case DTYPE_PERIPHERAL_OFF:
+		len = dsi_peripheral_off(dp, cm);
+		break;
+	default:
+		pr_debug("%s: dtype=%x NOT supported\n",
+					__func__, cm->dtype);
+		break;
+
+	}
+
+	return len;
+}
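+
+/*
+ * Usage sketch (illustrative, not part of this patch): a panel driver
+ * would typically pack a whole descriptor table into one DMA buffer
+ * (allocated earlier with dsi_buf_alloc()) before transmitting it:
+ *
+ *	struct dsi_cmd_desc cmds[N];	/\* filled in by the panel driver *\/
+ *	int i;
+ *
+ *	dsi_buf_init(&tx_buf);
+ *	for (i = 0; i < N; i++)
+ *		dsi_cmd_dma_add(&tx_buf, &cmds[i]);
+ *
+ * where tx_buf and N are placeholders; the interface tx() hook declared
+ * in dsi_v2.h then sends the packed buffer.
+ */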
+
+/*
+ * dsi_short_read1_resp: 1 parameter
+ */
+int dsi_short_read1_resp(struct dsi_buf *rp)
+{
+	/* strip out dcs type */
+	rp->data++;
+	rp->len = 1;
+	return rp->len;
+}
+
+/*
+ * dsi_short_read2_resp: 2 parameters
+ */
+int dsi_short_read2_resp(struct dsi_buf *rp)
+{
+	/* strip out dcs type */
+	rp->data++;
+	rp->len = 2;
+	return rp->len;
+}
+
+int dsi_long_read_resp(struct dsi_buf *rp)
+{
+	short len;
+
+	len = rp->data[2];
+	len <<= 8;
+	len |= rp->data[1];
+	/* strip out dcs header */
+	rp->data += 4;
+	rp->len -= 4;
+	/* strip out 2 bytes of checksum */
+	rp->len -= 2;
+	return len;
+}
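+
+/*
+ * Response layout assumed above (MIPI DSI long response packet):
+ *
+ *	data[0]       data type (e.g. DTYPE_DCS_LREAD_RESP, 0x1c)
+ *	data[1..2]    16-bit word count, little endian
+ *	data[3]       ECC
+ *	data[4..]     payload, followed by a 2-byte checksum
+ *
+ * e.g. a response with word count 4 returns len = 4 with rp->data advanced
+ * past the 4-byte header.
+ */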
diff --git a/drivers/video/msm/mdss/dsi_v2.h b/drivers/video/msm/mdss/dsi_v2.h
new file mode 100644
index 0000000..fa868ab
--- /dev/null
+++ b/drivers/video/msm/mdss/dsi_v2.h
@@ -0,0 +1,237 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DSI_H
+#define MDSS_DSI_H
+
+#include <linux/list.h>
+#include <mach/scm-io.h>
+
+#include "mdss_panel.h"
+
+#define MIPI_OUTP(addr, data) writel_relaxed((data), (addr))
+#define MIPI_INP(addr) readl_relaxed(addr)
+
+#define MIPI_DSI_PRIM 1
+#define MIPI_DSI_SECD 2
+
+#define MIPI_DSI_PANEL_VGA	0
+#define MIPI_DSI_PANEL_WVGA	1
+#define MIPI_DSI_PANEL_WVGA_PT	2
+#define MIPI_DSI_PANEL_FWVGA_PT	3
+#define MIPI_DSI_PANEL_WSVGA_PT	4
+#define MIPI_DSI_PANEL_QHD_PT 5
+#define MIPI_DSI_PANEL_WXGA	6
+#define MIPI_DSI_PANEL_WUXGA	7
+#define MIPI_DSI_PANEL_720P_PT	8
+#define DSI_PANEL_MAX	8
+
+enum {
+	DSI_VIDEO_MODE,
+	DSI_CMD_MODE,
+};
+
+enum {
+	ST_DSI_CLK_OFF,
+	ST_DSI_SUSPEND,
+	ST_DSI_RESUME,
+	ST_DSI_PLAYING,
+	ST_DSI_NUM
+};
+
+enum {
+	EV_DSI_UPDATE,
+	EV_DSI_DONE,
+	EV_DSI_TOUT,
+	EV_DSI_NUM
+};
+
+enum {
+	LANDSCAPE = 1,
+	PORTRAIT = 2,
+};
+
+enum {
+	DSI_CMD_MODE_DMA,
+	DSI_CMD_MODE_MDP,
+};
+
+enum {
+	BL_PWM,
+	BL_WLED,
+	BL_DCS_CMD,
+	UNKNOWN_CTRL,
+};
+
+enum {
+	DSI_LP_MODE,
+	DSI_HS_MODE,
+};
+
+#define DSI_NON_BURST_SYNCH_PULSE	0
+#define DSI_NON_BURST_SYNCH_EVENT	1
+#define DSI_BURST_MODE			2
+
+#define DSI_RGB_SWAP_RGB	0
+#define DSI_RGB_SWAP_RBG	1
+#define DSI_RGB_SWAP_BGR	2
+#define DSI_RGB_SWAP_BRG	3
+#define DSI_RGB_SWAP_GRB	4
+#define DSI_RGB_SWAP_GBR	5
+
+#define DSI_VIDEO_DST_FORMAT_RGB565		0
+#define DSI_VIDEO_DST_FORMAT_RGB666		1
+#define DSI_VIDEO_DST_FORMAT_RGB666_LOOSE	2
+#define DSI_VIDEO_DST_FORMAT_RGB888		3
+
+#define DSI_CMD_DST_FORMAT_RGB111	0
+#define DSI_CMD_DST_FORMAT_RGB332	3
+#define DSI_CMD_DST_FORMAT_RGB444	4
+#define DSI_CMD_DST_FORMAT_RGB565	6
+#define DSI_CMD_DST_FORMAT_RGB666	7
+#define DSI_CMD_DST_FORMAT_RGB888	8
+
+#define DSI_CMD_TRIGGER_NONE		0x0	/* mdp trigger */
+#define DSI_CMD_TRIGGER_TE		0x02
+#define DSI_CMD_TRIGGER_SW		0x04
+#define DSI_CMD_TRIGGER_SW_SEOF		0x05	/* cmd dma only */
+#define DSI_CMD_TRIGGER_SW_TE		0x06
+
+#define DSI_HOST_HDR_SIZE	4
+#define DSI_HDR_LAST		BIT(31)
+#define DSI_HDR_LONG_PKT	BIT(30)
+#define DSI_HDR_BTA		BIT(29)
+#define DSI_HDR_VC(vc)		(((vc) & 0x03) << 22)
+#define DSI_HDR_DTYPE(dtype)	(((dtype) & 0x03f) << 16)
+#define DSI_HDR_DATA2(data)	(((data) & 0x0ff) << 8)
+#define DSI_HDR_DATA1(data)	((data) & 0x0ff)
+#define DSI_HDR_WC(wc)		((wc) & 0x0ffff)
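+
+/*
+ * Example header encoding (illustrative): a "last", long DCS write on
+ * virtual channel 0 with a 5-byte payload packs as
+ *
+ *	DSI_HDR_WC(5) | DSI_HDR_VC(0) | DSI_HDR_LONG_PKT |
+ *	DSI_HDR_DTYPE(DTYPE_DCS_LWRITE) | DSI_HDR_LAST
+ *	= 0x00000005 | 0 | BIT(30) | (0x39 << 16) | BIT(31) = 0xc0390005
+ */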
+
+#define DSI_BUF_SIZE	1024
+#define DSI_MRPS	0x04  /* Maximum Return Packet Size */
+
+#define DSI_LEN 8 /* 4 x 4 - 6 - 2, bytes dcs header+crc-align  */
+
+struct dsi_buf {
+	u32 *hdr;	/* dsi host header */
+	char *start;	/* buffer start addr */
+	char *end;	/* buffer end addr */
+	int size;	/* size of buffer */
+	char *data;	/* buffer */
+	int len;	/* data length */
+	dma_addr_t dmap; /* mapped dma addr */
+};
+
+/* dcs read/write */
+#define DTYPE_DCS_WRITE		0x05	/* short write, 0 parameter */
+#define DTYPE_DCS_WRITE1	0x15	/* short write, 1 parameter */
+#define DTYPE_DCS_READ		0x06	/* read */
+#define DTYPE_DCS_LWRITE	0x39	/* long write */
+
+/* generic read/write */
+#define DTYPE_GEN_WRITE		0x03	/* short write, 0 parameter */
+#define DTYPE_GEN_WRITE1	0x13	/* short write, 1 parameter */
+#define DTYPE_GEN_WRITE2	0x23	/* short write, 2 parameter */
+#define DTYPE_GEN_LWRITE	0x29	/* long write */
+#define DTYPE_GEN_READ		0x04	/* long read, 0 parameter */
+#define DTYPE_GEN_READ1		0x14	/* long read, 1 parameter */
+#define DTYPE_GEN_READ2		0x24	/* long read, 2 parameter */
+
+#define DTYPE_TEAR_ON		0x35	/* set tear on */
+#define DTYPE_MAX_PKTSIZE	0x37	/* set max packet size */
+#define DTYPE_NULL_PKT		0x09	/* null packet, no data */
+#define DTYPE_BLANK_PKT		0x19	/* blanking packet, no data */
+
+#define DTYPE_CM_ON		0x02	/* color mode off */
+#define DTYPE_CM_OFF		0x12	/* color mode on */
+#define DTYPE_PERIPHERAL_OFF	0x22
+#define DTYPE_PERIPHERAL_ON	0x32
+
+/*
+ * dcs response
+ */
+#define DTYPE_ACK_ERR_RESP      0x02
+#define DTYPE_EOT_RESP          0x08    /* end of tx */
+#define DTYPE_GEN_READ1_RESP    0x11    /* 1 parameter, short */
+#define DTYPE_GEN_READ2_RESP    0x12    /* 2 parameter, short */
+#define DTYPE_GEN_LREAD_RESP    0x1a
+#define DTYPE_DCS_LREAD_RESP    0x1c
+#define DTYPE_DCS_READ1_RESP    0x21    /* 1 parameter, short */
+#define DTYPE_DCS_READ2_RESP    0x22    /* 2 parameter, short */
+
+struct dsi_cmd_desc {
+	int dtype;
+	int last;
+	int vc;
+	int ack;	/* ask ACK from peripheral */
+	int wait;
+	int dlen;
+	char *payload;
+};
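+
+/*
+ * Example descriptor (hypothetical panel code, for illustration): a DCS
+ * exit-sleep command followed by a 120 ms wait would be described as
+ *
+ *	static char exit_sleep[] = {0x11};
+ *	static struct dsi_cmd_desc sleep_out = {
+ *		DTYPE_DCS_WRITE, 1, 0, 0, 120, sizeof(exit_sleep), exit_sleep
+ *	};
+ */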
+
+struct dsi_panel_cmds_list {
+	struct dsi_cmd_desc *buf;
+	char size;
+	char ctrl_state;
+};
+
+struct dsi_panel_common_pdata {
+	struct mdss_panel_info panel_info;
+	int (*on) (struct mdss_panel_data *pdata);
+	int (*off) (struct mdss_panel_data *pdata);
+	void (*reset)(int enable);
+	void (*bl_fnc) (struct mdss_panel_data *pdata, u32 bl_level);
+	struct dsi_panel_cmds_list *dsi_panel_on_cmds;
+	struct dsi_panel_cmds_list *dsi_panel_off_cmds;
+};
+
+struct dsi_interface {
+	int (*on)(struct mdss_panel_data *pdata);
+	int (*off)(struct mdss_panel_data *pdata);
+	void (*op_mode_config)(int mode, struct mdss_panel_data *pdata);
+	int (*tx)(struct mdss_panel_data *pdata,
+		struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt);
+	int (*rx)(struct mdss_panel_data *pdata,
+		 struct dsi_buf *tp, struct dsi_buf *rp,
+		struct dsi_cmd_desc *cmds, int len);
+	int index;
+	void *private;
+};
+
+int dsi_panel_device_register_v2(struct platform_device *pdev,
+				struct dsi_panel_common_pdata *panel_data,
+				char bl_ctrl);
+
+void dsi_register_interface(struct dsi_interface *intf);
+
+int dsi_cmds_rx_v2(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_buf *rp,
+			struct dsi_cmd_desc *cmds, int len);
+
+int dsi_cmds_tx_v2(struct mdss_panel_data *pdata,
+			struct dsi_buf *tp, struct dsi_cmd_desc *cmds,
+			int cnt);
+
+char *dsi_buf_init(struct dsi_buf *dp);
+
+int dsi_buf_alloc(struct dsi_buf *dp, int size);
+
+int dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
+
+int dsi_short_read1_resp(struct dsi_buf *rp);
+
+int dsi_short_read2_resp(struct dsi_buf *rp);
+
+int dsi_long_read_resp(struct dsi_buf *rp);
+
+#endif /* MDSS_DSI_H */
diff --git a/drivers/video/msm/mdss/mdp3.c b/drivers/video/msm/mdss/mdp3.c
new file mode 100644
index 0000000..890b00b
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3.c
@@ -0,0 +1,917 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/memory_alloc.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+
+#include <mach/board.h>
+#include <mach/clk.h>
+#include <mach/hardware.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
+#include <mach/msm_memtypes.h>
+
+#include "mdp3.h"
+#include "mdss_fb.h"
+#include "mdp3_hwio.h"
+#include "mdp3_ctrl.h"
+
+#define MDP_CORE_HW_VERSION	0x03030304
+struct mdp3_hw_resource *mdp3_res;
+
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)		\
+	{						\
+		.src = MSM_BUS_MASTER_MDP_PORT0,	\
+		.dst = MSM_BUS_SLAVE_EBI_CH0,		\
+		.ab = (ab_val),				\
+		.ib = (ib_val),				\
+	}
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+	MDP_BUS_VECTOR_ENTRY(0, 0),
+	MDP_BUS_VECTOR_ENTRY(SZ_128M, SZ_256M),
+	MDP_BUS_VECTOR_ENTRY(SZ_256M, SZ_512M),
+};
+
+static struct msm_bus_paths mdp_bus_usecases[ARRAY_SIZE(mdp_bus_vectors)];
+
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+	.usecase = mdp_bus_usecases,
+	.num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+	.name = "mdp3",
+};
+
+struct mdp3_iommu_domain_map mdp3_iommu_domains[MDP3_IOMMU_DOMAIN_MAX] = {
+	[MDP3_IOMMU_DOMAIN] = {
+		.domain_type = MDP3_IOMMU_DOMAIN,
+		.client_name = "mdp_dma",
+		.partitions = {
+			{
+				.start = SZ_128K,
+				.size = SZ_1G - SZ_128K,
+			},
+		},
+		.npartitions = 1,
+	},
+};
+
+struct mdp3_iommu_ctx_map mdp3_iommu_contexts[MDP3_IOMMU_CTX_MAX] = {
+	[MDP3_IOMMU_CTX_PPP_0] = {
+		.ctx_type = MDP3_IOMMU_CTX_PPP_0,
+		.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
+		.ctx_name = "mdpe_0",
+		.attached = 0,
+	},
+	[MDP3_IOMMU_CTX_PPP_1] = {
+		.ctx_type = MDP3_IOMMU_CTX_PPP_1,
+		.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
+		.ctx_name = "mdpe_1",
+		.attached = 0,
+	},
+
+	[MDP3_IOMMU_CTX_DMA_0] = {
+		.ctx_type = MDP3_IOMMU_CTX_DMA_0,
+		.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
+		.ctx_name = "mdps_0",
+		.attached = 0,
+	},
+
+	[MDP3_IOMMU_CTX_DMA_1] = {
+		.ctx_type = MDP3_IOMMU_CTX_DMA_1,
+		.domain = &mdp3_iommu_domains[MDP3_IOMMU_DOMAIN],
+		.ctx_name = "mdps_1",
+		.attached = 0,
+	},
+};
+
+static irqreturn_t mdp3_irq_handler(int irq, void *ptr)
+{
+	int i = 0;
+	struct mdp3_hw_resource *mdata = (struct mdp3_hw_resource *)ptr;
+	u32 mdp_interrupt = MDP3_REG_READ(MDP3_REG_INTR_STATUS);
+
+	MDP3_REG_WRITE(MDP3_REG_INTR_CLEAR, mdp_interrupt);
+	pr_debug("mdp3_irq_handler irq=%d\n", mdp_interrupt);
+
+	spin_lock(&mdata->irq_lock);
+	mdp_interrupt &= mdata->irqMask;
+
+	while (mdp_interrupt && i < MDP3_MAX_INTR) {
+		if ((mdp_interrupt & 0x1) && mdata->callbacks[i].cb)
+			mdata->callbacks[i].cb(i, mdata->callbacks[i].data);
+		mdp_interrupt = mdp_interrupt >> 1;
+		i++;
+	}
+	spin_unlock(&mdata->irq_lock);
+
+	return IRQ_HANDLED;
+}
+
+void mdp3_irq_enable(int type)
+{
+	unsigned long flag;
+	int irqEnabled = 0;
+
+	pr_debug("mdp3_irq_enable type=%d\n", type);
+	spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+	if (mdp3_res->irqMask & BIT(type)) {
+		pr_debug("interrupt %d already enabled\n", type);
+		spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+		return;
+	}
+	irqEnabled = mdp3_res->irqMask;
+	mdp3_res->irqMask |= BIT(type);
+	MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irqMask);
+	if (!irqEnabled)
+		enable_irq(mdp3_res->irq);
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+void mdp3_irq_disable(int type)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+	if (mdp3_res->irqMask & BIT(type)) {
+		mdp3_res->irqMask &= ~BIT(type);
+		MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irqMask);
+		if (!mdp3_res->irqMask)
+			disable_irq(mdp3_res->irq);
+	} else {
+		pr_debug("interrupt %d not enabled\n", type);
+	}
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+}
+
+void mdp3_irq_disable_nosync(int type)
+{
+	if (mdp3_res->irqMask & BIT(type)) {
+		mdp3_res->irqMask &= ~BIT(type);
+		MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irqMask);
+		if (!mdp3_res->irqMask)
+			disable_irq_nosync(mdp3_res->irq);
+	} else {
+		pr_debug("interrupt %d not enabled\n", type);
+	}
+}
+
+int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb)
+{
+	unsigned long flag;
+
+	pr_debug("interrupt %d callback n", type);
+	spin_lock_irqsave(&mdp3_res->irq_lock, flag);
+	if (cb)
+		mdp3_res->callbacks[type] = *cb;
+	else
+		mdp3_res->callbacks[type].cb = NULL;
+
+	spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
+	return 0;
+}
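+
+/*
+ * Usage sketch (illustrative; my_handler and my_ctx are placeholders): a
+ * client registers its callback and then unmasks the interrupt source:
+ *
+ *	struct mdp3_intr_cb cb = { .cb = my_handler, .data = my_ctx };
+ *
+ *	mdp3_set_intr_callback(MDP3_INTR_DMA_P_DONE, &cb);
+ *	mdp3_irq_enable(MDP3_INTR_DMA_P_DONE);
+ *
+ * Passing cb == NULL clears the registered handler again.
+ */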
+
+static int mdp3_bus_scale_register(void)
+{
+	if (!mdp3_res->bus_handle) {
+		struct msm_bus_scale_pdata *bus_pdata = &mdp_bus_scale_table;
+		int i;
+
+		for (i = 0; i < bus_pdata->num_usecases; i++) {
+			mdp_bus_usecases[i].num_paths = 1;
+			mdp_bus_usecases[i].vectors = &mdp_bus_vectors[i];
+		}
+
+		mdp3_res->bus_handle = msm_bus_scale_register_client(bus_pdata);
+		if (!mdp3_res->bus_handle) {
+			pr_err("not able to get bus scale\n");
+			return -ENOMEM;
+		}
+		pr_debug("register bus_hdl=%x\n", mdp3_res->bus_handle);
+	}
+	return 0;
+}
+
+static void mdp3_bus_scale_unregister(void)
+{
+	pr_debug("unregister bus_handle=%x\n", mdp3_res->bus_handle);
+
+	if (mdp3_res->bus_handle)
+		msm_bus_scale_unregister_client(mdp3_res->bus_handle);
+}
+
+int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
+{
+	static int current_bus_idx;
+	int bus_idx;
+	int rc;
+
+	if (mdp3_res->bus_handle < 1) {
+		pr_err("invalid bus handle %d\n", mdp3_res->bus_handle);
+		return -EINVAL;
+	}
+
+	if ((ab_quota | ib_quota) == 0) {
+		bus_idx = 0;
+	} else {
+		int num_cases = mdp_bus_scale_table.num_usecases;
+		struct msm_bus_vectors *vect = NULL;
+
+		bus_idx = (current_bus_idx % (num_cases - 1)) + 1;
+
+		/* aligning to avoid performing updates for small changes */
+		ab_quota = ALIGN(ab_quota, SZ_64M);
+		ib_quota = ALIGN(ib_quota, SZ_64M);
+
+		vect = mdp_bus_scale_table.usecase[current_bus_idx].vectors;
+		if ((ab_quota == vect->ab) && (ib_quota == vect->ib)) {
+			pr_debug("skip bus scaling, no change in vectors\n");
+			return 0;
+		}
+
+		vect = mdp_bus_scale_table.usecase[bus_idx].vectors;
+		vect->ab = ab_quota;
+		vect->ib = ib_quota;
+
+		pr_debug("bus scale idx=%d ab=%llu ib=%llu\n", bus_idx,
+				vect->ab, vect->ib);
+	}
+	current_bus_idx = bus_idx;
+	rc = msm_bus_scale_client_update_request(mdp3_res->bus_handle, bus_idx);
+	return rc;
+}
+
+static int mdp3_clk_update(u32 clk_idx, u32 enable)
+{
+	int ret = -EINVAL;
+	struct clk *clk;
+	int count = 0;
+
+	if (clk_idx >= MDP3_MAX_CLK || !mdp3_res->clocks[clk_idx])
+		return -ENODEV;
+
+	clk = mdp3_res->clocks[clk_idx];
+
+	if (enable)
+		mdp3_res->clock_ref_count[clk_idx]++;
+	else
+		mdp3_res->clock_ref_count[clk_idx]--;
+
+	count = mdp3_res->clock_ref_count[clk_idx];
+	if (count == 1) {
+		pr_debug("clk=%d en=%d\n", clk_idx, enable);
+		ret = clk_prepare_enable(clk);
+	} else if (count == 0) {
+		pr_debug("clk=%d disable\n", clk_idx);
+		clk_disable_unprepare(clk);
+		ret = 0;
+	} else if (count < 0) {
+		pr_err("clk=%d count=%d\n", clk_idx, count);
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+int mdp3_vsync_clk_enable(int enable)
+{
+	int ret = 0;
+
+	pr_debug("vsync clk enable=%d\n", enable);
+	mutex_lock(&mdp3_res->res_mutex);
+	mdp3_clk_update(MDP3_CLK_VSYNC, enable);
+	mutex_unlock(&mdp3_res->res_mutex);
+	return ret;
+}
+
+int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate)
+{
+	int ret = 0;
+	unsigned long rounded_rate;
+	struct clk *clk = mdp3_res->clocks[clk_type];
+
+	if (clk) {
+		mutex_lock(&mdp3_res->res_mutex);
+		rounded_rate = clk_round_rate(clk, clk_rate);
+		if (IS_ERR_VALUE(rounded_rate)) {
+			pr_err("unable to round rate err=%ld\n", rounded_rate);
+			mutex_unlock(&mdp3_res->res_mutex);
+			return -EINVAL;
+		}
+		if (rounded_rate != clk_get_rate(clk)) {
+			ret = clk_set_rate(clk, rounded_rate);
+			if (ret)
+				pr_err("clk_set_rate failed ret=%d\n", ret);
+			else
+				pr_debug("mdp clk rate=%lu\n", rounded_rate);
+		}
+		mutex_unlock(&mdp3_res->res_mutex);
+	} else {
+		pr_err("mdp src clk not setup properly\n");
+		ret = -EINVAL;
+	}
+	return ret;
+}
+
+unsigned long mdp3_get_clk_rate(u32 clk_idx)
+{
+	unsigned long clk_rate = 0;
+	struct clk *clk;
+
+	if (clk_idx >= MDP3_MAX_CLK)
+		return -ENODEV;
+
+	clk = mdp3_res->clocks[clk_idx];
+
+	if (clk) {
+		mutex_lock(&mdp3_res->res_mutex);
+		clk_rate = clk_get_rate(clk);
+		mutex_unlock(&mdp3_res->res_mutex);
+	}
+	return clk_rate;
+}
+
+static int mdp3_clk_register(char *clk_name, int clk_idx)
+{
+	struct clk *tmp;
+
+	if (clk_idx >= MDP3_MAX_CLK) {
+		pr_err("invalid clk index %d\n", clk_idx);
+		return -EINVAL;
+	}
+
+	tmp = devm_clk_get(&mdp3_res->pdev->dev, clk_name);
+	if (IS_ERR(tmp)) {
+		pr_err("unable to get clk: %s\n", clk_name);
+		return PTR_ERR(tmp);
+	}
+
+	mdp3_res->clocks[clk_idx] = tmp;
+
+	return 0;
+}
+
+static int mdp3_clk_setup(void)
+{
+	int rc;
+
+	rc = mdp3_clk_register("iface_clk", MDP3_CLK_AHB);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("core_clk", MDP3_CLK_CORE);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("vsync_clk", MDP3_CLK_VSYNC);
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_register("lcdc_clk", MDP3_CLK_LCDC);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+static void mdp3_clk_remove(void)
+{
+	/* clocks were acquired with devm_clk_get(); release them the same way */
+	devm_clk_put(&mdp3_res->pdev->dev, mdp3_res->clocks[MDP3_CLK_AHB]);
+	devm_clk_put(&mdp3_res->pdev->dev, mdp3_res->clocks[MDP3_CLK_CORE]);
+	devm_clk_put(&mdp3_res->pdev->dev, mdp3_res->clocks[MDP3_CLK_VSYNC]);
+	devm_clk_put(&mdp3_res->pdev->dev, mdp3_res->clocks[MDP3_CLK_LCDC]);
+}
+
+int mdp3_clk_enable(int enable)
+{
+	int rc;
+
+	pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
+
+	mutex_lock(&mdp3_res->res_mutex);
+	rc = mdp3_clk_update(MDP3_CLK_AHB, enable);
+	rc |= mdp3_clk_update(MDP3_CLK_CORE, enable);
+	rc |= mdp3_clk_update(MDP3_CLK_VSYNC, enable);
+	mutex_unlock(&mdp3_res->res_mutex);
+	return rc;
+}
+
+static int mdp3_irq_setup(void)
+{
+	int ret;
+
+	ret = devm_request_irq(&mdp3_res->pdev->dev,
+				mdp3_res->irq,
+				mdp3_irq_handler,
+				IRQF_DISABLED, "MDP", mdp3_res);
+	if (ret) {
+		pr_err("mdp request_irq() failed!\n");
+		return ret;
+	}
+	disable_irq(mdp3_res->irq);
+	return 0;
+}
+
+static int mdp3_iommu_fault_handler(struct iommu_domain *domain,
+		struct device *dev, unsigned long iova, int flags, void *token)
+{
+	pr_err("MDP IOMMU page fault: iova 0x%lx\n", iova);
+	return 0;
+}
+
+int mdp3_iommu_attach(int context)
+{
+	struct mdp3_iommu_ctx_map *context_map;
+	struct mdp3_iommu_domain_map *domain_map;
+
+	if (context >= MDP3_IOMMU_CTX_MAX)
+		return -EINVAL;
+
+	context_map = mdp3_res->iommu_contexts + context;
+	if (context_map->attached) {
+		pr_warn("mdp iommu already attached\n");
+		return 0;
+	}
+
+	domain_map = context_map->domain;
+
+	iommu_attach_device(domain_map->domain, context_map->ctx);
+
+	context_map->attached = true;
+	return 0;
+}
+
+int mdp3_iommu_dettach(int context)
+{
+	struct mdp3_iommu_ctx_map *context_map;
+	struct mdp3_iommu_domain_map *domain_map;
+
+	if (context >= MDP3_IOMMU_CTX_MAX)
+		return -EINVAL;
+
+	context_map = mdp3_res->iommu_contexts + context;
+	if (!context_map->attached) {
+		pr_warn("mdp iommu not attached\n");
+		return 0;
+	}
+
+	domain_map = context_map->domain;
+	iommu_detach_device(domain_map->domain, context_map->ctx);
+	context_map->attached = false;
+
+	return 0;
+}
+
+int mdp3_iommu_domain_init(void)
+{
+	struct msm_iova_layout layout;
+	int i;
+
+	if (mdp3_res->domains) {
+		pr_warn("iommu domain already initialized\n");
+		return 0;
+	}
+
+	for (i = 0; i < MDP3_IOMMU_DOMAIN_MAX; i++) {
+		int domain_idx;
+		layout.client_name = mdp3_iommu_domains[i].client_name;
+		layout.partitions = mdp3_iommu_domains[i].partitions;
+		layout.npartitions = mdp3_iommu_domains[i].npartitions;
+		layout.is_secure = false;
+
+		domain_idx = msm_register_domain(&layout);
+		if (IS_ERR_VALUE(domain_idx))
+			return -EINVAL;
+
+		mdp3_iommu_domains[i].domain_idx = domain_idx;
+		mdp3_iommu_domains[i].domain = msm_get_iommu_domain(domain_idx);
+		if (!mdp3_iommu_domains[i].domain) {
+			pr_err("unable to get iommu domain(%d)\n",
+				domain_idx);
+			return -EINVAL;
+		}
+		iommu_set_fault_handler(mdp3_iommu_domains[i].domain,
+					mdp3_iommu_fault_handler,
+					NULL);
+	}
+
+	mdp3_res->domains = mdp3_iommu_domains;
+
+	return 0;
+}
+
+int mdp3_iommu_context_init(void)
+{
+	int i;
+
+	if (mdp3_res->iommu_contexts) {
+		pr_warn("iommu context already initialized\n");
+		return 0;
+	}
+
+	for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++) {
+		mdp3_iommu_contexts[i].ctx =
+			msm_iommu_get_ctx(mdp3_iommu_contexts[i].ctx_name);
+
+		if (!mdp3_iommu_contexts[i].ctx) {
+			pr_warn("unable to get iommu ctx(%s)\n",
+				mdp3_iommu_contexts[i].ctx_name);
+			return -EINVAL;
+		}
+	}
+
+	mdp3_res->iommu_contexts = mdp3_iommu_contexts;
+
+	return 0;
+}
+
+int mdp3_iommu_init(void)
+{
+	int ret;
+
+	ret = mdp3_iommu_domain_init();
+	if (ret) {
+		pr_err("mdp3 iommu domain init fails\n");
+		return ret;
+	}
+
+	ret = mdp3_iommu_context_init();
+	if (ret) {
+		pr_err("mdp3 iommu context init fails\n");
+		return ret;
+	}
+	return ret;
+}
+
+static int mdp3_check_version(void)
+{
+	int rc;
+
+	rc = mdp3_clk_update(MDP3_CLK_AHB, 1);
+	if (rc)
+		return rc;
+
+	mdp3_res->mdp_rev = MDP3_REG_READ(MDP3_REG_HW_VERSION);
+
+	rc = mdp3_clk_update(MDP3_CLK_AHB, 0);
+	if (rc)
+		pr_err("fail to turn off the MDP3_CLK_AHB clk\n");
+
+	if (mdp3_res->mdp_rev != MDP_CORE_HW_VERSION) {
+		pr_err("mdp_hw_revision=%x mismatch\n", mdp3_res->mdp_rev);
+		rc = -ENODEV;
+	}
+	return rc;
+}
+
+static int mdp3_hw_init(void)
+{
+	int i;
+
+	for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
+		mdp3_res->dma[i].dma_sel = i;
+		mdp3_res->dma[i].capability = MDP3_DMA_CAP_ALL;
+		mdp3_res->dma[i].in_use = 0;
+		mdp3_res->dma[i].available = 1;
+	}
+	mdp3_res->dma[MDP3_DMA_S].capability = MDP3_DMA_CAP_DITHER;
+	mdp3_res->dma[MDP3_DMA_E].available = 0;
+
+	for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
+		mdp3_res->intf[i].cfg.type = i;
+		mdp3_res->intf[i].active = 0;
+		mdp3_res->intf[i].in_use = 0;
+		mdp3_res->intf[i].available = 1;
+	}
+	mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_AHB].available = 0;
+	mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_LCDC].available = 0;
+
+	return 0;
+}
+
+static int mdp3_res_init(void)
+{
+	int rc = 0;
+
+	rc = mdp3_irq_setup();
+	if (rc)
+		return rc;
+
+	rc = mdp3_clk_setup();
+	if (rc)
+		return rc;
+
+	mdp3_res->ion_client = msm_ion_client_create(-1, mdp3_res->pdev->name);
+	if (IS_ERR_OR_NULL(mdp3_res->ion_client)) {
+		pr_err("msm_ion_client_create() return error (%p)\n",
+				mdp3_res->ion_client);
+		mdp3_res->ion_client = NULL;
+		return -EINVAL;
+	}
+
+	rc = mdp3_iommu_init();
+	if (rc)
+		return rc;
+
+	rc = mdp3_iommu_attach(MDP3_IOMMU_CTX_DMA_0);
+	if (rc) {
+		pr_err("fail to attach DMA-P context 0\n");
+		return rc;
+	}
+	rc = mdp3_bus_scale_register();
+	if (rc) {
+		pr_err("unable to register bus scaling\n");
+		return rc;
+	}
+
+	rc = mdp3_hw_init();
+
+	return rc;
+}
+
+static int mdp3_parse_dt(struct platform_device *pdev)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys");
+	if (!res) {
+		pr_err("unable to get MDP base address\n");
+		return -EINVAL;
+	}
+
+	mdp3_res->mdp_reg_size = resource_size(res);
+	mdp3_res->mdp_base = devm_ioremap(&pdev->dev, res->start,
+					mdp3_res->mdp_reg_size);
+	if (unlikely(!mdp3_res->mdp_base)) {
+		pr_err("unable to map MDP base\n");
+		return -ENOMEM;
+	}
+
+	pr_debug("MDP HW Base phy_Address=0x%x virt=0x%x\n",
+		(int) res->start,
+		(int) mdp3_res->mdp_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		pr_err("unable to get MDSS irq\n");
+		return -EINVAL;
+	}
+	mdp3_res->irq = res->start;
+
+	return 0;
+}
+
+static int mdp3_init(struct msm_fb_data_type *mfd)
+{
+	return mdp3_ctrl_init(mfd);
+}
+
+u32 mdp3_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+	/*
+	 * The adreno GPU hardware requires that the pitch be aligned to
+	 * 32 pixels for color buffers, so for the cases where the GPU
+	 * is writing directly to fb0, the framebuffer pitch
+	 * also needs to be 32 pixel aligned
+	 */
+
+	if (fb_index == 0)
+		return ALIGN(xres, 32) * bpp;
+	else
+		return xres * bpp;
+}
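+
+/*
+ * Worked example (illustrative): for fb0 with xres = 540 and 32 bpp
+ * (bpp = 4 bytes), ALIGN(540, 32) = 544, so the stride is 544 * 4 = 2176
+ * bytes even though the visible line is only 540 * 4 = 2160 bytes.
+ */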
+
+/*
+ * Physically contiguous memory should be allocated in mdss_fb, and the SMMU
+ * virtual address mapping can be done in the MDP h/w specific code. The
+ * mapping should be reference counted; when nothing is currently mapped,
+ * the SMMU context can be detached, allowing the SMMU to save power.
+ */
+static int mdp3_fbmem_alloc(struct msm_fb_data_type *mfd)
+{
+	int dom;
+	void *virt = NULL;
+	unsigned long phys = 0;
+	size_t size;
+	u32 yres = mfd->fbi->var.yres_virtual;
+
+	size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);
+
+	if (mfd->index == 0) {
+		virt = allocate_contiguous_memory(size, MEMTYPE_EBI1, SZ_1M, 0);
+		if (!virt) {
+			pr_err("unable to alloc fbmem size=%u\n", size);
+			return -ENOMEM;
+		}
+		phys = memory_pool_node_paddr(virt);
+		dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN)->domain_idx;
+		msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K, 0,
+					&mfd->iova);
+
+		pr_debug("allocating %u bytes at %p (%lx phys) for fb %d\n",
+			size, virt, phys, mfd->index);
+	} else {
+		size = 0;
+	}
+
+	mfd->fbi->screen_base = virt;
+	mfd->fbi->fix.smem_start = phys;
+	mfd->fbi->fix.smem_len = size;
+	return 0;
+}
+
+struct mdp3_dma *mdp3_get_dma_pipe(int capability)
+{
+	int i;
+
+	for (i = MDP3_DMA_P; i < MDP3_DMA_MAX; i++) {
+		if (!mdp3_res->dma[i].in_use && mdp3_res->dma[i].available &&
+			mdp3_res->dma[i].capability & capability) {
+			mdp3_res->dma[i].in_use = true;
+			return &mdp3_res->dma[i];
+		}
+	}
+	return NULL;
+}
+
+struct mdp3_intf *mdp3_get_display_intf(int type)
+{
+	int i;
+
+	for (i = MDP3_DMA_OUTPUT_SEL_AHB; i < MDP3_DMA_OUTPUT_SEL_MAX; i++) {
+		if (!mdp3_res->intf[i].in_use && mdp3_res->intf[i].available &&
+			mdp3_res->intf[i].cfg.type == type) {
+			mdp3_res->intf[i].in_use = true;
+			return &mdp3_res->intf[i];
+		}
+	}
+	return NULL;
+}
+
+static int mdp3_probe(struct platform_device *pdev)
+{
+	int rc;
+	static struct msm_mdp_interface mdp3_interface = {
+	.init_fnc = mdp3_init,
+	.fb_mem_alloc_fnc = mdp3_fbmem_alloc,
+	.fb_stride = mdp3_fb_stride,
+	};
+
+	if (!pdev->dev.of_node) {
+		pr_err("MDP driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	if (mdp3_res) {
+		pr_err("MDP already initialized\n");
+		return -EINVAL;
+	}
+
+	mdp3_res = devm_kzalloc(&pdev->dev, sizeof(struct mdp3_hw_resource),
+				GFP_KERNEL);
+	if (mdp3_res == NULL)
+		return -ENOMEM;
+
+	pdev->id = 0;
+	mdp3_res->pdev = pdev;
+	mutex_init(&mdp3_res->res_mutex);
+	spin_lock_init(&mdp3_res->irq_lock);
+	platform_set_drvdata(pdev, mdp3_res);
+
+	rc = mdp3_parse_dt(pdev);
+	if (rc)
+		goto probe_done;
+
+	rc = mdp3_res_init();
+	if (rc) {
+		pr_err("unable to initialize mdp3 resources\n");
+		goto probe_done;
+	}
+
+	rc = mdp3_check_version();
+	if (rc) {
+		pr_err("mdp3 check version failed\n");
+		goto probe_done;
+	}
+
+	rc = mdss_fb_register_mdp_instance(&mdp3_interface);
+	if (rc)
+		pr_err("unable to register mdp instance\n");
+
+probe_done:
+	if (IS_ERR_VALUE(rc)) {
+		devm_kfree(&pdev->dev, mdp3_res);
+		mdp3_res = NULL;
+	}
+
+	return rc;
+}
+
+static  int mdp3_suspend_sub(struct mdp3_hw_resource *mdata)
+{
+	return 0;
+}
+
+static  int mdp3_resume_sub(struct mdp3_hw_resource *mdata)
+{
+	return 0;
+}
+
+static int mdp3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+
+	if (!mdata)
+		return -ENODEV;
+
+	pr_debug("display suspend\n");
+
+	return mdp3_suspend_sub(mdata);
+}
+
+static int mdp3_resume(struct platform_device *pdev)
+{
+	struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+
+	if (!mdata)
+		return -ENODEV;
+
+	pr_debug("display resume\n");
+
+	return mdp3_resume_sub(mdata);
+}
+
+static int mdp3_remove(struct platform_device *pdev)
+{
+	struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+
+	if (!mdata)
+		return -ENODEV;
+	pm_runtime_disable(&pdev->dev);
+	mdp3_bus_scale_unregister();
+	mdp3_clk_remove();
+	return 0;
+}
+
+static const struct of_device_id mdp3_dt_match[] = {
+	{ .compatible = "qcom,mdss_mdp3",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdp3_dt_match);
+EXPORT_COMPAT("qcom,mdss_mdp3");
+
+static struct platform_driver mdp3_driver = {
+	.probe = mdp3_probe,
+	.remove = mdp3_remove,
+	.suspend = mdp3_suspend,
+	.resume = mdp3_resume,
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdp3",
+		.of_match_table = mdp3_dt_match,
+	},
+};
+
+static int __init mdp3_driver_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&mdp3_driver);
+	if (ret) {
+		pr_err("register mdp3 driver failed!\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+module_init(mdp3_driver_init);
diff --git a/drivers/video/msm/mdss/mdp3.h b/drivers/video/msm/mdss/mdp3.h
new file mode 100644
index 0000000..c853664
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3.h
@@ -0,0 +1,123 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDP3_H
+#define MDP3_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/earlysuspend.h>
+
+#include <mach/iommu_domains.h>
+
+#include "mdp3_dma.h"
+
+enum  {
+	MDP3_CLK_AHB,
+	MDP3_CLK_CORE,
+	MDP3_CLK_VSYNC,
+	MDP3_CLK_LCDC,
+	MDP3_MAX_CLK
+};
+
+enum {
+	MDP3_IOMMU_DOMAIN,
+	MDP3_IOMMU_DOMAIN_MAX
+};
+
+enum {
+	MDP3_IOMMU_CTX_PPP_0,
+	MDP3_IOMMU_CTX_PPP_1,
+	MDP3_IOMMU_CTX_DMA_0,
+	MDP3_IOMMU_CTX_DMA_1,
+	MDP3_IOMMU_CTX_MAX
+};
+
+enum {
+	MDP3_BW_CLIENT_DMA_P,
+	MDP3_BW_CLIENT_DMA_S,
+	MDP3_BW_CLIENT_DMA_E,
+	MDP3_BW_CLIENT_PPP,
+};
+
+struct mdp3_iommu_domain_map {
+	u32 domain_type;
+	char *client_name;
+	struct msm_iova_partition partitions[1];
+	int npartitions;
+	int domain_idx;
+	struct iommu_domain *domain;
+};
+
+struct mdp3_iommu_ctx_map {
+	u32 ctx_type;
+	struct mdp3_iommu_domain_map *domain;
+	char *ctx_name;
+	struct device *ctx;
+	int attached;
+};
+
+#define MDP3_MAX_INTR 28
+
+struct mdp3_intr_cb {
+	void (*cb)(int type, void *);
+	void *data;
+};
+
+struct mdp3_hw_resource {
+	struct platform_device *pdev;
+	u32 mdp_rev;
+
+	struct mutex res_mutex;
+
+	struct clk *clocks[MDP3_MAX_CLK];
+	int clock_ref_count[MDP3_MAX_CLK];
+
+	char __iomem *mdp_base;
+	size_t mdp_reg_size;
+
+	u32 irq;
+	u32 bus_handle;
+
+	struct ion_client *ion_client;
+	struct mdp3_iommu_domain_map *domains;
+	struct mdp3_iommu_ctx_map *iommu_contexts;
+
+	struct mdp3_dma dma[MDP3_DMA_MAX];
+	struct mdp3_intf intf[MDP3_DMA_OUTPUT_SEL_MAX];
+
+	spinlock_t irq_lock;
+	u32 irqMask;
+	struct mdp3_intr_cb callbacks[MDP3_MAX_INTR];
+
+	struct early_suspend suspend_handler;
+};
+
+extern struct mdp3_hw_resource *mdp3_res;
+
+struct mdp3_dma *mdp3_get_dma_pipe(int capability);
+struct mdp3_intf *mdp3_get_display_intf(int type);
+void mdp3_irq_enable(int type);
+void mdp3_irq_disable(int type);
+void mdp3_irq_disable_nosync(int type);
+int mdp3_set_intr_callback(u32 type, struct mdp3_intr_cb *cb);
+int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate);
+int mdp3_clk_enable(int enable);
+int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota);
+
+#define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
+#define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
+
+#endif /* MDP3_H */
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.c b/drivers/video/msm/mdss/mdp3_ctrl.c
new file mode 100644
index 0000000..e07c0a4
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_ctrl.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include "mdp3_ctrl.h"
+#include "mdp3.h"
+
+#define MDP_VSYNC_CLK_RATE	19200000
+#define VSYNC_PERIOD 16
+
+static void vsync_notify_handler(void *arg)
+{
+	struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
+	complete(&session->vsync_comp);
+}
+
+static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable)
+{
+	struct mdp3_session_data *mdp3_session;
+	struct mdp3_vsync_notification vsync_client;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf)
+		return -ENODEV;
+
+	vsync_client.handler = vsync_notify_handler;
+	vsync_client.arg = mdp3_session;
+
+	mutex_lock(&mdp3_session->lock);
+	if (!mdp3_session->status) {
+		pr_debug("fb%d is not on yet", mfd->index);
+		mutex_unlock(&mdp3_session->lock);
+		return -EINVAL;
+	}
+
+	mdp3_session->dma->vsync_enable(mdp3_session->dma,
+					enable ? &vsync_client : NULL);
+	mutex_unlock(&mdp3_session->lock);
+	return 0;
+}
+
+static ssize_t mdp3_vsync_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdp3_session_data *mdp3_session = NULL;
+	u64 vsync_ticks;
+	ktime_t vsync_time;
+	int rc;
+
+	if (!mfd || !mfd->mdp.private1)
+		return 0;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+	rc = wait_for_completion_interruptible_timeout(
+				&mdp3_session->vsync_comp,
+				msecs_to_jiffies(VSYNC_PERIOD * 5));
+	if (rc <= 0) {
+		pr_warn("vsync wait on fb%d interrupted (%d)\n",
+			mfd->index, rc);
+		return -EBUSY;
+	}
+
+	vsync_time = mdp3_session->dma->get_vsync_time(mdp3_session->dma);
+	vsync_ticks = ktime_to_ns(vsync_time);
+
+	pr_debug("fb%d vsync=%llu", mfd->index, vsync_ticks);
+	rc = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_ticks);
+	return rc;
+}
+
+static DEVICE_ATTR(vsync_event, S_IRUGO, mdp3_vsync_show_event, NULL);
+
+static struct attribute *vsync_fs_attrs[] = {
+	&dev_attr_vsync_event.attr,
+	NULL,
+};
+
+static struct attribute_group vsync_fs_attr_group = {
+	.attrs = vsync_fs_attrs,
+};
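+
+/*
+ * Userspace consumes this attribute by blocking on a read of the fb sysfs
+ * node (path assumed for illustration):
+ *
+ *	$ cat /sys/class/graphics/fb0/vsync_event
+ *	VSYNC=123456789012
+ *
+ * Each read waits up to five nominal vsync periods (80 ms) before giving
+ * up with -EBUSY.
+ */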
+
+static int mdp3_ctrl_res_req_dma(struct msm_fb_data_type *mfd, int status)
+{
+	int rc = 0;
+	if (status) {
+		struct mdss_panel_info *panel_info = mfd->panel_info;
+		int ab = 0;
+		int ib = 0;
+		unsigned long core_clk = 0;
+		int vtotal = 0;
+		ab = panel_info->xres * panel_info->yres * 4;
+		ab *= panel_info->mipi.frame_rate;
+		ib = (ab * 3) / 2;
+		vtotal = panel_info->lcdc.v_back_porch +
+			panel_info->lcdc.v_front_porch +
+			panel_info->lcdc.v_pulse_width +
+			panel_info->yres;
+		core_clk = panel_info->xres * panel_info->yres;
+		core_clk *= panel_info->mipi.frame_rate;
+		core_clk = (core_clk / panel_info->yres) * vtotal;
+		mdp3_clk_set_rate(MDP3_CLK_CORE, core_clk);
+		mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE);
+
+		rc = mdp3_clk_enable(true);
+		if (rc)
+			return rc;
+
+		mdp3_bus_scale_set_quota(MDP3_BW_CLIENT_DMA_P, ab, ib);
+	} else {
+		rc = mdp3_clk_enable(false);
+		rc |= mdp3_bus_scale_set_quota(MDP3_BW_CLIENT_DMA_P, 0, 0);
+	}
+	return rc;
+}
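+
+/*
+ * Worked example of the request above (illustrative numbers): for a
+ * 480x800 panel at 60 fps, ab = 480 * 800 * 4 * 60 = ~92 MB/s and
+ * ib = ab * 3 / 2 = ~138 MB/s; the core clock scales the pixel rate by
+ * vtotal/yres to account for the vertical blanking lines.
+ */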
+
+static int mdp3_ctrl_get_intf_type(struct msm_fb_data_type *mfd)
+{
+	int type;
+	switch (mfd->panel.type) {
+	case MIPI_VIDEO_PANEL:
+		type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+		break;
+	case MIPI_CMD_PANEL:
+		type = MDP3_DMA_OUTPUT_SEL_DSI_CMD;
+		break;
+	case LCDC_PANEL:
+		type = MDP3_DMA_OUTPUT_SEL_LCDC;
+		break;
+	default:
+		type = MDP3_DMA_OUTPUT_SEL_MAX;
+	}
+	return type;
+}
+
+static int mdp3_ctrl_get_source_format(struct msm_fb_data_type *mfd)
+{
+	int format;
+	switch (mfd->fb_imgType) {
+	case MDP_RGB_565:
+		format = MDP3_DMA_IBUF_FORMAT_RGB565;
+		break;
+	case MDP_RGB_888:
+		format = MDP3_DMA_IBUF_FORMAT_RGB888;
+		break;
+	case MDP_ARGB_8888:
+	case MDP_RGBA_8888:
+		format = MDP3_DMA_IBUF_FORMAT_XRGB8888;
+		break;
+	default:
+		format = MDP3_DMA_IBUF_FORMAT_UNDEFINED;
+	}
+	return format;
+}
+
+static int mdp3_ctrl_get_pack_pattern(struct msm_fb_data_type *mfd)
+{
+	int packPattern = MDP3_DMA_OUTPUT_PACK_PATTERN_RGB;
+	if (mfd->fb_imgType == MDP_RGBA_8888)
+		packPattern = MDP3_DMA_OUTPUT_PACK_PATTERN_BGR;
+	return packPattern;
+}
+
+static int mdp3_ctrl_intf_init(struct msm_fb_data_type *mfd,
+				struct mdp3_intf *intf)
+{
+	int rc;
+	struct mdp3_intf_cfg cfg;
+	struct mdp3_video_intf_cfg *video = &cfg.video;
+	struct mdss_panel_info *p = mfd->panel_info;
+	int h_back_porch = p->lcdc.h_back_porch;
+	int h_front_porch = p->lcdc.h_front_porch;
+	int w = p->xres;
+	int v_back_porch = p->lcdc.v_back_porch;
+	int v_front_porch = p->lcdc.v_front_porch;
+	int h = p->yres;
+	int h_sync_skew = p->lcdc.hsync_skew;
+	int h_pulse_width = p->lcdc.h_pulse_width;
+	int v_pulse_width = p->lcdc.v_pulse_width;
+	int hsync_period = h_front_porch + h_back_porch + w + h_pulse_width;
+	int vsync_period = v_front_porch + v_back_porch + h + v_pulse_width;
+	vsync_period *= hsync_period;
+
+	cfg.type = mdp3_ctrl_get_intf_type(mfd);
+	if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+		cfg.type == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		video->hsync_period = hsync_period;
+		video->hsync_pulse_width = h_pulse_width;
+		video->vsync_period = vsync_period;
+		video->vsync_pulse_width = v_pulse_width * hsync_period;
+		video->display_start_x = h_back_porch + h_pulse_width;
+		video->display_end_x = hsync_period - h_front_porch - 1;
+		video->display_start_y =
+			(v_back_porch + v_pulse_width) * hsync_period;
+		video->display_end_y =
+			vsync_period - v_front_porch * hsync_period - 1;
+		video->active_start_x = video->display_start_x;
+		video->active_end_x = video->display_end_x;
+		video->active_h_enable = true;
+		video->active_start_y = video->display_start_y;
+		video->active_end_y = video->display_end_y;
+		video->active_v_enable = true;
+		video->hsync_skew = h_sync_skew;
+		video->hsync_polarity = 1;
+		video->vsync_polarity = 1;
+		video->de_polarity = 1;
+	} else if (cfg.type == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		cfg.dsi_cmd.primary_dsi_cmd_id = 0;
+		cfg.dsi_cmd.secondary_dsi_cmd_id = 1;
+		cfg.dsi_cmd.dsi_cmd_tg_intf_sel = 0;
+	} else
+		return -EINVAL;
+	rc = mdp3_intf_init(intf, &cfg);
+	return rc;
+}
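+
+/*
+ * Timing example (illustrative): with xres = 480, h_back_porch = 32,
+ * h_front_porch = 20 and h_pulse_width = 8, hsync_period = 20 + 32 + 480
+ * + 8 = 540 pixel clocks; display_start_x = 32 + 8 = 40 and
+ * display_end_x = 540 - 20 - 1 = 519, i.e. exactly 480 active pixels.
+ * Vertical values are expressed in units of hsync_period the same way.
+ */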
+
+static int mdp3_ctrl_dma_init(struct msm_fb_data_type *mfd,
+				struct mdp3_dma *dma)
+{
+	int rc;
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+	struct fb_info *fbi = mfd->fbi;
+	struct fb_fix_screeninfo *fix;
+	struct fb_var_screeninfo *var;
+	struct mdp3_dma_output_config outputConfig;
+	struct mdp3_dma_source sourceConfig;
+
+	fix = &fbi->fix;
+	var = &fbi->var;
+
+	sourceConfig.format = mdp3_ctrl_get_source_format(mfd);
+	sourceConfig.width = panel_info->xres;
+	sourceConfig.height = panel_info->yres;
+	sourceConfig.x = 0;
+	sourceConfig.y = 0;
+	sourceConfig.stride = fix->line_length;
+	sourceConfig.buf = (void *)mfd->iova;
+
+	outputConfig.dither_en = 0;
+	outputConfig.out_sel = mdp3_ctrl_get_intf_type(mfd);
+	outputConfig.bit_mask_polarity = 0;
+	outputConfig.color_components_flip = 0;
+	outputConfig.pack_pattern = mdp3_ctrl_get_pack_pattern(mfd);
+	outputConfig.pack_align = MDP3_DMA_OUTPUT_PACK_ALIGN_LSB;
+	outputConfig.color_comp_out_bits = (MDP3_DMA_OUTPUT_COMP_BITS_8 << 4) |
+					(MDP3_DMA_OUTPUT_COMP_BITS_8 << 2)|
+					MDP3_DMA_OUTPUT_COMP_BITS_8;
+
+	rc = mdp3_dma_init(dma, &sourceConfig, &outputConfig);
+	return rc;
+}
+
+static int mdp3_ctrl_on(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session;
+	struct mdss_panel_data *panel;
+
+	pr_debug("mdp3_ctrl_on\n");
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf) {
+		pr_err("mdp3_ctrl_on no device");
+		return -ENODEV;
+	}
+	mutex_lock(&mdp3_session->lock);
+	if (mdp3_session->status) {
+		pr_info("fb%d is on already", mfd->index);
+		goto on_error;
+	}
+
+	rc = mdp3_ctrl_res_req_dma(mfd, 1);
+	if (rc) {
+		pr_err("resource request for dma on failed\n");
+		goto on_error;
+	}
+
+	rc = mdp3_ctrl_dma_init(mfd, mdp3_session->dma);
+	if (rc) {
+		pr_err("dma init failed\n");
+		goto on_error;
+	}
+
+	rc = mdp3_ctrl_intf_init(mfd, mdp3_session->intf);
+	if (rc) {
+		pr_err("display interface init failed\n");
+		goto on_error;
+	}
+
+	panel = mdp3_session->panel;
+
+	if (panel->event_handler)
+		rc = panel->event_handler(panel, MDSS_EVENT_PANEL_ON, NULL);
+
+	if (rc) {
+		pr_err("fail to turn on the panel\n");
+		goto on_error;
+	}
+
+	rc = mdp3_session->dma->start(mdp3_session->dma, mdp3_session->intf);
+	if (rc) {
+		pr_err("fail to start the MDP display interface\n");
+		goto on_error;
+	}
+
+on_error:
+	if (!rc)
+		mdp3_session->status = 1;
+
+	mutex_unlock(&mdp3_session->lock);
+	return rc;
+}
+
+static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
+{
+	int rc = 0;
+	struct mdp3_session_data *mdp3_session;
+	struct mdss_panel_data *panel;
+
+	pr_debug("mdp3_ctrl_off\n");
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
+		!mdp3_session->intf) {
+		pr_err("mdp3_ctrl_on no device");
+		return -ENODEV;
+	}
+
+	mutex_lock(&mdp3_session->lock);
+
+	if (!mdp3_session->status) {
+		pr_info("fb%d is off already", mfd->index);
+		goto off_error;
+	}
+
+	panel = mdp3_session->panel;
+	if (panel->event_handler)
+		rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
+
+	if (rc)
+		pr_err("fail to turn off the panel\n");
+
+	rc = mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
+
+	if (rc)
+		pr_err("fail to stop the MDP3 dma\n");
+
+	rc = mdp3_ctrl_res_req_dma(mfd, 0);
+	if (rc)
+		pr_err("resource release  for dma on failed\n");
+
+off_error:
+	mdp3_session->status = 0;
+
+	mutex_unlock(&mdp3_session->lock);
+	return 0;
+}
+
+static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd)
+{
+	struct fb_info *fbi;
+	struct mdp3_session_data *mdp3_session;
+	u32 offset;
+	int bpp;
+
+	pr_debug("mdp3_ctrl_pan_display\n");
+	if (!mfd || !mfd->mdp.private1)
+		return;
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session || !mdp3_session->dma)
+		return;
+
+	if (!mdp3_session->status) {
+		pr_err("mdp3_ctrl_pan_display, display off!\n");
+		return;
+	}
+
+	mutex_lock(&mdp3_session->lock);
+	fbi = mfd->fbi;
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	offset = fbi->var.xoffset * bpp +
+		 fbi->var.yoffset * fbi->fix.line_length;
+
+	if (offset > fbi->fix.smem_len) {
+		pr_err("invalid fb offset=%u total length=%u\n",
+			offset, fbi->fix.smem_len);
+		goto pan_error;
+	}
+
+	mdp3_session->dma->update(mdp3_session->dma,
+				(void *)mfd->iova + offset);
+pan_error:
+	mutex_unlock(&mdp3_session->lock);
+}
+
+static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
+					u32 cmd, void __user *argp)
+{
+	int rc = -EINVAL;
+	struct mdp3_session_data *mdp3_session;
+	int val;
+
+	pr_debug("mdp3_ctrl_ioctl_handler\n");
+
+	mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+	if (!mdp3_session)
+		return -ENODEV;
+
+	if (!mdp3_session->status) {
+		pr_err("mdp3_ctrl_ioctl_handler, display off!\n");
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case MSMFB_VSYNC_CTRL:
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+		if (!copy_from_user(&val, argp, sizeof(val))) {
+			rc = mdp3_ctrl_vsync_enable(mfd, val);
+			if (!val)
+				init_completion(&mdp3_session->vsync_comp);
+		} else {
+			pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed\n");
+			rc = -EFAULT;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
+{
+	struct device *dev = mfd->fbi->dev;
+	struct msm_mdp_interface *mdp3_interface = &mfd->mdp;
+	struct mdp3_session_data *mdp3_session = NULL;
+	u32 intf_type = MDP3_DMA_OUTPUT_SEL_DSI_VIDEO;
+	int rc;
+
+	pr_debug("mdp3_ctrl_init\n");
+	mdp3_interface->on_fnc = mdp3_ctrl_on;
+	mdp3_interface->off_fnc = mdp3_ctrl_off;
+	mdp3_interface->do_histogram = NULL;
+	mdp3_interface->cursor_update = NULL;
+	mdp3_interface->dma_fnc = mdp3_ctrl_pan_display;
+	mdp3_interface->ioctl_handler = mdp3_ctrl_ioctl_handler;
+	mdp3_interface->kickoff_fnc = NULL;
+
+	mdp3_session = kzalloc(sizeof(struct mdp3_session_data), GFP_KERNEL);
+	if (!mdp3_session) {
+		pr_err("fail to allocate mdp3 private data structure\n");
+		return -ENOMEM;
+	}
+	mutex_init(&mdp3_session->lock);
+	init_completion(&mdp3_session->vsync_comp);
+	mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL);
+	if (!mdp3_session->dma) {
+		rc = -ENODEV;
+		goto init_done;
+	}
+
+	intf_type = mdp3_ctrl_get_intf_type(mfd);
+	mdp3_session->intf = mdp3_get_display_intf(intf_type);
+	if (!mdp3_session->intf) {
+		rc = -ENODEV;
+		goto init_done;
+	}
+
+	mdp3_session->panel = dev_get_platdata(&mfd->pdev->dev);
+	mdp3_session->status = 0;
+
+	mfd->mdp.private1 = mdp3_session;
+
+	rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
+	if (rc) {
+		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
+		goto init_done;
+	}
+
+	kobject_uevent(&dev->kobj, KOBJ_ADD);
+	pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
+
+init_done:
+	if (IS_ERR_VALUE(rc))
+		kfree(mdp3_session);
+
+	return rc;
+}
diff --git a/drivers/video/msm/mdss/mdp3_ctrl.h b/drivers/video/msm/mdss/mdp3_ctrl.h
new file mode 100644
index 0000000..d42ece7
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_ctrl.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_CTRL_H
+#define MDP3_CTRL_H
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+
+#include "mdp3_dma.h"
+#include "mdss_fb.h"
+#include "mdss_panel.h"
+
+struct mdp3_session_data {
+	struct mutex lock;
+	int status;
+	struct mdp3_dma *dma;
+	struct mdss_panel_data *panel;
+	struct mdp3_intf *intf;
+	struct msm_fb_data_type *mfd;
+	struct completion vsync_comp;
+};
+
+int mdp3_ctrl_init(struct msm_fb_data_type *mfd);
+
+#endif /* MDP3_CTRL_H */
diff --git a/drivers/video/msm/mdss/mdp3_dma.c b/drivers/video/msm/mdss/mdp3_dma.c
new file mode 100644
index 0000000..69e3d7e
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_dma.c
@@ -0,0 +1,914 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+
+#include "mdp3.h"
+#include "mdp3_dma.h"
+#include "mdp3_hwio.h"
+
+#define DMA_STOP_POLL_SLEEP_US 1000
+#define DMA_STOP_POLL_TIMEOUT_US 16000
+
+static ktime_t mdp3_get_vsync_time(struct mdp3_dma *dma)
+{
+	unsigned long flag;
+	ktime_t time;
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	time = dma->vsync_time;
+	spin_unlock_irqrestore(&dma->dma_lock, flag);
+	return time;
+}
+
+static void mdp3_vsync_intr_handler(int type, void *arg)
+{
+	struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+	struct mdp3_vsync_notification vsync_client;
+
+	pr_debug("mdp3_vsync_intr_handler\n");
+	spin_lock(&dma->dma_lock);
+	vsync_client = dma->vsync_client;
+	if (!vsync_client.handler)
+		dma->cb_type &= ~MDP3_DMA_CALLBACK_TYPE_VSYNC;
+	dma->vsync_time = ktime_get();
+	complete(&dma->vsync_comp);
+	if (vsync_client.handler)
+		vsync_client.handler(vsync_client.arg);
+	spin_unlock(&dma->dma_lock);
+
+	if (!vsync_client.handler)
+		mdp3_irq_disable_nosync(type);
+}
+
+static void mdp3_dma_done_intr_handler(int type, void *arg)
+{
+	struct mdp3_dma *dma = (struct mdp3_dma *)arg;
+
+	pr_debug("mdp3_dma_done_intr_handler\n");
+	spin_lock(&dma->dma_lock);
+	dma->busy = false;
+	dma->cb_type &= ~MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+	spin_unlock(&dma->dma_lock);
+	complete(&dma->dma_comp);
+	mdp3_irq_disable_nosync(type);
+}
+
+void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type)
+{
+	int irq_bit;
+	unsigned long flag;
+
+	pr_debug("mdp3_dma_callback_enable type=%d\n", type);
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	if (dma->cb_type & type) {
+		spin_unlock_irqrestore(&dma->dma_lock, flag);
+		return;
+	} else {
+		dma->cb_type |= type;
+		spin_unlock_irqrestore(&dma->dma_lock, flag);
+	}
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
+			mdp3_irq_enable(MDP3_INTR_LCDC_START_OF_FRAME);
+	} else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) {
+			irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+			irq_bit += dma->dma_sel;
+			mdp3_irq_enable(irq_bit);
+		}
+
+		if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) {
+			irq_bit = MDP3_INTR_DMA_P_DONE;
+			if (dma->dma_sel == MDP3_DMA_S)
+				irq_bit = MDP3_INTR_DMA_S_DONE;
+			mdp3_irq_enable(irq_bit);
+		}
+	} else {
+		pr_err("mdp3_dma_callback_enable not supported interface\n");
+	}
+}
+
+void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type)
+{
+	int irq_bit;
+	unsigned long flag;
+
+	pr_debug("mdp3_dma_callback_disable type=%d\n", type);
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	if ((dma->cb_type & type) == 0) {
+		spin_unlock_irqrestore(&dma->dma_lock, flag);
+		return;
+	} else {
+		dma->cb_type &= ~type;
+		spin_unlock_irqrestore(&dma->dma_lock, flag);
+	}
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC) {
+		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC)
+			mdp3_irq_disable(MDP3_INTR_LCDC_START_OF_FRAME);
+	} else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		if (type & MDP3_DMA_CALLBACK_TYPE_VSYNC) {
+			irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+			irq_bit += dma->dma_sel;
+			mdp3_irq_disable(irq_bit);
+		}
+
+		if (type & MDP3_DMA_CALLBACK_TYPE_DMA_DONE) {
+			irq_bit = MDP3_INTR_DMA_P_DONE;
+			if (dma->dma_sel == MDP3_DMA_S)
+				irq_bit = MDP3_INTR_DMA_S_DONE;
+			mdp3_irq_disable(irq_bit);
+		}
+	}
+}
+
+static int mdp3_dma_callback_setup(struct mdp3_dma *dma)
+{
+	int rc;
+	struct mdp3_intr_cb vsync_cb = {
+		.cb = mdp3_vsync_intr_handler,
+		.data = dma,
+	};
+
+	struct mdp3_intr_cb dma_cb = {
+		.cb = mdp3_dma_done_intr_handler,
+		.data = dma,
+	};
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO ||
+		dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_LCDC)
+		rc = mdp3_set_intr_callback(MDP3_INTR_LCDC_START_OF_FRAME,
+					&vsync_cb);
+	else if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		int irq_bit = MDP3_INTR_SYNC_PRIMARY_LINE;
+		irq_bit += dma->dma_sel;
+		rc = mdp3_set_intr_callback(irq_bit, &vsync_cb);
+		irq_bit = MDP3_INTR_DMA_P_DONE;
+		if (dma->dma_sel == MDP3_DMA_S)
+			irq_bit = MDP3_INTR_DMA_S_DONE;
+		rc |= mdp3_set_intr_callback(irq_bit, &dma_cb);
+	} else {
+		pr_err("mdp3_dma_callback_setup not suppported interface\n");
+		rc = -ENODEV;
+	}
+	return rc;
+}
+
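+/*
+ * Register (or, with a NULL argument, unregister) a vsync notification
+ * client. The IRQ-level callback is only kept enabled while a handler
+ * is installed, so idle displays do not take vsync interrupts.
+ */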
+static void mdp3_dma_vsync_enable(struct mdp3_dma *dma,
+				struct mdp3_vsync_notification *vsync_client)
+{
+	unsigned long flag;
+	int updated = 0;
+	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+	pr_debug("mdp3_dma_vsync_enable\n");
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	if (vsync_client) {
+		if (dma->vsync_client.handler != vsync_client->handler) {
+			dma->vsync_client = *vsync_client;
+			updated = 1;
+		}
+	} else {
+		if (dma->vsync_client.handler) {
+			dma->vsync_client.handler = NULL;
+			dma->vsync_client.arg = NULL;
+			updated = 1;
+		}
+	}
+	spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+	if (updated) {
+		if (vsync_client && vsync_client->handler)
+			mdp3_dma_callback_enable(dma, cb_type);
+		else
+			mdp3_dma_callback_disable(dma, cb_type);
+	}
+}
+
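+/*
+ * Program the DMA_P pipe: pack the source format and output options into
+ * DMA_P_CONFIG, set the buffer geometry registers, and keep local copies
+ * of both configs so later updates can reference them.
+ */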
+static int mdp3_dmap_config(struct mdp3_dma *dma,
+			struct mdp3_dma_source *source_config,
+			struct mdp3_dma_output_config *output_config)
+{
+	u32 dma_p_cfg_reg, dma_p_size, dma_p_out_xy;
+
+	dma_p_cfg_reg = source_config->format << 25;
+	if (output_config->dither_en)
+		dma_p_cfg_reg |= BIT(24);
+	dma_p_cfg_reg |= output_config->out_sel << 19;
+	dma_p_cfg_reg |= output_config->bit_mask_polarity << 18;
+	dma_p_cfg_reg |= output_config->color_components_flip << 14;
+	dma_p_cfg_reg |= output_config->pack_pattern << 8;
+	dma_p_cfg_reg |= output_config->pack_align << 7;
+	dma_p_cfg_reg |= output_config->color_comp_out_bits;
+
+	dma_p_size = source_config->width | (source_config->height << 16);
+	dma_p_out_xy = source_config->x | (source_config->y << 16);
+
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)source_config->buf);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE, source_config->stride);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_OUT_XY, dma_p_out_xy);
+
+	/*
+	 * NOTE: MDP_DMA_P_FETCH_CFG: max_burst_size needs to be set to 4
+	 * (not the default 16) as a workaround for an MDP hang issue.
+	 */
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_FETCH_CFG, 0x10);
+	MDP3_REG_WRITE(MDP3_REG_PRIMARY_RD_PTR_IRQ, 0x10);
+
+	dma->source_config = *source_config;
+	dma->output_config = *output_config;
+
+	mdp3_dma_callback_setup(dma);
+	return 0;
+}
+
+static int mdp3_dmas_config(struct mdp3_dma *dma,
+			struct mdp3_dma_source *source_config,
+			struct mdp3_dma_output_config *output_config)
+{
+	u32 dma_s_cfg_reg, dma_s_size, dma_s_out_xy;
+
+	dma_s_cfg_reg = source_config->format << 25;
+	if (output_config->dither_en)
+		dma_s_cfg_reg |= BIT(24);
+	dma_s_cfg_reg |= output_config->out_sel << 19;
+	dma_s_cfg_reg |= output_config->bit_mask_polarity << 18;
+	dma_s_cfg_reg |= output_config->color_components_flip << 14;
+	dma_s_cfg_reg |= output_config->pack_pattern << 8;
+	dma_s_cfg_reg |= output_config->pack_align << 7;
+	dma_s_cfg_reg |= output_config->color_comp_out_bits;
+
+	dma_s_size = source_config->width | (source_config->height << 16);
+	dma_s_out_xy = source_config->x | (source_config->y << 16);
+
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg);
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size);
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR, (u32)source_config->buf);
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE, source_config->stride);
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_OUT_XY, dma_s_out_xy);
+
+	MDP3_REG_WRITE(MDP3_REG_SECONDARY_RD_PTR_IRQ, 0x10);
+
+	dma->source_config = *source_config;
+	dma->output_config = *output_config;
+
+	mdp3_dma_callback_setup(dma);
+	return 0;
+}
+
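+/*
+ * Cursor configuration: constant-alpha blending carries the alpha value
+ * in the top byte of the blend parameter register, while color keying
+ * reuses the same register for the transparent color plus a separate
+ * transparency mask.
+ */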
+static int mdp3_dmap_cursor_config(struct mdp3_dma *dma,
+				struct mdp3_dma_cursor *cursor)
+{
+	u32 cursor_size, cursor_pos, blend_param, trans_mask;
+
+	cursor_size = cursor->width | (cursor->height << 16);
+	cursor_pos = cursor->x | (cursor->y << 16);
+	trans_mask = 0;
+	if (cursor->blend_config.mode == MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA) {
+		blend_param = cursor->blend_config.constant_alpha << 24;
+	} else if (cursor->blend_config.mode ==
+			MDP3_DMA_CURSOR_BLEND_COLOR_KEYING) {
+		blend_param = cursor->blend_config.transparent_color;
+		trans_mask = cursor->blend_config.transparency_mask;
+	} else {
+		blend_param = 0;
+	}
+
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_FORMAT, cursor->format);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_SIZE, cursor_size);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BUF_ADDR, (u32)cursor->buf);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, cursor_pos);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG,
+			cursor->blend_config.mode);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_PARAM, blend_param);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK, trans_mask);
+	dma->cursor = *cursor;
+	return 0;
+}
+
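+/*
+ * CSC programming: each non-NULL vector is written to a block of
+ * consecutive 32-bit registers (9 writes for a 3x3 matrix, 3 for a bias
+ * vector, 6 for a limit vector), and each LUT entry packs the three
+ * color components into a single word.
+ */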
+static int mdp3_dmap_ccs_config(struct mdp3_dma *dma,
+			struct mdp3_dma_color_correct_config *config,
+			struct mdp3_dma_ccs *ccs,
+			struct mdp3_dma_lut *lut)
+{
+	int i;
+	u32 addr, cc_config, color;
+
+	cc_config = config->lut_enable;
+	if (config->ccs_enable)
+		cc_config |= BIT(3);
+	cc_config |= config->lut_position << 4;
+	cc_config |= config->ccs_sel << 5;
+	cc_config |= config->pre_bias_sel << 6;
+	cc_config |= config->post_bias_sel << 7;
+	cc_config |= config->pre_limit_sel << 8;
+	cc_config |= config->post_limit_sel << 9;
+	cc_config |= config->lut_sel << 10;
+
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG, cc_config);
+
+	if (config->ccs_enable && ccs) {
+		if (ccs->mv1) {
+			addr = MDP3_REG_DMA_P_CSC_MV1;
+			for (i = 0; i < 9; i++) {
+				MDP3_REG_WRITE(addr, ccs->mv1[i]);
+				addr += 4;
+			}
+		}
+
+		if (ccs->mv2) {
+			addr = MDP3_REG_DMA_P_CSC_MV2;
+			for (i = 0; i < 9; i++) {
+				MDP3_REG_WRITE(addr, ccs->mv2[i]);
+				addr += 4;
+			}
+		}
+
+		if (ccs->pre_bv1) {
+			addr = MDP3_REG_DMA_P_CSC_PRE_BV1;
+			for (i = 0; i < 3; i++) {
+				MDP3_REG_WRITE(addr, ccs->pre_bv1[i]);
+				addr += 4;
+			}
+		}
+
+		if (ccs->pre_bv2) {
+			addr = MDP3_REG_DMA_P_CSC_PRE_BV2;
+			for (i = 0; i < 3; i++) {
+				MDP3_REG_WRITE(addr, ccs->pre_bv2[i]);
+				addr += 4;
+			}
+		}
+
+		if (ccs->post_bv1) {
+			addr = MDP3_REG_DMA_P_CSC_POST_BV1;
+			for (i = 0; i < 3; i++) {
+				MDP3_REG_WRITE(addr, ccs->post_bv1[i]);
+				addr += 4;
+			}
+		}
+
+		if (ccs->post_bv2) {
+			addr = MDP3_REG_DMA_P_CSC_POST_BV2;
+			for (i = 0; i < 3; i++) {
+				MDP3_REG_WRITE(addr, ccs->post_bv2[i]);
+				addr += 4;
+			}
+		}
+
+		if (ccs->pre_lv1) {
+			addr = MDP3_REG_DMA_P_CSC_PRE_LV1;
+			for (i = 0; i < 6; i++) {
+				MDP3_REG_WRITE(addr, ccs->pre_lv1[i]);
+				addr += 4;
+			}
+		}
+
+		if (ccs->pre_lv2) {
+			addr = MDP3_REG_DMA_P_CSC_PRE_LV2;
+			for (i = 0; i < 6; i++) {
+				MDP3_REG_WRITE(addr, ccs->pre_lv2[i]);
+				addr += 4;
+			}
+		}
+
+		if (ccs->post_lv1) {
+			addr = MDP3_REG_DMA_P_CSC_POST_LV1;
+			for (i = 0; i < 6; i++) {
+				MDP3_REG_WRITE(addr, ccs->post_lv1[i]);
+				addr += 4;
+			}
+		}
+
+		if (ccs->post_lv2) {
+			addr = MDP3_REG_DMA_P_CSC_POST_LV2;
+			for (i = 0; i < 6; i++) {
+				MDP3_REG_WRITE(addr, ccs->post_lv2[i]);
+				addr += 4;
+			}
+		}
+	}
+
+	if (config->lut_enable && lut) {
+		if (lut->color0_lut1 && lut->color1_lut1 && lut->color2_lut1) {
+			addr = MDP3_REG_DMA_P_CSC_LUT1;
+			for (i = 0; i < 256; i++) {
+				color = lut->color0_lut1[i];
+				color |= lut->color1_lut1[i] << 8;
+				color |= lut->color2_lut1[i] << 16;
+				MDP3_REG_WRITE(addr, color);
+				addr += 4;
+			}
+		}
+
+		if (lut->color0_lut2 && lut->color1_lut2 && lut->color2_lut2) {
+			addr = MDP3_REG_DMA_P_CSC_LUT2;
+			for (i = 0; i < 256; i++) {
+				color = lut->color0_lut2[i];
+				color |= lut->color1_lut2[i] << 8;
+				color |= lut->color2_lut2[i] << 16;
+				MDP3_REG_WRITE(addr, color);
+				addr += 4;
+			}
+		}
+	}
+
+	dma->ccs_config = *config;
+	return 0;
+}
+
+static int mdp3_dmap_histo_config(struct mdp3_dma *dma,
+			struct mdp3_dma_histogram_config *histo_config)
+{
+	u32 hist_bit_mask = 0;
+	u32 hist_control = 0;
+
+	if (histo_config->bit_mask_polarity)
+		hist_bit_mask = BIT(31);
+	hist_bit_mask |= histo_config->bit_mask;
+
+	if (histo_config->auto_clear_en)
+		hist_control = BIT(0);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_FRAME_CNT,
+			histo_config->frame_count);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_BIT_MASK, hist_bit_mask);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CONTROL, hist_control);
+	return 0;
+}
+
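+/*
+ * Push a new frame buffer. In DSI command mode any in-flight transfer is
+ * allowed to finish first, then the new address is latched and the DMA
+ * is kicked explicitly; in video/LCDC mode the new address simply takes
+ * effect on the next refresh. Either way the call blocks until the next
+ * vsync so the caller knows the buffer has been picked up.
+ */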
+static int mdp3_dmap_update(struct mdp3_dma *dma, void *buf)
+{
+	int wait_for_dma_done = 0;
+	unsigned long flag;
+	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+	pr_debug("mdp3_dmap_update\n");
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+		spin_lock_irqsave(&dma->dma_lock, flag);
+		if (dma->busy)
+			wait_for_dma_done = 1;
+		spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+		if (wait_for_dma_done)
+			wait_for_completion_killable(&dma->dma_comp);
+	}
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)buf);
+	dma->source_config.buf = buf;
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 1);
+		dma->busy = true;
+	}
+	wmb();
+	init_completion(&dma->vsync_comp);
+	spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+	mdp3_dma_callback_enable(dma, cb_type);
+	pr_debug("mdp3_dmap_update wait for vsync_comp in\n");
+	wait_for_completion_killable(&dma->vsync_comp);
+	pr_debug("mdp3_dmap_update wait for vsync_comp out\n");
+	return 0;
+}
+
+static int mdp3_dmas_update(struct mdp3_dma *dma, void *buf)
+{
+	int wait_for_dma_done = 0;
+	unsigned long flag;
+	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+		spin_lock_irqsave(&dma->dma_lock, flag);
+		if (dma->busy)
+			wait_for_dma_done = 1;
+		spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+		if (wait_for_dma_done)
+			wait_for_completion_killable(&dma->dma_comp);
+	}
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR, (u32)buf);
+	dma->source_config.buf = buf;
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		MDP3_REG_WRITE(MDP3_REG_DMA_S_START, 1);
+		dma->busy = true;
+	}
+	wmb();
+	init_completion(&dma->vsync_comp);
+	spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+	mdp3_dma_callback_enable(dma, cb_type);
+	wait_for_completion_killable(&dma->vsync_comp);
+	return 0;
+}
+
+static int mdp3_dmap_cursor_update(struct mdp3_dma *dma, int x, int y)
+{
+	u32 cursor_pos;
+
+	cursor_pos = x | (y << 16);
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_CURSOR_POS, cursor_pos);
+	dma->cursor.x = x;
+	dma->cursor.y = y;
+	return 0;
+}
+
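+/*
+ * Read back the histogram: 32 bins per color component, with the
+ * per-component min/max values packed into the two EXTRA_INFO registers.
+ */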
+static int mdp3_dmap_histo_get(struct mdp3_dma *dma,
+			struct mdp3_dma_histogram_data *data)
+{
+	int i;
+	u32 addr, extra;
+
+	addr = MDP3_REG_DMA_P_HIST_R_DATA;
+	for (i = 0; i < 32; i++) {
+		data->r_data[i] = MDP3_REG_READ(addr);
+		addr += 4;
+	}
+
+	addr = MDP3_REG_DMA_P_HIST_G_DATA;
+	for (i = 0; i < 32; i++) {
+		data->g_data[i] = MDP3_REG_READ(addr);
+		addr += 4;
+	}
+
+	addr = MDP3_REG_DMA_P_HIST_B_DATA;
+	for (i = 0; i < 32; i++) {
+		data->b_data[i] = MDP3_REG_READ(addr);
+		addr += 4;
+	}
+
+	extra = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_0);
+	data->r_min_value = (extra & 0x1F0000) >> 16;
+	data->r_max_value = (extra & 0x1F000000) >> 24;
+	extra = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_EXTRA_INFO_1);
+	data->g_min_value = extra & 0x1F;
+	data->g_max_value = (extra & 0x1F00) >> 8;
+	data->b_min_value = (extra & 0x1F0000) >> 16;
+	data->b_max_value = (extra & 0x1F000000) >> 24;
+	return 0;
+}
+
+static int mdp3_dmap_histo_op(struct mdp3_dma *dma, u32 op)
+{
+	switch (op) {
+	case MDP3_DMA_HISTO_OP_START:
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_START, 1);
+		break;
+	case MDP3_DMA_HISTO_OP_STOP:
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_STOP_REQ, 1);
+		break;
+	case MDP3_DMA_HISTO_OP_CANCEL:
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_CANCEL_REQ, 1);
+		break;
+	case MDP3_DMA_HISTO_OP_RESET:
+		MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_RESET_SEQ_START, 1);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mdp3_dmap_histo_intr_status(struct mdp3_dma *dma, int *status)
+{
+	*status = MDP3_REG_READ(MDP3_REG_DMA_P_HIST_INTR_STATUS);
+	return 0;
+}
+
+static int mdp3_dmap_histo_intr_enable(struct mdp3_dma *dma, u32 mask)
+{
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, mask);
+	return 0;
+}
+
+static int mdp3_dmap_histo_intr_clear(struct mdp3_dma *dma, u32 mask)
+{
+	MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_CLEAR, mask);
+	return 0;
+}
+
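+/*
+ * Start the display pipeline: in DSI command mode the first frame has to
+ * be kicked by software, while video interfaces start streaming as soon
+ * as the timing generator is enabled; wait for one vsync to confirm the
+ * pipe is running.
+ */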
+static int mdp3_dma_start(struct mdp3_dma *dma, struct mdp3_intf *intf)
+{
+	unsigned long flag;
+	int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
+	u32 dma_start_offset = MDP3_REG_DMA_P_START;
+
+	if (dma->dma_sel == MDP3_DMA_P)
+		dma_start_offset = MDP3_REG_DMA_P_START;
+	else if (dma->dma_sel == MDP3_DMA_S)
+		dma_start_offset = MDP3_REG_DMA_S_START;
+	else
+		return -EINVAL;
+
+	spin_lock_irqsave(&dma->dma_lock, flag);
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
+		cb_type |= MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
+		MDP3_REG_WRITE(dma_start_offset, 1);
+		dma->busy = true;
+	}
+
+	intf->start(intf);
+	wmb();
+	init_completion(&dma->vsync_comp);
+	spin_unlock_irqrestore(&dma->dma_lock, flag);
+
+	mdp3_dma_callback_enable(dma, cb_type);
+	pr_debug("mdp3_dma_start wait for vsync_comp in\n");
+	wait_for_completion_killable(&dma->vsync_comp);
+	pr_debug("mdp3_dma_start wait for vsync_comp out\n");
+	return 0;
+}
+
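+/*
+ * Stop the interface timing generator first, then poll DISPLAY_STATUS
+ * until the busy bit(s) for this pipe clear, so the DMA engine is known
+ * to be idle before the callbacks are torn down.
+ */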
+static int mdp3_dma_stop(struct mdp3_dma *dma, struct mdp3_intf *intf)
+{
+	int ret = 0;
+	u32 status, display_status_bit;
+
+	if (dma->dma_sel == MDP3_DMA_P)
+		display_status_bit = BIT(6);
+	else if (dma->dma_sel == MDP3_DMA_S)
+		display_status_bit = BIT(7);
+	else
+		return -EINVAL;
+
+	if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO)
+		display_status_bit |= BIT(11);
+
+	intf->stop(intf);
+	ret = readl_poll_timeout((mdp3_res->mdp_base + MDP3_REG_DISPLAY_STATUS),
+				status,
+				((status & display_status_bit) == 0),
+				DMA_STOP_POLL_SLEEP_US,
+				DMA_STOP_POLL_TIMEOUT_US);
+
+	mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_VSYNC |
+					MDP3_DMA_CALLBACK_TYPE_DMA_DONE);
+
+	dma->busy = false;
+	return ret;
+}
+
+int mdp3_dma_init(struct mdp3_dma *dma,
+		struct mdp3_dma_source *source_config,
+		struct mdp3_dma_output_config *output_config)
+{
+	int ret = 0;
+
+	pr_debug("mdp3_dma_init\n");
+	switch (dma->dma_sel) {
+	case MDP3_DMA_P:
+		dma->busy = 0;
+
+		ret = mdp3_dmap_config(dma, source_config, output_config);
+		if (ret < 0)
+			return ret;
+
+		dma->config_cursor = mdp3_dmap_cursor_config;
+		dma->config_ccs = mdp3_dmap_ccs_config;
+		dma->config_histo = mdp3_dmap_histo_config;
+		dma->update = mdp3_dmap_update;
+		dma->update_cursor = mdp3_dmap_cursor_update;
+		dma->get_histo = mdp3_dmap_histo_get;
+		dma->histo_op = mdp3_dmap_histo_op;
+		dma->histo_intr_status = mdp3_dmap_histo_intr_status;
+		dma->histo_intr_enable = mdp3_dmap_histo_intr_enable;
+		dma->histo_intr_clear = mdp3_dmap_histo_intr_clear;
+		dma->vsync_enable = mdp3_dma_vsync_enable;
+		dma->get_vsync_time = mdp3_get_vsync_time;
+		dma->start = mdp3_dma_start;
+		dma->stop = mdp3_dma_stop;
+		break;
+	case MDP3_DMA_S:
+		dma->busy = 0;
+		ret = mdp3_dmas_config(dma, source_config, output_config);
+		if (ret < 0)
+			return ret;
+
+		dma->config_cursor = NULL;
+		dma->config_ccs = NULL;
+		dma->config_histo = NULL;
+		dma->update = mdp3_dmas_update;
+		dma->update_cursor = NULL;
+		dma->get_histo = NULL;
+		dma->histo_op = NULL;
+		dma->histo_intr_status = NULL;
+		dma->histo_intr_enable = NULL;
+		dma->histo_intr_clear = NULL;
+		dma->vsync_enable = mdp3_dma_vsync_enable;
+		dma->get_vsync_time = mdp3_get_vsync_time;
+		dma->start = mdp3_dma_start;
+		dma->stop = mdp3_dma_stop;
+		break;
+	case MDP3_DMA_E:
+	default:
+		ret = -ENODEV;
+		break;
+	}
+
+	spin_lock_init(&dma->dma_lock);
+	init_completion(&dma->vsync_comp);
+	init_completion(&dma->dma_comp);
+	dma->cb_type = 0;
+	dma->vsync_client.handler = NULL;
+	dma->vsync_client.arg = NULL;
+
+	memset(&dma->cursor, 0, sizeof(dma->cursor));
+	memset(&dma->ccs_config, 0, sizeof(dma->ccs_config));
+	memset(&dma->histogram_config, 0, sizeof(dma->histogram_config));
+
+	return ret;
+}
+
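+/*
+ * Program the LCDC timing generator from the video interface config.
+ * Note the CTL_POLARITY bits are inversion enables: a bit is set when
+ * the corresponding signal is configured active-low (polarity == 0).
+ */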
+int lcdc_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+	u32 temp;
+	struct mdp3_video_intf_cfg *v = &cfg->video;
+
+	temp = v->hsync_pulse_width | (v->hsync_period << 16);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_CTL, temp);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PERIOD, v->vsync_period);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_VSYNC_PULSE_WIDTH, v->vsync_pulse_width);
+	temp = v->display_start_x | (v->display_end_x << 16);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_HCTL, temp);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_START, v->display_start_y);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_DISPLAY_V_END, v->display_end_y);
+	temp = v->active_start_x | (v->active_end_x << 16);
+	if (v->active_h_enable)
+		temp |= BIT(31);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_HCTL, temp);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_START, v->active_start_y);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_ACTIVE_V_END, v->active_end_y);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_HSYNC_SKEW, v->hsync_skew);
+	temp = 0;
+	if (!v->hsync_polarity)
+		temp |= BIT(0);
+	if (!v->vsync_polarity)
+		temp |= BIT(1);
+	if (!v->de_polarity)
+		temp |= BIT(2);
+	MDP3_REG_WRITE(MDP3_REG_LCDC_CTL_POLARITY, temp);
+
+	return 0;
+}
+
+int lcdc_start(struct mdp3_intf *intf)
+{
+	MDP3_REG_WRITE(MDP3_REG_LCDC_EN, BIT(0));
+	wmb();
+	intf->active = true;
+	return 0;
+}
+
+int lcdc_stop(struct mdp3_intf *intf)
+{
+	MDP3_REG_WRITE(MDP3_REG_LCDC_EN, 0);
+	wmb();
+	intf->active = false;
+	return 0;
+}
+
+int dsi_video_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+	u32 temp;
+	struct mdp3_video_intf_cfg *v = &cfg->video;
+
+	pr_debug("dsi_video_config\n");
+
+	temp = v->hsync_pulse_width | (v->hsync_period << 16);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_CTL, temp);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PERIOD, v->vsync_period);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH,
+			v->vsync_pulse_width);
+	temp = v->display_start_x | (v->display_end_x << 16);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_HCTL, temp);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_START, v->display_start_y);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_DISPLAY_V_END, v->display_end_y);
+	temp = v->active_start_x | (v->active_end_x << 16);
+	if (v->active_h_enable)
+		temp |= BIT(31);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_HCTL, temp);
+
+	temp = v->active_start_y;
+	if (v->active_v_enable)
+		temp |= BIT(31);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_START, temp);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_ACTIVE_V_END, v->active_end_y);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_HSYNC_SKEW, v->hsync_skew);
+	temp = 0;
+	if (!v->hsync_polarity)
+		temp |= BIT(0);
+	if (!v->vsync_polarity)
+		temp |= BIT(1);
+	if (!v->de_polarity)
+		temp |= BIT(2);
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_CTL_POLARITY, temp);
+
+	return 0;
+}
+
+int dsi_video_start(struct mdp3_intf *intf)
+{
+	pr_debug("dsi_video_start\n");
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, BIT(0));
+	wmb();
+	intf->active = true;
+	return 0;
+}
+
+int dsi_video_stop(struct mdp3_intf *intf)
+{
+	pr_debug("dsi_video_stop\n");
+	MDP3_REG_WRITE(MDP3_REG_DSI_VIDEO_EN, 0);
+	wmb();
+	intf->active = false;
+	return 0;
+}
+
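+/*
+ * DSI command mode setup: the primary and secondary command stream IDs
+ * occupy separate fields of the ID map register (bit 0 and bit 4), and
+ * the trigger-enable bit selects the timing generator interface.
+ */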
+int dsi_cmd_config(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+	u32 id_map = 0;
+	u32 trigger_en = 0;
+
+	if (cfg->dsi_cmd.primary_dsi_cmd_id)
+		id_map |= BIT(0);
+	if (cfg->dsi_cmd.secondary_dsi_cmd_id)
+		id_map |= BIT(4);
+
+	if (cfg->dsi_cmd.dsi_cmd_tg_intf_sel)
+		trigger_en = BIT(4);
+
+	MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_ID_MAP, id_map);
+	MDP3_REG_WRITE(MDP3_REG_DSI_CMD_MODE_TRIGGER_EN, trigger_en);
+
+	return 0;
+}
+
+int dsi_cmd_start(struct mdp3_intf *intf)
+{
+	intf->active = true;
+	return 0;
+}
+
+int dsi_cmd_stop(struct mdp3_intf *intf)
+{
+	intf->active = false;
+	return 0;
+}
+
+int mdp3_intf_init(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg)
+{
+	int ret = 0;
+	switch (cfg->type) {
+	case MDP3_DMA_OUTPUT_SEL_LCDC:
+		intf->config = lcdc_config;
+		intf->start = lcdc_start;
+		intf->stop = lcdc_stop;
+		break;
+	case MDP3_DMA_OUTPUT_SEL_DSI_VIDEO:
+		intf->config = dsi_video_config;
+		intf->start = dsi_video_start;
+		intf->stop = dsi_video_stop;
+		break;
+	case MDP3_DMA_OUTPUT_SEL_DSI_CMD:
+		intf->config = dsi_cmd_config;
+		intf->start = dsi_cmd_start;
+		intf->stop = dsi_cmd_stop;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	intf->active = false;
+	if (intf->config)
+		ret = intf->config(intf, cfg);
+
+	if (ret) {
+		pr_err("MDP interface initialization failed\n");
+		return ret;
+	}
+
+	intf->cfg = *cfg;
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mdp3_dma.h b/drivers/video/msm/mdss/mdp3_dma.h
new file mode 100644
index 0000000..2fb8427
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_dma.h
@@ -0,0 +1,336 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_DMA_H
+#define MDP3_DMA_H
+
+#include <linux/sched.h>
+
+enum {
+	MDP3_DMA_P,
+	MDP3_DMA_S,
+	MDP3_DMA_E,
+	MDP3_DMA_MAX
+};
+
+enum {
+	MDP3_DMA_CAP_CURSOR = 0x1,
+	MDP3_DMA_CAP_COLOR_CORRECTION = 0x2,
+	MDP3_DMA_CAP_HISTOGRAM = 0x4,
+	MDP3_DMA_CAP_GAMMA_CORRECTION = 0x8,
+	MDP3_DMA_CAP_DITHER = 0x10,
+	MDP3_DMA_CAP_ALL = 0x1F
+};
+
+enum {
+	MDP3_DMA_OUTPUT_SEL_AHB,
+	MDP3_DMA_OUTPUT_SEL_DSI_CMD,
+	MDP3_DMA_OUTPUT_SEL_LCDC,
+	MDP3_DMA_OUTPUT_SEL_DSI_VIDEO,
+	MDP3_DMA_OUTPUT_SEL_MAX
+};
+
+enum {
+	MDP3_DMA_IBUF_FORMAT_RGB888,
+	MDP3_DMA_IBUF_FORMAT_RGB565,
+	MDP3_DMA_IBUF_FORMAT_XRGB8888,
+	MDP3_DMA_IBUF_FORMAT_UNDEFINED
+};
+
+enum {
+	MDP3_DMA_OUTPUT_PACK_PATTERN_RGB = 0x21,
+	MDP3_DMA_OUTPUT_PACK_PATTERN_RBG = 0x24,
+	MDP3_DMA_OUTPUT_PACK_PATTERN_BGR = 0x12,
+	MDP3_DMA_OUTPUT_PACK_PATTERN_BRG = 0x18,
+	MDP3_DMA_OUTPUT_PACK_PATTERN_GBR = 0x06,
+	MDP3_DMA_OUTPUT_PACK_PATTERN_GRB = 0x09,
+};
+
+enum {
+	MDP3_DMA_OUTPUT_PACK_ALIGN_LSB,
+	MDP3_DMA_OUTPUT_PACK_ALIGN_MSB
+};
+
+enum {
+	MDP3_DMA_OUTPUT_COMP_BITS_4, /*4 bits per color component*/
+	MDP3_DMA_OUTPUT_COMP_BITS_5,
+	MDP3_DMA_OUTPUT_COMP_BITS_6,
+	MDP3_DMA_OUTPUT_COMP_BITS_8,
+};
+
+enum {
+	MDP3_DMA_CURSOR_FORMAT_ARGB888,
+};
+
+enum {
+	MDP3_DMA_COLOR_CORRECT_SET_1,
+	MDP3_DMA_COLOR_CORRECT_SET_2
+};
+
+enum {
+	MDP3_DMA_LUT_POSITION_PRE,
+	MDP3_DMA_LUT_POSITION_POST
+};
+
+enum {
+	MDP3_DMA_LUT_DISABLE = 0x0,
+	MDP3_DMA_LUT_ENABLE_C0 = 0x01,
+	MDP3_DMA_LUT_ENABLE_C1 = 0x02,
+	MDP3_DMA_LUT_ENABLE_C2 = 0x04,
+	MDP3_DMA_LUT_ENABLE_ALL = 0x07,
+};
+
+enum {
+	MDP3_DMA_HISTOGRAM_BIT_MASK_NONE = 0x0,
+	MDP3_DMA_HISTOGRAM_BIT_MASK_ONE_MSB = 0x1,
+	MDP3_DMA_HISTOGRAM_BIT_MASK_TWO_MSB = 0x2,
+	MDP3_DMA_HISTOGRAM_BIT_MASK_THREE_MSB = 0x3
+};
+
+enum {
+	MDP3_DMA_COLOR_FLIP_NONE,
+	MDP3_DMA_COLOR_FLIP_COMP1 = 0x1,
+	MDP3_DMA_COLOR_FLIP_COMP2 = 0x2,
+	MDP3_DMA_COLOR_FLIP_COMP3 = 0x4,
+};
+
+enum {
+	MDP3_DMA_CURSOR_BLEND_NONE = 0x0,
+	MDP3_DMA_CURSOR_BLEND_PER_PIXEL_ALPHA =  0x3,
+	MDP3_DMA_CURSOR_BLEND_CONSTANT_ALPHA = 0x5,
+	MDP3_DMA_CURSOR_BLEND_COLOR_KEYING = 0x9
+};
+
+enum {
+	MDP3_DMA_HISTO_OP_START,
+	MDP3_DMA_HISTO_OP_STOP,
+	MDP3_DMA_HISTO_OP_CANCEL,
+	MDP3_DMA_HISTO_OP_RESET
+};
+
+enum {
+	MDP3_DMA_CALLBACK_TYPE_VSYNC = 0x01,
+	MDP3_DMA_CALLBACK_TYPE_DMA_DONE = 0x02,
+};
+
+struct mdp3_dma_source {
+	u32 format;
+	int width;
+	int height;
+	int x;
+	int y;
+	void *buf;
+	int stride;
+};
+
+struct mdp3_dma_output_config {
+	int dither_en;
+	u32 out_sel;
+	u32 bit_mask_polarity;
+	u32 color_components_flip;
+	u32 pack_pattern;
+	u32 pack_align;
+	u32 color_comp_out_bits;
+};
+
+struct mdp3_dma_cursor_blend_config {
+	u32 mode;
+	u32 transparent_color; /*color keying*/
+	u32 transparency_mask;
+	u32 constant_alpha;
+};
+
+struct mdp3_dma_cursor {
+	int enable; /* enable cursor or not*/
+	u32 format;
+	int width;
+	int height;
+	int x;
+	int y;
+	void *buf;
+	struct mdp3_dma_cursor_blend_config blend_config;
+};
+
+struct mdp3_dma_ccs {
+	u32 *mv1; /*set1 matrix vector, 3x3 */
+	u32 *mv2;
+	u32 *pre_bv1; /*pre-bias vector for set1, 1x3*/
+	u32 *pre_bv2;
+	u32 *post_bv1; /*post-bias vector for set1, 1x3*/
+	u32 *post_bv2;
+	u32 *pre_lv1; /*pre-limit vector for set 1, 1x6*/
+	u32 *pre_lv2;
+	u32 *post_lv1;
+	u32 *post_lv2;
+};
+
+struct mdp3_dma_lut {
+	uint8_t *color0_lut1;
+	uint8_t *color1_lut1;
+	uint8_t *color2_lut1;
+	uint8_t *color0_lut2;
+	uint8_t *color1_lut2;
+	uint8_t *color2_lut2;
+};
+
+struct mdp3_dma_color_correct_config {
+	int ccs_enable;
+	int lut_enable;
+	u32 lut_sel;
+	u32 post_limit_sel;
+	u32 pre_limit_sel;
+	u32 post_bias_sel;
+	u32 pre_bias_sel;
+	u32 ccs_sel;
+	u32 lut_position;
+};
+
+struct mdp3_dma_histogram_config {
+	int frame_count;
+	u32 bit_mask_polarity;
+	u32 bit_mask;
+	int auto_clear_en;
+};
+
+struct mdp3_dma_histogram_data {
+	uint8_t r_max_value;
+	uint8_t r_min_value;
+	uint8_t b_max_value;
+	uint8_t b_min_value;
+	uint8_t g_max_value;
+	uint8_t g_min_value;
+	uint8_t r_data[32];
+	uint8_t g_data[32];
+	uint8_t b_data[32];
+};
+
+struct mdp3_vsync_notification {
+	void (*handler)(void *arg);
+	void *arg;
+};
+
+struct mdp3_intf;
+
+struct mdp3_dma {
+	u32 dma_sel;
+	u32 capability;
+	int in_use;
+	int available;
+	int busy;
+
+	spinlock_t dma_lock;
+	struct completion vsync_comp;
+	struct completion dma_comp;
+	ktime_t vsync_time;
+	struct mdp3_vsync_notification vsync_client;
+	u32 cb_type;
+
+	struct mdp3_dma_output_config output_config;
+	struct mdp3_dma_source source_config;
+
+	struct mdp3_dma_cursor cursor;
+	struct mdp3_dma_color_correct_config ccs_config;
+	struct mdp3_dma_histogram_config histogram_config;
+
+	int (*start)(struct mdp3_dma *dma, struct mdp3_intf *intf);
+
+	int (*stop)(struct mdp3_dma *dma, struct mdp3_intf *intf);
+
+	int (*config_cursor)(struct mdp3_dma *dma,
+				struct mdp3_dma_cursor *cursor);
+
+	int (*config_ccs)(struct mdp3_dma *dma,
+			struct mdp3_dma_color_correct_config *config,
+			struct mdp3_dma_ccs *ccs,
+			struct mdp3_dma_lut *lut);
+
+	int (*update)(struct mdp3_dma *dma, void *buf);
+
+	int (*update_cursor)(struct mdp3_dma *dma, int x, int y);
+
+	int (*get_histo)(struct mdp3_dma *dma,
+				struct mdp3_dma_histogram_data *data);
+
+	int (*config_histo)(struct mdp3_dma *dma,
+				struct mdp3_dma_histogram_config *histo_config);
+
+	int (*histo_op)(struct mdp3_dma *dma,
+				u32 op);
+
+	int (*histo_intr_status)(struct mdp3_dma *dma, int *status);
+
+	int (*histo_intr_enable)(struct mdp3_dma *dma, u32 mask);
+
+	int (*histo_intr_clear)(struct mdp3_dma *dma, u32 mask);
+
+	void (*vsync_enable)(struct mdp3_dma *dma,
+			struct mdp3_vsync_notification *vsync_client);
+
+	ktime_t (*get_vsync_time)(struct mdp3_dma *dma);
+
+};
+
+struct mdp3_video_intf_cfg {
+	int hsync_period;
+	int hsync_pulse_width;
+	int vsync_period;
+	int vsync_pulse_width;
+	int display_start_x;
+	int display_end_x;
+	int display_start_y;
+	int display_end_y;
+	int active_start_x;
+	int active_end_x;
+	int active_h_enable;
+	int active_start_y;
+	int active_end_y;
+	int active_v_enable;
+	int hsync_skew;
+	int hsync_polarity;
+	int vsync_polarity;
+	int de_polarity;
+};
+
+struct mdp3_dsi_cmd_intf_cfg {
+	int primary_dsi_cmd_id;
+	int secondary_dsi_cmd_id;
+	int dsi_cmd_tg_intf_sel;
+};
+
+struct mdp3_intf_cfg {
+	u32 type;
+	struct mdp3_video_intf_cfg video;
+	struct mdp3_dsi_cmd_intf_cfg dsi_cmd;
+};
+
+struct mdp3_intf {
+	struct mdp3_intf_cfg cfg;
+	int active;
+	int available;
+	int in_use;
+	int (*config)(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg);
+	int (*start)(struct mdp3_intf *intf);
+	int (*stop)(struct mdp3_intf *intf);
+};
+
+int mdp3_dma_init(struct mdp3_dma *dma,
+		struct mdp3_dma_source *source_config,
+		struct mdp3_dma_output_config *output_config);
+
+int mdp3_intf_init(struct mdp3_intf *intf, struct mdp3_intf_cfg *cfg);
+
+void mdp3_dma_callback_enable(struct mdp3_dma *dma, int type);
+
+void mdp3_dma_callback_disable(struct mdp3_dma *dma, int type);
+
+#endif /* MDP3_DMA_H */
diff --git a/drivers/video/msm/mdss/mdp3_hwio.h b/drivers/video/msm/mdss/mdp3_hwio.h
new file mode 100644
index 0000000..2763f46
--- /dev/null
+++ b/drivers/video/msm/mdss/mdp3_hwio.h
@@ -0,0 +1,216 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDP3_HWIO_H
+#define MDP3_HWIO_H
+
+#include <linux/bitops.h>
+
+/*synchronization*/
+#define MDP3_REG_SYNC_CONFIG_0				0x0300
+#define MDP3_REG_SYNC_CONFIG_1				0x0304
+#define MDP3_REG_SYNC_CONFIG_2				0x0308
+#define MDP3_REG_SYNC_STATUS_0				0x030c
+#define MDP3_REG_SYNC_STATUS_1				0x0310
+#define MDP3_REG_SYNC_STATUS_2				0x0314
+#define MDP3_REG_PRIMARY_VSYNC_OUT_CTRL			0x0318
+#define MDP3_REG_SECONDARY_VSYNC_OUT_CTRL		0x031c
+#define MDP3_REG_EXTERNAL_VSYNC_OUT_CTRL		0x0320
+#define MDP3_REG_VSYNC_SEL				0x0324
+
+/*interrupt*/
+#define MDP3_REG_INTR_ENABLE				0x0020
+#define MDP3_REG_INTR_STATUS				0x0024
+#define MDP3_REG_INTR_CLEAR				0x0028
+
+#define MDP3_REG_PRIMARY_RD_PTR_IRQ			0x021C
+#define MDP3_REG_SECONDARY_RD_PTR_IRQ			0x0220
+
+/*operation control*/
+#define MDP3_REG_DMA_P_START				0x0044
+#define MDP3_REG_DMA_S_START				0x0048
+#define MDP3_REG_DMA_E_START				0x004c
+
+#define MDP3_REG_DISPLAY_STATUS				0x0038
+
+#define MDP3_REG_HW_VERSION				0x0070
+#define MDP3_REG_SW_RESET				0x0074
+
+/*EBI*/
+#define MDP3_REG_EBI2_LCD0				0x003c
+#define MDP3_REG_EBI2_LCD0_YSTRIDE			0x0050
+
+/*DMA_P*/
+#define MDP3_REG_DMA_P_CONFIG				0x90000
+#define MDP3_REG_DMA_P_SIZE				0x90004
+#define MDP3_REG_DMA_P_IBUF_ADDR			0x90008
+#define MDP3_REG_DMA_P_IBUF_Y_STRIDE			0x9000C
+#define MDP3_REG_DMA_P_PROFILE_EN			0x90020
+#define MDP3_REG_DMA_P_OUT_XY				0x90010
+#define MDP3_REG_DMA_P_CURSOR_FORMAT			0x90040
+#define MDP3_REG_DMA_P_CURSOR_SIZE			0x90044
+#define MDP3_REG_DMA_P_CURSOR_BUF_ADDR			0x90048
+#define MDP3_REG_DMA_P_CURSOR_POS			0x9004c
+#define MDP3_REG_DMA_P_CURSOR_BLEND_CONFIG		0x90060
+#define MDP3_REG_DMA_P_CURSOR_BLEND_PARAM		0x90064
+#define MDP3_REG_DMA_P_CURSOR_BLEND_TRANS_MASK		0x90068
+#define MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG		0x90070
+#define MDP3_REG_DMA_P_CSC_BYPASS			0x93004
+#define MDP3_REG_DMA_P_CSC_MV1				0x93400
+#define MDP3_REG_DMA_P_CSC_MV2				0x93440
+#define MDP3_REG_DMA_P_CSC_PRE_BV1			0x93500
+#define MDP3_REG_DMA_P_CSC_PRE_BV2			0x93540
+#define MDP3_REG_DMA_P_CSC_POST_BV1			0x93580
+#define MDP3_REG_DMA_P_CSC_POST_BV2			0x935c0
+#define MDP3_REG_DMA_P_CSC_PRE_LV1			0x93600
+#define MDP3_REG_DMA_P_CSC_PRE_LV2			0x93640
+#define MDP3_REG_DMA_P_CSC_POST_LV1			0x93680
+#define MDP3_REG_DMA_P_CSC_POST_LV2			0x936c0
+#define MDP3_REG_DMA_P_CSC_LUT1				0x93800
+#define MDP3_REG_DMA_P_CSC_LUT2				0x93c00
+#define MDP3_REG_DMA_P_HIST_START			0x94000
+#define MDP3_REG_DMA_P_HIST_FRAME_CNT			0x94004
+#define MDP3_REG_DMA_P_HIST_BIT_MASK			0x94008
+#define MDP3_REG_DMA_P_HIST_RESET_SEQ_START		0x9400c
+#define MDP3_REG_DMA_P_HIST_CONTROL			0x94010
+#define MDP3_REG_DMA_P_HIST_INTR_STATUS			0x94014
+#define MDP3_REG_DMA_P_HIST_INTR_CLEAR			0x94018
+#define MDP3_REG_DMA_P_HIST_INTR_ENABLE			0x9401c
+#define MDP3_REG_DMA_P_HIST_STOP_REQ			0x94020
+#define MDP3_REG_DMA_P_HIST_CANCEL_REQ			0x94024
+#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_0		0x94028
+#define MDP3_REG_DMA_P_HIST_EXTRA_INFO_1		0x9402c
+#define MDP3_REG_DMA_P_HIST_R_DATA			0x94100
+#define MDP3_REG_DMA_P_HIST_G_DATA			0x94200
+#define MDP3_REG_DMA_P_HIST_B_DATA			0x94300
+#define MDP3_REG_DMA_P_FETCH_CFG			0x90074
+#define MDP3_REG_DMA_P_DCVS_CTRL			0x90080
+#define MDP3_REG_DMA_P_DCVS_STATUS			0x90084
+
+/*DMA_S*/
+#define MDP3_REG_DMA_S_CONFIG				0x90000
+#define MDP3_REG_DMA_S_SIZE				0x90004
+#define MDP3_REG_DMA_S_IBUF_ADDR			0x90008
+#define MDP3_REG_DMA_S_IBUF_Y_STRIDE			0x9000C
+#define MDP3_REG_DMA_S_OUT_XY				0x90010
+
+/*interface*/
+#define MDP3_REG_LCDC_EN				0xE0000
+#define MDP3_REG_LCDC_HSYNC_CTL				0xE0004
+#define MDP3_REG_LCDC_VSYNC_PERIOD			0xE0008
+#define MDP3_REG_LCDC_VSYNC_PULSE_WIDTH			0xE000C
+#define MDP3_REG_LCDC_DISPLAY_HCTL			0xE0010
+#define MDP3_REG_LCDC_DISPLAY_V_START			0xE0014
+#define MDP3_REG_LCDC_DISPLAY_V_END			0xE0018
+#define MDP3_REG_LCDC_ACTIVE_HCTL			0xE001C
+#define MDP3_REG_LCDC_ACTIVE_V_START			0xE0020
+#define MDP3_REG_LCDC_ACTIVE_V_END			0xE0024
+#define MDP3_REG_LCDC_BORDER_COLOR			0xE0028
+#define MDP3_REG_LCDC_UNDERFLOW_CTL			0xE002C
+#define MDP3_REG_LCDC_HSYNC_SKEW			0xE0030
+#define MDP3_REG_LCDC_TEST_CTL				0xE0034
+#define MDP3_REG_LCDC_CTL_POLARITY			0xE0038
+#define MDP3_REG_LCDC_TEST_COL_VAR1			0xE003C
+#define MDP3_REG_LCDC_TEST_COL_VAR2			0xE0040
+#define MDP3_REG_LCDC_UFLOW_HIDING_CTL			0xE0044
+#define MDP3_REG_LCDC_LOST_PIXEL_CNT_VALUE		0xE0048
+
+#define MDP3_REG_DSI_VIDEO_EN				0xF0000
+#define MDP3_REG_DSI_VIDEO_HSYNC_CTL			0xF0004
+#define MDP3_REG_DSI_VIDEO_VSYNC_PERIOD			0xF0008
+#define MDP3_REG_DSI_VIDEO_VSYNC_PULSE_WIDTH		0xF000C
+#define MDP3_REG_DSI_VIDEO_DISPLAY_HCTL			0xF0010
+#define MDP3_REG_DSI_VIDEO_DISPLAY_V_START		0xF0014
+#define MDP3_REG_DSI_VIDEO_DISPLAY_V_END		0xF0018
+#define MDP3_REG_DSI_VIDEO_ACTIVE_HCTL			0xF001C
+#define MDP3_REG_DSI_VIDEO_ACTIVE_V_START		0xF0020
+#define MDP3_REG_DSI_VIDEO_ACTIVE_V_END			0xF0024
+#define MDP3_REG_DSI_VIDEO_BORDER_COLOR			0xF0028
+#define MDP3_REG_DSI_VIDEO_UNDERFLOW_CTL		0xF002C
+#define MDP3_REG_DSI_VIDEO_HSYNC_SKEW			0xF0030
+#define MDP3_REG_DSI_VIDEO_TEST_CTL			0xF0034
+#define MDP3_REG_DSI_VIDEO_CTL_POLARITY			0xF0038
+#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR1		0xF003C
+#define MDP3_REG_DSI_VIDEO_TEST_COL_VAR2		0xF0040
+#define MDP3_REG_DSI_VIDEO_UFLOW_HIDING_CTL		0xF0044
+#define MDP3_REG_DSI_VIDEO_LOST_PIXEL_CNT_VALUE		0xF0048
+
+#define MDP3_REG_DSI_CMD_MODE_ID_MAP			0xF1000
+#define MDP3_REG_DSI_CMD_MODE_TRIGGER_EN		0xF1004
+
+/*interrupt mask*/
+
+#define MDP3_INTR_DP0_ROI_DONE_BIT			BIT(0)
+#define MDP3_INTR_DP1_ROI_DONE_BIT			BIT(1)
+#define MDP3_INTR_DMA_S_DONE_BIT			BIT(2)
+#define MDP3_INTR_DMA_E_DONE_BIT			BIT(3)
+#define MDP3_INTR_DP0_TERMINAL_FRAME_DONE_BIT		BIT(4)
+#define MDP3_INTR_DP1_TERMINAL_FRAME_DONE_BIT		BIT(5)
+#define MDP3_INTR_DMA_TV_DONE_BIT			BIT(6)
+#define MDP3_INTR_TV_ENCODER_UNDER_RUN_BIT		BIT(7)
+#define MDP3_INTR_SYNC_PRIMARY_LINE_BIT			BIT(8)
+#define MDP3_INTR_SYNC_SECONDARY_LINE_BIT		BIT(9)
+#define MDP3_INTR_SYNC_EXTERNAL_LINE_BIT		BIT(10)
+#define MDP3_INTR_DP0_FETCH_DONE_BIT			BIT(11)
+#define MDP3_INTR_DP1_FETCH_DONE_BIT			BIT(12)
+#define MDP3_INTR_TV_OUT_FRAME_START_BIT		BIT(13)
+#define MDP3_INTR_DMA_P_DONE_BIT			BIT(14)
+#define MDP3_INTR_LCDC_START_OF_FRAME_BIT		BIT(15)
+#define MDP3_INTR_LCDC_UNDERFLOW_BIT			BIT(16)
+#define MDP3_INTR_DMA_P_LINE_BIT			BIT(17)
+#define MDP3_INTR_DMA_S_LINE_BIT			BIT(18)
+#define MDP3_INTR_DMA_E_LINE_BIT			BIT(19)
+#define MDP3_INTR_DMA_P_HISTO_BIT			BIT(20)
+#define MDP3_INTR_DTV_OUT_DONE_BIT			BIT(21)
+#define MDP3_INTR_DTV_OUT_START_OF_FRAME_BIT		BIT(22)
+#define MDP3_INTR_DTV_OUT_UNDERFLOW_BIT			BIT(23)
+#define MDP3_INTR_DTV_OUT_LINE_BIT			BIT(24)
+#define MDP3_INTR_DMA_P_AUTO_FREFRESH_START_BIT		BIT(25)
+#define MDP3_INTR_DMA_S_AUTO_FREFRESH_START_BIT		BIT(26)
+#define MDP3_INTR_QPIC_EOF_ENABLE_BIT			BIT(27)
+
+enum {
+	MDP3_INTR_DP0_ROI_DONE,
+	MDP3_INTR_DP1_ROI_DONE,
+	MDP3_INTR_DMA_S_DONE,
+	MDP3_INTR_DMA_E_DONE,
+	MDP3_INTR_DP0_TERMINAL_FRAME_DONE,
+	MDP3_INTR_DP1_TERMINAL_FRAME_DONE,
+	MDP3_INTR_DMA_TV_DONE,
+	MDP3_INTR_TV_ENCODER_UNDER_RUN,
+	MDP3_INTR_SYNC_PRIMARY_LINE,
+	MDP3_INTR_SYNC_SECONDARY_LINE,
+	MDP3_INTR_SYNC_EXTERNAL_LINE,
+	MDP3_INTR_DP0_FETCH_DONE,
+	MDP3_INTR_DP1_FETCH_DONE,
+	MDP3_INTR_TV_OUT_FRAME_START,
+	MDP3_INTR_DMA_P_DONE,
+	MDP3_INTR_LCDC_START_OF_FRAME,
+	MDP3_INTR_LCDC_UNDERFLOW,
+	MDP3_INTR_DMA_P_LINE,
+	MDP3_INTR_DMA_S_LINE,
+	MDP3_INTR_DMA_E_LINE,
+	MDP3_INTR_DMA_P_HISTO,
+	MDP3_INTR_DTV_OUT_DONE,
+	MDP3_INTR_DTV_OUT_START_OF_FRAME,
+	MDP3_INTR_DTV_OUT_UNDERFLOW,
+	MDP3_INTR_DTV_OUT_LINE,
+	MDP3_INTR_DMA_P_AUTO_FREFRESH_START,
+	MDP3_INTR_DMA_S_AUTO_FREFRESH_START,
+	MDP3_INTR_QPIC_EOF_ENABLE,
+};
+
+#define MDP3_DMA_P_HIST_INTR_RESET_DONE_BIT		BIT(0)
+#define MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT		BIT(1)
+
+#endif /* MDP3_HWIO_H */
diff --git a/drivers/video/msm/mdss/mdss.h b/drivers/video/msm/mdss/mdss.h
index db7c836..c847ee6 100644
--- a/drivers/video/msm/mdss/mdss.h
+++ b/drivers/video/msm/mdss/mdss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,15 +14,17 @@
 #ifndef MDSS_H
 #define MDSS_H
 
+#include <linux/msm_ion.h>
+#include <linux/earlysuspend.h>
+#include <linux/msm_mdp.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
-#define MDSS_REG_WRITE(addr, val) writel_relaxed(val, mdss_reg_base + addr)
-#define MDSS_REG_READ(addr) readl_relaxed(mdss_reg_base + addr)
+#include <mach/iommu_domains.h>
 
-extern unsigned char *mdss_reg_base;
-extern spinlock_t dsi_clk_lock;
+#define MDSS_REG_WRITE(addr, val) writel_relaxed(val, mdss_res->mdp_base + addr)
+#define MDSS_REG_READ(addr) readl_relaxed(mdss_res->mdp_base + addr)
 
 enum mdss_mdp_clk_type {
 	MDSS_CLK_AHB,
@@ -34,14 +36,38 @@
 	MDSS_MAX_CLK
 };
 
-struct mdss_res_type {
-	u32 rev;
+enum mdss_iommu_domain_type {
+	MDSS_IOMMU_DOMAIN_SECURE,
+	MDSS_IOMMU_DOMAIN_UNSECURE,
+	MDSS_IOMMU_MAX_DOMAIN
+};
+
+struct mdss_iommu_map_type {
+	char *client_name;
+	char *ctx_name;
+	struct device *ctx;
+	struct msm_iova_partition partitions[1];
+	int npartitions;
+	int domain_idx;
+};
+
+struct mdss_hw_settings {
+	char __iomem *reg;
+	u32 val;
+};
+
+struct mdss_data_type {
 	u32 mdp_rev;
 	struct clk *mdp_clk[MDSS_MAX_CLK];
 	struct regulator *fs;
+	u32 max_mdp_clk_rate;
 
 	struct workqueue_struct *clk_ctrl_wq;
-	struct delayed_work clk_ctrl_worker;
+	struct work_struct clk_ctrl_worker;
+	struct platform_device *pdev;
+	char __iomem *mdp_base;
+	size_t mdp_reg_size;
+	char __iomem *vbif_base;
 
 	u32 irq;
 	u32 irq_mask;
@@ -49,27 +75,50 @@
 	u32 irq_buzy;
 
 	u32 mdp_irq_mask;
+	u32 mdp_hist_irq_mask;
 
-	u32 clk_ena;
-	u32 suspend;
-	u32 timeout;
+	int suspend_fs_ena;
+	atomic_t clk_ref;
+	u8 clk_ena;
+	u8 fs_ena;
+	u8 vsync_ena;
+	unsigned long min_mdp_clk;
 
-	u32 fs_ena;
-	u32 vsync_ena;
-
-	u32 intf;
-	u32 eintf_ena;
-	u32 prim_ptype;
 	u32 res_init;
-	u32 pdev_lcnt;
 	u32 bus_hdl;
 
 	u32 smp_mb_cnt;
 	u32 smp_mb_size;
-	u32 *pipe_type_map;
-	u32 *mixer_type_map;
+
+	u32 rot_block_size;
+
+	struct mdss_hw_settings *hw_settings;
+
+	struct mdss_mdp_pipe *vig_pipes;
+	struct mdss_mdp_pipe *rgb_pipes;
+	struct mdss_mdp_pipe *dma_pipes;
+	u32 nvig_pipes;
+	u32 nrgb_pipes;
+	u32 ndma_pipes;
+	struct mdss_mdp_mixer *mixer_intf;
+	struct mdss_mdp_mixer *mixer_wb;
+	u32 nmixers_intf;
+	u32 nmixers_wb;
+	struct mdss_mdp_ctl *ctl_off;
+	u32 nctl;
+	struct mdss_mdp_dp_intf *dp_off;
+	u32 ndp;
+	void *video_intf;
+	u32 nintf;
+
+	struct ion_client *iclient;
+	int iommu_attached;
+	struct mdss_iommu_map_type *iommu_map;
+
+	struct early_suspend early_suspend;
+	void *debug_data;
 };
-extern struct mdss_res_type *mdss_res;
+extern struct mdss_data_type *mdss_res;
 
 enum mdss_hw_index {
 	MDSS_HW_MDP,
@@ -82,10 +131,36 @@
 
 struct mdss_hw {
 	u32 hw_ndx;
+	void *ptr;
 	irqreturn_t (*irq_handler)(int irq, void *ptr);
 };
 
 void mdss_enable_irq(struct mdss_hw *hw);
 void mdss_disable_irq(struct mdss_hw *hw);
 void mdss_disable_irq_nosync(struct mdss_hw *hw);
+
+static inline struct ion_client *mdss_get_ionclient(void)
+{
+	if (!mdss_res)
+		return NULL;
+	return mdss_res->iclient;
+}
+
+static inline int is_mdss_iommu_attached(void)
+{
+	if (!mdss_res)
+		return false;
+	return mdss_res->iommu_attached;
+}
+
+static inline int mdss_get_iommu_domain(u32 type)
+{
+	if (type >= MDSS_IOMMU_MAX_DOMAIN)
+		return -EINVAL;
+
+	if (!mdss_res)
+		return -ENODEV;
+
+	return mdss_res->iommu_map[type].domain_idx;
+}
 #endif /* MDSS_H */
diff --git a/drivers/video/msm/mdss/mdss_debug.c b/drivers/video/msm/mdss/mdss_debug.c
new file mode 100644
index 0000000..0b2a7c0
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_debug.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+#define DEFAULT_BASE_REG_CNT 0x100
+#define GROUP_BYTES 4
+#define ROW_BYTES 16
+
+struct mdss_debug_data {
+	struct dentry *root;
+	struct list_head base_list;
+};
+
+struct mdss_debug_base {
+	struct mdss_debug_data *mdd;
+	void __iomem *base;
+	size_t off;
+	size_t cnt;
+	size_t max_offset;
+	char *buf;
+	size_t buf_len;
+	struct list_head head;
+};
+
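+/*
+ * Each register base registered below gets a pair of debugfs files:
+ * "<name>_off" selects an <offset, count> window within the mapping and
+ * "<name>_reg" reads a hex dump of, or writes a value into, that window.
+ */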
+static int mdss_debug_base_open(struct inode *inode, struct file *file)
+{
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int mdss_debug_base_release(struct inode *inode, struct file *file)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	if (dbg && dbg->buf) {
+		kfree(dbg->buf);
+		dbg->buf_len = 0;
+		dbg->buf = NULL;
+	}
+	return 0;
+}
+
+static ssize_t mdss_debug_base_offset_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	u32 off = 0;
+	u32 cnt = DEFAULT_BASE_REG_CNT;
+	char buf[24];
+
+	if (!dbg)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	sscanf(buf, "%5x %x", &off, &cnt);
+
+	if (off > dbg->max_offset)
+		return -EINVAL;
+
+	if (cnt > (dbg->max_offset - off))
+		cnt = dbg->max_offset - off;
+
+	dbg->off = off;
+	dbg->cnt = cnt;
+
+	pr_debug("offset=%x cnt=%x\n", off, cnt);
+
+	return count;
+}
+
+static ssize_t mdss_debug_base_offset_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	int len = 0;
+	char buf[24];
+
+	if (!dbg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+	if (len < 0)
+		return 0;
+
+	if (copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;	/* increase offset */
+
+	return len;
+}
+
+static ssize_t mdss_debug_base_reg_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	u32 off, data, cnt;
+	char buf[24];
+
+	if (!dbg)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(buf, "%x %x", &off, &data);
+
+	if (cnt < 2)
+		return -EFAULT;
+
+	if (off >= dbg->max_offset)
+		return -EFAULT;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+	writel_relaxed(data, dbg->base + off);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+	pr_debug("addr=%x data=%x\n", off, data);
+
+	return count;
+}
+
+static ssize_t mdss_debug_base_reg_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct mdss_debug_base *dbg = file->private_data;
+	size_t len;
+
+	if (!dbg) {
+		pr_err("invalid handle\n");
+		return -ENODEV;
+	}
+
+	if (!dbg->buf) {
+		char dump_buf[64];
+		char *ptr;
+		int cnt, tot;
+
+		dbg->buf_len = sizeof(dump_buf) *
+			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
+		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
+
+		if (!dbg->buf) {
+			pr_err("not enough memory to hold reg dump\n");
+			return -ENOMEM;
+		}
+
+		ptr = dbg->base + dbg->off;
+		tot = 0;
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
+			hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
+					   ROW_BYTES, GROUP_BYTES, dump_buf,
+					   sizeof(dump_buf), false);
+			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
+					"0x%08x: %s\n",
+					((int)ptr) - ((int)dbg->base),
+					dump_buf);
+
+			ptr += ROW_BYTES;
+			tot += len;
+			if (tot >= dbg->buf_len)
+				break;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+		dbg->buf_len = tot;
+	}
+
+	if (*ppos >= dbg->buf_len)
+		return 0; /* done reading */
+
+	len = min(count, dbg->buf_len - (size_t) *ppos);
+	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
+		pr_err("failed to copy to user\n");
+		return -EFAULT;
+	}
+
+	*ppos += len; /* increase offset */
+
+	return len;
+}
+
+static const struct file_operations mdss_off_fops = {
+	.open = mdss_debug_base_open,
+	.release = mdss_debug_base_release,
+	.read = mdss_debug_base_offset_read,
+	.write = mdss_debug_base_offset_write,
+};
+
+static const struct file_operations mdss_reg_fops = {
+	.open = mdss_debug_base_open,
+	.release = mdss_debug_base_release,
+	.read = mdss_debug_base_reg_read,
+	.write = mdss_debug_base_reg_write,
+};
+
+int mdss_debug_register_base(const char *name, void __iomem *base,
+			     size_t max_offset)
+{
+	struct mdss_data_type *mdata = mdss_res;
+	struct mdss_debug_data *mdd;
+	struct mdss_debug_base *dbg;
+	struct dentry *ent_off, *ent_reg;
+	char dn[80] = "";
+	int prefix_len = 0;
+
+	if (!mdata || !mdata->debug_data)
+		return -ENODEV;
+
+	mdd = mdata->debug_data;
+
+	dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+	if (!dbg)
+		return -ENOMEM;
+
+	dbg->base = base;
+	dbg->max_offset = max_offset;
+	dbg->off = 0;
+	dbg->cnt = DEFAULT_BASE_REG_CNT;
+
+	if (name)
+		prefix_len = snprintf(dn, sizeof(dn), "%s_", name);
+
+	strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len);
+	ent_off = debugfs_create_file(dn, 0644, mdd->root, dbg, &mdss_off_fops);
+	if (IS_ERR_OR_NULL(ent_off)) {
+		pr_err("debugfs_create_file: offset fail\n");
+		goto off_fail;
+	}
+
+	strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len);
+	ent_reg = debugfs_create_file(dn, 0644, mdd->root, dbg, &mdss_reg_fops);
+	if (IS_ERR_OR_NULL(ent_reg)) {
+		pr_err("debugfs_create_file: reg fail\n");
+		goto reg_fail;
+	}
+
+	list_add(&dbg->head, &mdd->base_list);
+
+	return 0;
+reg_fail:
+	debugfs_remove(ent_off);
+off_fail:
+	kfree(dbg);
+	return -ENODEV;
+}
+
+static int mdss_debugfs_cleanup(struct mdss_debug_data *mdd)
+{
+	struct mdss_debug_base *base, *tmp;
+
+	if (!mdd)
+		return 0;
+
+	list_for_each_entry_safe(base, tmp, &mdd->base_list, head) {
+		list_del(&base->head);
+		kfree(base);
+	}
+
+	if (mdd->root)
+		debugfs_remove_recursive(mdd->root);
+
+	kfree(mdd);
+
+	return 0;
+}
+
+int mdss_debugfs_init(struct mdss_data_type *mdata)
+{
+	struct mdss_debug_data *mdd;
+
+	if (mdata->debug_data) {
+		pr_warn("mdss debugfs already initialized\n");
+		return -EBUSY;
+	}
+
+	mdd = kzalloc(sizeof(*mdd), GFP_KERNEL);
+	if (!mdd) {
+		pr_err("no memory to create mdss debug data\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&mdd->base_list);
+
+	mdd->root = debugfs_create_dir("mdp", NULL);
+	if (IS_ERR_OR_NULL(mdd->root)) {
+		pr_err("debugfs_create_dir fail, error %ld\n",
+		       PTR_ERR(mdd->root));
+		mdd->root = NULL;
+		mdss_debugfs_cleanup(mdd);
+		return -ENODEV;
+	}
+
+	debugfs_create_u32("min_mdp_clk", 0644, mdd->root,
+		(u32 *)&mdata->min_mdp_clk);
+
+	mdata->debug_data = mdd;
+
+	return 0;
+}
+
+int mdss_debugfs_remove(struct mdss_data_type *mdata)
+{
+	struct mdss_debug_data *mdd = mdata->debug_data;
+
+	mdss_debugfs_cleanup(mdd);
+
+	return 0;
+}
diff --git a/drivers/video/msm/mdss/mdss_debug.h b/drivers/video/msm/mdss/mdss_debug.h
new file mode 100644
index 0000000..167fa8a
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_debug.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_DEBUG_H
+#define MDSS_DEBUG_H
+
+#include "mdss.h"
+
+#ifdef CONFIG_DEBUG_FS
+int mdss_debugfs_init(struct mdss_data_type *mdata);
+int mdss_debugfs_remove(struct mdss_data_type *mdata);
+int mdss_debug_register_base(const char *name, void __iomem *base,
+				    size_t max_offset);
+#else
+static inline int mdss_debugfs_init(struct mdss_data_type *mdata)
+{
+	return 0;
+}
+static inline int mdss_debugfs_remove(struct mdss_data_type *mdata)
+{
+	return 0;
+}
+static inline int mdss_debug_register_base(const char *name, void __iomem *base,
+				    size_t max_offset)
+{
+	return 0;
+}
+#endif
+#endif /* MDSS_DEBUG_H */
diff --git a/drivers/video/msm/mdss/mdss_dsi.c b/drivers/video/msm/mdss/mdss_dsi.c
index 0a357be..8bf8c95 100644
--- a/drivers/video/msm/mdss/mdss_dsi.c
+++ b/drivers/video/msm/mdss/mdss_dsi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,81 +17,521 @@
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
 
 #include "mdss.h"
 #include "mdss_panel.h"
 #include "mdss_dsi.h"
-
-static struct mdss_panel_common_pdata *panel_pdata;
+#include "mdss_debug.h"
 
 static unsigned char *mdss_dsi_base;
 
+static int mdss_dsi_regulator_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	struct dsi_drv_cm_data *dsi_drv = NULL;
+
+	if (!pdev) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = platform_get_drvdata(pdev);
+	if (!ctrl_pdata) {
+		pr_err("%s: invalid driver data\n", __func__);
+		return -EINVAL;
+	}
+
+	dsi_drv = &(ctrl_pdata->shared_pdata);
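+	/*
+	 * If the device tree provided a supply table, configure it;
+	 * otherwise fall back to the fixed vdd (3.0V), vddio (1.8V) and
+	 * vdda (1.2V) regulators requested by name.
+	 */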
+	if (ctrl_pdata->power_data.num_vreg > 0) {
+		ret = msm_dss_config_vreg(&pdev->dev,
+				ctrl_pdata->power_data.vreg_config,
+				ctrl_pdata->power_data.num_vreg, 1);
+	} else {
+		dsi_drv->vdd_vreg = devm_regulator_get(&pdev->dev, "vdd");
+		if (IS_ERR(dsi_drv->vdd_vreg)) {
+			pr_err("%s: could not get vdda vreg, rc=%ld\n",
+				__func__, PTR_ERR(dsi_drv->vdd_vreg));
+			return PTR_ERR(dsi_drv->vdd_vreg);
+		}
+
+		ret = regulator_set_voltage(dsi_drv->vdd_vreg, 3000000,
+				3000000);
+		if (ret) {
+			pr_err("%s: set voltage failed on vdda vreg, rc=%d\n",
+				__func__, ret);
+			return ret;
+		}
+
+		dsi_drv->vdd_io_vreg = devm_regulator_get(&pdev->dev, "vddio");
+		if (IS_ERR(dsi_drv->vdd_io_vreg)) {
+			pr_err("%s: could not get vddio reg, rc=%ld\n",
+				__func__, PTR_ERR(dsi_drv->vdd_io_vreg));
+			return PTR_ERR(dsi_drv->vdd_io_vreg);
+		}
+
+		ret = regulator_set_voltage(dsi_drv->vdd_io_vreg, 1800000,
+				1800000);
+		if (ret) {
+			pr_err("%s: set voltage failed on vddio vreg, rc=%d\n",
+				__func__, ret);
+			return ret;
+		}
+
+		dsi_drv->vdda_vreg = devm_regulator_get(&pdev->dev, "vdda");
+		if (IS_ERR(dsi_drv->vdda_vreg)) {
+			pr_err("%s: could not get vdda vreg, rc=%ld\n",
+				__func__, PTR_ERR(dsi_drv->vdda_vreg));
+			return PTR_ERR(dsi_drv->vdda_vreg);
+		}
+
+		ret = regulator_set_voltage(dsi_drv->vdda_vreg, 1200000,
+				1200000);
+		if (ret) {
+			pr_err("%s: set voltage failed on vdda vreg, rc=%d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int mdss_dsi_panel_power_on(struct mdss_panel_data *pdata, int enable)
+{
+	int ret;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	pr_debug("%s: enable=%d\n", __func__, enable);
+
+	if (enable) {
+		if (ctrl_pdata->power_data.num_vreg > 0) {
+			ret = msm_dss_enable_vreg(
+				ctrl_pdata->power_data.vreg_config,
+				ctrl_pdata->power_data.num_vreg, 1);
+			if (ret) {
+				pr_err("%s:Failed to enable regulators.rc=%d\n",
+					__func__, ret);
+				return ret;
+			}
+
+			/*
+			 * A small delay is needed here after enabling
+			 * all regulators and before issuing panel reset
+			 */
+			msleep(20);
+		} else {
+			ret = regulator_set_optimum_mode(
+				(ctrl_pdata->shared_pdata).vdd_vreg, 100000);
+			if (ret < 0) {
+				pr_err("%s: vdd_vreg set opt mode failed.\n",
+					 __func__);
+				return ret;
+			}
+
+			ret = regulator_set_optimum_mode(
+				(ctrl_pdata->shared_pdata).vdd_io_vreg, 100000);
+			if (ret < 0) {
+				pr_err("%s: vdd_io_vreg set opt mode failed.\n",
+					__func__);
+				return ret;
+			}
+
+			ret = regulator_set_optimum_mode(
+				(ctrl_pdata->shared_pdata).vdda_vreg, 100000);
+			if (ret < 0) {
+				pr_err("%s: vdda_vreg set opt mode failed.\n",
+					__func__);
+				return ret;
+			}
+
+			ret = regulator_enable(
+				(ctrl_pdata->shared_pdata).vdd_io_vreg);
+			if (ret) {
+				pr_err("%s: Failed to enable regulator.\n",
+					__func__);
+				return ret;
+			}
+			msleep(20);
+
+			ret = regulator_enable(
+				(ctrl_pdata->shared_pdata).vdd_vreg);
+			if (ret) {
+				pr_err("%s: Failed to enable regulator.\n",
+					__func__);
+				return ret;
+			}
+			msleep(20);
+
+			ret = regulator_enable(
+				(ctrl_pdata->shared_pdata).vdda_vreg);
+			if (ret) {
+				pr_err("%s: Failed to enable regulator.\n",
+					__func__);
+				return ret;
+			}
+		}
+
+		if (pdata->panel_info.panel_power_on == 0)
+			mdss_dsi_panel_reset(pdata, 1);
+
+	} else {
+
+		mdss_dsi_panel_reset(pdata, 0);
+
+		if (ctrl_pdata->power_data.num_vreg > 0) {
+			ret = msm_dss_enable_vreg(
+				ctrl_pdata->power_data.vreg_config,
+				ctrl_pdata->power_data.num_vreg, 0);
+			if (ret) {
+				pr_err("%s: Failed to disable regs.rc=%d\n",
+					__func__, ret);
+				return ret;
+			}
+		} else {
+			ret = regulator_disable(
+				(ctrl_pdata->shared_pdata).vdd_vreg);
+			if (ret) {
+				pr_err("%s: Failed to disable regulator.\n",
+					__func__);
+				return ret;
+			}
+
+			ret = regulator_disable(
+				(ctrl_pdata->shared_pdata).vdda_vreg);
+			if (ret) {
+				pr_err("%s: Failed to disable regulator.\n",
+					__func__);
+				return ret;
+			}
+
+			ret = regulator_disable(
+				(ctrl_pdata->shared_pdata).vdd_io_vreg);
+			if (ret) {
+				pr_err("%s: Failed to disable regulator.\n",
+					__func__);
+				return ret;
+			}
+
+			ret = regulator_set_optimum_mode(
+				(ctrl_pdata->shared_pdata).vdd_vreg, 100);
+			if (ret < 0) {
+				pr_err("%s: vdd_vreg set opt mode failed.\n",
+					 __func__);
+				return ret;
+			}
+
+			ret = regulator_set_optimum_mode(
+				(ctrl_pdata->shared_pdata).vdd_io_vreg, 100);
+			if (ret < 0) {
+				pr_err("%s: vdd_io_vreg set opt mode failed.\n",
+					__func__);
+				return ret;
+			}
+			ret = regulator_set_optimum_mode(
+				(ctrl_pdata->shared_pdata).vdda_vreg, 100);
+			if (ret < 0) {
+				pr_err("%s: vdda_vreg set opt mode failed.\n",
+					__func__);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+static void mdss_dsi_put_dt_vreg_data(struct device *dev,
+	struct dss_module_power *module_power)
+{
+	if (!module_power) {
+		pr_err("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (module_power->vreg_config) {
+		devm_kfree(dev, module_power->vreg_config);
+		module_power->vreg_config = NULL;
+	}
+	module_power->num_vreg = 0;
+}
+
+static int mdss_dsi_get_dt_vreg_data(struct device *dev,
+	struct dss_module_power *mp)
+{
+	int i, rc = 0;
+	int dt_vreg_total = 0;
+	u32 *val_array = NULL;
+	struct device_node *of_node = NULL;
+
+	if (!dev || !mp) {
+		pr_err("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	of_node = dev->of_node;
+
+	mp->num_vreg = 0;
+	dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
+	if (dt_vreg_total < 0) {
+		pr_debug("%s: vreg not found. rc=%d\n", __func__,
+			dt_vreg_total);
+		rc = 0;
+		goto error;
+	} else {
+		pr_debug("%s: vreg found. count=%d\n", __func__, dt_vreg_total);
+	}
+
+	if (dt_vreg_total > 0) {
+		mp->num_vreg = dt_vreg_total;
+		mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) *
+			dt_vreg_total, GFP_KERNEL);
+		if (!mp->vreg_config) {
+			pr_err("%s: can't alloc vreg mem\n", __func__);
+			rc = -ENOMEM;
+			goto error;
+		}
+	} else {
+		pr_debug("%s: no vreg\n", __func__);
+		return 0;
+	}
+
+	val_array = devm_kzalloc(dev, sizeof(u32) * dt_vreg_total, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s: can't allocate vreg scratch mem\n", __func__);
+		rc = -ENOMEM;
+		goto error;
+	}
+
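+	/*
+	 * Each qcom,supply-* property is a parallel array: entry i of every
+	 * array describes the supply named at index i of qcom,supply-names.
+	 */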
+	for (i = 0; i < dt_vreg_total; i++) {
+		const char *st = NULL;
+		/* vreg-name */
+		rc = of_property_read_string_index(of_node, "qcom,supply-names",
+			i, &st);
+		if (rc) {
+			pr_err("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto error;
+		}
+		snprintf(mp->vreg_config[i].vreg_name,
+			ARRAY_SIZE((mp->vreg_config[i].vreg_name)), "%s", st);
+
+		/* vreg-type */
+		rc = of_property_read_string_index(of_node, "qcom,supply-type",
+			i, &st);
+		if (rc) {
+			pr_err("%s: error reading vreg type. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		if (!strncmp(st, "regulator", 9))
+			mp->vreg_config[i].type = 0;
+		else if (!strncmp(st, "switch", 6))
+			mp->vreg_config[i].type = 1;
+
+		/* vreg-min-voltage */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,supply-min-voltage-level", val_array,
+			dt_vreg_total);
+		if (rc) {
+			pr_err("%s: error reading min volt. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].min_voltage = val_array[i];
+
+		/* vreg-max-voltage */
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,supply-max-voltage-level", val_array,
+			dt_vreg_total);
+		if (rc) {
+			pr_err("%s: error reading max volt. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].max_voltage = val_array[i];
+
+		/* vreg-peak-current*/
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			"qcom,supply-peak-current", val_array,
+			dt_vreg_total);
+		if (rc) {
+			pr_err("%s: error reading peak current. rc=%d\n",
+				__func__, rc);
+			goto error;
+		}
+		mp->vreg_config[i].optimum_voltage = val_array[i];
+
+		pr_debug("%s: %s type=%d, min=%d, max=%d, op=%d\n",
+			__func__, mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].type,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].optimum_voltage);
+	}
+
+	devm_kfree(dev, val_array);
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+
+	if (val_array)
+		devm_kfree(dev, val_array);
+	return rc;
+}
+
 static int mdss_dsi_off(struct mdss_panel_data *pdata)
 {
 	int ret = 0;
-	struct mdss_panel_info *pinfo;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
 
-	pinfo = &pdata->panel_info;
-
-	if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
-		mdss_dsi_controller_cfg(0, pdata);
-
-	mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
-
-	ret = panel_pdata->off(pdata);
-	if (ret) {
-		pr_err("%s: Panel OFF failed\n", __func__);
-		return ret;
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
 	}
 
-	spin_lock_bh(&dsi_clk_lock);
-	mdss_dsi_clk_disable();
+	if (!pdata->panel_info.panel_power_on) {
+		pr_warn("%s:%d Panel already off.\n", __func__, __LINE__);
+		return -EPERM;
+	}
 
-	/* disable dsi engine */
-	MIPI_OUTP(mdss_dsi_base + 0x0004, 0);
+	pdata->panel_info.panel_power_on = 0;
 
-	spin_unlock_bh(&dsi_clk_lock);
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
 
-	mdss_dsi_unprepare_clocks();
+	pr_debug("%s+: ctrl=%p ndx=%d\n", __func__,
+				ctrl_pdata, ctrl_pdata->ndx);
+
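+	/*
+	 * Teardown order: gate and unprepare the link clocks, disable the
+	 * DSI controller, then drop the panel power rails.
+	 */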
+	mdss_dsi_clk_disable(pdata);
+	mdss_dsi_unprepare_clocks(ctrl_pdata);
+
+	/* disable DSI controller */
+	mdss_dsi_controller_cfg(0, pdata);
+
+	ret = mdss_dsi_panel_power_on(pdata, 0);
+	if (ret) {
+		pr_err("%s: Panel power off failed\n", __func__);
+		return ret;
+	}
 
 	pr_debug("%s-:\n", __func__);
 
 	return ret;
 }
 
-static int mdss_dsi_on(struct mdss_panel_data *pdata)
+int mdss_dsi_cont_splash_on(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mipi_panel_info *mipi;
+
+	pr_info("%s:%d DSI on for continuous splash.\n", __func__, __LINE__);
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	mipi  = &pdata->panel_info.mipi;
+
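+	/*
+	 * panel_power_on is still set from probe, so power_on() skips the
+	 * panel reset pulse and the splash image stays up while the kernel
+	 * takes over the running link.
+	 */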
+	ret = mdss_dsi_panel_power_on(pdata, 1);
+	if (ret) {
+		pr_err("%s: Panel power on failed\n", __func__);
+		return ret;
+	}
+	mdss_dsi_sw_reset(pdata);
+	mdss_dsi_host_init(mipi, pdata);
+
+	pdata->panel_info.panel_power_on = 1;
+
+	mdss_dsi_op_mode_config(mipi->mode, pdata);
+
+	pr_debug("%s-:End\n", __func__);
+	return ret;
+}
+
+int mdss_dsi_on(struct mdss_panel_data *pdata)
 {
 	int ret = 0;
 	u32 clk_rate;
 	struct mdss_panel_info *pinfo;
 	struct mipi_panel_info *mipi;
 	u32 hbp, hfp, vbp, vfp, hspw, vspw, width, height;
-	u32 ystride, bpp, data;
+	u32 ystride, bpp, data, dst_bpp;
 	u32 dummy_xres, dummy_yres;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (pdata->panel_info.panel_power_on) {
+		pr_warn("%s:%d Panel already on.\n", __func__, __LINE__);
+		return 0;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s+: ctrl=%p ndx=%d\n",
+				__func__, ctrl_pdata, ctrl_pdata->ndx);
 
 	pinfo = &pdata->panel_info;
 
-	cont_splash_clk_ctrl(0);
-	mdss_dsi_prepare_clocks();
+	ret = mdss_dsi_panel_power_on(pdata, 1);
+	if (ret) {
+		pr_err("%s: Panel power on failed\n", __func__);
+		return ret;
+	}
 
-	spin_lock_bh(&dsi_clk_lock);
+	pdata->panel_info.panel_power_on = 1;
 
-	MIPI_OUTP(mdss_dsi_base + 0x118, 1);
-	MIPI_OUTP(mdss_dsi_base + 0x118, 0);
+	mdss_dsi_phy_sw_reset((ctrl_pdata->ctrl_base));
+	mdss_dsi_phy_init(pdata);
 
-	mdss_dsi_clk_enable();
-	spin_unlock_bh(&dsi_clk_lock);
+	mdss_dsi_prepare_clocks(ctrl_pdata);
+	mdss_dsi_clk_enable(pdata);
 
 	clk_rate = pdata->panel_info.clk_rate;
 	clk_rate = min(clk_rate, pdata->panel_info.clk_max);
 
-	hbp = pdata->panel_info.lcdc.h_back_porch;
-	hfp = pdata->panel_info.lcdc.h_front_porch;
-	vbp = pdata->panel_info.lcdc.v_back_porch;
-	vfp = pdata->panel_info.lcdc.v_front_porch;
-	hspw = pdata->panel_info.lcdc.h_pulse_width;
+	dst_bpp = pdata->panel_info.fbc.enabled ?
+		(pdata->panel_info.fbc.target_bpp) : (pinfo->bpp);
+
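+	/*
+	 * With frame-buffer compression the active line narrows by
+	 * target_bpp/bpp, so the horizontal timings and width are scaled by
+	 * the same ratio.
+	 */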
+	hbp = mult_frac(pdata->panel_info.lcdc.h_back_porch, dst_bpp,
+			pdata->panel_info.bpp);
+	hfp = mult_frac(pdata->panel_info.lcdc.h_front_porch, dst_bpp,
+			pdata->panel_info.bpp);
+	vbp = mult_frac(pdata->panel_info.lcdc.v_back_porch, dst_bpp,
+			pdata->panel_info.bpp);
+	vfp = mult_frac(pdata->panel_info.lcdc.v_front_porch, dst_bpp,
+			pdata->panel_info.bpp);
+	hspw = mult_frac(pdata->panel_info.lcdc.h_pulse_width, dst_bpp,
+			pdata->panel_info.bpp);
 	vspw = pdata->panel_info.lcdc.v_pulse_width;
-	width = pdata->panel_info.xres;
+	width = mult_frac(pdata->panel_info.xres, dst_bpp,
+			pdata->panel_info.bpp);
 	height = pdata->panel_info.yres;
 
 	mipi  = &pdata->panel_info.mipi;
@@ -99,20 +539,20 @@
 		dummy_xres = pdata->panel_info.lcdc.xres_pad;
 		dummy_yres = pdata->panel_info.lcdc.yres_pad;
 
-		MIPI_OUTP(mdss_dsi_base + 0x24,
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x24,
 			((hspw + hbp + width + dummy_xres) << 16 |
 			(hspw + hbp)));
-		MIPI_OUTP(mdss_dsi_base + 0x28,
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x28,
 			((vspw + vbp + height + dummy_yres) << 16 |
 			(vspw + vbp)));
-		MIPI_OUTP(mdss_dsi_base + 0x2C,
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
 			(vspw + vbp + height + dummy_yres +
 				vfp - 1) << 16 | (hspw + hbp +
 				width + dummy_xres + hfp - 1));
 
-		MIPI_OUTP(mdss_dsi_base + 0x30, (hspw << 16));
-		MIPI_OUTP(mdss_dsi_base + 0x34, 0);
-		MIPI_OUTP(mdss_dsi_base + 0x38, (vspw << 16));
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x30, (hspw << 16));
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x34, 0);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x38, (vspw << 16));
 
 	} else {		/* command mode */
 		if (mipi->dst_format == DSI_CMD_DST_FORMAT_RGB888)
@@ -128,27 +568,49 @@
 
 		/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
 		data = (ystride << 16) | (mipi->vc << 8) | DTYPE_DCS_LWRITE;
-		MIPI_OUTP(mdss_dsi_base + 0x60, data);
-		MIPI_OUTP(mdss_dsi_base + 0x58, data);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x60, data);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x58, data);
 
 		/* DSI_COMMAND_MODE_MDP_STREAM_TOTAL */
 		data = height << 16 | width;
-		MIPI_OUTP(mdss_dsi_base + 0x64, data);
-		MIPI_OUTP(mdss_dsi_base + 0x5C, data);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x64, data);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x5C, data);
 	}
 
+	mdss_dsi_sw_reset(pdata);
 	mdss_dsi_host_init(mipi, pdata);
 
 	if (mipi->force_clk_lane_hs) {
 		u32 tmp;
 
-		tmp = MIPI_INP(mdss_dsi_base + 0xac);
+		tmp = MIPI_INP((ctrl_pdata->ctrl_base) + 0xac);
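+		/*
+		 * Bit 28 here requests a continuous HS clock on the clock
+		 * lane (register offset 0xac and bit position as used by
+		 * this driver).
+		 */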
 		tmp |= (1<<28);
-		MIPI_OUTP(mdss_dsi_base + 0xac, tmp);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xac, tmp);
 		wmb();
 	}
 
-	ret = panel_pdata->on(pdata);
+	pr_debug("%s-:\n", __func__);
+	return 0;
+}
+
+static int mdss_dsi_unblank(struct mdss_panel_data *pdata)
+{
+	int ret = 0;
+	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	pr_debug("%s+:\n", __func__);
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	mipi  = &pdata->panel_info.mipi;
+
+	ret = ctrl_pdata->on(pdata);
 	if (ret) {
 		pr_err("%s: unable to initialize the panel\n", __func__);
 		return ret;
@@ -157,34 +619,142 @@
 	mdss_dsi_op_mode_config(mipi->mode, pdata);
 
 	pr_debug("%s-:\n", __func__);
+
 	return ret;
 }
 
-unsigned char *mdss_dsi_get_base_adr(void)
+static int mdss_dsi_blank(struct mdss_panel_data *pdata)
 {
-	return mdss_dsi_base;
+	int ret = 0;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	pr_debug("%s+:\n", __func__);
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	mdss_dsi_op_mode_config(DSI_CMD_MODE, pdata);
+
+	ret = ctrl_pdata->off(pdata);
+	if (ret) {
+		pr_err("%s: Panel OFF failed\n", __func__);
+		return ret;
+	}
+
+	pr_debug("%s-:End\n", __func__);
+	return ret;
 }
 
-unsigned char *mdss_dsi_get_clk_base(void)
-{
-	return mdss_dsi_base;
-}
-
-static int mdss_dsi_resource_initialized;
-
-static int __devinit mdss_dsi_probe(struct platform_device *pdev)
+static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
+				  int event, void *arg)
 {
 	int rc = 0;
-	pr_debug("%s\n", __func__);
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
 
-	if (pdev->dev.of_node && !mdss_dsi_resource_initialized) {
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	pr_debug("%s+:event=%d\n", __func__, event);
+
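+	/*
+	 * The panel on/off command lists are tagged with the link state they
+	 * need: LP-mode lists run during UNBLANK/PANEL_OFF, while HS-mode
+	 * lists are deferred to PANEL_ON/BLANK when the high-speed link is up.
+	 */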
+	switch (event) {
+	case MDSS_EVENT_UNBLANK:
+		rc = mdss_dsi_on(pdata);
+		if (ctrl_pdata->on_cmds->ctrl_state == DSI_LP_MODE)
+			rc = mdss_dsi_unblank(pdata);
+		break;
+	case MDSS_EVENT_PANEL_ON:
+		if (ctrl_pdata->on_cmds->ctrl_state == DSI_HS_MODE)
+			rc = mdss_dsi_unblank(pdata);
+		break;
+	case MDSS_EVENT_BLANK:
+		if (ctrl_pdata->off_cmds->ctrl_state == DSI_HS_MODE)
+			rc = mdss_dsi_blank(pdata);
+		break;
+	case MDSS_EVENT_PANEL_OFF:
+		if (ctrl_pdata->off_cmds->ctrl_state == DSI_LP_MODE)
+			rc = mdss_dsi_blank(pdata);
+		rc = mdss_dsi_off(pdata);
+		break;
+	case MDSS_EVENT_CONT_SPLASH_FINISH:
+		if (ctrl_pdata->on_cmds->ctrl_state == DSI_LP_MODE) {
+			rc = mdss_dsi_cont_splash_on(pdata);
+		} else {
+			pr_debug("%s:event=%d, Dsi On not called: ctrl_state: %d\n",
+				 __func__, event,
+				 ctrl_pdata->on_cmds->ctrl_state);
+			rc = -EINVAL;
+		}
+		break;
+	default:
+		pr_debug("%s: unhandled event=%d\n", __func__, event);
+		break;
+	}
+	pr_debug("%s-:event=%d, rc=%d\n", __func__, event, rc);
+	return rc;
+}
+
+static int __devinit mdss_dsi_ctrl_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	u32 index;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdev->dev.of_node) {
 		struct resource *mdss_dsi_mres;
-		pdev->id = 1;
+		const char *ctrl_name;
+
+		ctrl_pdata = platform_get_drvdata(pdev);
+		if (!ctrl_pdata) {
+			ctrl_pdata = devm_kzalloc(&pdev->dev,
+				sizeof(struct mdss_dsi_ctrl_pdata), GFP_KERNEL);
+			if (!ctrl_pdata) {
+				pr_err("%s: FAILED: cannot alloc dsi ctrl\n",
+					__func__);
+				rc = -ENOMEM;
+				goto error_no_mem;
+			}
+			platform_set_drvdata(pdev, ctrl_pdata);
+		}
+
+		ctrl_name = of_get_property(pdev->dev.of_node, "label", NULL);
+		if (!ctrl_name)
+			pr_info("%s:%d, DSI Ctrl name not specified\n",
+						__func__, __LINE__);
+		else
+			pr_info("%s: DSI Ctrl name = %s\n",
+				__func__, ctrl_name);
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+					  "cell-index", &index);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"%s: Cell-index not specified, rc=%d\n",
+							__func__, rc);
+			goto error_no_mem;
+		}
+
+		if (index == 0)
+			pdev->id = 1;
+		else
+			pdev->id = 2;
+
 		mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 		if (!mdss_dsi_mres) {
 			pr_err("%s:%d unable to get the MDSS resources",
 				       __func__, __LINE__);
-			return -ENOMEM;
+			rc = -ENOMEM;
+			goto error_no_mem;
 		}
 		if (mdss_dsi_mres) {
 			mdss_dsi_base = ioremap(mdss_dsi_mres->start,
@@ -192,38 +762,59 @@
 			if (!mdss_dsi_base) {
 				pr_err("%s:%d unable to remap dsi resources",
 					       __func__, __LINE__);
-				return -ENOMEM;
+				rc = -ENOMEM;
+				goto error_no_mem;
 			}
 		}
 
-		if (mdss_dsi_clk_init(pdev)) {
-			iounmap(mdss_dsi_base);
-			return -EPERM;
-		}
-
 		rc = of_platform_populate(pdev->dev.of_node,
 					NULL, NULL, &pdev->dev);
 		if (rc) {
 			dev_err(&pdev->dev,
 				"%s: failed to add child nodes, rc=%d\n",
 							__func__, rc);
-			iounmap(mdss_dsi_base);
-			return rc;
+			goto error_ioremap;
 		}
 
-		mdss_dsi_resource_initialized = 1;
+		/* Parse the regulator information */
+		rc = mdss_dsi_get_dt_vreg_data(&pdev->dev,
+			&ctrl_pdata->power_data);
+		if (rc) {
+			pr_err("%s: failed to get vreg data from dt. rc=%d\n",
+				__func__, rc);
+			goto error_vreg;
+		}
+
+		pr_debug("%s: Dsi Ctrl->%d initialized\n", __func__, index);
 	}
 
-	if (!mdss_dsi_resource_initialized)
-		return -EPERM;
-
 	return 0;
+
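+/* Error unwind: release resources in reverse order of acquisition. */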
+error_vreg:
+	mdss_dsi_put_dt_vreg_data(&pdev->dev, &ctrl_pdata->power_data);
+error_ioremap:
+	iounmap(mdss_dsi_base);
+error_no_mem:
+	if (ctrl_pdata)
+		devm_kfree(&pdev->dev, ctrl_pdata);
+
+	return rc;
 }
 
-static int __devexit mdss_dsi_remove(struct platform_device *pdev)
+static int __devexit mdss_dsi_ctrl_remove(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = platform_get_drvdata(pdev);
 
+	if (!ctrl_pdata) {
+		pr_err("%s: no driver data\n", __func__);
+		return -ENODEV;
+	}
+
+	if (msm_dss_config_vreg(&pdev->dev,
+			ctrl_pdata->power_data.vreg_config,
+			ctrl_pdata->power_data.num_vreg, 0) < 0)
+		pr_err("%s: failed to de-init vregs\n", __func__);
+	mdss_dsi_put_dt_vreg_data(&pdev->dev, &ctrl_pdata->power_data);
 	mfd = platform_get_drvdata(pdev);
 	iounmap(mdss_dsi_base);
 	return 0;
@@ -231,6 +822,63 @@
 
 struct device dsi_dev;
 
+int mdss_dsi_retrieve_ctrl_resources(struct platform_device *pdev, int mode,
+			struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int rc = 0;
+	u32 index;
+	struct resource *mdss_dsi_mres;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: Cell-index not specified, rc=%d\n",
+						__func__, rc);
+		return rc;
+	}
+
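+	/* cell-index 0 is DSI0 driving DISPLAY_1; index 1 is DSI1/DISPLAY_2. */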
+	if (index == 0) {
+		if (mode != DISPLAY_1) {
+			pr_err("%s:%d Panel->Ctrl mapping is wrong",
+				       __func__, __LINE__);
+			return -EPERM;
+		}
+	} else if (index == 1) {
+		if (mode != DISPLAY_2) {
+			pr_err("%s:%d Panel->Ctrl mapping is wrong",
+				       __func__, __LINE__);
+			return -EPERM;
+		}
+	} else {
+		pr_err("%s:%d Unknown Ctrl mapped to panel",
+			       __func__, __LINE__);
+		return -EPERM;
+	}
+
+	mdss_dsi_mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mdss_dsi_mres) {
+		pr_err("%s:%d unable to get the DSI ctrl resources",
+			       __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	ctrl->ctrl_base = ioremap(mdss_dsi_mres->start,
+		resource_size(mdss_dsi_mres));
+	if (!(ctrl->ctrl_base)) {
+		pr_err("%s:%d unable to remap dsi resources",
+			       __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	ctrl->reg_size = resource_size(mdss_dsi_mres);
+
+	pr_info("%s: dsi base=%x size=%x\n",
+		__func__, (int)ctrl->ctrl_base, ctrl->reg_size);
+
+	return 0;
+}
+
 int dsi_panel_device_register(struct platform_device *pdev,
 			      struct mdss_panel_common_pdata *panel_data)
 {
@@ -238,23 +886,25 @@
 	int rc;
 	u8 lanes = 0, bpp;
 	u32 h_period, v_period, dsi_pclk_rate;
-	struct mdss_panel_data *pdata = NULL;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata;
+	struct device_node *dsi_ctrl_np = NULL;
+	struct platform_device *ctrl_pdev = NULL;
+	bool broadcast;
+	bool cont_splash_enabled = false;
 
-	panel_pdata = panel_data;
+	h_period = ((panel_data->panel_info.lcdc.h_pulse_width)
+			+ (panel_data->panel_info.lcdc.h_back_porch)
+			+ (panel_data->panel_info.xres)
+			+ (panel_data->panel_info.lcdc.h_front_porch));
 
-	h_period = ((panel_pdata->panel_info.lcdc.h_pulse_width)
-			+ (panel_pdata->panel_info.lcdc.h_back_porch)
-			+ (panel_pdata->panel_info.xres)
-			+ (panel_pdata->panel_info.lcdc.h_front_porch));
+	v_period = ((panel_data->panel_info.lcdc.v_pulse_width)
+			+ (panel_data->panel_info.lcdc.v_back_porch)
+			+ (panel_data->panel_info.yres)
+			+ (panel_data->panel_info.lcdc.v_front_porch));
 
-	v_period = ((panel_pdata->panel_info.lcdc.v_pulse_width)
-			+ (panel_pdata->panel_info.lcdc.v_back_porch)
-			+ (panel_pdata->panel_info.yres)
-			+ (panel_pdata->panel_info.lcdc.v_front_porch));
+	mipi  = &panel_data->panel_info.mipi;
 
-	mipi  = &panel_pdata->panel_info.mipi;
-
-	panel_pdata->panel_info.type =
+	panel_data->panel_info.type =
 		((mipi->mode == DSI_VIDEO_MODE)
 			? MIPI_VIDEO_PANEL : MIPI_CMD_PANEL);
 
@@ -278,23 +928,22 @@
 	else
 		bpp = 3;		/* Default format set to RGB888 */
 
-	if (panel_pdata->panel_info.type == MIPI_VIDEO_PANEL &&
-		!panel_pdata->panel_info.clk_rate) {
-		h_period += panel_pdata->panel_info.lcdc.xres_pad;
-		v_period += panel_pdata->panel_info.lcdc.yres_pad;
+	if (!panel_data->panel_info.clk_rate) {
+		h_period += panel_data->panel_info.lcdc.xres_pad;
+		v_period += panel_data->panel_info.lcdc.yres_pad;
 
 		if (lanes > 0) {
-			panel_pdata->panel_info.clk_rate =
+			panel_data->panel_info.clk_rate =
 			((h_period * v_period * (mipi->frame_rate) * bpp * 8)
 			   / lanes);
 		} else {
 			pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
-			panel_pdata->panel_info.clk_rate =
+			panel_data->panel_info.clk_rate =
 				(h_period * v_period
 					 * (mipi->frame_rate) * bpp * 8);
 		}
 	}
-	pll_divider_config.clk_rate = panel_pdata->panel_info.clk_rate;
+	pll_divider_config.clk_rate = panel_data->panel_info.clk_rate;
 
 	rc = mdss_dsi_clk_div_config(bpp, lanes, &dsi_pclk_rate);
 	if (rc) {
@@ -302,57 +951,216 @@
 		return rc;
 	}
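+	/*
+	 * Sanity-clamp the derived pixel clock; fall back to a safe 35 MHz
+	 * default if the divider configuration produced an out-of-range rate.
+	 */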
 
-	if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 103300000))
+	if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 250000000))
 		dsi_pclk_rate = 35000000;
 	mipi->dsi_pclk_rate = dsi_pclk_rate;
 
-	/*
-	 * data chain
-	 */
-	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
-		return -ENOMEM;
+	dsi_ctrl_np = of_parse_phandle(pdev->dev.of_node,
+				       "qcom,dsi-ctrl-phandle", 0);
+	if (!dsi_ctrl_np) {
+		pr_err("%s: Dsi controller node not initialized\n", __func__);
+		return -EPROBE_DEFER;
+	}
 
-	pdata->on = mdss_dsi_on;
-	pdata->off = mdss_dsi_off;
-	memcpy(&(pdata->panel_info), &(panel_pdata->panel_info),
-	       sizeof(struct mdss_panel_info));
+	ctrl_pdev = of_find_device_by_node(dsi_ctrl_np);
+	ctrl_pdata = platform_get_drvdata(ctrl_pdev);
+	if (!ctrl_pdata) {
+		pr_err("%s: no dsi ctrl driver data\n", __func__);
+		return -EINVAL;
+	}
 
-	pdata->dsi_base = mdss_dsi_base;
+	rc = mdss_dsi_regulator_init(ctrl_pdev);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: failed to init regulator, rc=%d\n",
+						__func__, rc);
+		return rc;
+	}
+
+	broadcast = of_property_read_bool(pdev->dev.of_node,
+					  "qcom,mdss-pan-broadcast-mode");
+	if (broadcast)
+		ctrl_pdata->shared_pdata.broadcast_enable = 1;
+
+	ctrl_pdata->disp_en_gpio = of_get_named_gpio(pdev->dev.of_node,
+						     "qcom,enable-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+		pr_err("%s:%d, Disp_en gpio not specified\n",
+						__func__, __LINE__);
+	} else {
+		rc = gpio_request(ctrl_pdata->disp_en_gpio, "disp_enable");
+		if (rc) {
+			pr_err("request reset gpio failed, rc=%d\n",
+			       rc);
+			gpio_free(ctrl_pdata->disp_en_gpio);
+			return -ENODEV;
+		}
+	}
+
+	ctrl_pdata->disp_te_gpio = of_get_named_gpio(pdev->dev.of_node,
+						     "qcom,te-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->disp_te_gpio)) {
+		pr_err("%s:%d, Disp_te gpio not specified\n",
+						__func__, __LINE__);
+	} else {
+		rc = gpio_request(ctrl_pdata->disp_te_gpio, "disp_te");
+		if (rc) {
+			pr_err("request TE gpio failed, rc=%d\n",
+			       rc);
+			gpio_free(ctrl_pdata->disp_te_gpio);
+			return -ENODEV;
+		}
+		rc = gpio_tlmm_config(GPIO_CFG(
+				ctrl_pdata->disp_te_gpio, 1,
+				GPIO_CFG_INPUT,
+				GPIO_CFG_PULL_DOWN,
+				GPIO_CFG_2MA),
+				GPIO_CFG_ENABLE);
+
+		if (rc) {
+			pr_err("%s: unable to config tlmm = %d\n",
+				__func__, ctrl_pdata->disp_te_gpio);
+			gpio_free(ctrl_pdata->disp_te_gpio);
+			return -ENODEV;
+		}
+
+		rc = gpio_direction_input(ctrl_pdata->disp_te_gpio);
+		if (rc) {
+			pr_err("set_direction for disp_en gpio failed, rc=%d\n",
+			       rc);
+			gpio_free(ctrl_pdata->disp_te_gpio);
+			return -ENODEV;
+		}
+		pr_debug("%s: te_gpio=%d\n", __func__,
+					ctrl_pdata->disp_te_gpio);
+	}
+
+	ctrl_pdata->rst_gpio = of_get_named_gpio(pdev->dev.of_node,
+						 "qcom,rst-gpio", 0);
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
+		pr_err("%s:%d, reset gpio not specified\n",
+						__func__, __LINE__);
+	} else {
+		rc = gpio_request(ctrl_pdata->rst_gpio, "disp_rst_n");
+		if (rc) {
+			pr_err("request reset gpio failed, rc=%d\n",
+				rc);
+			gpio_free(ctrl_pdata->rst_gpio);
+			if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+				gpio_free(ctrl_pdata->disp_en_gpio);
+			return -ENODEV;
+		}
+	}
+
+	if (mdss_dsi_clk_init(ctrl_pdev, ctrl_pdata)) {
+		pr_err("%s: unable to initialize Dsi ctrl clks\n", __func__);
+		return -EPERM;
+	}
+
+	if (mdss_dsi_retrieve_ctrl_resources(ctrl_pdev,
+					     panel_data->panel_info.pdest,
+					     ctrl_pdata)) {
+		pr_err("%s: unable to get Dsi controller res\n", __func__);
+		return -EPERM;
+	}
+
+	ctrl_pdata->panel_data.event_handler = mdss_dsi_event_handler;
+
+	ctrl_pdata->on_cmds = panel_data->dsi_panel_on_cmds;
+	ctrl_pdata->off_cmds = panel_data->dsi_panel_off_cmds;
+
+	memcpy(&((ctrl_pdata->panel_data).panel_info),
+				&(panel_data->panel_info),
+				       sizeof(struct mdss_panel_info));
+
+	mdss_dsi_irq_handler_config(ctrl_pdata);
+	ctrl_pdata->panel_data.set_backlight = panel_data->bl_fnc;
+	ctrl_pdata->bklt_ctrl = panel_data->panel_info.bklt_ctrl;
+	ctrl_pdata->pwm_gpio = panel_data->panel_info.pwm_gpio;
+	ctrl_pdata->pwm_period = panel_data->panel_info.pwm_period;
+	ctrl_pdata->pwm_lpg_chan = panel_data->panel_info.pwm_lpg_chan;
+	ctrl_pdata->bklt_max = panel_data->panel_info.bl_max;
+
+	if (ctrl_pdata->bklt_ctrl == BL_PWM)
+		mdss_dsi_panel_pwm_cfg(ctrl_pdata);
 
 	/*
 	 * register in mdp driver
 	 */
-	rc = mdss_register_panel(pdata);
+
+	cont_splash_enabled = of_property_read_bool(pdev->dev.of_node,
+			"qcom,cont-splash-enabled");
+	if (!cont_splash_enabled) {
+		pr_info("%s:%d Continous splash flag not found.\n",
+				__func__, __LINE__);
+		ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 0;
+		ctrl_pdata->panel_data.panel_info.panel_power_on = 0;
+	} else {
+		pr_info("%s:%d Continous splash flag enabled.\n",
+				__func__, __LINE__);
+
+		ctrl_pdata->panel_data.panel_info.cont_splash_enabled = 1;
+		ctrl_pdata->panel_data.panel_info.panel_power_on = 1;
+	}
+
+	if (ctrl_pdata->panel_data.panel_info.cont_splash_enabled) {
+		mdss_dsi_prepare_clocks(ctrl_pdata);
+		mdss_dsi_clk_enable(&(ctrl_pdata->panel_data));
+	}
+
+	rc = mdss_register_panel(ctrl_pdev, &(ctrl_pdata->panel_data));
 	if (rc) {
 		dev_err(&pdev->dev, "unable to register MIPI DSI panel\n");
-		devm_kfree(&pdev->dev, pdata);
+		if (gpio_is_valid(ctrl_pdata->rst_gpio))
+			gpio_free(ctrl_pdata->rst_gpio);
+		if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+			gpio_free(ctrl_pdata->disp_en_gpio);
 		return rc;
 	}
 
+	ctrl_pdata->on = panel_data->on;
+	ctrl_pdata->off = panel_data->off;
+
+	ctrl_pdata->pclk_rate = dsi_pclk_rate;
+	ctrl_pdata->byte_clk_rate = panel_data->panel_info.clk_rate / 8;
+	pr_debug("%s: pclk=%d, bclk=%d\n", __func__,
+			ctrl_pdata->pclk_rate, ctrl_pdata->byte_clk_rate);
+
+	if (panel_data->panel_info.pdest == DISPLAY_1) {
+		mdss_debug_register_base("dsi0",
+			ctrl_pdata->ctrl_base, ctrl_pdata->reg_size);
+		ctrl_pdata->ndx = 0;
+	} else {
+		mdss_debug_register_base("dsi1",
+			ctrl_pdata->ctrl_base, ctrl_pdata->reg_size);
+		ctrl_pdata->ndx = 1;
+	}
+
 	pr_debug("%s: Panal data initialized\n", __func__);
 	return 0;
 }
 
-static const struct of_device_id msm_mdss_dsi_dt_match[] = {
-	{.compatible = "qcom,msm-mdss-dsi"},
+static const struct of_device_id mdss_dsi_ctrl_dt_match[] = {
+	{.compatible = "qcom,mdss-dsi-ctrl"},
 	{}
 };
-MODULE_DEVICE_TABLE(of, msm_mdss_dsi_dt_match);
+MODULE_DEVICE_TABLE(of, mdss_dsi_ctrl_dt_match);
 
-static struct platform_driver mdss_dsi_driver = {
-	.probe = mdss_dsi_probe,
-	.remove = __devexit_p(mdss_dsi_remove),
+static struct platform_driver mdss_dsi_ctrl_driver = {
+	.probe = mdss_dsi_ctrl_probe,
+	.remove = __devexit_p(mdss_dsi_ctrl_remove),
 	.shutdown = NULL,
 	.driver = {
-		.name = "mdss_dsi",
-		.of_match_table = msm_mdss_dsi_dt_match,
+		.name = "mdss_dsi_ctrl",
+		.of_match_table = mdss_dsi_ctrl_dt_match,
 	},
 };
 
 static int mdss_dsi_register_driver(void)
 {
-	return platform_driver_register(&mdss_dsi_driver);
+	return platform_driver_register(&mdss_dsi_ctrl_driver);
 }
 
 static int __init mdss_dsi_driver_init(void)
@@ -374,7 +1182,7 @@
 static void __exit mdss_dsi_driver_cleanup(void)
 {
 	iounmap(mdss_dsi_base);
-	platform_driver_unregister(&mdss_dsi_driver);
+	platform_driver_unregister(&mdss_dsi_ctrl_driver);
 }
 module_exit(mdss_dsi_driver_cleanup);
 
diff --git a/drivers/video/msm/mdss/mdss_dsi.h b/drivers/video/msm/mdss/mdss_dsi.h
index 52baa3e..197ff7a 100644
--- a/drivers/video/msm/mdss/mdss_dsi.h
+++ b/drivers/video/msm/mdss/mdss_dsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,8 +18,8 @@
 #include <mach/scm-io.h>
 
 #include "mdss_panel.h"
+#include "mdss_io_util.h"
 
-#define MMSS_MDSS_CC_BASE_PHY 0xFD8C2300	/* mmss clcok control */
 #define MMSS_SERDES_BASE_PHY 0x04f01000 /* mmss (De)Serializer CFG */
 
 #define MIPI_OUTP(addr, data) writel_relaxed((data), (addr))
@@ -77,6 +77,18 @@
 	DSI_CMD_MODE_MDP,
 };
 
+enum dsi_panel_bl_ctrl {
+	BL_PWM,
+	BL_WLED,
+	BL_DCS_CMD,
+	UNKNOWN_CTRL,
+};
+
+enum dsi_ctrl_state {
+	DSI_LP_MODE,
+	DSI_HS_MODE,
+};
+
 #define DSI_NON_BURST_SYNCH_PULSE	0
 #define DSI_NON_BURST_SYNCH_EVENT	1
 #define DSI_BURST_MODE			2
@@ -238,10 +250,56 @@
 	void *data;
 };
 
+struct dsi_panel_cmds_list {
+	struct dsi_cmd_desc *buf;
+	u32 size;
+	char ctrl_state;
+};
+
 struct mdss_panel_common_pdata {
 	struct mdss_panel_info panel_info;
 	int (*on) (struct mdss_panel_data *pdata);
 	int (*off) (struct mdss_panel_data *pdata);
+	void (*bl_fnc) (struct mdss_panel_data *pdata, u32 bl_level);
+	struct dsi_panel_cmds_list *dsi_panel_on_cmds;
+	struct dsi_panel_cmds_list *dsi_panel_off_cmds;
+};
+
+struct dsi_drv_cm_data {
+	struct regulator *vdd_vreg;
+	struct regulator *vdd_io_vreg;
+	struct regulator *vdda_vreg;
+	int broadcast_enable;
+};
+
+struct mdss_dsi_ctrl_pdata {
+	int ndx;
+	int (*on) (struct mdss_panel_data *pdata);
+	int (*off) (struct mdss_panel_data *pdata);
+	struct mdss_panel_data panel_data;
+	struct mdss_hw *mdss_hw;
+	unsigned char *ctrl_base;
+	int reg_size;
+	struct clk *byte_clk;
+	struct clk *esc_clk;
+	struct clk *pixel_clk;
+	int irq_cnt;
+	int mdss_dsi_clk_on;
+	int rst_gpio;
+	int disp_en_gpio;
+	int disp_te_gpio;
+	int bklt_ctrl;	/* backlight ctrl */
+	int pwm_period;
+	int pwm_gpio;
+	int pwm_lpg_chan;
+	int bklt_max;
+	struct pwm_device *pwm_bl;
+	struct dsi_panel_cmds_list *on_cmds;
+	struct dsi_panel_cmds_list *off_cmds;
+	struct dsi_drv_cm_data shared_pdata;
+	u32 pclk_rate;
+	u32 byte_clk_rate;
+	struct dss_module_power power_data;
 };
 
 int dsi_panel_device_register(struct platform_device *pdev,
@@ -258,7 +316,7 @@
 int mdss_dsi_cmd_dma_tx(struct dsi_buf *dp,
 				struct mdss_panel_data *pdata);
 int mdss_dsi_cmd_reg_tx(u32 data,
-				struct mdss_panel_data *pdata);
+				unsigned char *ctrl_base);
 int mdss_dsi_cmds_rx(struct mdss_panel_data *pdata,
 			struct dsi_buf *tp, struct dsi_buf *rp,
 			struct dsi_cmd_desc *cmds, int len);
@@ -270,25 +328,31 @@
 				struct mdss_panel_data *pdata);
 void mdss_dsi_cmd_mode_ctrl(int enable);
 void mdp4_dsi_cmd_trigger(void);
-void mdss_dsi_cmd_mdp_start(void);
+void mdss_dsi_cmd_mdp_start(struct mdss_panel_data *pdata);
 void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
 void mdss_dsi_ack_err_status(unsigned char *dsi_base);
-void mdss_dsi_clk_enable(void);
-void mdss_dsi_clk_disable(void);
+void mdss_dsi_clk_enable(struct mdss_panel_data *pdata);
+void mdss_dsi_clk_disable(struct mdss_panel_data *pdata);
 void mdss_dsi_controller_cfg(int enable,
 				struct mdss_panel_data *pdata);
 void mdss_dsi_sw_reset(struct mdss_panel_data *pdata);
 
 irqreturn_t mdss_dsi_isr(int irq, void *ptr);
+void mdss_dsi_irq_handler_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
 
 void mipi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata);
 int mdss_dsi_clk_div_config(u8 bpp, u8 lanes,
 			    u32 *expected_dsi_pclk);
-int mdss_dsi_clk_init(struct platform_device *pdev);
-void mdss_dsi_clk_deinit(struct device *dev);
-void mdss_dsi_prepare_clocks(void);
-void mdss_dsi_unprepare_clocks(void);
-void cont_splash_clk_ctrl(int enable);
-unsigned char *mdss_dsi_get_base_adr(void);
+int mdss_dsi_clk_init(struct platform_device *pdev,
+		      struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_clk_deinit(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_prepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_unprepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata);
+void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable);
+void mdss_dsi_phy_enable(unsigned char *ctrl_base, int on);
+void mdss_dsi_phy_init(struct mdss_panel_data *pdata);
+void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base);
+void mdss_dsi_cmd_test_pattern(struct mdss_panel_data *pdata);
+void mdss_dsi_panel_pwm_cfg(struct mdss_dsi_ctrl_pdata *ctrl);
 
 #endif /* MDSS_DSI_H */
diff --git a/drivers/video/msm/mdss/mdss_dsi_host.c b/drivers/video/msm/mdss/mdss_dsi_host.c
index adaf52b..ccec0fc 100644
--- a/drivers/video/msm/mdss/mdss_dsi_host.c
+++ b/drivers/video/msm/mdss/mdss_dsi_host.c
@@ -1,5 +1,4 @@
-
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,19 +20,26 @@
 #include <linux/slab.h>
 #include <linux/iopoll.h>
 
+#include <mach/iommu_domains.h>
+
 #include "mdss.h"
 #include "mdss_dsi.h"
 
 static struct completion dsi_dma_comp;
-static int dsi_irq_enabled;
 static spinlock_t dsi_irq_lock;
 static spinlock_t dsi_mdp_lock;
 static int dsi_mdp_busy;
+static struct mdss_dsi_ctrl_pdata *left_ctrl_pdata;
 
-spinlock_t dsi_clk_lock;
-
-struct mdss_hw mdss_dsi_hw = {
+struct mdss_hw mdss_dsi0_hw = {
 	.hw_ndx = MDSS_HW_DSI0,
+	.ptr = NULL,
+	.irq_handler = mdss_dsi_isr,
+};
+
+struct mdss_hw mdss_dsi1_hw = {
+	.hw_ndx = MDSS_HW_DSI1,
+	.ptr = NULL,
 	.irq_handler = mdss_dsi_isr,
 };
 
@@ -42,59 +48,50 @@
 	init_completion(&dsi_dma_comp);
 	spin_lock_init(&dsi_irq_lock);
 	spin_lock_init(&dsi_mdp_lock);
-	spin_lock_init(&dsi_clk_lock);
 }
 
-void mdss_dsi_enable_irq(void)
+void mdss_dsi_irq_handler_config(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	if (ctrl->panel_data.panel_info.pdest == DISPLAY_1) {
+		mdss_dsi0_hw.ptr = (void *)(ctrl);
+		ctrl->mdss_hw = &mdss_dsi0_hw;
+	} else {
+		mdss_dsi1_hw.ptr = (void *)(ctrl);
+		ctrl->mdss_hw = &mdss_dsi1_hw;
+	}
+}
+
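+/*
+ * Reference-counted irq enable per controller: the first enable turns the
+ * shared MDSS interrupt on and the last disable turns it off. isr=1 selects
+ * the nosync disable so this can be called from interrupt context.
+ */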
+void mdss_dsi_irq_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, int enable, int isr)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&dsi_irq_lock, flags);
-	if (dsi_irq_enabled) {
-		pr_debug("%s: IRQ aleady enabled\n", __func__);
-		spin_unlock_irqrestore(&dsi_irq_lock, flags);
+	if (ctrl == NULL) {
+		pr_err("%s: Invalid ctrl\n", __func__);
 		return;
 	}
-	mdss_enable_irq(&mdss_dsi_hw);
-	dsi_irq_enabled = 1;
-	/* TO DO: Check whether MDSS IRQ is enabled */
-	spin_unlock_irqrestore(&dsi_irq_lock, flags);
-}
-
-void mdss_dsi_disable_irq(void)
-{
-	unsigned long flags;
 
 	spin_lock_irqsave(&dsi_irq_lock, flags);
-	if (dsi_irq_enabled == 0) {
-		pr_debug("%s: IRQ already disabled\n", __func__);
-		spin_unlock_irqrestore(&dsi_irq_lock, flags);
-		return;
+	if (enable) {
+		if (ctrl->irq_cnt == 0)
+			mdss_enable_irq(ctrl->mdss_hw);
+		ctrl->irq_cnt++;
+	} else {
+		if (ctrl->irq_cnt) {
+			ctrl->irq_cnt--;
+			if (ctrl->irq_cnt == 0) {
+				if (isr)
+					mdss_disable_irq_nosync(ctrl->mdss_hw);
+				else
+					mdss_disable_irq(ctrl->mdss_hw);
+			}
+		}
 	}
-	mdss_disable_irq(&mdss_dsi_hw);
-	dsi_irq_enabled = 0;
-	/* TO DO: Check whether MDSS IRQ is Disabled */
+	pr_debug("%s: ctrl=%d enable=%d cnt=%d\n", __func__,
+					ctrl->ndx, enable, ctrl->irq_cnt);
 	spin_unlock_irqrestore(&dsi_irq_lock, flags);
 }
 
 /*
- * mdss_dsi_disale_irq_nosync() should be called
- * from interrupt context
- */
-void mdss_dsi_disable_irq_nosync(void)
-{
-	spin_lock(&dsi_irq_lock);
-	if (dsi_irq_enabled == 0) {
-		pr_debug("%s: IRQ cannot be disabled\n", __func__);
-		spin_unlock(&dsi_irq_lock);
-		return;
-	}
-
-	dsi_irq_enabled = 0;
-	spin_unlock(&dsi_irq_lock);
-}
-
-/*
  * mipi dsi buf mechanism
  */
 char *mdss_dsi_buf_reserve(struct dsi_buf *dp, int len)
@@ -648,11 +645,44 @@
 	return len;
 }
 
+void mdss_dsi_cmd_test_pattern(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	int i;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x015c, 0x201);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x016c, 0xff0000); /* red */
+	i = 0;
+	while (i++ < 50) {
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0184, 0x1);
+		/* Add sleep to get ~50 fps frame rate*/
+		msleep(20);
+	}
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x015c, 0x0);
+}
+
 void mdss_dsi_host_init(struct mipi_panel_info *pinfo,
 				struct mdss_panel_data *pdata)
 {
 	u32 dsi_ctrl, intr_ctrl;
 	u32 data;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
 
 	pinfo->rgb_swap = DSI_RGB_SWAP_RGB;
 
@@ -673,7 +703,7 @@
 		data |= ((pinfo->traffic_mode & 0x03) << 8);
 		data |= ((pinfo->dst_format & 0x03) << 4); /* 2 bits */
 		data |= (pinfo->vc & 0x03);
-		MIPI_OUTP((pdata->dsi_base) + 0x0010, data);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0010, data);
 
 		data = 0;
 		data |= ((pinfo->rgb_swap & 0x07) << 12);
@@ -683,7 +713,7 @@
 			data |= BIT(4);
 		if (pinfo->r_sel)
 			data |= BIT(0);
-		MIPI_OUTP((pdata->dsi_base) + 0x0020, data);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0020, data);
 	} else if (pinfo->mode == DSI_CMD_MODE) {
 		data = 0;
 		data |= ((pinfo->interleave_max & 0x0f) << 20);
@@ -695,7 +725,7 @@
 		if (pinfo->r_sel)
 			data |= BIT(4);
 		data |= (pinfo->dst_format & 0x0f);	/* 4 bits */
-		MIPI_OUTP((pdata->dsi_base) + 0x003c, data);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0040, data);
 
 		/* DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL */
 		data = pinfo->wr_mem_continue & 0x0ff;
@@ -703,7 +733,7 @@
 		data |= (pinfo->wr_mem_start & 0x0ff);
 		if (pinfo->insert_dcs_cmd)
 			data |= BIT(16);
-		MIPI_OUTP((pdata->dsi_base) + 0x0044, data);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0044, data);
 	} else
 		pr_err("%s: Unknown DSI mode=%d\n", __func__, pinfo->mode);
 
@@ -726,7 +756,17 @@
 
 	/* from frame buffer, low power mode */
 	/* DSI_COMMAND_MODE_DMA_CTRL */
-	MIPI_OUTP((pdata->dsi_base) + 0x3C, 0x14000000);
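+	/*
+	 * In broadcast (dual-DSI) mode the DMA control word sets an extra
+	 * high bit (0x94000000 vs 0x14000000) so commands queued on the
+	 * master are driven out on both links (bit meaning assumed).
+	 */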
+	if (ctrl_pdata->shared_pdata.broadcast_enable)
+		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x3C, 0x94000000);
+	else
+		MIPI_OUTP(ctrl_pdata->ctrl_base + 0x3C, 0x14000000);
+
+	if (ctrl_pdata->shared_pdata.broadcast_enable)
+		if (pdata->panel_info.pdest == DISPLAY_1) {
+			pr_debug("%s: Broadcast mode enabled.\n",
+				 __func__);
+			left_ctrl_pdata = ctrl_pdata;
+		}
 
 	data = 0;
 	if (pinfo->te_sel)
@@ -734,59 +774,95 @@
 	data |= pinfo->mdp_trigger << 4;/* cmd mdp trigger */
 	data |= pinfo->dma_trigger;	/* cmd dma trigger */
 	data |= (pinfo->stream & 0x01) << 8;
-	MIPI_OUTP((pdata->dsi_base) + 0x0084, data); /* DSI_TRIG_CTRL */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0084,
+				data); /* DSI_TRIG_CTRL */
 
 	/* DSI_LAN_SWAP_CTRL */
-	MIPI_OUTP((pdata->dsi_base) + 0x00b0, pinfo->dlane_swap);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00b0, pinfo->dlane_swap);
 
 	/* clock out ctrl */
 	data = pinfo->t_clk_post & 0x3f;	/* 6 bits */
 	data <<= 8;
 	data |= pinfo->t_clk_pre & 0x3f;	/*  6 bits */
 	/* DSI_CLKOUT_TIMING_CTRL */
-	MIPI_OUTP((pdata->dsi_base) + 0xc4, data);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0xc4, data);
 
 	data = 0;
 	if (pinfo->rx_eot_ignore)
 		data |= BIT(4);
 	if (pinfo->tx_eot_append)
 		data |= BIT(0);
-	MIPI_OUTP((pdata->dsi_base) + 0x00cc, data); /* DSI_EOT_PACKET_CTRL */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x00cc,
+				data); /* DSI_EOT_PACKET_CTRL */
 
 
 	/* allow only ack-err-status  to generate interrupt */
 	/* DSI_ERR_INT_MASK0 */
-	MIPI_OUTP((pdata->dsi_base) + 0x010c, 0x13ff3fe0);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x010c, 0x13ff3fe0);
 
 	intr_ctrl |= DSI_INTR_ERROR_MASK;
-	MIPI_OUTP((pdata->dsi_base) + 0x0110, intr_ctrl); /* DSI_INTL_CTRL */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0110,
+				intr_ctrl); /* DSI_INTL_CTRL */
 
 	/* turn esc, byte, dsi, pclk, sclk, hclk on */
-	MIPI_OUTP((pdata->dsi_base) + 0x11c, 0x23f); /* DSI_CLK_CTRL */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x11c,
+					0x23f); /* DSI_CLK_CTRL */
 
 	dsi_ctrl |= BIT(0);	/* enable dsi */
-	MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
+	mdss_dsi_irq_ctrl(ctrl_pdata, 1, 0); /* enable dsi irq */
 
 	wmb();
 }
 
 void mipi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata)
 {
-	u32 data = MIPI_INP((pdata->dsi_base) + 0x3c);
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	u32 data;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	data = MIPI_INP((ctrl_pdata->ctrl_base) + 0x3c);
 
 	if (mode == 0)
 		data &= ~BIT(26);
 	else
 		data |= BIT(26);
 
-	MIPI_OUTP((pdata->dsi_base) + 0x3c, data);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x3c, data);
 }
 
 void mdss_dsi_sw_reset(struct mdss_panel_data *pdata)
 {
-	MIPI_OUTP((pdata->dsi_base) + 0x118, 0x01);
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	u32 dsi_ctrl;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
+	dsi_ctrl &= ~0x01;
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
 	wmb();
-	MIPI_OUTP((pdata->dsi_base) + 0x118, 0x00);
+
+	/* turn esc, byte, dsi, pclk, sclk, hclk on */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x11c,
+					0x23f); /* DSI_CLK_CTRL */
+	wmb();
+
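+	/* Pulse the soft reset register (0x118) high then low. */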
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x118, 0x01);
+	wmb();
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x118, 0x00);
 	wmb();
 }
 
@@ -798,87 +874,135 @@
 	u32 status;
 	u32 sleep_us = 1000;
 	u32 timeout_us = 16000;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
 
 	/* Check for CMD_MODE_DMA_BUSY */
-	if (readl_poll_timeout(((pdata->dsi_base) + 0x0008),
+	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
 			   status,
 			   ((status & 0x02) == 0),
 			       sleep_us, timeout_us))
 		pr_info("%s: DSI status=%x failed\n", __func__, status);
 
 	/* Check for x_HS_FIFO_EMPTY */
-	if (readl_poll_timeout(((pdata->dsi_base) + 0x000c),
+	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x000c),
 			   status,
 			   ((status & 0x11111000) == 0x11111000),
 			       sleep_us, timeout_us))
 		pr_info("%s: FIFO status=%x failed\n", __func__, status);
 
-	dsi_ctrl = MIPI_INP((pdata->dsi_base) + 0x0004);
+	/* Check for VIDEO_MODE_ENGINE_BUSY */
+	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
+			   status,
+			   ((status & 0x08) == 0),
+			       sleep_us, timeout_us)) {
+		pr_debug("%s: DSI status=%x\n", __func__, status);
+		pr_debug("%s: Doing sw reset\n", __func__);
+		mdss_dsi_sw_reset(pdata);
+	}
+
+	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
 	if (enable)
 		dsi_ctrl |= 0x01;
 	else
 		dsi_ctrl &= ~0x01;
 
-	MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
 	wmb();
 }
 
 void mdss_dsi_op_mode_config(int mode,
 			     struct mdss_panel_data *pdata)
 {
-
 	u32 dsi_ctrl, intr_ctrl;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
 
-	dsi_ctrl = MIPI_INP((pdata->dsi_base) + 0x0004);
-	dsi_ctrl &= ~0x07;
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	if (ctrl_pdata->shared_pdata.broadcast_enable)
+		if (pdata->panel_info.pdest == DISPLAY_1) {
+			pr_debug("%s: Broadcast mode. 1st ctrl\n",
+				 __func__);
+			return;
+		}
+
+	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
+	/* If video mode is enabled, keep video and cmd modes ON */
+	if (dsi_ctrl & 0x02)
+		dsi_ctrl &= ~0x05;
+	else
+		dsi_ctrl &= ~0x07;
+
 	if (mode == DSI_VIDEO_MODE) {
 		dsi_ctrl |= 0x03;
 		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK;
 	} else {		/* command mode */
 		dsi_ctrl |= 0x05;
+		if (pdata->panel_info.type == MIPI_VIDEO_PANEL)
+			dsi_ctrl |= 0x02;
+
 		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
 				DSI_INTR_CMD_MDP_DONE_MASK;
 	}
 
-	pr_debug("%s: dsi_ctrl=%x intr=%x\n", __func__, dsi_ctrl, intr_ctrl);
+	if (ctrl_pdata->shared_pdata.broadcast_enable)
+		if ((pdata->panel_info.pdest == DISPLAY_2)
+		  && (left_ctrl_pdata != NULL)) {
+			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110,
+				  intr_ctrl); /* DSI_INTL_CTRL */
+			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
+					dsi_ctrl);
+		}
 
-	MIPI_OUTP((pdata->dsi_base) + 0x0110, intr_ctrl); /* DSI_INTL_CTRL */
-	MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0110,
+				intr_ctrl); /* DSI_INTL_CTRL */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, dsi_ctrl);
 	wmb();
 }
 
-void mdss_dsi_cmd_mdp_start(void)
-{
-	unsigned long flag;
-
-	spin_lock_irqsave(&dsi_mdp_lock, flag);
-	mdss_dsi_enable_irq();
-	dsi_mdp_busy = true;
-	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
-}
-
-
 void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata)
 {
 	u32 status;
 	int timeout_us = 10000;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
 
-	MIPI_OUTP((pdata->dsi_base) + 0x098, 0x01);	/* trigger */
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x098, 0x01);	/* trigger */
 	wmb();
 
 	/* Check for CMD_MODE_DMA_BUSY */
-	if (readl_poll_timeout(((pdata->dsi_base) + 0x0008),
+	if (readl_poll_timeout(((ctrl_pdata->ctrl_base) + 0x0008),
 				status, ((status & 0x0010) == 0),
 				0, timeout_us))
 		pr_info("%s: DSI status=%x failed\n", __func__, status);
 
-	mdss_dsi_ack_err_status((pdata->dsi_base));
+	mdss_dsi_ack_err_status((ctrl_pdata->ctrl_base));
 
 	pr_debug("%s: BTA done, status = %d\n", __func__, status);
 }
 
 int mdss_dsi_cmd_reg_tx(u32 data,
-			struct mdss_panel_data *pdata)
+			unsigned char *ctrl_base)
 {
 	int i;
 	char *bp;
@@ -890,14 +1014,14 @@
 
 	pr_debug("\n");
 
-	MIPI_OUTP((pdata->dsi_base) + 0x0084, 0x04);/* sw trigger */
-	MIPI_OUTP((pdata->dsi_base) + 0x0004, 0x135);
+	MIPI_OUTP(ctrl_base + 0x0084, 0x04);/* sw trigger */
+	MIPI_OUTP(ctrl_base + 0x0004, 0x135);
 
 	wmb();
 
-	MIPI_OUTP((pdata->dsi_base) + 0x03c, data);
+	MIPI_OUTP(ctrl_base + 0x03c, data);
 	wmb();
-	MIPI_OUTP((pdata->dsi_base) + 0x090, 0x01);	/* trigger */
+	MIPI_OUTP(ctrl_base + 0x090, 0x01);	/* trigger */
 	wmb();
 
 	udelay(300);
@@ -916,21 +1040,52 @@
 	u32 dsi_ctrl, ctrl;
 	int i, video_mode;
 	unsigned long flag;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	if (ctrl_pdata->shared_pdata.broadcast_enable)
+		if (pdata->panel_info.pdest == DISPLAY_1) {
+			pr_debug("%s: Broadcast mode. 1st ctrl\n",
+				 __func__);
+			return 0;
+		}
 
 	/* turn on cmd mode
 	* for video mode, do not send cmds more than
 	* one pixel line, since it only transmit it
 	* during BLLP.
 	*/
-	dsi_ctrl = MIPI_INP((pdata->dsi_base) + 0x0004);
+
+	if (ctrl_pdata->shared_pdata.broadcast_enable)
+		if ((pdata->panel_info.pdest == DISPLAY_2)
+		  && (left_ctrl_pdata != NULL)) {
+			dsi_ctrl = MIPI_INP(left_ctrl_pdata->ctrl_base
+								+ 0x0004);
+			video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
+			if (video_mode) {
+				ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
+				MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0004,
+						ctrl);
+			}
+		}
+
+	dsi_ctrl = MIPI_INP((ctrl_pdata->ctrl_base) + 0x0004);
 	video_mode = dsi_ctrl & 0x02; /* VIDEO_MODE_EN */
 	if (video_mode) {
 		ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
-		MIPI_OUTP((pdata->dsi_base) + 0x0004, ctrl);
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004, ctrl);
 	}
 
 	spin_lock_irqsave(&dsi_mdp_lock, flag);
-	mdss_dsi_enable_irq();
+	mdss_dsi_irq_ctrl(ctrl_pdata, 1, 0);
+
 	dsi_mdp_busy = true;
 	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
 
@@ -947,12 +1102,12 @@
 
 	spin_lock_irqsave(&dsi_mdp_lock, flag);
 	dsi_mdp_busy = false;
-	mdss_dsi_disable_irq();
+	mdss_dsi_irq_ctrl(ctrl_pdata, 0, 0);
 	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
 
 	if (video_mode)
-		MIPI_OUTP((pdata->dsi_base) + 0x0004, dsi_ctrl); /* restore */
-
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0004,
+					dsi_ctrl); /* restore */
 	return cnt;
 }
 
@@ -983,6 +1138,15 @@
 	int cnt, len, diff, pkt_size;
 	unsigned long flag;
 	char cmd;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
 
 	if (pdata->panel_info.mipi.no_max_pkt_size)
 		rlen = ALIGN(rlen, 4); /* Only support rlen = 4*n */
@@ -1010,7 +1174,7 @@
 	}
 
 	spin_lock_irqsave(&dsi_mdp_lock, flag);
-	mdss_dsi_enable_irq();
+	mdss_dsi_irq_ctrl(ctrl_pdata, 1, 0);
 	dsi_mdp_busy = true;
 	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
 
@@ -1021,6 +1185,7 @@
 		mdss_dsi_buf_init(tp);
 		mdss_dsi_cmd_dma_add(tp, pkt_size_cmd);
 		mdss_dsi_cmd_dma_tx(tp, pdata);
+		pr_debug("%s: Max packet size sent\n", __func__);
 	}
 
 	mdss_dsi_buf_init(tp);
@@ -1046,7 +1211,7 @@
 
 	spin_lock_irqsave(&dsi_mdp_lock, flag);
 	dsi_mdp_busy = false;
-	mdss_dsi_disable_irq();
+	mdss_dsi_irq_ctrl(ctrl_pdata, 0, 0);
 	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
 
 	if (pdata->panel_info.mipi.no_max_pkt_size) {
@@ -1080,6 +1245,7 @@
 		rp->len -= diff; /* align bytes */
 		break;
 	default:
+		pr_debug("%s: Unknown cmd received\n", __func__);
 		break;
 	}
 
@@ -1090,37 +1256,70 @@
 			struct mdss_panel_data *pdata)
 {
 	int len;
-	int i;
+	int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
 	char *bp;
+	unsigned long size, addr;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
 
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
 	bp = tp->data;
 
-	pr_debug("%s: ", __func__);
-	for (i = 0; i < tp->len; i++)
-		pr_debug("%x ", *bp++);
+	len = ALIGN(tp->len, 4);
+	size = ALIGN(tp->len, SZ_4K);
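+	/*
+	 * len is the 4-byte-aligned transfer length programmed into the DMA
+	 * engine; size is the 4 KB-aligned footprint used for the CPU and
+	 * IOMMU mappings.
+	 */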
 
-	pr_debug("\n");
-
-	len = tp->len;
-	len += 3;
-	len &= ~0x03;	/* multipled by 4 */
-
-	tp->dmap = dma_map_single(&dsi_dev, tp->data, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&dsi_dev, tp->dmap))
+	tp->dmap = dma_map_single(&dsi_dev, tp->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(&dsi_dev, tp->dmap)) {
 		pr_err("%s: dmap mapp failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (is_mdss_iommu_attached()) {
+		int ret = msm_iommu_map_contig_buffer(tp->dmap,
+					mdss_get_iommu_domain(domain), 0,
+					size, SZ_4K, 0, &(addr));
+		if (IS_ERR_VALUE(ret)) {
+			pr_err("unable to map dma memory to iommu(%d)\n", ret);
+			return -ENOMEM;
+		}
+	} else {
+		addr = tp->dmap;
+	}
 
 	INIT_COMPLETION(dsi_dma_comp);
 
-	MIPI_OUTP((pdata->dsi_base) + 0x048, tp->dmap);
-	MIPI_OUTP((pdata->dsi_base) + 0x04c, len);
+	if (ctrl_pdata->shared_pdata.broadcast_enable)
+		if ((pdata->panel_info.pdest == DISPLAY_2)
+		  && (left_ctrl_pdata != NULL)) {
+			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x048, addr);
+			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x04c, len);
+		}
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x048, addr);
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x04c, len);
 	wmb();
 
-	MIPI_OUTP((pdata->dsi_base) + 0x090, 0x01);	/* trigger */
+	if (ctrl_pdata->shared_pdata.broadcast_enable)
+		if ((pdata->panel_info.pdest == DISPLAY_2)
+		  && (left_ctrl_pdata != NULL)) {
+			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x090, 0x01);
+		}
+
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x090, 0x01);	/* trigger */
 	wmb();
 
 	wait_for_completion(&dsi_dma_comp);
 
-	dma_unmap_single(&dsi_dev, tp->dmap, len, DMA_TO_DEVICE);
+	if (is_mdss_iommu_attached())
+		msm_iommu_unmap_contig_buffer(addr,
+			mdss_get_iommu_domain(domain), 0, size);
+
+	dma_unmap_single(&dsi_dev, tp->dmap, size, DMA_TO_DEVICE);
 	tp->dmap = 0;
 	return tp->len;
 }
@@ -1130,7 +1329,15 @@
 {
 	u32 *lp, data;
 	int i, off, cnt;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
 
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
 	lp = (u32 *)rp->data;
 	cnt = rlen;
 	cnt += 3;
@@ -1144,8 +1351,10 @@
 
 
 	for (i = 0; i < cnt; i++) {
-		data = (u32)MIPI_INP((pdata->dsi_base) + off);
+		data = (u32)MIPI_INP((ctrl_pdata->ctrl_base) + off);
 		*lp++ = ntohl(data);	/* to network byte order */
+		pr_debug("%s: data = 0x%x and ntohl(data) = 0x%x\n",
+					 __func__, data, ntohl(data));
 		off -= 4;
 		rp->len += sizeof(*lp);
 	}
@@ -1227,8 +1436,10 @@
 {
 	u32 isr;
 	unsigned char *dsi_base;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata =
+			(struct mdss_dsi_ctrl_pdata *)ptr;
 
-	dsi_base = mdss_dsi_get_base_adr();
+	dsi_base = ctrl_pdata->ctrl_base;
 	if (!dsi_base)
 		pr_err("%s:%d DSI base adr no Initialized",
 				       __func__, __LINE__);
@@ -1236,6 +1447,17 @@
 	isr = MIPI_INP(dsi_base + 0x0110);/* DSI_INTR_CTRL */
 	MIPI_OUTP(dsi_base + 0x0110, isr);
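+	/*
+	 * In broadcast mode the slave (DISPLAY_2) isr also acks the master
+	 * controller's mirrored interrupt status below so it does not stay
+	 * latched.
+	 */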
 
+	if (ctrl_pdata->shared_pdata.broadcast_enable)
+		if ((ctrl_pdata->panel_data.panel_info.pdest == DISPLAY_2)
+		    && (left_ctrl_pdata != NULL)) {
+			u32 isr0;
+			isr0 = MIPI_INP(left_ctrl_pdata->ctrl_base
+						+ 0x0110);/* DSI_INTR_CTRL */
+			MIPI_OUTP(left_ctrl_pdata->ctrl_base + 0x0110, isr0);
+		}
+
+	pr_debug("%s: isr=%x %x", __func__, isr, (int)DSI_INTR_ERROR);
+
 	if (isr & DSI_INTR_ERROR)
 		mdss_dsi_error(dsi_base);
 
@@ -1251,7 +1473,6 @@
 	if (isr & DSI_INTR_CMD_MDP_DONE) {
 		spin_lock(&dsi_mdp_lock);
 		dsi_mdp_busy = false;
-		mdss_dsi_disable_irq_nosync();
 		spin_unlock(&dsi_mdp_lock);
 	}
 
diff --git a/drivers/video/msm/mdss/mdss_dsi_panel.c b/drivers/video/msm/mdss/mdss_dsi_panel.c
index 5fab93e..c56cd41 100644
--- a/drivers/video/msm/mdss/mdss_dsi_panel.c
+++ b/drivers/video/msm/mdss/mdss_dsi_panel.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,14 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/pwm.h>
+#include <linux/err.h>
 
 #include "mdss_dsi.h"
 
@@ -22,59 +29,213 @@
 static struct dsi_buf dsi_panel_tx_buf;
 static struct dsi_buf dsi_panel_rx_buf;
 
-static struct dsi_cmd_desc *dsi_panel_on_cmds;
-static struct dsi_cmd_desc *dsi_panel_off_cmds;
-static int num_of_on_cmds;
-static int num_of_off_cmds;
-static char *on_cmds, *off_cmds;
+DEFINE_LED_TRIGGER(bl_led_trigger);
+
+static struct mdss_dsi_phy_ctrl phy_params;
+
+void mdss_dsi_panel_pwm_cfg(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+	int ret;
+
+	if (!gpio_is_valid(ctrl->pwm_gpio)) {
+		pr_err("%s: pwm_gpio=%d Invalid\n", __func__,
+				ctrl->pwm_gpio);
+		return;
+	}
+
+	ret = gpio_request(ctrl->pwm_gpio, "disp_pwm");
+	if (ret) {
+		pr_err("%s: pwm_gpio=%d request failed\n", __func__,
+				ctrl->pwm_gpio);
+		return;
+	}
+
+	ctrl->pwm_bl = pwm_request(ctrl->pwm_lpg_chan, "lcd-bklt");
+	if (ctrl->pwm_bl == NULL || IS_ERR(ctrl->pwm_bl)) {
+		pr_err("%s: lpg_chan=%d pwm request failed", __func__,
+				ctrl->pwm_lpg_chan);
+		gpio_free(ctrl->pwm_gpio);
+		ctrl->pwm_gpio = -1;
+	}
+}
+
+static void mdss_dsi_panel_bklt_pwm(struct mdss_dsi_ctrl_pdata *ctrl, int level)
+{
+	int ret;
+	u32 duty;
+
+	if (ctrl->pwm_bl == NULL) {
+		pr_err("%s: no PWM\n", __func__);
+		return;
+	}
+
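+	/* Scale the requested level into the PWM period:
+	 * duty = level / bklt_max * pwm_period.
+	 */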
+	duty = level * ctrl->pwm_period;
+	duty /= ctrl->bklt_max;
+
+	pr_debug("%s: bklt_ctrl=%d pwm_period=%d pwm_gpio=%d pwm_lpg_chan=%d\n",
+			__func__, ctrl->bklt_ctrl, ctrl->pwm_period,
+				ctrl->pwm_gpio, ctrl->pwm_lpg_chan);
+
+	pr_debug("%s: ndx=%d level=%d duty=%d\n", __func__,
+					ctrl->ndx, level, duty);
+
+	ret = pwm_config(ctrl->pwm_bl, duty, ctrl->pwm_period);
+	if (ret) {
+		pr_err("%s: pwm_config() failed err=%d.\n", __func__, ret);
+		return;
+	}
+
+	ret = pwm_enable(ctrl->pwm_bl);
+	if (ret)
+		pr_err("%s: pwm_enable() failed err=%d\n", __func__, ret);
+}
+
+
+void mdss_dsi_panel_reset(struct mdss_panel_data *pdata, int enable)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	if (!gpio_is_valid(ctrl_pdata->disp_en_gpio)) {
+		pr_debug("%s:%d, reset line not configured\n",
+			   __func__, __LINE__);
+	}
+
+	if (!gpio_is_valid(ctrl_pdata->rst_gpio)) {
+		pr_debug("%s:%d, reset line not configured\n",
+			   __func__, __LINE__);
+		return;
+	}
+
+	pr_debug("%s: enable = %d\n", __func__, enable);
+
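+	/* Toggle reset high -> low -> high; the delays below are
+	 * assumed panel-specific power-on timings.
+	 */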
+	if (enable) {
+		gpio_set_value((ctrl_pdata->rst_gpio), 1);
+		msleep(20);
+		gpio_set_value((ctrl_pdata->rst_gpio), 0);
+		udelay(200);
+		gpio_set_value((ctrl_pdata->rst_gpio), 1);
+		msleep(20);
+		if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+			gpio_set_value((ctrl_pdata->disp_en_gpio), 1);
+	} else {
+		gpio_set_value((ctrl_pdata->rst_gpio), 0);
+		if (gpio_is_valid(ctrl_pdata->disp_en_gpio))
+			gpio_set_value((ctrl_pdata->disp_en_gpio), 0);
+	}
+}
+
+static void mdss_dsi_panel_bl_ctrl(struct mdss_panel_data *pdata,
+							u32 bl_level)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	switch (ctrl_pdata->bklt_ctrl) {
+	case BL_WLED:
+		led_trigger_event(bl_led_trigger, bl_level);
+		break;
+	case BL_PWM:
+		mdss_dsi_panel_bklt_pwm(ctrl_pdata, bl_level);
+		break;
+	default:
+		pr_err("%s: Unknown bl_ctrl configuration\n",
+			__func__);
+		break;
+	}
+}
+
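+/* Standard DCS set_tear_on (0x35) / set_tear_off (0x34) commands,
+ * sent around panel on/off.
+ */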
+static char set_tear_on[2] = {0x35, 0x00};
+static struct dsi_cmd_desc dsi_tear_on_cmd = {
+	DTYPE_DCS_WRITE1, 1, 0, 0, 0, sizeof(set_tear_on), set_tear_on};
+
+static char set_tear_off[2] = {0x34, 0x00};
+static struct dsi_cmd_desc dsi_tear_off_cmd = {
+	DTYPE_DCS_WRITE, 1, 0, 0, 0, sizeof(set_tear_off), set_tear_off};
 
 static int mdss_dsi_panel_on(struct mdss_panel_data *pdata)
 {
 	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
 
-	mipi  = &pdata->panel_info.mipi;
-
-	pr_debug("%s:%d, debug info (mode) : %d\n", __func__, __LINE__,
-		 mipi->mode);
-
-	if (mipi->mode == DSI_VIDEO_MODE) {
-		mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf, dsi_panel_on_cmds,
-			num_of_on_cmds);
-	} else {
-		pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
 		return -EINVAL;
 	}
 
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	mipi  = &pdata->panel_info.mipi;
+
+	pr_debug("%s: ctrl=%p ndx=%d\n", __func__, ctrl, ctrl->ndx);
+
+	if (ctrl->on_cmds->size)
+		mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf,
+				 ctrl->on_cmds->buf,
+				 ctrl->on_cmds->size);
+
+	mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf,
+			&dsi_tear_on_cmd, 1);
+
 	return 0;
 }
 
 static int mdss_dsi_panel_off(struct mdss_panel_data *pdata)
 {
 	struct mipi_panel_info *mipi;
+	struct mdss_dsi_ctrl_pdata *ctrl = NULL;
+
+	if (pdata == NULL) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+
+	ctrl = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+
+	pr_debug("%s: ctrl=%p ndx=%d\n", __func__, ctrl, ctrl->ndx);
 
 	mipi  = &pdata->panel_info.mipi;
 
-	pr_debug("%s:%d, debug info\n", __func__, __LINE__);
+	mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf,
+			&dsi_tear_off_cmd, 1);
 
-	if (mipi->mode == DSI_VIDEO_MODE) {
-		mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf, dsi_panel_off_cmds,
-			num_of_off_cmds);
-	} else {
-		pr_debug("%s:%d, CMD mode not supported", __func__, __LINE__);
-		return -EINVAL;
-	}
+	if (ctrl->off_cmds->size)
+		mdss_dsi_cmds_tx(pdata, &dsi_panel_tx_buf,
+				 ctrl->off_cmds->buf,
+				 ctrl->off_cmds->size);
 
 	return 0;
 }
 
 static int mdss_panel_parse_dt(struct platform_device *pdev,
-			    struct mdss_panel_common_pdata *panel_data)
+			       struct mdss_panel_common_pdata *panel_data)
 {
 	struct device_node *np = pdev->dev.of_node;
 	u32 res[6], tmp;
+	u32 fbc_res[7];
 	int rc, i, len;
 	int cmd_plen, data_offset;
 	const char *data;
+	static const char *bl_ctrl_type, *pdest;
+	static const char *on_cmds_state, *off_cmds_state;
+	char *on_cmds = NULL, *off_cmds = NULL;
+	int num_of_on_cmds = 0, num_of_off_cmds = 0;
+	bool fbc_enabled = false;
 
 	rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
 	if (rc) {
@@ -85,6 +246,14 @@
 	panel_data->panel_info.xres = (!rc ? res[0] : 640);
 	panel_data->panel_info.yres = (!rc ? res[1] : 480);
 
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-active-res", res, 2);
+	if (rc == 0) {
+		panel_data->panel_info.lcdc.xres_pad =
+			panel_data->panel_info.xres - res[0];
+		panel_data->panel_info.lcdc.yres_pad =
+			panel_data->panel_info.yres - res[1];
+	}
+
 	rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
 	if (rc) {
 		pr_err("%s:%d, panel bpp not specified\n",
@@ -93,6 +262,22 @@
 	}
 	panel_data->panel_info.bpp = (!rc ? tmp : 24);
 
+	pdest = of_get_property(pdev->dev.of_node,
+				"qcom,mdss-pan-dest", NULL);
+	if (!pdest || strlen(pdest) != 9) {
+		pr_err("%s: Unknown pdest specified\n", __func__);
+		return -EINVAL;
+	}
+	if (!strncmp(pdest, "display_1", 9))
+		panel_data->panel_info.pdest = DISPLAY_1;
+	else if (!strncmp(pdest, "display_2", 9))
+		panel_data->panel_info.pdest = DISPLAY_2;
+	else {
+		pr_debug("%s: pdest not specified. Set Default\n",
+							__func__);
+		panel_data->panel_info.pdest = DISPLAY_1;
+	}
+
 	rc = of_property_read_u32_array(np,
 		"qcom,mdss-pan-porch-values", res, 6);
 	panel_data->panel_info.lcdc.h_back_porch = (!rc ? res[0] : 6);
@@ -106,6 +291,39 @@
 		"qcom,mdss-pan-underflow-clr", &tmp);
 	panel_data->panel_info.lcdc.underflow_clr = (!rc ? tmp : 0xff);
 
+	bl_ctrl_type = of_get_property(pdev->dev.of_node,
+				  "qcom,mdss-pan-bl-ctrl", NULL);
+	if ((bl_ctrl_type) && (!strncmp(bl_ctrl_type, "bl_ctrl_wled", 12))) {
+		led_trigger_register_simple("bkl-trigger", &bl_led_trigger);
+		pr_debug("%s: SUCCESS-> WLED TRIGGER register\n", __func__);
+
+		panel_data->panel_info.bklt_ctrl = BL_WLED;
+	} else if ((bl_ctrl_type) &&
+		   (!strncmp(bl_ctrl_type, "bl_ctrl_pwm", 11))) {
+		panel_data->panel_info.bklt_ctrl = BL_PWM;
+
+		rc = of_property_read_u32(np, "qcom,dsi-pwm-period", &tmp);
+		if (rc) {
+			pr_err("%s:%d, Error, dsi pwm_period\n",
+						__func__, __LINE__);
+			return -EINVAL;
+		}
+		panel_data->panel_info.pwm_period = tmp;
+
+		rc = of_property_read_u32(np, "qcom,dsi-lpg-channel", &tmp);
+		if (rc) {
+			pr_err("%s:%d, Error, dsi lpg channel\n",
+						__func__, __LINE__);
+			return -EINVAL;
+		}
+		panel_data->panel_info.pwm_lpg_chan = tmp;
+
+		tmp = of_get_named_gpio(np, "qcom,dsi-pwm-gpio", 0);
+		panel_data->panel_info.pwm_gpio =  tmp;
+	} else {
+		pr_debug("%s: Unknown backlight control\n", __func__);
+		panel_data->panel_info.bklt_ctrl = UNKNOWN_CTRL;
+	}
+
 	rc = of_property_read_u32_array(np,
 		"qcom,mdss-pan-bl-levels", res, 2);
 	panel_data->panel_info.bl_min = (!rc ? res[0] : 0);
@@ -114,6 +332,13 @@
 	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-mode", &tmp);
 	panel_data->panel_info.mipi.mode = (!rc ? tmp : DSI_VIDEO_MODE);
 
+	rc = of_property_read_u32(np, "qcom,mdss-vsync-enable", &tmp);
+	panel_data->panel_info.mipi.vsync_enable = (!rc ? tmp : 0);
+
+	rc = of_property_read_u32(np, "qcom,mdss-hw-vsync-mode", &tmp);
+	panel_data->panel_info.mipi.hw_vsync_mode = (!rc ? tmp : 0);
+
+
 	rc = of_property_read_u32(np,
 		"qcom,mdss-pan-dsi-h-pulse-mode", &tmp);
 	panel_data->panel_info.mipi.pulse_mode_hsa_he = (!rc ? tmp : false);
@@ -137,6 +362,26 @@
 			(!rc ? tmp : DSI_NON_BURST_SYNCH_PULSE);
 
 	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-insert-dcs-cmd", &tmp);
+	panel_data->panel_info.mipi.insert_dcs_cmd =
+			(!rc ? tmp : 1);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-wr-mem-continue", &tmp);
+	panel_data->panel_info.mipi.wr_mem_continue =
+			(!rc ? tmp : 0x3c);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-wr-mem-start", &tmp);
+	panel_data->panel_info.mipi.wr_mem_start =
+			(!rc ? tmp : 0x2c);
+
+	rc = of_property_read_u32(np,
+		"qcom,mdss-pan-te-sel", &tmp);
+	panel_data->panel_info.mipi.te_sel =
+			(!rc ? tmp : 1);
+
+	rc = of_property_read_u32(np,
 		"qcom,mdss-pan-dsi-dst-format", &tmp);
 	panel_data->panel_info.mipi.dst_format =
 			(!rc ? tmp : DSI_VIDEO_DST_FORMAT_RGB888);
@@ -154,6 +399,9 @@
 	panel_data->panel_info.mipi.data_lane2 = (!rc ? res[2] : false);
 	panel_data->panel_info.mipi.data_lane3 = (!rc ? res[3] : false);
 
+	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-dlane-swap", &tmp);
+	panel_data->panel_info.mipi.dlane_swap = (!rc ? tmp : 0);
+
 	rc = of_property_read_u32_array(np, "qcom,mdss-pan-dsi-t-clk", res, 2);
 	panel_data->panel_info.mipi.t_clk_pre = (!rc ? res[0] : 0x24);
 	panel_data->panel_info.mipi.t_clk_post = (!rc ? res[1] : 0x03);
@@ -184,6 +432,103 @@
 	rc = of_property_read_u32(np, "qcom,mdss-pan-dsi-frame-rate", &tmp);
 	panel_data->panel_info.mipi.frame_rate = (!rc ? tmp : 60);
 
+	rc = of_property_read_u32(np, "qcom,mdss-pan-clk-rate", &tmp);
+	panel_data->panel_info.clk_rate = (!rc ? tmp : 0);
+
+	data = of_get_property(np, "qcom,panel-phy-regulatorSettings", &len);
+	if ((!data) || (len != 7)) {
+		pr_err("%s:%d, Unable to read Phy regulator settings",
+		       __func__, __LINE__);
+		goto error;
+	}
+	for (i = 0; i < len; i++)
+		phy_params.regulator[i] = data[i];
+
+	data = of_get_property(np, "qcom,panel-phy-timingSettings", &len);
+	if ((!data) || (len != 12)) {
+		pr_err("%s:%d, Unable to read Phy timing settings",
+		       __func__, __LINE__);
+		goto error;
+	}
+	for (i = 0; i < len; i++)
+		phy_params.timing[i] = data[i];
+
+	data = of_get_property(np, "qcom,panel-phy-strengthCtrl", &len);
+	if ((!data) || (len != 2)) {
+		pr_err("%s:%d, Unable to read Phy Strength ctrl settings",
+		       __func__, __LINE__);
+		goto error;
+	}
+	phy_params.strength[0] = data[0];
+	phy_params.strength[1] = data[1];
+
+	data = of_get_property(np, "qcom,panel-phy-bistCtrl", &len);
+	if ((!data) || (len != 6)) {
+		pr_err("%s:%d, Unable to read Phy Bist Ctrl settings",
+		       __func__, __LINE__);
+		goto error;
+	}
+	for (i = 0; i < len; i++)
+		phy_params.bistCtrl[i] = data[i];
+
+	data = of_get_property(np, "qcom,panel-phy-laneConfig", &len);
+	if ((!data) || (len != 45)) {
+		pr_err("%s:%d, Unable to read Phy lane configure settings",
+		       __func__, __LINE__);
+		goto error;
+	}
+	for (i = 0; i < len; i++)
+		phy_params.laneCfg[i] = data[i];
+
+	panel_data->panel_info.mipi.dsi_phy_db = &phy_params;
+
+	fbc_enabled = of_property_read_bool(np,
+			"qcom,fbc-enabled");
+	if (fbc_enabled) {
+		pr_debug("%s:%d FBC panel enabled.\n", __func__, __LINE__);
+		panel_data->panel_info.fbc.enabled = 1;
+
+		rc = of_property_read_u32_array(np,
+				"qcom,fbc-mode", fbc_res, 7);
+		panel_data->panel_info.fbc.target_bpp =
+			(!rc ?	fbc_res[0] : panel_data->panel_info.bpp);
+		panel_data->panel_info.fbc.comp_mode = (!rc ? fbc_res[1] : 0);
+		panel_data->panel_info.fbc.qerr_enable =
+			(!rc ? fbc_res[2] : 0);
+		panel_data->panel_info.fbc.cd_bias = (!rc ? fbc_res[3] : 0);
+		panel_data->panel_info.fbc.pat_enable = (!rc ? fbc_res[4] : 0);
+		panel_data->panel_info.fbc.vlc_enable = (!rc ? fbc_res[5] : 0);
+		panel_data->panel_info.fbc.bflc_enable =
+			(!rc ? fbc_res[6] : 0);
+
+		rc = of_property_read_u32_array(np,
+				"qcom,fbc-budget-ctl", fbc_res, 3);
+		panel_data->panel_info.fbc.line_x_budget =
+			(!rc ? fbc_res[0] : 0);
+		panel_data->panel_info.fbc.block_x_budget =
+			(!rc ? fbc_res[1] : 0);
+		panel_data->panel_info.fbc.block_budget =
+			(!rc ? fbc_res[2] : 0);
+
+		rc = of_property_read_u32_array(np,
+				"qcom,fbc-lossy-mode", fbc_res, 4);
+		panel_data->panel_info.fbc.lossless_mode_thd =
+			(!rc ? fbc_res[0] : 0);
+		panel_data->panel_info.fbc.lossy_mode_thd =
+			(!rc ? fbc_res[1] : 0);
+		panel_data->panel_info.fbc.lossy_rgb_thd =
+			(!rc ? fbc_res[2] : 0);
+		panel_data->panel_info.fbc.lossy_mode_idx =
+			(!rc ? fbc_res[3] : 0);
+
+	} else {
+		pr_debug("%s:%d Panel does not support FBC.\n",
+				__func__, __LINE__);
+		panel_data->panel_info.fbc.enabled = 0;
+		panel_data->panel_info.fbc.target_bpp =
+			panel_data->panel_info.bpp;
+	}
+
 	data = of_get_property(np, "qcom,panel-on-cmds", &len);
 	if (!data) {
 		pr_err("%s:%d, Unable to read ON cmds", __func__, __LINE__);
@@ -209,22 +554,34 @@
 		goto error;
 	}
 
-	dsi_panel_on_cmds =
+	panel_data->dsi_panel_on_cmds =
+		kzalloc(sizeof(struct dsi_panel_cmds_list), GFP_KERNEL);
+	if (!panel_data->dsi_panel_on_cmds)
+		return -ENOMEM;
+
+	(panel_data->dsi_panel_on_cmds)->buf =
 		kzalloc((num_of_on_cmds * sizeof(struct dsi_cmd_desc)),
 						GFP_KERNEL);
-	if (!dsi_panel_on_cmds)
+	if (!(panel_data->dsi_panel_on_cmds)->buf)
 		return -ENOMEM;
 
 	data_offset = 0;
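+	/* Each serialized DT entry: six header bytes (dtype, last, vc,
+	 * ack, wait, dlen) followed by dlen payload bytes.
+	 */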
 	for (i = 0; i < num_of_on_cmds; i++) {
-		dsi_panel_on_cmds[i].dtype = on_cmds[data_offset++];
-		dsi_panel_on_cmds[i].last = on_cmds[data_offset++];
-		dsi_panel_on_cmds[i].vc = on_cmds[data_offset++];
-		dsi_panel_on_cmds[i].ack = on_cmds[data_offset++];
-		dsi_panel_on_cmds[i].wait = on_cmds[data_offset++];
-		dsi_panel_on_cmds[i].dlen = on_cmds[data_offset++];
-		dsi_panel_on_cmds[i].payload = &on_cmds[data_offset];
-		data_offset += (dsi_panel_on_cmds[i].dlen);
+		panel_data->dsi_panel_on_cmds->buf[i].dtype =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].last =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].vc =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].ack =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].wait =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].dlen =
+						on_cmds[data_offset++];
+		panel_data->dsi_panel_on_cmds->buf[i].payload =
+						&on_cmds[data_offset];
+		data_offset += (panel_data->dsi_panel_on_cmds->buf[i].dlen);
 	}
 
 	if (data_offset != len) {
@@ -233,6 +590,23 @@
 		goto error;
 	}
 
+	(panel_data->dsi_panel_on_cmds)->size = num_of_on_cmds;
+
+	on_cmds_state = of_get_property(pdev->dev.of_node,
+				"qcom,on-cmds-dsi-state", NULL);
+	if (on_cmds_state && !strncmp(on_cmds_state, "DSI_LP_MODE", 11)) {
+		(panel_data->dsi_panel_on_cmds)->ctrl_state =
+						DSI_LP_MODE;
+	} else if (on_cmds_state &&
+		   !strncmp(on_cmds_state, "DSI_HS_MODE", 11)) {
+		(panel_data->dsi_panel_on_cmds)->ctrl_state =
+						DSI_HS_MODE;
+	} else {
+		pr_debug("%s: ON cmds state not specified. Set Default\n",
+							__func__);
+		(panel_data->dsi_panel_on_cmds)->ctrl_state =
+						DSI_LP_MODE;
+	}
+
 	data = of_get_property(np, "qcom,panel-off-cmds", &len);
 	if (!data) {
 		pr_err("%s:%d, Unable to read OFF cmds", __func__, __LINE__);
@@ -258,22 +632,34 @@
 		goto error;
 	}
 
-	dsi_panel_off_cmds = kzalloc(num_of_off_cmds
+	panel_data->dsi_panel_off_cmds =
+		kzalloc(sizeof(struct dsi_panel_cmds_list), GFP_KERNEL);
+	if (!panel_data->dsi_panel_off_cmds)
+		return -ENOMEM;
+
+	(panel_data->dsi_panel_off_cmds)->buf = kzalloc(num_of_off_cmds
 				* sizeof(struct dsi_cmd_desc),
 					GFP_KERNEL);
-	if (!dsi_panel_off_cmds)
+	if (!(panel_data->dsi_panel_off_cmds)->buf)
 		return -ENOMEM;
 
 	data_offset = 0;
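+	/* Same serialized layout as the ON commands above. */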
 	for (i = 0; i < num_of_off_cmds; i++) {
-		dsi_panel_off_cmds[i].dtype = off_cmds[data_offset++];
-		dsi_panel_off_cmds[i].last = off_cmds[data_offset++];
-		dsi_panel_off_cmds[i].vc = off_cmds[data_offset++];
-		dsi_panel_off_cmds[i].ack = off_cmds[data_offset++];
-		dsi_panel_off_cmds[i].wait = off_cmds[data_offset++];
-		dsi_panel_off_cmds[i].dlen = off_cmds[data_offset++];
-		dsi_panel_off_cmds[i].payload = &off_cmds[data_offset];
-		data_offset += (dsi_panel_off_cmds[i].dlen);
+		panel_data->dsi_panel_off_cmds->buf[i].dtype =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].last =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].vc =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].ack =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].wait =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].dlen =
+						off_cmds[data_offset++];
+		panel_data->dsi_panel_off_cmds->buf[i].payload =
+						&off_cmds[data_offset];
+		data_offset += (panel_data->dsi_panel_off_cmds->buf[i].dlen);
 	}
 
 	if (data_offset != len) {
@@ -282,10 +668,29 @@
 		goto error;
 	}
 
+	(panel_data->dsi_panel_off_cmds)->size = num_of_off_cmds;
+
+	off_cmds_state = of_get_property(pdev->dev.of_node,
+				"qcom,off-cmds-dsi-state", NULL);
+	if (off_cmds_state && !strncmp(off_cmds_state, "DSI_LP_MODE", 11)) {
+		(panel_data->dsi_panel_off_cmds)->ctrl_state =
+						DSI_LP_MODE;
+	} else if (off_cmds_state &&
+		   !strncmp(off_cmds_state, "DSI_HS_MODE", 11)) {
+		(panel_data->dsi_panel_off_cmds)->ctrl_state =
+						DSI_HS_MODE;
+	} else {
+		pr_debug("%s: OFF cmds state not specified. Set Default\n",
+							__func__);
+		(panel_data->dsi_panel_off_cmds)->ctrl_state =
+						DSI_LP_MODE;
+	}
+
 	return 0;
 error:
-	kfree(dsi_panel_on_cmds);
-	kfree(dsi_panel_off_cmds);
+	kfree((panel_data->dsi_panel_on_cmds)->buf);
+	kfree((panel_data->dsi_panel_off_cmds)->buf);
+	kfree(panel_data->dsi_panel_on_cmds);
+	kfree(panel_data->dsi_panel_off_cmds);
 	kfree(on_cmds);
 	kfree(off_cmds);
 
@@ -295,14 +700,9 @@
 static int __devinit mdss_dsi_panel_probe(struct platform_device *pdev)
 {
 	int rc = 0;
-	struct mdss_panel_common_pdata *vendor_pdata = NULL;
+	static struct mdss_panel_common_pdata vendor_pdata;
 	static const char *panel_name;
 
-	if (pdev->dev.parent == NULL) {
-		pr_err("%s: parent device missing\n", __func__);
-		return -ENODEV;
-	}
-
 	pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id);
 	if (!pdev->dev.of_node)
 		return -ENODEV;
@@ -314,21 +714,15 @@
 	else
 		pr_info("%s: Panel Name = %s\n", __func__, panel_name);
 
-	vendor_pdata = devm_kzalloc(&pdev->dev,
-			sizeof(*vendor_pdata), GFP_KERNEL);
-	if (!vendor_pdata)
-		return -ENOMEM;
-
-	rc = mdss_panel_parse_dt(pdev, vendor_pdata);
-	if (rc) {
-		devm_kfree(&pdev->dev, vendor_pdata);
-		vendor_pdata = NULL;
+	rc = mdss_panel_parse_dt(pdev, &vendor_pdata);
+	if (rc)
 		return rc;
-	}
-	vendor_pdata->on = mdss_dsi_panel_on;
-	vendor_pdata->off = mdss_dsi_panel_off;
 
-	rc = dsi_panel_device_register(pdev, vendor_pdata);
+	vendor_pdata.on = mdss_dsi_panel_on;
+	vendor_pdata.off = mdss_dsi_panel_off;
+	vendor_pdata.bl_fnc = mdss_dsi_panel_bl_ctrl;
+
+	rc = dsi_panel_device_register(pdev, &vendor_pdata);
 	if (rc)
 		return rc;
 
@@ -350,8 +744,8 @@
 
 static int __init mdss_dsi_panel_init(void)
 {
-	mdss_dsi_buf_alloc(&dsi_panel_tx_buf, DSI_BUF_SIZE);
-	mdss_dsi_buf_alloc(&dsi_panel_rx_buf, DSI_BUF_SIZE);
+	mdss_dsi_buf_alloc(&dsi_panel_tx_buf, ALIGN(DSI_BUF_SIZE, SZ_4K));
+	mdss_dsi_buf_alloc(&dsi_panel_rx_buf, ALIGN(DSI_BUF_SIZE, SZ_4K));
 
 	return platform_driver_register(&this_driver);
 }
diff --git a/drivers/video/msm/mdss/mdss_edp.c b/drivers/video/msm/mdss/mdss_edp.c
new file mode 100644
index 0000000..aea2de0
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_edp.c
@@ -0,0 +1,640 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pwm.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+#include <mach/hardware.h>
+#include <mach/dma.h>
+
+#include "mdss_edp.h"
+
+#define RGB_COMPONENTS		3
+#define VDDA_MIN_UV			1800000	/* uV units */
+#define VDDA_MAX_UV			1800000	/* uV units */
+#define VDDA_UA_ON_LOAD		100000	/* uA units */
+#define VDDA_UA_OFF_LOAD	100		/* uA units */
+
+static int mdss_edp_get_base_address(struct mdss_edp_drv_pdata *edp_drv);
+static int mdss_edp_get_mmss_cc_base_address(struct mdss_edp_drv_pdata
+		*edp_drv);
+static int mdss_edp_regulator_init(struct mdss_edp_drv_pdata *edp_drv);
+static int mdss_edp_regulator_on(struct mdss_edp_drv_pdata *edp_drv);
+static int mdss_edp_regulator_off(struct mdss_edp_drv_pdata *edp_drv);
+static int mdss_edp_gpio_panel_en(struct mdss_edp_drv_pdata *edp_drv);
+static int mdss_edp_pwm_config(struct mdss_edp_drv_pdata *edp_drv);
+
+static void mdss_edp_edid2pinfo(struct mdss_edp_drv_pdata *edp_drv);
+static void mdss_edp_fill_edid_data(struct mdss_edp_drv_pdata *edp_drv);
+static void mdss_edp_fill_dpcd_data(struct mdss_edp_drv_pdata *edp_drv);
+
+static int mdss_edp_device_register(struct mdss_edp_drv_pdata *edp_drv);
+
+static void mdss_edp_config_sync(unsigned char *edp_base);
+static void mdss_edp_config_sw_div(unsigned char *edp_base);
+static void mdss_edp_config_static_mdiv(unsigned char *edp_base);
+static void mdss_edp_enable(unsigned char *edp_base, int enable);
+
+/*
+ * Init regulator needed for edp, 8941_l12
+ */
+static int mdss_edp_regulator_init(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	edp_drv->vdda_vreg = devm_regulator_get(&(edp_drv->pdev->dev), "vdda");
+	if (IS_ERR(edp_drv->vdda_vreg)) {
+		pr_err("%s: Could not get 8941_l12, ret = %ld\n", __func__,
+				PTR_ERR(edp_drv->vdda_vreg));
+		return -ENODEV;
+	}
+
+	ret = regulator_set_voltage(edp_drv->vdda_vreg,
+			VDDA_MIN_UV, VDDA_MAX_UV);
+	if (ret) {
+		pr_err("%s: vdda_vreg set_voltage failed, ret=%d\n", __func__,
+				ret);
+		return -EINVAL;
+	}
+
+	ret = mdss_edp_regulator_on(edp_drv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Set uA and enable vdda
+ */
+static int mdss_edp_regulator_on(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	ret = regulator_set_optimum_mode(edp_drv->vdda_vreg, VDDA_UA_ON_LOAD);
+	if (ret < 0) {
+		pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
+		return ret;
+	}
+
+	ret = regulator_enable(edp_drv->vdda_vreg);
+	if (ret) {
+		pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Disable vdda and set uA
+ */
+static int mdss_edp_regulator_off(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	ret = regulator_disable(edp_drv->vdda_vreg);
+	if (ret) {
+		pr_err("%s: Failed to disable vdda_vreg regulator.\n",
+				__func__);
+		return ret;
+	}
+
+	ret = regulator_set_optimum_mode(edp_drv->vdda_vreg, VDDA_UA_OFF_LOAD);
+	if (ret < 0) {
+		pr_err("%s: vdda_vreg set regulator mode failed.\n",
+				__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Enables the gpio that supply power to the panel and enable the backlight
+ */
+static int mdss_edp_gpio_panel_en(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret = 0;
+
+	edp_drv->gpio_panel_en = of_get_named_gpio(edp_drv->pdev->dev.of_node,
+			"gpio-panel-en", 0);
+	if (!gpio_is_valid(edp_drv->gpio_panel_en)) {
+		pr_err("%s: gpio_panel_en=%d not specified\n", __func__,
+				edp_drv->gpio_panel_en);
+		goto gpio_err;
+	}
+
+	ret = gpio_request(edp_drv->gpio_panel_en, "disp_enable");
+	if (ret) {
+		pr_err("%s: Request reset gpio_panel_en failed, ret=%d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	ret = gpio_direction_output(edp_drv->gpio_panel_en, 1);
+	if (ret) {
+		pr_err("%s: Set direction for gpio_panel_en failed, ret=%d\n",
+				__func__, ret);
+		goto gpio_free;
+	}
+
+	return 0;
+
+gpio_free:
+	gpio_free(edp_drv->gpio_panel_en);
+gpio_err:
+	return -ENODEV;
+}
+
+static int mdss_edp_pwm_config(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret = 0;
+
+	ret = of_property_read_u32(edp_drv->pdev->dev.of_node,
+			"qcom,panel-pwm-period", &edp_drv->pwm_period);
+	if (ret) {
+		pr_err("%s: panel pwm period is not specified, %d", __func__,
+				edp_drv->pwm_period);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(edp_drv->pdev->dev.of_node,
+			"qcom,panel-lpg-channel", &edp_drv->lpg_channel);
+	if (ret) {
+		pr_err("%s: panel lpg channel is not specified, %d", __func__,
+				edp_drv->lpg_channel);
+		return -EINVAL;
+	}
+
+	edp_drv->bl_pwm = pwm_request(edp_drv->lpg_channel, "lcd-backlight");
+	if (edp_drv->bl_pwm == NULL || IS_ERR(edp_drv->bl_pwm)) {
+		pr_err("%s: pwm request failed", __func__);
+		edp_drv->bl_pwm = NULL;
+		return -EIO;
+	}
+
+	edp_drv->gpio_panel_pwm = of_get_named_gpio(edp_drv->pdev->dev.of_node,
+			"gpio-panel-pwm", 0);
+	if (!gpio_is_valid(edp_drv->gpio_panel_pwm)) {
+		pr_err("%s: gpio_panel_pwm=%d not specified\n", __func__,
+				edp_drv->gpio_panel_pwm);
+		goto edp_free_pwm;
+	}
+
+	ret = gpio_request(edp_drv->gpio_panel_pwm, "disp_pwm");
+	if (ret) {
+		pr_err("%s: Request reset gpio_panel_pwm failed, ret=%d\n",
+				__func__, ret);
+		goto edp_free_pwm;
+	}
+
+	return 0;
+
+edp_free_pwm:
+	pwm_free(edp_drv->bl_pwm);
+	return -ENODEV;
+}
+
+void mdss_edp_set_backlight(struct mdss_panel_data *pdata, u32 bl_level)
+{
+	int ret = 0;
+	struct mdss_edp_drv_pdata *edp_drv = NULL;
+	int bl_max;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+	edp_drv = container_of(pdata, struct mdss_edp_drv_pdata, panel_data);
+
+	bl_max = edp_drv->panel_data.panel_info.bl_max;
+	if (bl_level > bl_max)
+		bl_level = bl_max;
+
+	if (edp_drv->bl_pwm == NULL) {
+		pr_err("%s: edp_drv->bl_pwm=NULL.\n", __func__);
+		return;
+	}
+
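+	/* duty = bl_level / bl_max * pwm_period */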
+	ret = pwm_config(edp_drv->bl_pwm,
+			bl_level * edp_drv->pwm_period / bl_max,
+			edp_drv->pwm_period);
+	if (ret) {
+		pr_err("%s: pwm_config() failed err=%d.\n", __func__, ret);
+		return;
+	}
+
+	ret = pwm_enable(edp_drv->bl_pwm);
+	if (ret) {
+		pr_err("%s: pwm_enable() failed err=%d\n", __func__, ret);
+		return;
+	}
+}
+
+static void mdss_edp_config_sync(unsigned char *edp_base)
+{
+	int ret = 0;
+
+	ret = edp_read(edp_base + 0xc); /* EDP_CONFIGURATION_CTRL */
+	ret &= ~0x733;
+	ret |= (0x55 & 0x733);
+	edp_write(edp_base + 0xc, ret);
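+	/* note: the write below forces 0x55 and supersedes the
+	 * read-modify-write above.
+	 */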
+	edp_write(edp_base + 0xc, 0x55); /* EDP_CONFIGURATION_CTRL */
+}
+
+static void mdss_edp_config_sw_div(unsigned char *edp_base)
+{
+	edp_write(edp_base + 0x14, 0x13b); /* EDP_SOFTWARE_MVID */
+	edp_write(edp_base + 0x18, 0x266); /* EDP_SOFTWARE_NVID */
+}
+
+static void mdss_edp_config_static_mdiv(unsigned char *edp_base)
+{
+	int ret = 0;
+
+	ret = edp_read(edp_base + 0xc); /* EDP_CONFIGURATION_CTRL */
+	edp_write(edp_base + 0xc, ret | 0x2); /* EDP_CONFIGURATION_CTRL */
+	edp_write(edp_base + 0xc, 0x57); /* EDP_CONFIGURATION_CTRL */
+}
+
+static void mdss_edp_enable(unsigned char *edp_base, int enable)
+{
+	edp_write(edp_base + 0x8, 0x0); /* EDP_STATE_CTRL */
+	edp_write(edp_base + 0x8, 0x40); /* EDP_STATE_CTRL */
+	edp_write(edp_base + 0x94, enable); /* EDP_TIMING_ENGINE_EN */
+	edp_write(edp_base + 0x4, enable); /* EDP_MAINLINK_CTRL */
+}
+
+int mdss_edp_on(struct mdss_panel_data *pdata)
+{
+	struct mdss_edp_drv_pdata *edp_drv = NULL;
+	int i;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
+			panel_data);
+
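+	/* Power-up sequence: prepare clocks, reset the PHY, power up the
+	 * core, configure the PLL from the EDID pixel clock, then bring
+	 * up lanes, mainlink and the timing engine.
+	 */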
+	mdss_edp_prepare_clocks(edp_drv);
+	mdss_edp_phy_sw_reset(edp_drv->edp_base);
+	mdss_edp_hw_powerup(edp_drv->edp_base, 1);
+	mdss_edp_pll_configure(edp_drv->edp_base, edp_drv->edid.timing[0].pclk);
+	mdss_edp_clk_enable(edp_drv);
+
+	for (i = 0; i < edp_drv->dpcd.max_lane_count; ++i)
+		mdss_edp_enable_lane_bist(edp_drv->edp_base, i, 1);
+
+	mdss_edp_enable_mainlink(edp_drv->edp_base, 1);
+	mdss_edp_config_clk(edp_drv->edp_base, edp_drv->mmss_cc_base);
+
+	mdss_edp_phy_misc_cfg(edp_drv->edp_base);
+	mdss_edp_config_sync(edp_drv->edp_base);
+	mdss_edp_config_sw_div(edp_drv->edp_base);
+	mdss_edp_config_static_mdiv(edp_drv->edp_base);
+	mdss_edp_enable(edp_drv->edp_base, 1);
+	gpio_set_value(edp_drv->gpio_panel_en, 1);
+
+	return 0;
+}
+
+int mdss_edp_off(struct mdss_panel_data *pdata)
+{
+	struct mdss_edp_drv_pdata *edp_drv = NULL;
+	int ret = 0;
+	int i;
+
+	if (!pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return -EINVAL;
+	}
+	edp_drv = container_of(pdata, struct mdss_edp_drv_pdata,
+				panel_data);
+
+	gpio_set_value(edp_drv->gpio_panel_en, 0);
+	pwm_disable(edp_drv->bl_pwm);
+	mdss_edp_enable(edp_drv->edp_base, 0);
+	mdss_edp_unconfig_clk(edp_drv->edp_base, edp_drv->mmss_cc_base);
+	mdss_edp_enable_mainlink(edp_drv->edp_base, 0);
+
+	for (i = 0; i < edp_drv->dpcd.max_lane_count; ++i)
+		mdss_edp_enable_lane_bist(edp_drv->edp_base, i, 0);
+
+	mdss_edp_clk_disable(edp_drv);
+	mdss_edp_hw_powerup(edp_drv->edp_base, 0);
+	mdss_edp_unprepare_clocks(edp_drv);
+
+	return ret;
+}
+
+static int mdss_edp_event_handler(struct mdss_panel_data *pdata,
+				  int event, void *arg)
+{
+	int rc = 0;
+
+	pr_debug("%s: event=%d\n", __func__, event);
+	switch (event) {
+	case MDSS_EVENT_UNBLANK:
+		rc = mdss_edp_on(pdata);
+		break;
+	case MDSS_EVENT_PANEL_OFF:
+		rc = mdss_edp_off(pdata);
+		break;
+	}
+	return rc;
+}
+
+/*
+ * Converts from EDID struct to mdss_panel_info
+ */
+static void mdss_edp_edid2pinfo(struct mdss_edp_drv_pdata *edp_drv)
+{
+	struct display_timing_desc *dp;
+	struct mdss_panel_info *pinfo;
+
+	dp = &edp_drv->edid.timing[0];
+	pinfo = &edp_drv->panel_data.panel_info;
+
+	pinfo->clk_rate = dp->pclk;
+
+	pinfo->xres = dp->h_addressable + dp->h_border * 2;
+	pinfo->yres = dp->v_addressable + dp->v_border * 2;
+
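+	/* Back porch is derived from blank = fporch + bporch + sync. */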
+	pinfo->lcdc.h_back_porch = dp->h_blank - dp->h_fporch \
+		- dp->h_sync_pulse;
+	pinfo->lcdc.h_front_porch = dp->h_fporch;
+	pinfo->lcdc.h_pulse_width = dp->h_sync_pulse;
+
+	pinfo->lcdc.v_back_porch = dp->v_blank - dp->v_fporch \
+		- dp->v_sync_pulse;
+	pinfo->lcdc.v_front_porch = dp->v_fporch;
+	pinfo->lcdc.v_pulse_width = dp->v_sync_pulse;
+
+	pinfo->type = EDP_PANEL;
+	pinfo->pdest = DISPLAY_1;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = edp_drv->edid.color_depth * RGB_COMPONENTS;
+	pinfo->fb_num = 2;
+
+	pinfo->lcdc.border_clr = 0;	 /* black */
+	pinfo->lcdc.underflow_clr = 0xff; /* blue */
+	pinfo->lcdc.hsync_skew = 0;
+}
+
+static int __devexit mdss_edp_remove(struct platform_device *pdev)
+{
+	struct mdss_edp_drv_pdata *edp_drv = NULL;
+
+	edp_drv = platform_get_drvdata(pdev);
+
+	gpio_free(edp_drv->gpio_panel_en);
+	mdss_edp_regulator_off(edp_drv);
+	iounmap(edp_drv->edp_base);
+	iounmap(edp_drv->mmss_cc_base);
+	edp_drv->edp_base = NULL;
+
+	return 0;
+}
+
+static int mdss_edp_device_register(struct mdss_edp_drv_pdata *edp_drv)
+{
+	int ret;
+
+	mdss_edp_edid2pinfo(edp_drv);
+	edp_drv->panel_data.panel_info.bl_min = 1;
+	edp_drv->panel_data.panel_info.bl_max = 255;
+
+	edp_drv->panel_data.event_handler = mdss_edp_event_handler;
+	edp_drv->panel_data.set_backlight = mdss_edp_set_backlight;
+
+	ret = mdss_register_panel(edp_drv->pdev, &edp_drv->panel_data);
+	if (ret) {
+		dev_err(&(edp_drv->pdev->dev), "unable to register eDP\n");
+		return ret;
+	}
+
+	pr_debug("%s: eDP initialized\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Retrieve edp base address
+ */
+static int mdss_edp_get_base_address(struct mdss_edp_drv_pdata *edp_drv)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(edp_drv->pdev, IORESOURCE_MEM,
+			"edp_base");
+	if (!res) {
+		pr_err("%s: Unable to get the MDSS EDP resources", __func__);
+		return -ENOMEM;
+	}
+
+	edp_drv->edp_base = ioremap(res->start, resource_size(res));
+	if (!edp_drv->edp_base) {
+		pr_err("%s: Unable to remap EDP resources",  __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int mdss_edp_get_mmss_cc_base_address(struct mdss_edp_drv_pdata
+		*edp_drv)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(edp_drv->pdev, IORESOURCE_MEM,
+			"mmss_cc_base");
+	if (!res) {
+		pr_err("%s: Unable to get the MMSS_CC resources", __func__);
+		return -ENOMEM;
+	}
+
+	edp_drv->mmss_cc_base = ioremap(res->start, resource_size(res));
+	if (!edp_drv->mmss_cc_base) {
+		pr_err("%s: Unable to remap MMSS_CC resources",  __func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void mdss_edp_fill_edid_data(struct mdss_edp_drv_pdata *edp_drv)
+{
+	struct edp_edid *edid = &edp_drv->edid;
+
+	edid->id_name[0] = 'A';
+	edid->id_name[0] = 'U';
+	edid->id_name[0] = 'O';
+	edid->id_name[0] = 0;
+	edid->id_product = 0x305D;
+	edid->version = 1;
+	edid->revision = 4;
+	edid->ext_block_cnt = 0;
+	edid->video_digital = 0x5;
+	edid->color_depth = 6;
+	edid->dpm = 0;
+	edid->color_format = 0;
+	edid->timing[0].pclk = 138500000;
+	edid->timing[0].h_addressable = 1920;
+	edid->timing[0].h_blank = 160;
+	edid->timing[0].v_addressable = 1080;
+	edid->timing[0].v_blank = 30;
+	edid->timing[0].h_fporch = 48;
+	edid->timing[0].h_sync_pulse = 32;
+	edid->timing[0].v_sync_pulse = 14;
+	edid->timing[0].v_fporch = 8;
+	edid->timing[0].width_mm =  256;
+	edid->timing[0].height_mm = 144;
+	edid->timing[0].h_border = 0;
+	edid->timing[0].v_border = 0;
+	edid->timing[0].interlaced = 0;
+	edid->timing[0].stereo = 0;
+	edid->timing[0].sync_type = 1;
+	edid->timing[0].sync_separate = 1;
+	edid->timing[0].vsync_pol = 0;
+	edid->timing[0].hsync_pol = 0;
+
+}
+
+static void mdss_edp_fill_dpcd_data(struct mdss_edp_drv_pdata *edp_drv)
+{
+	struct dpcd_cap *cap = &edp_drv->dpcd;
+
+	cap->max_lane_count = 2;
+	cap->max_link_clk = 270;
+}
+
+
+static int __devinit mdss_edp_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct mdss_edp_drv_pdata *edp_drv;
+
+	if (!pdev->dev.of_node) {
+		pr_err("%s: Failed\n", __func__);
+		return -EPERM;
+	}
+
+	edp_drv = devm_kzalloc(&pdev->dev, sizeof(*edp_drv), GFP_KERNEL);
+	if (edp_drv == NULL) {
+		pr_err("%s: Failed, could not allocate edp_drv", __func__);
+		return -ENOMEM;
+	}
+
+	edp_drv->pdev = pdev;
+	edp_drv->pdev->id = 1;
+	edp_drv->clk_on = 0;
+
+	ret = mdss_edp_get_base_address(edp_drv);
+	if (ret)
+		goto probe_err;
+
+	ret = mdss_edp_get_mmss_cc_base_address(edp_drv);
+	if (ret)
+		goto edp_base_unmap;
+
+	ret = mdss_edp_regulator_init(edp_drv);
+	if (ret)
+		goto mmss_cc_base_unmap;
+
+	ret = mdss_edp_clk_init(edp_drv);
+	if (ret)
+		goto edp_clk_deinit;
+
+	ret = mdss_edp_gpio_panel_en(edp_drv);
+	if (ret)
+		goto edp_clk_deinit;
+
+	ret = mdss_edp_pwm_config(edp_drv);
+	if (ret)
+		goto edp_free_gpio_panel_en;
+
+	mdss_edp_fill_edid_data(edp_drv);
+	mdss_edp_fill_dpcd_data(edp_drv);
+	mdss_edp_device_register(edp_drv);
+
+	return 0;
+
+
+edp_free_gpio_panel_en:
+	gpio_free(edp_drv->gpio_panel_en);
+edp_clk_deinit:
+	mdss_edp_clk_deinit(edp_drv);
+	mdss_edp_regulator_off(edp_drv);
+mmss_cc_base_unmap:
+	iounmap(edp_drv->mmss_cc_base);
+edp_base_unmap:
+	iounmap(edp_drv->edp_base);
+probe_err:
+	return ret;
+
+}
+
+static const struct of_device_id msm_mdss_edp_dt_match[] = {
+	{.compatible = "qcom,mdss-edp"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_mdss_edp_dt_match);
+
+static struct platform_driver mdss_edp_driver = {
+	.probe = mdss_edp_probe,
+	.remove = __devexit_p(mdss_edp_remove),
+	.shutdown = NULL,
+	.driver = {
+		.name = "mdss_edp",
+		.of_match_table = msm_mdss_edp_dt_match,
+	},
+};
+
+static int __init mdss_edp_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&mdss_edp_driver);
+	if (ret) {
+		pr_err("%s driver register failed", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+module_init(mdss_edp_init);
+
+static void __exit mdss_edp_driver_cleanup(void)
+{
+	platform_driver_unregister(&mdss_edp_driver);
+}
+module_exit(mdss_edp_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("eDP controller driver");
diff --git a/drivers/video/msm/mdss/mdss_edp.h b/drivers/video/msm/mdss/mdss_edp.h
new file mode 100644
index 0000000..00ef206
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_edp.h
@@ -0,0 +1,119 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_EDP_H
+#define MDSS_EDP_H
+
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+
+#include "mdss_panel.h"
+
+#define edp_read(offset) readl_relaxed((offset))
+#define edp_write(offset, data) writel_relaxed((data), (offset))
+
+struct display_timing_desc {
+	u32 pclk;
+	u32 h_addressable; /* addressable + border = active */
+	u32 h_border;
+	u32 h_blank;	/* fporch + bporch + sync_pulse = blank */
+	u32 h_fporch;
+	u32 h_sync_pulse;
+	u32 v_addressable; /* addressable + border = active */
+	u32 v_border;
+	u32 v_blank;	/* fporch + bporch + sync_pulse = blank */
+	u32 v_fporch;
+	u32 v_sync_pulse;
+	u32 width_mm;
+	u32 height_mm;
+	u32 interlaced;
+	u32 stereo;
+	u32 sync_type;
+	u32 sync_separate;
+	u32 vsync_pol;
+	u32 hsync_pol;
+};
+
+struct edp_edid {
+	char id_name[4];
+	short id_product;
+	char version;
+	char revision;
+	char video_digital;
+	char color_depth;	/* 6, 8, 10, 12 and 14 bits */
+	char color_format;	/* RGB 4:4:4, YCrCb 4:4:4, YCrCb 4:2:2 */
+	char dpm;		/* display power management */
+	char sync_digital;	/* 1 = digital */
+	char sync_separate;	/* 1 = separate */
+	char vsync_pol;		/* 0 = negative, 1 = positive */
+	char hsync_pol;		/* 0 = negative, 1 = positive */
+	char ext_block_cnt;
+	struct display_timing_desc timing[4];
+};
+
+struct dpcd_cap {
+	char max_lane_count;
+	u32 max_link_clk;  /* 162, 270 and 540 Mb, divided by 10 */
+};
+
+struct mdss_edp_drv_pdata {
+	/* device driver */
+	int (*on) (struct mdss_panel_data *pdata);
+	int (*off) (struct mdss_panel_data *pdata);
+	struct platform_device *pdev;
+
+	/* edp specific */
+	struct mdss_panel_data panel_data;
+	unsigned char *edp_base;
+	unsigned char *mmss_cc_base;
+	struct edp_edid edid;
+	struct dpcd_cap dpcd;
+
+	/* regulators */
+	struct regulator *vdda_vreg;
+
+	/* clocks */
+	struct clk *aux_clk;
+	struct clk *pixel_clk;
+	struct clk *ahb_clk;
+	struct clk *link_clk;
+	int clk_on;
+
+	/* gpios */
+	int gpio_panel_en;
+	int gpio_panel_pwm;
+
+	/* backlight */
+	struct pwm_device *bl_pwm;
+	int lpg_channel;
+	int pwm_period;
+};
+
+void mdss_edp_phy_sw_reset(unsigned char *edp_base);
+void mdss_edp_pll_configure(unsigned char *edp_base, int rate);
+void mdss_edp_enable_lane_bist(unsigned char *edp_base, int lane, int enable);
+void mdss_edp_enable_mainlink(unsigned char *edp_base, int enable);
+void mdss_edp_hw_powerup(unsigned char *edp_base, int enable);
+void mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_clk_disable(struct mdss_edp_drv_pdata *edp_drv);
+int mdss_edp_clk_init(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_clk_deinit(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_unprepare_clocks(struct mdss_edp_drv_pdata *edp_drv);
+void mdss_edp_config_clk(unsigned char *edp_base, unsigned char *mmss_cc_base);
+void mdss_edp_unconfig_clk(unsigned char *edp_base,
+		unsigned char *mmss_cc_base);
+void mdss_edp_phy_misc_cfg(unsigned char *edp_base);
+
+#endif /* MDSS_EDP_H */
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index 094c253..a5903cf 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -2,7 +2,7 @@
  * Core MDSS framebuffer driver.
  *
  * Copyright (C) 2007 Google Incorporated
- * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -16,7 +16,6 @@
 
 #define pr_fmt(fmt)	"%s: " fmt, __func__
 
-#include <linux/android_pmem.h>
 #include <linux/bootmem.h>
 #include <linux/console.h>
 #include <linux/debugfs.h>
@@ -40,11 +39,14 @@
 #include <linux/uaccess.h>
 #include <linux/version.h>
 #include <linux/vmalloc.h>
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+#include <linux/file.h>
 
 #include <mach/board.h>
+#include <mach/memory.h>
 
 #include "mdss_fb.h"
-#include "mdss_mdp.h"
 
 #ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
 #define MDSS_FB_NUM 3
@@ -63,6 +65,8 @@
 	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff
 };
 
+static struct msm_mdp_interface *mdp_instance;
+
 static int mdss_fb_register(struct msm_fb_data_type *mfd);
 static int mdss_fb_open(struct fb_info *info, int user);
 static int mdss_fb_release(struct fb_info *info, int user);
@@ -77,6 +81,47 @@
 static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
 			 unsigned long arg);
 static int mdss_fb_mmap(struct fb_info *info, struct vm_area_struct *vma);
+static void mdss_fb_release_fences(struct msm_fb_data_type *mfd);
+
+static void mdss_fb_commit_wq_handler(struct work_struct *work);
+static void mdss_fb_pan_idle(struct msm_fb_data_type *mfd);
+static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
+					int event, void *arg);
+void mdss_fb_no_update_notify_timer_cb(unsigned long data)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
+
+	if (!mfd) {
+		pr_err("%s mfd NULL\n", __func__);
+		return;
+	}
+	complete(&mfd->no_update.comp);
+}
+
+static int mdss_fb_notify_update(struct msm_fb_data_type *mfd,
+							unsigned long *argp)
+{
+	int ret, notify;
+
+	ret = copy_from_user(&notify, argp, sizeof(int));
+	if (ret) {
+		pr_err("%s:ioctl failed\n", __func__);
+		return ret;
+	}
+
+	if (notify > NOTIFY_UPDATE_STOP)
+		return -EINVAL;
+
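+	/* Block until an update (or the lack of one) is signalled;
+	 * times out after 4 seconds.
+	 */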
+	if (notify == NOTIFY_UPDATE_START) {
+		INIT_COMPLETION(mfd->update.comp);
+		ret = wait_for_completion_interruptible_timeout(
+						&mfd->update.comp, 4 * HZ);
+	} else {
+		INIT_COMPLETION(mfd->no_update.comp);
+		ret = wait_for_completion_interruptible_timeout(
+						&mfd->no_update.comp, 4 * HZ);
+	}
+	if (ret == 0)
+		ret = -ETIMEDOUT;
+	return (ret > 0) ? 0 : ret;
+}
 
 #define MAX_BACKLIGHT_BRIGHTNESS 255
 static int lcd_backlight_registered;
@@ -92,13 +137,15 @@
 
 	/* This maps android backlight level 0 to 255 into
 	   driver backlight level 0 to bl_max with rounding */
-	bl_lvl = (2 * value * mfd->panel_info.bl_max + MAX_BACKLIGHT_BRIGHTNESS)
-		 /(2 * MAX_BACKLIGHT_BRIGHTNESS);
+	bl_lvl = (2 * value * mfd->panel_info->bl_max +
+		  MAX_BACKLIGHT_BRIGHTNESS) / (2 * MAX_BACKLIGHT_BRIGHTNESS);
 
 	if (!bl_lvl && value)
 		bl_lvl = 1;
 
+	mutex_lock(&mfd->lock);
 	mdss_fb_set_backlight(mfd, bl_lvl);
+	mutex_unlock(&mfd->lock);
 }
 
 static struct led_classdev backlight_led = {
@@ -114,7 +161,7 @@
 	struct fb_info *fbi = dev_get_drvdata(dev);
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
 
-	switch (mfd->panel_info.type) {
+	switch (mfd->panel.type) {
 	case NO_PANEL:
 		ret = snprintf(buf, PAGE_SIZE, "no panel\n");
 		break;
@@ -136,6 +183,9 @@
 	case WRITEBACK_PANEL:
 		ret = snprintf(buf, PAGE_SIZE, "writeback panel\n");
 		break;
+	case EDP_PANEL:
+		ret = snprintf(buf, PAGE_SIZE, "edp panel\n");
+		break;
 	default:
 		ret = snprintf(buf, PAGE_SIZE, "unknown panel\n");
 		break;
@@ -144,9 +194,9 @@
 	return ret;
 }
 
-static DEVICE_ATTR(mdss_fb_type, S_IRUGO, mdss_fb_get_type, NULL);
+static DEVICE_ATTR(msm_fb_type, S_IRUGO, mdss_fb_get_type, NULL);
 static struct attribute *mdss_fb_attrs[] = {
-	&dev_attr_mdss_fb_type.attr,
+	&dev_attr_msm_fb_type.attr,
 	NULL,
 };
 
@@ -181,12 +231,12 @@
 
 	pdata = dev_get_platdata(&pdev->dev);
 	if (!pdata)
-		return -ENODEV;
+		return -EPROBE_DEFER;
 
 	/*
 	 * alloc framebuffer info + par data
 	 */
-	fbi = framebuffer_alloc(sizeof(struct msm_fb_data_type), &pdev->dev);
+	fbi = framebuffer_alloc(sizeof(struct msm_fb_data_type), NULL);
 	if (fbi == NULL) {
 		pr_err("can't allocate framebuffer info data!\n");
 		return -ENOMEM;
@@ -195,20 +245,22 @@
 	mfd = (struct msm_fb_data_type *)fbi->par;
 	mfd->key = MFD_KEY;
 	mfd->fbi = fbi;
-	mfd->panel_info = pdata->panel_info;
+	mfd->panel_info = &pdata->panel_info;
 	mfd->panel.type = pdata->panel_info.type;
 	mfd->panel.id = mfd->index;
 	mfd->fb_page = MDSS_FB_NUM;
 	mfd->index = fbi_list_index;
 	mfd->mdp_fb_page_protection = MDP_FB_PAGE_PROTECTION_WRITECOMBINE;
-	mfd->panel_info.frame_count = 0;
+
 	mfd->bl_level = 0;
+	mfd->bl_scale = 1024;
+	mfd->bl_min_lvl = 30;
 	mfd->fb_imgType = MDP_RGBA_8888;
-	mfd->iclient = msm_ion_client_create(-1, pdev->name);
-	if (IS_ERR(mfd->iclient))
-		mfd->iclient = NULL;
 
 	mfd->pdev = pdev;
+	if (pdata->next)
+		mfd->split_display = true;
+	mfd->mdp = *mdp_instance;
 
 	mutex_init(&mfd->lock);
 
@@ -220,6 +272,14 @@
 	if (rc)
 		return rc;
 
+	if (mfd->mdp.init_fnc) {
+		rc = mfd->mdp.init_fnc(mfd);
+		if (rc) {
+			pr_err("init_fnc failed\n");
+			return rc;
+		}
+	}
+
 	rc = pm_runtime_set_active(mfd->fbi->dev);
 	if (rc < 0)
 		pr_err("pm_runtime: fail to set active.\n");
@@ -234,8 +294,22 @@
 	}
 
 	mdss_fb_create_sysfs(mfd);
+	mdss_fb_send_panel_event(mfd, MDSS_EVENT_FB_REGISTERED, fbi);
 
-	return 0;
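+	/* Create a sw_sync timeline used to signal release fences for
+	 * queued display buffers.
+	 */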
+	if (mfd->timeline == NULL) {
+		char timeline_name[16];
+		snprintf(timeline_name, sizeof(timeline_name),
+			"mdss_fb_%d", mfd->index);
+		mfd->timeline = sw_sync_timeline_create(timeline_name);
+		if (mfd->timeline == NULL) {
+			pr_err("%s: cannot create time line", __func__);
+			return -ENOMEM;
+		} else {
+			mfd->timeline_value = 0;
+		}
+	}
+
+	return rc;
 }
 
 static int mdss_fb_remove(struct platform_device *pdev)
@@ -269,6 +343,25 @@
 	return 0;
 }
 
+static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
+					int event, void *arg)
+{
+	struct mdss_panel_data *pdata;
+
+	pdata = dev_get_platdata(&mfd->pdev->dev);
+	if (!pdata) {
+		pr_err("no panel connected\n");
+		return -ENODEV;
+	}
+
+	pr_debug("sending event=%d for fb%d\n", event, mfd->index);
+
+	if (pdata->event_handler)
+		return pdata->event_handler(pdata, event, arg);
+
+	return 0;
+}
+
 static int mdss_fb_suspend_sub(struct msm_fb_data_type *mfd)
 {
 	int ret = 0;
@@ -278,6 +371,13 @@
 
 	pr_debug("mdss_fb suspend index=%d\n", mfd->index);
 
+	mdss_fb_pan_idle(mfd);
+	ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_SUSPEND, NULL);
+	if (ret) {
+		pr_warn("unable to suspend fb%d (%d)\n", mfd->index, ret);
+		return ret;
+	}
+
 	mfd->suspend.op_enable = mfd->op_enable;
 	mfd->suspend.panel_power_on = mfd->panel_power_on;
 
@@ -289,6 +389,7 @@
 			return ret;
 		}
 		mfd->op_enable = false;
+		fb_set_suspend(mfd->fbi, FBINFO_STATE_SUSPENDED);
 	}
 
 	return 0;
@@ -301,8 +402,17 @@
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
 
+	INIT_COMPLETION(mfd->power_set_comp);
+	mfd->is_power_setting = true;
 	pr_debug("mdss_fb resume index=%d\n", mfd->index);
 
+	mdss_fb_pan_idle(mfd);
+	ret = mdss_fb_send_panel_event(mfd, MDSS_EVENT_RESUME, NULL);
+	if (ret) {
+		pr_warn("unable to resume fb%d (%d)\n", mfd->index, ret);
+		return ret;
+	}
+
 	/* resume state var recover */
 	mfd->op_enable = mfd->suspend.op_enable;
 
@@ -311,104 +421,114 @@
 					mfd->op_enable);
 		if (ret)
 			pr_warn("can't turn on display!\n");
+		else
+			fb_set_suspend(mfd->fbi, FBINFO_STATE_RUNNING);
 	}
+	mfd->is_power_setting = false;
+	complete_all(&mfd->power_set_comp);
 
 	return ret;
 }
 
-int mdss_fb_suspend_all(void)
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+static int mdss_fb_suspend(struct platform_device *pdev, pm_message_t state)
 {
-	struct fb_info *fbi;
-	int ret, i;
-	int result = 0;
-	console_lock();
-	for (i = 0; i < fbi_list_index; i++) {
-		fbi = fbi_list[i];
-		fb_set_suspend(fbi, FBINFO_STATE_SUSPENDED);
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
 
-		ret = mdss_fb_suspend_sub(fbi->par);
-		if (ret != 0) {
-			fb_set_suspend(fbi, FBINFO_STATE_RUNNING);
-			result = ret;
-		}
-	}
-	console_unlock();
-	return result;
+	dev_dbg(&pdev->dev, "display suspend\n");
+
+	return mdss_fb_suspend_sub(mfd);
 }
 
-int mdss_fb_resume_all(void)
+static int mdss_fb_resume(struct platform_device *pdev)
 {
-	struct fb_info *fbi;
-	int ret, i;
-	int result = 0;
+	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
+	if (!mfd)
+		return -ENODEV;
 
-	console_lock();
-	for (i = 0; i < fbi_list_index; i++) {
-		fbi = fbi_list[i];
+	dev_dbg(&pdev->dev, "display resume\n");
 
-		ret = mdss_fb_resume_sub(fbi->par);
-		if (ret == 0)
-			fb_set_suspend(fbi, FBINFO_STATE_RUNNING);
-	}
-	console_unlock();
-	return result;
-}
-
-#if defined(CONFIG_PM) && defined(CONFIG_SUSPEND)
-static int mdss_fb_ext_suspend(struct device *dev)
-{
-	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
-	int ret = 0;
-
-	if ((!mfd) || (mfd->key != MFD_KEY))
-		return 0;
-
-	if (mfd->panel_info.type == HDMI_PANEL ||
-	    mfd->panel_info.type == DTV_PANEL)
-		ret = mdss_fb_suspend_sub(mfd);
-
-	return ret;
-}
-
-static int mdss_fb_ext_resume(struct device *dev)
-{
-	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
-	int ret = 0;
-
-	if ((!mfd) || (mfd->key != MFD_KEY))
-		return 0;
-
-	if (mfd->panel_info.type == HDMI_PANEL ||
-	    mfd->panel_info.type == DTV_PANEL)
-		ret = mdss_fb_resume_sub(mfd);
-
-	return ret;
+	return mdss_fb_resume_sub(mfd);
 }
 #else
-#define mdss_fb_ext_suspend NULL
-#define mdss_fb_ext_resume NULL
+#define mdss_fb_suspend NULL
+#define mdss_fb_resume NULL
 #endif
 
-static const struct dev_pm_ops mdss_fb_dev_pm_ops = {
-	.suspend = mdss_fb_ext_suspend,
-	.resume = mdss_fb_ext_resume,
+#ifdef CONFIG_PM_SLEEP
+static int mdss_fb_pm_suspend(struct device *dev)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
+
+	if (!mfd)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm suspend\n");
+
+	return mdss_fb_suspend_sub(mfd);
+}
+
+static int mdss_fb_pm_resume(struct device *dev)
+{
+	struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
+	if (!mfd)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm resume\n");
+
+	return mdss_fb_resume_sub(mfd);
+}
+#endif
+
+static const struct dev_pm_ops mdss_fb_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mdss_fb_pm_suspend, mdss_fb_pm_resume)
 };
 
+static const struct of_device_id mdss_fb_dt_match[] = {
+	{ .compatible = "qcom,mdss-fb",},
+	{}
+};
+EXPORT_COMPAT("qcom,mdss-fb");
+
 static struct platform_driver mdss_fb_driver = {
 	.probe = mdss_fb_probe,
 	.remove = mdss_fb_remove,
+	.suspend = mdss_fb_suspend,
+	.resume = mdss_fb_resume,
 	.driver = {
 		.name = "mdss_fb",
-		.pm = &mdss_fb_dev_pm_ops,
+		.of_match_table = mdss_fb_dt_match,
+		.pm = &mdss_fb_pm_ops,
 	},
 };
 
 static int unset_bl_level, bl_updated;
 static int bl_level_old;
 
+static void mdss_fb_scale_bl(struct msm_fb_data_type *mfd, u32 *bl_lvl)
+{
+	u32 temp = *bl_lvl;
+	pr_debug("input = %d, scale = %d", temp, mfd->bl_scale);
+	if (temp >= mfd->bl_min_lvl) {
+		/* bl_scale is the numerator of scaling fraction (x/1024)*/
+		temp = (temp * mfd->bl_scale) / 1024;
+
+		/*if less than minimum level, use min level*/
+		if (temp < mfd->bl_min_lvl)
+			temp = mfd->bl_min_lvl;
+	}
+	pr_debug("output = %d", temp);
+
+	(*bl_lvl) = temp;
+}
+
+/* must call this function from within mfd->lock */
 void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl)
 {
 	struct mdss_panel_data *pdata;
+	u32 temp = bkl_lvl;
 
 	if (!mfd->panel_power_on || !bl_updated) {
 		unset_bl_level = bkl_lvl;
@@ -420,15 +540,22 @@
 	pdata = dev_get_platdata(&mfd->pdev->dev);
 
 	if ((pdata) && (pdata->set_backlight)) {
-		mutex_lock(&mfd->lock);
-		if (bl_level_old == bkl_lvl) {
-			mutex_unlock(&mfd->lock);
+		mdss_fb_scale_bl(mfd, &temp);
+		/*
+		 * Even though backlight has been scaled, want to show that
+		 * backlight has been set to bkl_lvl to those that read from
+		 * sysfs node. Thus, need to set bl_level even if it appears
+		 * the backlight has already been set to the level it is at,
+		 * as well as setting bl_level to bkl_lvl even though the
+		 * backlight has been set to the scaled value.
+		 */
+		if (bl_level_old == temp) {
+			mfd->bl_level = bkl_lvl;
 			return;
 		}
+		pdata->set_backlight(pdata, temp);
 		mfd->bl_level = bkl_lvl;
-		pdata->set_backlight(mfd->bl_level);
-		bl_level_old = mfd->bl_level;
-		mutex_unlock(&mfd->lock);
+		bl_level_old = temp;
 	}
 }
 
@@ -441,7 +568,7 @@
 		if ((pdata) && (pdata->set_backlight)) {
 			mutex_lock(&mfd->lock);
 			mfd->bl_level = unset_bl_level;
-			pdata->set_backlight(mfd->bl_level);
+			pdata->set_backlight(pdata, mfd->bl_level);
 			bl_level_old = unset_bl_level;
 			mutex_unlock(&mfd->lock);
 			bl_updated = 1;
@@ -460,9 +587,8 @@
 
 	switch (blank_mode) {
 	case FB_BLANK_UNBLANK:
-		if (!mfd->panel_power_on) {
-			msleep(20);
-			ret = mfd->on_fnc(mfd);
+		if (!mfd->panel_power_on && mfd->mdp.on_fnc) {
+			ret = mfd->mdp.on_fnc(mfd);
 			if (ret == 0)
 				mfd->panel_power_on = true;
 		}
@@ -473,19 +599,22 @@
 	case FB_BLANK_NORMAL:
 	case FB_BLANK_POWERDOWN:
 	default:
-		if (mfd->panel_power_on) {
+		if (mfd->panel_power_on && mfd->mdp.off_fnc) {
 			int curr_pwr_state;
 
+			del_timer(&mfd->no_update.timer);
+			complete(&mfd->no_update.comp);
+
 			mfd->op_enable = false;
 			curr_pwr_state = mfd->panel_power_on;
 			mfd->panel_power_on = false;
 			bl_updated = 0;
 
-			msleep(20);
-			ret = mfd->off_fnc(mfd);
+			ret = mfd->mdp.off_fnc(mfd);
 			if (ret)
 				mfd->panel_power_on = curr_pwr_state;
-
+			else
+				mdss_fb_release_fences(mfd);
 			mfd->op_enable = true;
 		}
 		break;
@@ -497,6 +626,20 @@
 static int mdss_fb_blank(int blank_mode, struct fb_info *info)
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	if (blank_mode == FB_BLANK_POWERDOWN) {
+		struct fb_event event;
+		event.info = info;
+		event.data = &blank_mode;
+		fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+	}
+	mdss_fb_pan_idle(mfd);
+	if (mfd->op_enable == 0) {
+		if (blank_mode == FB_BLANK_UNBLANK)
+			mfd->suspend.panel_power_on = true;
+		else
+			mfd->suspend.panel_power_on = false;
+		return 0;
+	}
 	return mdss_fb_blank_sub(blank_mode, info, mfd->op_enable);
 }
 
@@ -513,6 +656,7 @@
 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 
+	mdss_fb_pan_idle(mfd);
 	if (off >= len) {
 		/* memory mapped io */
 		off -= len;
@@ -569,71 +713,20 @@
 	.fb_mmap = mdss_fb_mmap,
 };
 
-static u32 mdss_fb_line_length(u32 fb_index, u32 xres, int bpp)
-{
-	/* The adreno GPU hardware requires that the pitch be aligned to
-	   32 pixels for color buffers, so for the cases where the GPU
-	   is writing directly to fb0, the framebuffer pitch
-	   also needs to be 32 pixel aligned */
-
-	if (fb_index == 0)
-		return ALIGN(xres, 32) * bpp;
-	else
-		return xres * bpp;
-}
-
 static int mdss_fb_alloc_fbmem(struct msm_fb_data_type *mfd)
 {
-	void *virt = NULL;
-	unsigned long phys = 0;
-	size_t size;
-
-	size = PAGE_ALIGN(mfd->fbi->fix.line_length * mfd->panel_info.yres);
-	size *= mfd->fb_page;
-
-	if (mfd->index == 0) {
-		struct ion_client *iclient = mfd->iclient;
-
-		if (iclient) {
-			mfd->ihdl = ion_alloc(iclient, size, SZ_4K,
-					 ION_HEAP(ION_CP_MM_HEAP_ID) |
-					 ION_HEAP(ION_SF_HEAP_ID), 0);
-			if (IS_ERR_OR_NULL(mfd->ihdl)) {
-				pr_err("unable to alloc fbmem from ion (%p)\n",
-					mfd->ihdl);
-				return -ENOMEM;
-			}
-
-			virt = ion_map_kernel(iclient, mfd->ihdl);
-			ion_phys(iclient, mfd->ihdl, &phys, &size);
-		} else {
-			virt = dma_alloc_coherent(NULL, size,
-					(dma_addr_t *) &phys, GFP_KERNEL);
-			if (!virt) {
-				pr_err("unable to alloc fbmem size=%u\n", size);
-				return -ENOMEM;
-			}
-		}
-
-		pr_info("allocating %u bytes at %p (%lx phys) for fb %d\n",
-			size, virt, phys, mfd->index);
-	} else {
-		pr_debug("no memory allocated for fb%d\n", mfd->index);
-		size = 0;
+	if (!mfd->mdp.fb_mem_alloc_fnc) {
+		pr_err("no fb memory allocator function defined\n");
+		return -ENOMEM;
 	}
-
-	mfd->fbi->screen_base = virt;
-	mfd->fbi->fix.smem_start = phys;
-	mfd->fbi->fix.smem_len = size;
-
-	return 0;
+	return mfd->mdp.fb_mem_alloc_fnc(mfd);
 }
 
 static int mdss_fb_register(struct msm_fb_data_type *mfd)
 {
 	int ret = -ENODEV;
 	int bpp;
-	struct mdss_panel_info *panel_info = &mfd->panel_info;
+	struct mdss_panel_info *panel_info = mfd->panel_info;
 	struct fb_info *fbi = mfd->fbi;
 	struct fb_fix_screeninfo *fix;
 	struct fb_var_screeninfo *var;
@@ -768,27 +861,29 @@
 		return ret;
 	}
 
-	fix->type = panel_info->is_3d_panel;
-	fix->line_length = mdss_fb_line_length(mfd->index, panel_info->xres,
-					       bpp);
-
 	var->xres = panel_info->xres;
+	if (mfd->split_display)
+		var->xres *= 2;
+
+	fix->type = panel_info->is_3d_panel;
+	if (mfd->mdp.fb_stride)
+		fix->line_length = mfd->mdp.fb_stride(mfd->index, var->xres,
+							bpp);
+	else
+		fix->line_length = var->xres * bpp;
+
 	var->yres = panel_info->yres;
-	var->xres_virtual = panel_info->xres;
+	var->xres_virtual = var->xres;
 	var->yres_virtual = panel_info->yres * mfd->fb_page;
 	var->bits_per_pixel = bpp * 8;	/* FrameBuffer color depth */
-	var->upper_margin = panel_info->lcdc.v_front_porch;
-	var->lower_margin = panel_info->lcdc.v_back_porch;
+	var->upper_margin = panel_info->lcdc.v_back_porch;
+	var->lower_margin = panel_info->lcdc.v_front_porch;
 	var->vsync_len = panel_info->lcdc.v_pulse_width;
-	var->left_margin = panel_info->lcdc.h_front_porch;
-	var->right_margin = panel_info->lcdc.h_back_porch;
+	var->left_margin = panel_info->lcdc.h_back_porch;
+	var->right_margin = panel_info->lcdc.h_front_porch;
 	var->hsync_len = panel_info->lcdc.h_pulse_width;
 	var->pixclock = panel_info->clk_rate / 1000;
 
-	mfd->var_xres = var->xres;
-	mfd->var_yres = var->yres;
-	mfd->var_pixclock = var->pixclock;
-
 	/* id field for fb app  */
 
 	id = (int *)&mfd->panel;
@@ -799,7 +894,6 @@
 	fbi->flags = FBINFO_FLAG_DEFAULT;
 	fbi->pseudo_palette = mdss_fb_pseudo_palette;
 
-	panel_info->fbi = fbi;
 	mfd->ref_cnt = 0;
 	mfd->panel_power_on = false;
 
@@ -810,29 +904,29 @@
 
 	mfd->op_enable = true;
 
-	/* cursor memory allocation */
-	if (mfd->cursor_update) {
-		mfd->cursor_buf = dma_alloc_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
-					(dma_addr_t *) &mfd->cursor_buf_phys,
-					GFP_KERNEL);
-		if (!mfd->cursor_buf)
-			mfd->cursor_update = 0;
+	mutex_init(&mfd->no_update.lock);
+	mutex_init(&mfd->sync_mutex);
+	init_timer(&mfd->no_update.timer);
+	mfd->no_update.timer.function = mdss_fb_no_update_notify_timer_cb;
+	mfd->no_update.timer.data = (unsigned long)mfd;
+	init_completion(&mfd->update.comp);
+	init_completion(&mfd->no_update.comp);
+	init_completion(&mfd->commit_comp);
+	init_completion(&mfd->power_set_comp);
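+	/* Display commits are queued to this worker so the pan/commit
+	 * ioctls can return without blocking on the actual kickoff.
+	 */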
+	INIT_WORK(&mfd->commit_work, mdss_fb_commit_wq_handler);
+	mfd->msm_fb_backup = kzalloc(sizeof(struct msm_fb_backup_type),
+		GFP_KERNEL);
+	if (!mfd->msm_fb_backup) {
+		pr_err("unable to allocate fb backup buffer\n");
+		return -ENOMEM;
 	}
 
-	if (mfd->lut_update) {
-		ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
-		if (ret)
-			pr_err("fb_alloc_cmap() failed!\n");
-	}
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret)
+		pr_err("fb_alloc_cmap() failed!\n");
 
 	if (register_framebuffer(fbi) < 0) {
-		if (mfd->lut_update)
-			fb_dealloc_cmap(&fbi->cmap);
-
-		if (mfd->cursor_buf)
-			dma_free_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
-					  mfd->cursor_buf,
-					  (dma_addr_t) mfd->cursor_buf_phys);
+		fb_dealloc_cmap(&fbi->cmap);
 
 		mfd->op_enable = false;
 		return -EPERM;
@@ -881,6 +975,7 @@
 		return -EINVAL;
 	}
 
+	mdss_fb_pan_idle(mfd);
 	mfd->ref_cnt--;
 
 	if (!mfd->ref_cnt) {
@@ -896,7 +991,162 @@
 	return ret;
 }
 
+static void mdss_fb_power_setting_idle(struct msm_fb_data_type *mfd)
+{
+	int ret;
+
+	if (mfd->is_power_setting) {
+		ret = wait_for_completion_timeout(
+				&mfd->power_set_comp,
+			msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
+		if (ret < 0)
+			ret = -ERESTARTSYS;
+		else if (!ret)
+			pr_err("%s wait for power_set_comp timeout %d %d\n",
+				__func__, ret, mfd->is_power_setting);
+		if (ret <= 0) {
+			mfd->is_power_setting = false;
+			complete_all(&mfd->power_set_comp);
+		}
+	}
+}
+
+void mdss_fb_wait_for_fence(struct msm_fb_data_type *mfd)
+{
+	int i, ret = 0;
+	/* Wait for every acquire fence passed in via MSMFB_BUFFER_SYNC;
+	 * a short first timeout is retried once with a longer one before
+	 * the wait is abandoned.
+	 */
+	for (i = 0; i < mfd->acq_fen_cnt; i++) {
+		ret = sync_fence_wait(mfd->acq_fen[i],
+				WAIT_FENCE_FIRST_TIMEOUT);
+		if (ret == -ETIME) {
+			pr_warn("sync_fence_wait timed out! ");
+			pr_cont("Waiting %ld more seconds\n",
+					WAIT_FENCE_FINAL_TIMEOUT/MSEC_PER_SEC);
+			ret = sync_fence_wait(mfd->acq_fen[i],
+					WAIT_FENCE_FINAL_TIMEOUT);
+		}
+		if (ret < 0) {
+			pr_err("%s: sync_fence_wait failed! ret = %x\n",
+				__func__, ret);
+			break;
+		}
+		sync_fence_put(mfd->acq_fen[i]);
+	}
+
+	if (ret < 0) {
+		while (i < mfd->acq_fen_cnt) {
+			sync_fence_put(mfd->acq_fen[i]);
+			i++;
+		}
+	}
+	mfd->acq_fen_cnt = 0;
+}
+
+static void mdss_fb_signal_timeline_locked(struct msm_fb_data_type *mfd)
+{
+	if (mfd->timeline &&
+			!list_empty(&mfd->timeline->obj.active_list_head)) {
+		sw_sync_timeline_inc(mfd->timeline, 1);
+		mfd->timeline_value++;
+	}
+	mfd->last_rel_fence = mfd->cur_rel_fence;
+	mfd->cur_rel_fence = 0;
+}
+
+void mdss_fb_signal_timeline(struct msm_fb_data_type *mfd)
+{
+	mutex_lock(&mfd->sync_mutex);
+	mdss_fb_signal_timeline_locked(mfd);
+	mutex_unlock(&mfd->sync_mutex);
+}
+
+static void mdss_fb_release_fences(struct msm_fb_data_type *mfd)
+{
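+	/* Called when the panel is powered down: jump the timeline past
+	 * both outstanding release points so no waiter is left blocked
+	 * on a frame that will never be displayed.
+	 */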
+	mutex_lock(&mfd->sync_mutex);
+	if (mfd->timeline) {
+		sw_sync_timeline_inc(mfd->timeline, 2);
+		mfd->timeline_value += 2;
+	}
+	mfd->last_rel_fence = 0;
+	mfd->cur_rel_fence = 0;
+	mutex_unlock(&mfd->sync_mutex);
+}
+
+static void mdss_fb_pan_idle(struct msm_fb_data_type *mfd)
+{
+	int ret;
+
+	if (mfd->is_committing) {
+		ret = wait_for_completion_timeout(
+				&mfd->commit_comp,
+			msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
+		if (ret < 0)
+			ret = -ERESTARTSYS;
+		else if (!ret)
+			pr_err("%s wait for commit_comp timeout %d %d\n",
+				__func__, ret, mfd->is_committing);
+		if (ret <= 0) {
+			mutex_lock(&mfd->sync_mutex);
+			mdss_fb_signal_timeline_locked(mfd);
+			mfd->is_committing = 0;
+			complete_all(&mfd->commit_comp);
+			mutex_unlock(&mfd->sync_mutex);
+		}
+	}
+}
+
+static int mdss_fb_pan_display_ex(struct fb_info *info,
+		struct mdp_display_commit *disp_commit)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+	struct msm_fb_backup_type *fb_backup;
+	struct fb_var_screeninfo *var = &disp_commit->var;
+	u32 wait_for_finish = disp_commit->wait_for_finish;
+	int ret = 0;
+
+	if ((!mfd->op_enable) || (!mfd->panel_power_on))
+		return -EPERM;
+
+	if (var->xoffset > (info->var.xres_virtual - info->var.xres))
+		return -EINVAL;
+
+	if (var->yoffset > (info->var.yres_virtual - info->var.yres))
+		return -EINVAL;
+
+	mdss_fb_pan_idle(mfd);
+
+	mutex_lock(&mfd->sync_mutex);
+	if (info->fix.xpanstep)
+		info->var.xoffset =
+		(var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
+
+	if (info->fix.ypanstep)
+		info->var.yoffset =
+		(var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
+
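+	/* Snapshot the fb_info and commit request; the deferred worker
+	 * operates on this stable copy after the ioctl has returned.
+	 */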
+	fb_backup = (struct msm_fb_backup_type *)mfd->msm_fb_backup;
+	memcpy(&fb_backup->info, info, sizeof(struct fb_info));
+	memcpy(&fb_backup->disp_commit, disp_commit,
+		sizeof(struct mdp_display_commit));
+	INIT_COMPLETION(mfd->commit_comp);
+	mfd->is_committing = 1;
+	schedule_work(&mfd->commit_work);
+	mutex_unlock(&mfd->sync_mutex);
+	if (wait_for_finish)
+		mdss_fb_pan_idle(mfd);
+	return ret;
+}
+
 static int mdss_fb_pan_display(struct fb_var_screeninfo *var,
+		struct fb_info *info)
+{
+	struct mdp_display_commit disp_commit;
+	memset(&disp_commit, 0, sizeof(disp_commit));
+	disp_commit.wait_for_finish = true;
+	return mdss_fb_pan_display_ex(info, &disp_commit);
+}
+
+static int mdss_fb_pan_display_sub(struct fb_var_screeninfo *var,
 			       struct fb_info *info)
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
@@ -918,23 +1168,64 @@
 		info->var.yoffset =
 		(var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
 
-	if (mfd->dma_fnc)
-		mfd->dma_fnc(mfd);
+	mdss_fb_wait_for_fence(mfd);
+	if (mfd->mdp.dma_fnc)
+		mfd->mdp.dma_fnc(mfd);
 	else
 		pr_warn("dma function not set for panel type=%d\n",
 				mfd->panel.type);
-
+	mdss_fb_signal_timeline(mfd);
 	mdss_fb_update_backlight(mfd);
-
-	++mfd->panel_info.frame_count;
 	return 0;
 }
 
+static void mdss_fb_var_to_panelinfo(struct fb_var_screeninfo *var,
+	struct mdss_panel_info *pinfo)
+{
+	pinfo->xres = var->xres;
+	pinfo->yres = var->yres;
+	pinfo->lcdc.v_front_porch = var->lower_margin;
+	pinfo->lcdc.v_back_porch = var->upper_margin;
+	pinfo->lcdc.v_pulse_width = var->vsync_len;
+	pinfo->lcdc.h_front_porch = var->right_margin;
+	pinfo->lcdc.h_back_porch = var->left_margin;
+	pinfo->lcdc.h_pulse_width = var->hsync_len;
+	pinfo->clk_rate = var->pixclock;
+}
+
+static void mdss_fb_commit_wq_handler(struct work_struct *work)
+{
+	struct msm_fb_data_type *mfd;
+	struct fb_var_screeninfo *var;
+	struct fb_info *info;
+	struct msm_fb_backup_type *fb_backup;
+	int ret;
+
+	mfd = container_of(work, struct msm_fb_data_type, commit_work);
+	fb_backup = (struct msm_fb_backup_type *)mfd->msm_fb_backup;
+	info = &fb_backup->info;
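+	/* Overlay commits are kicked off directly; legacy pan requests
+	 * fall back to the pan_display_sub path below.
+	 */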
+	if (fb_backup->disp_commit.flags &
+		MDP_DISPLAY_COMMIT_OVERLAY) {
+		mdss_fb_wait_for_fence(mfd);
+		if (mfd->mdp.kickoff_fnc)
+			mfd->mdp.kickoff_fnc(mfd);
+		mdss_fb_signal_timeline(mfd);
+	} else {
+		var = &fb_backup->disp_commit.var;
+		ret = mdss_fb_pan_display_sub(var, info);
+		if (ret)
+			pr_err("%s fails: ret = %x", __func__, ret);
+	}
+	mutex_lock(&mfd->sync_mutex);
+	mfd->is_committing = 0;
+	complete_all(&mfd->commit_comp);
+	mutex_unlock(&mfd->sync_mutex);
+}
+
 static int mdss_fb_check_var(struct fb_var_screeninfo *var,
 			     struct fb_info *info)
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-	u32 len;
 
 	if (var->rotate != FB_ROTATE_UR)
 		return -EINVAL;
@@ -1013,23 +1304,35 @@
 	if ((var->xres_virtual <= 0) || (var->yres_virtual <= 0))
 		return -EINVAL;
 
-	len = var->xres_virtual * var->yres_virtual * (var->bits_per_pixel / 8);
-	if (len > info->fix.smem_len)
-		return -EINVAL;
+	if (info->fix.smem_start) {
+		u32 len = var->xres_virtual * var->yres_virtual *
+			(var->bits_per_pixel / 8);
+		if (len > info->fix.smem_len)
+			return -EINVAL;
+	}
 
 	if ((var->xres == 0) || (var->yres == 0))
 		return -EINVAL;
 
-	if ((var->xres > mfd->panel_info.xres) ||
-	    (var->yres > mfd->panel_info.yres))
-		return -EINVAL;
-
 	if (var->xoffset > (var->xres_virtual - var->xres))
 		return -EINVAL;
 
 	if (var->yoffset > (var->yres_virtual - var->yres))
 		return -EINVAL;
 
+	if (mfd->panel_info) {
+		struct mdss_panel_info panel_info;
+		int rc;
+
+		memcpy(&panel_info, mfd->panel_info, sizeof(panel_info));
+		mdss_fb_var_to_panelinfo(var, &panel_info);
+		rc = mdss_fb_send_panel_event(mfd, MDSS_EVENT_CHECK_PARAMS,
+			&panel_info);
+		if (IS_ERR_VALUE(rc))
+			return rc;
+		mfd->panel_reconfig = rc;
+	}
+
 	return 0;
 }
 
@@ -1038,8 +1341,8 @@
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 	struct fb_var_screeninfo *var = &info->var;
 	int old_imgType;
-	int blank = 0;
 
+	mdss_fb_pan_idle(mfd);
 	old_imgType = mfd->fb_imgType;
 	switch (var->bits_per_pixel) {
 	case 16:
@@ -1070,22 +1373,20 @@
 		return -EINVAL;
 	}
 
-	if ((mfd->var_pixclock != var->pixclock) ||
-	    (mfd->hw_refresh && ((mfd->fb_imgType != old_imgType) ||
-				 (mfd->var_pixclock != var->pixclock) ||
-				 (mfd->var_xres != var->xres) ||
-				 (mfd->var_yres != var->yres)))) {
-		mfd->var_xres = var->xres;
-		mfd->var_yres = var->yres;
-		mfd->var_pixclock = var->pixclock;
-		blank = 1;
-	}
-	mfd->fbi->fix.line_length = mdss_fb_line_length(mfd->index, var->xres,
-						var->bits_per_pixel / 8);
 
-	if (blank) {
+	if (mfd->mdp.fb_stride)
+		mfd->fbi->fix.line_length = mfd->mdp.fb_stride(mfd->index,
+						var->xres,
+						var->bits_per_pixel / 8);
+	else
+		mfd->fbi->fix.line_length = var->xres * var->bits_per_pixel / 8;
+
+	if (mfd->panel_reconfig || (mfd->fb_imgType != old_imgType)) {
 		mdss_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
+		mdss_fb_var_to_panelinfo(var, mfd->panel_info);
 		mdss_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
+		mfd->panel_reconfig = false;
 	}
 
 	return 0;
@@ -1097,14 +1398,14 @@
 	struct fb_cursor cursor;
 	int ret;
 
-	if (!mfd->cursor_update)
+	if (!mfd->mdp.cursor_update)
 		return -ENODEV;
 
 	ret = copy_from_user(&cursor, p, sizeof(cursor));
 	if (ret)
 		return ret;
 
-	return mfd->cursor_update(info, &cursor);
+	return mfd->mdp.cursor_update(mfd, &cursor);
 }
 
 static int mdss_fb_set_lut(struct fb_info *info, void __user *p)
@@ -1113,17 +1414,118 @@
 	struct fb_cmap cmap;
 	int ret;
 
-	if (!mfd->lut_update)
+	if (!mfd->mdp.lut_update)
 		return -ENODEV;
 
 	ret = copy_from_user(&cmap, p, sizeof(cmap));
 	if (ret)
 		return ret;
 
-	mfd->lut_update(info, &cmap);
+	mfd->mdp.lut_update(mfd, &cmap);
 	return 0;
 }
 
+static int mdss_fb_handle_buf_sync_ioctl(struct msm_fb_data_type *mfd,
+						struct mdp_buf_sync *buf_sync)
+{
+	int i, fence_cnt = 0, ret = 0;
+	int acq_fen_fd[MDP_MAX_FENCE_FD];
+	struct sync_fence *fence;
+
+	if ((buf_sync->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
+		(mfd->timeline == NULL))
+		return -EINVAL;
+
+	if ((!mfd->op_enable) || (!mfd->panel_power_on))
+		return -EPERM;
+
+	if (buf_sync->acq_fen_fd_cnt)
+		ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
+				buf_sync->acq_fen_fd_cnt * sizeof(int));
+	if (ret) {
+		pr_err("%s:copy_from_user failed", __func__);
+		return ret;
+	}
+	mutex_lock(&mfd->sync_mutex);
+	for (i = 0; i < buf_sync->acq_fen_fd_cnt; i++) {
+		fence = sync_fence_fdget(acq_fen_fd[i]);
+		if (fence == NULL) {
+			pr_info("%s: null fence! i=%d fd=%d\n", __func__, i,
+				acq_fen_fd[i]);
+			ret = -EINVAL;
+			break;
+		}
+		mfd->acq_fen[i] = fence;
+	}
+	fence_cnt = i;
+	if (ret)
+		goto buf_sync_err_1;
+	mfd->acq_fen_cnt = fence_cnt;
+	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
+		mdss_fb_wait_for_fence(mfd);
+
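+	/* The release point is created two steps ahead of the current
+	 * timeline value: the buffer is only safe to reuse once the
+	 * following frame has replaced it on screen.
+	 */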
+	mfd->cur_rel_sync_pt = sw_sync_pt_create(mfd->timeline,
+			mfd->timeline_value + 2);
+	if (mfd->cur_rel_sync_pt == NULL) {
+		pr_err("%s: cannot create sync point", __func__);
+		ret = -ENOMEM;
+		goto buf_sync_err_1;
+	}
+	/* create fence */
+	mfd->cur_rel_fence = sync_fence_create("mdp-fence",
+			mfd->cur_rel_sync_pt);
+	if (mfd->cur_rel_fence == NULL) {
+		sync_pt_free(mfd->cur_rel_sync_pt);
+		mfd->cur_rel_sync_pt = NULL;
+		pr_err("%s: cannot create fence", __func__);
+		ret = -ENOMEM;
+		goto buf_sync_err_1;
+	}
+	/* create fd */
+	mfd->cur_rel_fen_fd = get_unused_fd_flags(0);
+	if (mfd->cur_rel_fen_fd < 0) {
+		pr_err("%s: get_unused_fd_flags failed", __func__);
+		ret  = -EIO;
+		goto buf_sync_err_2;
+	}
+	sync_fence_install(mfd->cur_rel_fence, mfd->cur_rel_fen_fd);
+	ret = copy_to_user(buf_sync->rel_fen_fd,
+		&mfd->cur_rel_fen_fd, sizeof(int));
+	if (ret) {
+		pr_err("%s:copy_to_user failed", __func__);
+		goto buf_sync_err_3;
+	}
+	mutex_unlock(&mfd->sync_mutex);
+	return ret;
+buf_sync_err_3:
+	put_unused_fd(mfd->cur_rel_fen_fd);
+buf_sync_err_2:
+	sync_fence_put(mfd->cur_rel_fence);
+	mfd->cur_rel_fence = NULL;
+	mfd->cur_rel_fen_fd = 0;
+buf_sync_err_1:
+	for (i = 0; i < fence_cnt; i++)
+		sync_fence_put(mfd->acq_fen[i]);
+	mfd->acq_fen_cnt = 0;
+	mutex_unlock(&mfd->sync_mutex);
+	return ret;
+}
+
+static int mdss_fb_display_commit(struct fb_info *info,
+						unsigned long *argp)
+{
+	int ret;
+	struct mdp_display_commit disp_commit;
+	ret = copy_from_user(&disp_commit, argp,
+			sizeof(disp_commit));
+	if (ret) {
+		pr_err("%s:copy_from_user failed", __func__);
+		return ret;
+	}
+	ret = mdss_fb_pan_display_ex(info, &disp_commit);
+	return ret;
+}
+
 static int mdss_fb_ioctl(struct fb_info *info, unsigned int cmd,
 			 unsigned long arg)
 {
@@ -1131,6 +1533,11 @@
 	void __user *argp = (void __user *)arg;
 	struct mdp_page_protection fb_page_protection;
 	int ret = -ENOSYS;
+	struct mdp_buf_sync buf_sync;
+
+	mdss_fb_power_setting_idle(mfd);
+
+	mdss_fb_pan_idle(mfd);
 
 	switch (cmd) {
 	case MSMFB_CURSOR:
@@ -1150,9 +1557,28 @@
 			return ret;
 		break;
 
+	case MSMFB_BUFFER_SYNC:
+		ret = copy_from_user(&buf_sync, argp, sizeof(buf_sync));
+		if (ret)
+			return ret;
+
+		ret = mdss_fb_handle_buf_sync_ioctl(mfd, &buf_sync);
+
+		if (!ret)
+			ret = copy_to_user(argp, &buf_sync, sizeof(buf_sync));
+		break;
+
+	case MSMFB_NOTIFY_UPDATE:
+		ret = mdss_fb_notify_update(mfd, argp);
+		break;
+
+	case MSMFB_DISPLAY_COMMIT:
+		ret = mdss_fb_display_commit(info, argp);
+		break;
+
 	default:
-		if (mfd->ioctl_handler)
-			ret = mfd->ioctl_handler(mfd, cmd, argp);
+		if (mfd->mdp.ioctl_handler)
+			ret = mfd->mdp.ioctl_handler(mfd, cmd, argp);
 		break;
 	}
 
@@ -1176,52 +1602,101 @@
 }
 EXPORT_SYMBOL(msm_fb_get_writeback_fb);
 
-int mdss_register_panel(struct mdss_panel_data *pdata)
+static int mdss_fb_register_extra_panel(struct platform_device *pdev,
+	struct mdss_panel_data *pdata)
 {
-	struct platform_device *mdss_fb_dev = NULL;
-	struct msm_fb_data_type *mfd;
-	int rc;
+	struct mdss_panel_data *fb_pdata;
 
-	if (!mdss_res) {
-		pr_err("mdss mdp resources not initialized yet\n");
-		return -ENODEV;
-	}
-
-	mdss_fb_dev = platform_device_alloc("mdss_fb", pdata->panel_info.pdest);
-	if (!mdss_fb_dev) {
-		pr_err("unable to allocate mdss_fb device\n");
-		return -ENOMEM;
-	}
-
-	mdss_fb_dev->dev.platform_data = pdata;
-
-	rc = platform_device_add(mdss_fb_dev);
-	if (rc) {
-		platform_device_put(mdss_fb_dev);
-		pr_err("unable to probe mdss_fb device (%d)\n", rc);
-		return rc;
-	}
-
-	mfd = platform_get_drvdata(mdss_fb_dev);
-	if (!mfd)
-		return -ENODEV;
-	if (mfd->key != MFD_KEY)
+	fb_pdata = dev_get_platdata(&pdev->dev);
+	if (!fb_pdata) {
+		pr_err("framebuffer device %s contains invalid panel data\n",
+				dev_name(&pdev->dev));
 		return -EINVAL;
+	}
 
-	mfd->on_fnc = mdss_mdp_ctl_on;
-	mfd->off_fnc = mdss_mdp_ctl_off;
+	if (fb_pdata->next) {
+		pr_err("split panel already setup for framebuffer device %s\n",
+				dev_name(&pdev->dev));
+		return -EEXIST;
+	}
 
-	rc = mdss_mdp_overlay_init(mfd);
-	if (rc)
-		pr_err("unable to init overlay\n");
+	if ((fb_pdata->panel_info.type != MIPI_VIDEO_PANEL) ||
+			(pdata->panel_info.type != MIPI_VIDEO_PANEL)) {
+		pr_err("Split panel not supported for panel type %d\n",
+				pdata->panel_info.type);
+		return -EINVAL;
+	}
 
+	fb_pdata->next = pdata;
+
+	return 0;
+}
+
+int mdss_register_panel(struct platform_device *pdev,
+	struct mdss_panel_data *pdata)
+{
+	struct platform_device *fb_pdev, *mdss_pdev;
+	struct device_node *node;
+	int rc = 0;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("Invalid device node\n");
+		return -ENODEV;
+	}
+
+	if (!mdp_instance) {
+		pr_err("mdss mdp resource not initialized yet\n");
+		return -ENODEV;
+	}
+
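+	/* Each panel device node carries a qcom,mdss-fb-map phandle that
+	 * points at the framebuffer node it should be bound to.
+	 */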
+	node = of_parse_phandle(pdev->dev.of_node, "qcom,mdss-fb-map", 0);
+	if (!node) {
+		pr_err("Unable to find fb node for device: %s\n",
+				pdev->name);
+		return -ENODEV;
+	}
+	mdss_pdev = of_find_device_by_node(node->parent);
+	if (!mdss_pdev) {
+		pr_err("Unable to find mdss for node: %s\n", node->full_name);
+		rc = -ENODEV;
+		goto mdss_notfound;
+	}
+
+	fb_pdev = of_find_device_by_node(node);
+	if (fb_pdev) {
+		rc = mdss_fb_register_extra_panel(fb_pdev, pdata);
+	} else {
+		pr_info("adding framebuffer device %s\n", dev_name(&pdev->dev));
+		fb_pdev = of_platform_device_create(node, NULL,
+				&mdss_pdev->dev);
+		if (!fb_pdev) {
+			pr_err("unable to create fb device for %s\n",
+					pdev->name);
+			rc = -ENODEV;
+			goto mdss_notfound;
+		}
+		fb_pdev->dev.platform_data = pdata;
+	}
+
+	if (mdp_instance->panel_register_done)
+		mdp_instance->panel_register_done(pdata);
+
+mdss_notfound:
+	of_node_put(node);
 	return rc;
 }
 EXPORT_SYMBOL(mdss_register_panel);
 
+int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp)
+{
+	if (mdp_instance) {
+		pr_err("multiple MDP instance registration\n");
+		return -EINVAL;
+	}
+
+	mdp_instance = mdp;
+	return 0;
+}
+EXPORT_SYMBOL(mdss_fb_register_mdp_instance);
+
 int mdss_fb_get_phys_info(unsigned long *start, unsigned long *len, int fb_num)
 {
 	struct fb_info *info;
+	struct msm_fb_data_type *mfd;
 
 	if (fb_num > MAX_FBI_LIST)
 		return -EINVAL;
@@ -1230,8 +1705,16 @@
 	if (!info)
 		return -ENOENT;
 
-	*start = info->fix.smem_start;
+	mfd = (struct msm_fb_data_type *)info->par;
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->iova)
+		*start = mfd->iova;
+	else
+		*start = info->fix.smem_start;
 	*len = info->fix.smem_len;
+
 	return 0;
 }
 EXPORT_SYMBOL(mdss_fb_get_phys_info);
diff --git a/drivers/video/msm/mdss/mdss_fb.h b/drivers/video/msm/mdss/mdss_fb.h
index 3e3c04b..fdbbea9 100644
--- a/drivers/video/msm/mdss/mdss_fb.h
+++ b/drivers/video/msm/mdss/mdss_fb.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,7 +19,6 @@
 #include <linux/msm_mdp.h>
 #include <linux/types.h>
 
-#include "mdss_mdp.h"
 #include "mdss_panel.h"
 
 #define MSM_FB_DEFAULT_PAGE_SIZE 2
@@ -27,6 +26,11 @@
 #define MSM_FB_MAX_DEV_LIST 32
 
 #define MSM_FB_ENABLE_DBGFS
+#define WAIT_FENCE_FIRST_TIMEOUT MSEC_PER_SEC
+#define WAIT_FENCE_FINAL_TIMEOUT (10 * MSEC_PER_SEC)
+/* Display op timeout should be greater than total timeout */
+#define WAIT_DISP_OP_TIMEOUT ((WAIT_FENCE_FIRST_TIMEOUT + \
+		WAIT_FENCE_FINAL_TIMEOUT) * MDP_MAX_FENCE_FD)
 
 #ifndef MAX
 #define  MAX(x, y) (((x) > (y)) ? (x) : (y))
@@ -41,6 +45,33 @@
 	int panel_power_on;
 };
 
+struct disp_info_notify {
+	int type;
+	struct timer_list timer;
+	struct completion comp;
+	struct mutex lock;
+};
+
+struct msm_fb_data_type;
+
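+/*
+ * Dispatch table registered by the MDP driver through
+ * mdss_fb_register_mdp_instance(); mdss_fb calls back through these
+ * hooks instead of linking against a specific MDP implementation.
+ */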
+struct msm_mdp_interface {
+	int (*fb_mem_alloc_fnc)(struct msm_fb_data_type *mfd);
+	int (*init_fnc)(struct msm_fb_data_type *mfd);
+	int (*on_fnc)(struct msm_fb_data_type *mfd);
+	int (*off_fnc)(struct msm_fb_data_type *mfd);
+	int (*kickoff_fnc)(struct msm_fb_data_type *mfd);
+	int (*ioctl_handler)(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
+	void (*dma_fnc)(struct msm_fb_data_type *mfd);
+	int (*cursor_update)(struct msm_fb_data_type *mfd,
+				struct fb_cursor *cursor);
+	int (*lut_update)(struct msm_fb_data_type *mfd, struct fb_cmap *cmap);
+	int (*do_histogram)(struct msm_fb_data_type *mfd,
+				struct mdp_histogram *hist);
+	int (*panel_register_done)(struct mdss_panel_data *pdata);
+	u32 (*fb_stride)(u32 fb_index, u32 xres, int bpp);
+	void *private1;
+};
+
 struct msm_fb_data_type {
 	u32 key;
 	u32 index;
@@ -48,57 +79,69 @@
 	u32 fb_page;
 
 	struct panel_id panel;
-	struct mdss_panel_info panel_info;
+	struct mdss_panel_info *panel_info;
+	int split_display;
 
 	u32 dest;
 	struct fb_info *fbi;
 
 	int op_enable;
 	u32 fb_imgType;
+	int panel_reconfig;
+
 	u32 dst_format;
-
-	int hw_refresh;
-
-	int overlay_play_enable;
-
 	int panel_power_on;
 	struct disp_info_type_suspend suspend;
 
-	int (*on_fnc) (struct msm_fb_data_type *mfd);
-	int (*off_fnc) (struct msm_fb_data_type *mfd);
-	int (*kickoff_fnc) (struct mdss_mdp_ctl *ctl);
-	int (*ioctl_handler) (struct msm_fb_data_type *mfd, u32 cmd, void *arg);
-	void (*dma_fnc) (struct msm_fb_data_type *mfd);
-	int (*cursor_update) (struct fb_info *info,
-			      struct fb_cursor *cursor);
-	int (*lut_update) (struct fb_info *info,
-			   struct fb_cmap *cmap);
-	int (*do_histogram) (struct fb_info *info,
-			     struct mdp_histogram *hist);
-
 	struct ion_handle *ihdl;
+	unsigned long iova;
 	void *cursor_buf;
-	void *cursor_buf_phys;
+	unsigned long cursor_buf_phys;
+	unsigned long cursor_buf_iova;
 
 	u32 bl_level;
+	u32 bl_scale;
+	u32 bl_min_lvl;
 	struct mutex lock;
 
 	struct platform_device *pdev;
 
-	u32 var_xres;
-	u32 var_yres;
-	u32 var_pixclock;
-
 	u32 mdp_fb_page_protection;
-	struct ion_client *iclient;
 
-	struct mdss_mdp_ctl *ctl;
-	struct mdss_mdp_wb *wb;
+	struct disp_info_notify update;
+	struct disp_info_notify no_update;
+
+	struct msm_mdp_interface mdp;
+
+	u32 acq_fen_cnt;
+	struct sync_fence *acq_fen[MDP_MAX_FENCE_FD];
+	int cur_rel_fen_fd;
+	struct sync_pt *cur_rel_sync_pt;
+	struct sync_fence *cur_rel_fence;
+	struct sync_fence *last_rel_fence;
+	struct sw_sync_timeline *timeline;
+	int timeline_value;
+	u32 last_acq_fen_cnt;
+	struct sync_fence *last_acq_fen[MDP_MAX_FENCE_FD];
+	struct mutex sync_mutex;
+	/* for non-blocking */
+	struct completion commit_comp;
+	u32 is_committing;
+	struct work_struct commit_work;
+	void *msm_fb_backup;
+	struct completion power_set_comp;
+	u32 is_power_setting;
+};
+
+struct msm_fb_backup_type {
+	struct fb_info info;
+	struct mdp_display_commit disp_commit;
 };
 
 int mdss_fb_get_phys_info(unsigned long *start, unsigned long *len, int fb_num);
 void mdss_fb_set_backlight(struct msm_fb_data_type *mfd, u32 bkl_lvl);
 void mdss_fb_update_backlight(struct msm_fb_data_type *mfd);
-int mdss_fb_suspend_all(void);
-int mdss_fb_resume_all(void);
+void mdss_fb_wait_for_fence(struct msm_fb_data_type *mfd);
+void mdss_fb_signal_timeline(struct msm_fb_data_type *mfd);
+int mdss_fb_register_mdp_instance(struct msm_mdp_interface *mdp);
 #endif /* MDSS_FB_H */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_cec.c b/drivers/video/msm/mdss/mdss_hdmi_cec.c
new file mode 100644
index 0000000..694fcde
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_cec.c
@@ -0,0 +1,949 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <mach/board.h>
+
+#include "mdss_hdmi_cec.h"
+
+#define CEC_STATUS_WR_ERROR	BIT(0)
+#define CEC_STATUS_WR_DONE	BIT(1)
+
+/* Reference: HDMI 1.4a Specification section 7.1 */
+#define RETRANSMIT_MAX_NUM	5
+
+/*
+ * Ref. HDMI 1.4a: Supplement-1 CEC Section 6, 7
+ */
+struct hdmi_cec_msg {
+	u8 sender_id;
+	u8 recvr_id;
+	u8 opcode;
+	u8 operand[15];
+	u8 frame_size;
+	u8 retransmit;
+};
+
+struct hdmi_cec_msg_node {
+	struct hdmi_cec_msg msg;
+	struct list_head list;
+};
+
+struct hdmi_cec_ctrl {
+	bool cec_enabled;
+	bool compliance_response_enabled;
+	bool cec_engine_configed;
+
+	u8 cec_logical_addr;
+	u32 cec_msg_wr_status;
+
+	spinlock_t lock;
+	struct list_head msg_head;
+	struct work_struct cec_read_work;
+	struct completion cec_msg_wr_done;
+	struct hdmi_cec_init_data init_data;
+};
+
+static int hdmi_cec_msg_send(struct hdmi_cec_ctrl *cec_ctrl,
+	struct hdmi_cec_msg *msg);
+
+static void hdmi_cec_dump_msg(struct hdmi_cec_ctrl *cec_ctrl,
+	struct hdmi_cec_msg *msg)
+{
+	int i;
+	unsigned long flags;
+
+	if (!cec_ctrl || !msg) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	DEV_DBG("=================%pS dump start =====================\n",
+		__builtin_return_address(0));
+
+	DEV_DBG("sender_id     : %d\n", msg->sender_id);
+	DEV_DBG("recvr_id      : %d\n", msg->recvr_id);
+
+	if (msg->frame_size < 2) {
+		DEV_DBG("polling message\n");
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+		return;
+	}
+
+	DEV_DBG("opcode        : %02x\n", msg->opcode);
+	for (i = 0; i < msg->frame_size - 2; i++)
+		DEV_DBG("operand(%2d) : %02x\n", i + 1, msg->operand[i]);
+
+	DEV_DBG("=================%pS dump end =====================\n",
+		__builtin_return_address(0));
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+} /* hdmi_cec_dump_msg */
+
+static inline void hdmi_cec_write_logical_addr(struct hdmi_cec_ctrl *cec_ctrl,
+	u8 addr)
+{
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return;
+	}
+
+	DSS_REG_W(cec_ctrl->init_data.io, HDMI_CEC_ADDR, addr & 0xF);
+} /* hdmi_cec_write_logical_addr */
+
+static void hdmi_cec_disable(struct hdmi_cec_ctrl *cec_ctrl)
+{
+	u32 reg_val;
+	unsigned long flags;
+	struct dss_io_data *io = NULL;
+	struct hdmi_cec_msg_node *msg_node, *tmp;
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return;
+	}
+
+	io = cec_ctrl->init_data.io;
+
+	/* Disable Engine */
+	DSS_REG_W(io, HDMI_CEC_CTRL, 0);
+
+	/* Disable CEC interrupts */
+	reg_val = DSS_REG_R(io, HDMI_CEC_INT);
+	DSS_REG_W(io, HDMI_CEC_INT, reg_val & ~(BIT(1) | BIT(3) | BIT(7)));
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	list_for_each_entry_safe(msg_node, tmp, &cec_ctrl->msg_head, list) {
+		list_del(&msg_node->list);
+		kfree(msg_node);
+	}
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+} /* hdmi_cec_disable */
+
+static void hdmi_cec_enable(struct hdmi_cec_ctrl *cec_ctrl)
+{
+	struct dss_io_data *io = NULL;
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return;
+	}
+
+	io = cec_ctrl->init_data.io;
+
+	INIT_LIST_HEAD(&cec_ctrl->msg_head);
+
+	/* Enable CEC interrupts */
+	DSS_REG_W(io, HDMI_CEC_INT, BIT(1) | BIT(3) | BIT(7));
+
+	/* Enable Engine */
+	DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0));
+} /* hdmi_cec_enable */
+
+static int hdmi_cec_send_abort_opcode(struct hdmi_cec_ctrl *cec_ctrl,
+	struct hdmi_cec_msg *in_msg, u8 reason_operand)
+{
+	int i = 0;
+	struct hdmi_cec_msg out_msg;
+
+	if (!cec_ctrl || !in_msg) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
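+	/* Logical address 0x4 ("Playback Device 1") is used as the
+	 * initiator for all compliance responses in this file.
+	 */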
+	out_msg.sender_id = 0x4;
+	out_msg.recvr_id = in_msg->sender_id;
+	out_msg.opcode = 0x0; /* opcode for feature abort */
+	out_msg.operand[i++] = in_msg->opcode;
+	out_msg.operand[i++] = reason_operand;
+	out_msg.frame_size = i + 2;
+
+	return hdmi_cec_msg_send(cec_ctrl, &out_msg);
+} /* hdmi_cec_send_abort_opcode */
+
+static int hdmi_cec_msg_parser(struct hdmi_cec_ctrl *cec_ctrl,
+	struct hdmi_cec_msg *in_msg)
+{
+	int rc = 0, i = 0;
+	struct hdmi_cec_msg out_msg;
+
+	if (!cec_ctrl || !in_msg) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	DEV_DBG("%s: in_msg->opcode = 0x%x\n", __func__, in_msg->opcode);
+	switch (in_msg->opcode) {
+	case 0x64:
+		/* Set OSD String */
+		DEV_INFO("%s: Recvd OSD Str=[0x%x]\n", __func__,
+			in_msg->operand[3]);
+		break;
+	case 0x83:
+		/* Give Phy Addr */
+		DEV_INFO("%s: Recvd a Give Phy Addr cmd\n", __func__);
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = 0xF; /* Broadcast */
+		out_msg.opcode = 0x84;
+		out_msg.operand[i++] = 0x10;
+		out_msg.operand[i++] = 0x0;
+		out_msg.operand[i++] = 0x04;
+		out_msg.frame_size = i + 2;
+
+		rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+		break;
+	case 0xFF:
+		/* Abort */
+		DEV_INFO("%s: Recvd an abort cmd.\n", __func__);
+
+		/* reason = "Refused" */
+		rc = hdmi_cec_send_abort_opcode(cec_ctrl, in_msg, 0x04);
+		break;
+	case 0x46:
+		/* Give OSD name */
+		DEV_INFO("%s: Recvd 'Give OSD name' cmd.\n", __func__);
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = in_msg->sender_id;
+		out_msg.opcode = 0x47; /* OSD Name */
+		/* Display control byte */
+		out_msg.operand[i++] = 0x0;
+		out_msg.operand[i++] = 'H';
+		out_msg.operand[i++] = 'e';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = ' ';
+		out_msg.operand[i++] = 'W';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = 'r';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'd';
+		out_msg.frame_size = i + 2;
+
+		rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+		break;
+	case 0x8F:
+		/* Give Device Power status */
+		DEV_INFO("%s: Recvd a Power status message\n", __func__);
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = in_msg->sender_id;
+		out_msg.opcode = 0x90; /* Report Power Status */
+		out_msg.operand[i++] = 'H';
+		out_msg.operand[i++] = 'e';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = ' ';
+		out_msg.operand[i++] = 'W';
+		out_msg.operand[i++] = 'o';
+		out_msg.operand[i++] = 'r';
+		out_msg.operand[i++] = 'l';
+		out_msg.operand[i++] = 'd';
+		out_msg.frame_size = i + 2;
+
+		rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+		break;
+	case 0x80:
+		/* Routing Change cmd */
+	case 0x86:
+		/* Set Stream Path */
+		DEV_INFO("%s: Recvd Set Stream or Routing Change cmd\n",
+			__func__);
+
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = 0xF; /* broadcast this message */
+		out_msg.opcode = 0x82; /* Active Source */
+		out_msg.operand[i++] = 0x10;
+		out_msg.operand[i++] = 0x0;
+		out_msg.frame_size = i + 2;
+
+		rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+
+		/* todo: check if need to wait for msg response from sink */
+
+		/* sending <Image View On> message */
+		memset(&out_msg, 0x0, sizeof(struct hdmi_cec_msg));
+		i = 0;
+		out_msg.sender_id = 0x4;
+		out_msg.recvr_id = in_msg->sender_id;
+		out_msg.opcode = 0x04; /* opcode for Image View On */
+		out_msg.frame_size = i + 2;
+
+		rc = hdmi_cec_msg_send(cec_ctrl, &out_msg);
+		break;
+	case 0x44:
+		/* User Control Pressed */
+		DEV_INFO("%s: User Control Pressed\n", __func__);
+		break;
+	case 0x45:
+		/* User Control Released */
+		DEV_INFO("%s: User Control Released\n", __func__);
+		break;
+	default:
+		DEV_INFO("%s: Recvd an unknown cmd = [%u]\n", __func__,
+			in_msg->opcode);
+
+		/* reason = "Unrecognized opcode" */
+		rc = hdmi_cec_send_abort_opcode(cec_ctrl, in_msg, 0x0);
+		break;
+	}
+
+	return rc;
+} /* hdmi_cec_msg_parser */
+
+static int hdmi_cec_msg_send(struct hdmi_cec_ctrl *cec_ctrl,
+	struct hdmi_cec_msg *msg)
+{
+	int i, line_check_retry = 10;
+	u32 frame_retransmit = RETRANSMIT_MAX_NUM;
+	u32 frame_type;
+	unsigned long flags;
+	struct dss_io_data *io = NULL;
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io || !msg) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = cec_ctrl->init_data.io;
+
+	INIT_COMPLETION(cec_ctrl->cec_msg_wr_done);
+	cec_ctrl->cec_msg_wr_status = 0;
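+	/* Bit 0 of each word written to HDMI_CEC_WR_DATA marks the frame
+	 * as broadcast (destination logical address 15).
+	 */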
+	frame_type = (msg->recvr_id == 15 ? BIT(0) : 0);
+	if (msg->retransmit > 0 && msg->retransmit < RETRANSMIT_MAX_NUM)
+		frame_retransmit = msg->retransmit;
+
+	/* toggle cec in order to flush out bad hw state, if any */
+	DSS_REG_W(io, HDMI_CEC_CTRL, 0);
+	DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0));
+
+	frame_retransmit = (frame_retransmit & 0xF) << 4;
+	DSS_REG_W(io, HDMI_CEC_RETRANSMIT, BIT(0) | frame_retransmit);
+
+	/* header block */
+	DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+		(((msg->sender_id << 4) | msg->recvr_id) << 8) | frame_type);
+
+	/* data block 0 : opcode */
+	DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+		((msg->frame_size < 2 ? 0 : msg->opcode) << 8) | frame_type);
+
+	/* data block 1-14 : operand 0-13 */
+	for (i = 0; i < msg->frame_size - 2; i++)
+		DSS_REG_W_ND(io, HDMI_CEC_WR_DATA,
+			(msg->operand[i] << 8) | frame_type);
+
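+	/* Poll the line-busy status bit, yielding between reads, before
+	 * starting a new transmission.
+	 */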
+	while ((DSS_REG_R(io, HDMI_CEC_STATUS) & BIT(0)) &&
+		line_check_retry--) {
+		DEV_DBG("%s: CEC line is busy(%d)\n", __func__,
+			line_check_retry);
+		schedule();
+	}
+
+	if (!line_check_retry && (DSS_REG_R(io, HDMI_CEC_STATUS) & BIT(0))) {
+		DEV_ERR("%s: CEC line is busy. Retry\n", __func__);
+		return -EAGAIN;
+	}
+
+	/* start transmission */
+	DSS_REG_W(io, HDMI_CEC_CTRL, BIT(0) | BIT(1) |
+		((msg->frame_size & 0x1F) << 4) | BIT(9));
+
+	if (!wait_for_completion_interruptible_timeout(
+		&cec_ctrl->cec_msg_wr_done, HZ)) {
+		DEV_ERR("%s: timed out\n", __func__);
+		hdmi_cec_dump_msg(cec_ctrl, msg);
+		return -ETIMEDOUT;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	if (cec_ctrl->cec_msg_wr_status == CEC_STATUS_WR_ERROR)
+		DEV_ERR("%s: msg write failed.\n", __func__);
+	else
+		DEV_DBG("%s: CEC write frame done (frame len=%d)\n", __func__,
+			msg->frame_size);
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+	hdmi_cec_dump_msg(cec_ctrl, msg);
+
+	return 0;
+} /* hdmi_cec_msg_send */
+
+static void hdmi_cec_msg_recv(struct work_struct *work)
+{
+	int i;
+	u32 data;
+	unsigned long flags;
+	struct hdmi_cec_ctrl *cec_ctrl = NULL;
+	struct dss_io_data *io = NULL;
+	struct hdmi_cec_msg_node *msg_node = NULL;
+
+	cec_ctrl = container_of(work, struct hdmi_cec_ctrl, cec_read_work);
+	if (!cec_ctrl || !cec_ctrl->cec_enabled || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = cec_ctrl->init_data.io;
+
+	msg_node = kzalloc(sizeof(*msg_node), GFP_KERNEL);
+	if (!msg_node) {
+		DEV_ERR("%s: FAILED: out of memory\n", __func__);
+		return;
+	}
+
+	data = DSS_REG_R(io, HDMI_CEC_RD_DATA);
+
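+	/* Header block layout: destination in bits [3:0], initiator in
+	 * bits [7:4], frame size in bits [12:8].
+	 */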
+	msg_node->msg.recvr_id   = (data & 0x000F);
+	msg_node->msg.sender_id  = (data & 0x00F0) >> 4;
+	msg_node->msg.frame_size = (data & 0x1F00) >> 8;
+	DEV_DBG("%s: Recvd init=[%u] dest=[%u] size=[%u]\n", __func__,
+		msg_node->msg.sender_id, msg_node->msg.recvr_id,
+		msg_node->msg.frame_size);
+
+	if (msg_node->msg.frame_size < 1) {
+		DEV_ERR("%s: invalid message (frame length = %d)\n",
+			__func__, msg_node->msg.frame_size);
+		kfree(msg_node);
+		return;
+	} else if (msg_node->msg.frame_size == 1) {
+		DEV_DBG("%s: polling message (dest[%x] <- init[%x])\n", __func__,
+			msg_node->msg.recvr_id, msg_node->msg.sender_id);
+		kfree(msg_node);
+		return;
+	}
+
+	/* data block 0 : opcode */
+	data = DSS_REG_R_ND(io, HDMI_CEC_RD_DATA);
+	msg_node->msg.opcode = data & 0xFF;
+
+	/* data block 1-14 : operand 0-13 */
+	for (i = 0; i < msg_node->msg.frame_size - 2; i++) {
+		data = DSS_REG_R_ND(io, HDMI_CEC_RD_DATA);
+		msg_node->msg.operand[i] = data & 0xFF;
+	}
+
+	for (; i < 14; i++)
+		msg_node->msg.operand[i] = 0;
+
+	DEV_DBG("%s: CEC read frame done\n", __func__);
+	hdmi_cec_dump_msg(cec_ctrl, &msg_node->msg);
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	if (cec_ctrl->compliance_response_enabled) {
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+		if (hdmi_cec_msg_parser(cec_ctrl, &msg_node->msg) != 0) {
+			DEV_ERR("%s: cec_msg_parser fail. Sending abort msg\n",
+				__func__);
+			/* reason = "Unrecognized opcode" */
+			hdmi_cec_send_abort_opcode(cec_ctrl,
+				&msg_node->msg, 0x0);
+		}
+		kfree(msg_node);
+	} else {
+		list_add_tail(&msg_node->list, &cec_ctrl->msg_head);
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+		/* wake-up sysfs read_msg context */
+		sysfs_notify(cec_ctrl->init_data.sysfs_kobj, "cec", "rd_msg");
+	}
+} /* hdmi_cec_msg_recv*/
+
+static ssize_t hdmi_rda_cec_enable(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	unsigned long flags;
+	struct hdmi_cec_ctrl *cec_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EPERM;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	if (cec_ctrl->cec_enabled && cec_ctrl->cec_engine_configed) {
+		DEV_DBG("%s: cec is enabled\n", __func__);
+		ret = snprintf(buf, PAGE_SIZE, "%d\n", 1);
+	} else if (cec_ctrl->cec_enabled && !cec_ctrl->cec_engine_configed) {
+		DEV_ERR("%s: CEC will be enabled when HDMI mirroring is on\n",
+			__func__);
+		ret = -EPERM;
+	} else {
+		DEV_DBG("%s: cec is disabled\n", __func__);
+		ret = snprintf(buf, PAGE_SIZE, "%d\n", 0);
+	}
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	return ret;
+} /* hdmi_rda_cec_enable */
+
+static ssize_t hdmi_wta_cec_enable(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val;
+	bool cec_en;
+	unsigned long flags;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_cec_ctrl *cec_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EPERM;
+	}
+
+	if (kstrtoint(buf, 10, &val)) {
+		DEV_ERR("%s: kstrtoint failed.\n", __func__);
+		return -EPERM;
+	}
+	cec_en = (val == 1) ? true : false;
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	if (cec_ctrl->cec_enabled == cec_en) {
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+		DEV_INFO("%s: cec is already %s\n", __func__,
+			cec_en ? "enabled" : "disabled");
+		return ret;
+	}
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	if (!cec_en) {
+		spin_lock_irqsave(&cec_ctrl->lock, flags);
+		if (!cec_ctrl->cec_engine_configed) {
+			DEV_DBG("%s: hdmi is already off. disable cec\n",
+				__func__);
+			cec_ctrl->cec_enabled = false;
+			spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+			return ret;
+		}
+		cec_ctrl->cec_enabled = false;
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+		hdmi_cec_disable(cec_ctrl);
+		return ret;
+	} else {
+		spin_lock_irqsave(&cec_ctrl->lock, flags);
+		if (!cec_ctrl->cec_engine_configed) {
+			DEV_DBG("%s: CEC will be enabled on mirroring\n",
+				__func__);
+			cec_ctrl->cec_enabled = true;
+			spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+			return ret;
+		}
+		cec_ctrl->cec_enabled = true;
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+		hdmi_cec_enable(cec_ctrl);
+
+		return ret;
+	}
+} /* hdmi_wta_cec_enable */
+
+static ssize_t hdmi_rda_cec_enable_compliance(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	unsigned long flags;
+	ssize_t ret;
+	struct hdmi_cec_ctrl *cec_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+		return -EPERM;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+		cec_ctrl->compliance_response_enabled);
+
+	cec_ctrl->cec_logical_addr = 0x4;
+	hdmi_cec_write_logical_addr(cec_ctrl, cec_ctrl->cec_logical_addr);
+
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	return ret;
+} /* hdmi_rda_cec_enable_compliance */
+
+static ssize_t hdmi_wta_cec_enable_compliance(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int val;
+	unsigned long flags;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_cec_ctrl *cec_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+		return -EPERM;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	if (cec_ctrl->cec_enabled && cec_ctrl->cec_engine_configed) {
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+		DEV_ERR("%s: Cannot en/dis compliance when CEC session is on\n",
+			__func__);
+		return -EPERM;
+	} else {
+		if (kstrtoint(buf, 10, &val)) {
+			DEV_ERR("%s: kstrtoint failed.\n", __func__);
+			return -EPERM;
+		}
+		cec_ctrl->compliance_response_enabled =
+			(val == 1) ? true : false;
+	}
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	return ret;
+} /* hdmi_wta_cec_enable_compliance */
+
+static ssize_t hdmi_rda_cec_logical_addr(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	unsigned long flags;
+	ssize_t ret;
+	struct hdmi_cec_ctrl *cec_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+		return -EPERM;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", cec_ctrl->cec_logical_addr);
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	return ret;
+} /* hdmi_rda_cec_logical_addr */
+
+static ssize_t hdmi_wta_cec_logical_addr(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int logical_addr;
+	unsigned long flags;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_cec_ctrl *cec_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+		return -EPERM;
+	}
+
+	if (kstrtoint(buf, 10, &logical_addr)) {
+		DEV_ERR("%s: kstrtoint failed\n", __func__);
+		return -EPERM;
+	}
+
+	if (logical_addr < 0 || logical_addr > 15) {
+		DEV_ERR("%s: Invalid logical address\n", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	cec_ctrl->cec_logical_addr = (u8)logical_addr;
+	if (cec_ctrl->cec_enabled && cec_ctrl->cec_engine_configed)
+		hdmi_cec_write_logical_addr(cec_ctrl,
+			cec_ctrl->cec_logical_addr);
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	return ret;
+} /* hdmi_wta_cec_logical_addr */
+
+static ssize_t hdmi_rda_cec_msg(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int i = 0;
+	unsigned long flags;
+	struct hdmi_cec_msg_node *msg_node, *tmp;
+	struct hdmi_cec_ctrl *cec_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+		return -EPERM;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+
+	if (cec_ctrl->compliance_response_enabled) {
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+		DEV_ERR("%s: Read is disabled because compliance response is on\n",
+			__func__);
+		return -EPERM;
+	}
+
+	if (list_empty_careful(&cec_ctrl->msg_head)) {
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+		DEV_ERR("%s: CEC message queue is empty\n", __func__);
+		return -EPERM;
+	}
+
+	list_for_each_entry_safe(msg_node, tmp, &cec_ctrl->msg_head, list) {
+		if ((i+1) * sizeof(struct hdmi_cec_msg) > PAGE_SIZE) {
+			DEV_DBG("%s: Overflowing PAGE_SIZE.\n", __func__);
+			break;
+		}
+
+		memcpy(buf + (i * sizeof(struct hdmi_cec_msg)), &msg_node->msg,
+			sizeof(struct hdmi_cec_msg));
+		list_del(&msg_node->list);
+		kfree(msg_node);
+		i++;
+	}
+
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	return i * sizeof(struct hdmi_cec_msg);
+} /* hdmi_rda_cec_msg */
+
+static ssize_t hdmi_wta_cec_msg(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc;
+	unsigned long flags;
+	struct hdmi_cec_msg *msg = (struct hdmi_cec_msg *)buf;
+	struct hdmi_cec_ctrl *cec_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_CEC);
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid cec_ctrl\n", __func__);
+		return -EPERM;
+	}
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	if (cec_ctrl->compliance_response_enabled) {
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+		DEV_ERR("%s: Write disabled because compliance response is on\n",
+			__func__);
+		return -EPERM;
+	}
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	rc = hdmi_cec_msg_send(cec_ctrl, msg);
+	if (rc) {
+		DEV_ERR("%s: hdmi_cec_msg_send failed\n", __func__);
+		return rc;
+	} else {
+		return sizeof(struct hdmi_cec_msg);
+	}
+} /* hdmi_wta_cec_msg */
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, hdmi_rda_cec_enable,
+	hdmi_wta_cec_enable);
+static DEVICE_ATTR(enable_compliance, S_IRUGO | S_IWUSR,
+	hdmi_rda_cec_enable_compliance, hdmi_wta_cec_enable_compliance);
+static DEVICE_ATTR(logical_addr, S_IRUGO | S_IWUSR,
+	hdmi_rda_cec_logical_addr, hdmi_wta_cec_logical_addr);
+static DEVICE_ATTR(rd_msg, S_IRUGO, hdmi_rda_cec_msg, NULL);
+static DEVICE_ATTR(wr_msg, S_IWUSR, NULL, hdmi_wta_cec_msg);
+
+static struct attribute *hdmi_cec_fs_attrs[] = {
+	&dev_attr_enable.attr,
+	&dev_attr_enable_compliance.attr,
+	&dev_attr_logical_addr.attr,
+	&dev_attr_rd_msg.attr,
+	&dev_attr_wr_msg.attr,
+	NULL,
+};
+
+static struct attribute_group hdmi_cec_fs_attr_group = {
+	.name = "cec",
+	.attrs = hdmi_cec_fs_attrs,
+};
+
+int hdmi_cec_isr(void *input)
+{
+	int rc = 0;
+	u32 cec_intr, cec_status;
+	unsigned long flags;
+	struct dss_io_data *io = NULL;
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EPERM;
+	}
+
+	io = cec_ctrl->init_data.io;
+
+	if (!cec_ctrl->cec_enabled)
+		return 0;
+
+	cec_intr = DSS_REG_R_ND(io, HDMI_CEC_INT);
+	DEV_DBG("%s: cec interrupt status is [0x%x]\n", __func__, cec_intr);
+
+	cec_status = DSS_REG_R_ND(io, HDMI_CEC_STATUS);
+	DEV_DBG("%s: cec status is [0x%x]\n", __func__, cec_status);
+
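+	/* Status bits 0/2/6 pair with enable bits 1/3/7; an event is
+	 * handled only when both are set, and writing the status bit back
+	 * acks it.
+	 */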
+	if ((cec_intr & BIT(0)) && (cec_intr & BIT(1))) {
+		DEV_DBG("%s: CEC_IRQ_FRAME_WR_DONE\n", __func__);
+		DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(0));
+
+		spin_lock_irqsave(&cec_ctrl->lock, flags);
+		cec_ctrl->cec_msg_wr_status |= CEC_STATUS_WR_DONE;
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+		if (!completion_done(&cec_ctrl->cec_msg_wr_done))
+			complete_all(&cec_ctrl->cec_msg_wr_done);
+	}
+
+	if ((cec_intr & BIT(2)) && (cec_intr & BIT(3))) {
+		DEV_DBG("%s: CEC_IRQ_FRAME_ERROR\n", __func__);
+		DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(2));
+
+		spin_lock_irqsave(&cec_ctrl->lock, flags);
+		cec_ctrl->cec_msg_wr_status |= CEC_STATUS_WR_ERROR;
+		spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+		if (!completion_done(&cec_ctrl->cec_msg_wr_done))
+			complete_all(&cec_ctrl->cec_msg_wr_done);
+	}
+
+	if ((cec_intr & BIT(6)) && (cec_intr & BIT(7))) {
+		DEV_DBG("%s: CEC_IRQ_FRAME_RD_DONE\n", __func__);
+
+		DSS_REG_W(io, HDMI_CEC_INT, cec_intr | BIT(6));
+		queue_work(cec_ctrl->init_data.workq, &cec_ctrl->cec_read_work);
+	}
+
+	return rc;
+} /* hdmi_cec_isr */
+
+int hdmi_cec_deconfig(void *input)
+{
+	unsigned long flags;
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+	if (!cec_ctrl) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EPERM;
+	}
+
+	hdmi_cec_disable(cec_ctrl);
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	cec_ctrl->cec_engine_configed = false;
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	return 0;
+} /* hdmi_cec_deconfig */
+
+int hdmi_cec_config(void *input)
+{
+	unsigned long flags;
+	u32 hdmi_hw_version;
+	struct dss_io_data *io = NULL;
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+	if (!cec_ctrl || !cec_ctrl->init_data.io) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EPERM;
+	}
+
+	io = cec_ctrl->init_data.io;
+
+	/* ~49.5us tick at the 19.2MHz reference clock: 950 cycles = 0x3B6 */
+	DSS_REG_W(io, HDMI_CEC_REFTIMER, (0x3B6 & 0xFFF) | BIT(16));
+
+	hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION);
+	if (hdmi_hw_version == 0x30000001) {
+		DSS_REG_W(io, HDMI_CEC_RD_RANGE, 0x30AB9888);
+		DSS_REG_W(io, HDMI_CEC_WR_RANGE, 0x888AA888);
+
+		DSS_REG_W(io, HDMI_CEC_RD_START_RANGE, 0x88888888);
+		DSS_REG_W(io, HDMI_CEC_RD_TOTAL_RANGE, 0x99);
+		DSS_REG_W(io, HDMI_CEC_COMPL_CTL, 0xF);
+		DSS_REG_W(io, HDMI_CEC_WR_CHECK_CONFIG, 0x4);
+	} else {
+		DEV_INFO("%s: CEC is not supported on HDMI HW version 0x%x\n",
+			__func__, hdmi_hw_version);
+		return -EPERM;
+	}
+
+	DSS_REG_W(io, HDMI_CEC_RD_FILTER, BIT(0) | (0x7FF << 4));
+	DSS_REG_W(io, HDMI_CEC_TIME, BIT(0) | ((7 * 0x30) << 7));
+
+	if (cec_ctrl->cec_enabled)
+		hdmi_cec_enable(cec_ctrl);
+
+	spin_lock_irqsave(&cec_ctrl->lock, flags);
+	cec_ctrl->cec_engine_configed = true;
+	spin_unlock_irqrestore(&cec_ctrl->lock, flags);
+
+	return 0;
+} /* hdmi_cec_config */
+
+void hdmi_cec_deinit(void *input)
+{
+	struct hdmi_cec_msg_node *msg_node, *tmp;
+	struct hdmi_cec_ctrl *cec_ctrl = (struct hdmi_cec_ctrl *)input;
+
+	if (cec_ctrl) {
+		list_for_each_entry_safe(msg_node, tmp, &cec_ctrl->msg_head,
+					list) {
+			list_del(&msg_node->list);
+			kfree(msg_node);
+		}
+
+		sysfs_remove_group(cec_ctrl->init_data.sysfs_kobj,
+			&hdmi_cec_fs_attr_group);
+
+		kfree(cec_ctrl);
+	}
+} /* hdmi_cec_deinit */
+
+void *hdmi_cec_init(struct hdmi_cec_init_data *init_data)
+{
+	struct hdmi_cec_ctrl *cec_ctrl = NULL;
+
+	if (!init_data) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		goto error;
+	}
+
+	cec_ctrl = kzalloc(sizeof(*cec_ctrl), GFP_KERNEL);
+	if (!cec_ctrl) {
+		DEV_ERR("%s: FAILED: out of memory\n", __func__);
+		goto error;
+	}
+
+	cec_ctrl->init_data = *init_data;
+
+	if (sysfs_create_group(init_data->sysfs_kobj,
+				&hdmi_cec_fs_attr_group)) {
+		DEV_ERR("%s: cec sysfs group creation failed\n", __func__);
+		goto error;
+	}
+
+	spin_lock_init(&cec_ctrl->lock);
+	INIT_LIST_HEAD(&cec_ctrl->msg_head);
+	INIT_WORK(&cec_ctrl->cec_read_work, hdmi_cec_msg_recv);
+	init_completion(&cec_ctrl->cec_msg_wr_done);
+
+	goto exit;
+
+error:
+	kfree(cec_ctrl);
+	cec_ctrl = NULL;
+exit:
+	return (void *)cec_ctrl;
+} /* hdmi_cec_init */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_cec.h b/drivers/video/msm/mdss/mdss_hdmi_cec.h
new file mode 100644
index 0000000..a554507
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_cec.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_CEC_H__
+#define __MDSS_HDMI_CEC_H__
+
+#include "mdss_hdmi_util.h"
+
+struct hdmi_cec_init_data {
+	struct workqueue_struct *workq;
+	struct kobject *sysfs_kobj;
+	struct dss_io_data *io;
+};
+
+int hdmi_cec_deconfig(void *cec_ctrl);
+int hdmi_cec_config(void *cec_ctrl);
+int hdmi_cec_isr(void *cec_ctrl);
+void hdmi_cec_deinit(void *cec_ctrl);
+void *hdmi_cec_init(struct hdmi_cec_init_data *init_data);
+#endif /* __MDSS_HDMI_CEC_H__ */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_edid.c b/drivers/video/msm/mdss/mdss_hdmi_edid.c
new file mode 100644
index 0000000..e87f028
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_edid.c
@@ -0,0 +1,1499 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <mach/board.h>
+#include "mdss_hdmi_edid.h"
+
+#define DBC_START_OFFSET 4
+#define HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd) \
+	(!((vsd)[8] & BIT(7)) ? 9 : (!((vsd)[8] & BIT(6)) ? 11 : 13))
+
+/*
+ * As per the CEA-861E spec, there can be a total of 10 short audio
+ * descriptors with each SAD being 3 bytes long.
+ * Thus, the maximum length of the audio data block would be 30 bytes
+ */
+#define MAX_AUDIO_DATA_BLOCK_SIZE	30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE	3
+
+struct hdmi_edid_sink_data {
+	u32 disp_mode_list[HDMI_VFRMT_MAX];
+	u32 disp_3d_mode_list[HDMI_VFRMT_MAX];
+	u32 disp_multi_3d_mode_list[16];
+	u32 disp_multi_3d_mode_list_cnt;
+	u32 num_of_elements;
+	u32 preferred_video_format;
+};
+
+struct hdmi_edid_ctrl {
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u16 physical_address;
+	u32 video_resolution; /* selected by user */
+	u32 sink_mode; /* HDMI or DVI */
+	u16 audio_latency;
+	u16 video_latency;
+	u32 present_3d;
+	u8 audio_data_block[MAX_AUDIO_DATA_BLOCK_SIZE];
+	int adb_size;
+	u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+	int sadb_size;
+
+	struct hdmi_edid_sink_data sink_data;
+	struct hdmi_edid_init_data init_data;
+};
+
+/* The logical ID for the HDMI TX core; only one core is currently supported. */
+struct hdmi_edid_video_mode_property_type {
+	u32	video_code;
+	u32	active_h;
+	u32	active_v;
+	u32	interlaced;
+	u32	total_h;
+	u32	total_blank_h;
+	u32	total_v;
+	u32	total_blank_v;
+	/* Must divide by 1000 to get the frequency */
+	u32	freq_h;
+	/* Must divide by 1000 to get the frequency */
+	u32	freq_v;
+	/* Must divide by 1000 to get the frequency */
+	u32	pixel_freq;
+	/* Must divide by 1000 to get the frequency */
+	u32	refresh_rate;
+	u32	aspect_ratio_4_3;
+};
+
+/* LUT is sorted from lowest Active H to highest Active H - ease searching */
+static struct hdmi_edid_video_mode_property_type
+	hdmi_edid_disp_mode_lut[] = {
+
+	/* All 640 H Active */
+	{HDMI_VFRMT_640x480p60_4_3, 640, 480, false, 800, 160, 525, 45,
+	 31465, 59940, 25175, 59940, true},
+	{HDMI_VFRMT_640x480p60_4_3, 640, 480, false, 800, 160, 525, 45,
+	 31500, 60000, 25200, 60000, true},
+
+	/* All 720 H Active */
+	{HDMI_VFRMT_720x576p50_4_3,  720, 576, false, 864, 144, 625, 49,
+	 31250, 50000, 27000, 50000, true},
+	{HDMI_VFRMT_720x480p60_4_3,  720, 480, false, 858, 138, 525, 45,
+	 31465, 59940, 27000, 59940, true},
+	{HDMI_VFRMT_720x480p60_4_3,  720, 480, false, 858, 138, 525, 45,
+	 31500, 60000, 27030, 60000, true},
+	{HDMI_VFRMT_720x576p100_4_3, 720, 576, false, 864, 144, 625, 49,
+	 62500, 100000, 54000, 100000, true},
+	{HDMI_VFRMT_720x480p120_4_3, 720, 480, false, 858, 138, 525, 45,
+	 62937, 119880, 54000, 119880, true},
+	{HDMI_VFRMT_720x480p120_4_3, 720, 480, false, 858, 138, 525, 45,
+	 63000, 120000, 54054, 120000, true},
+	{HDMI_VFRMT_720x576p200_4_3, 720, 576, false, 864, 144, 625, 49,
+	 125000, 200000, 108000, 200000, true},
+	{HDMI_VFRMT_720x480p240_4_3, 720, 480, false, 858, 138, 525, 45,
+	 125874, 239760, 108000, 239760, true},
+	{HDMI_VFRMT_720x480p240_4_3, 720, 480, false, 858, 138, 525, 45,
+	 126000, 240000, 108108, 240000, true},
+
+	/* All 1280 H Active */
+	{HDMI_VFRMT_1280x720p50_16_9,  1280, 720, false, 1980, 700, 750, 30,
+	 37500, 50000, 74250, 50000, false},
+	{HDMI_VFRMT_1280x720p60_16_9,  1280, 720, false, 1650, 370, 750, 30,
+	 44955, 59940, 74176, 59940, false},
+	{HDMI_VFRMT_1280x720p60_16_9,  1280, 720, false, 1650, 370, 750, 30,
+	 45000, 60000, 74250, 60000, false},
+	{HDMI_VFRMT_1280x720p100_16_9, 1280, 720, false, 1980, 700, 750, 30,
+	 75000, 100000, 148500, 100000, false},
+	{HDMI_VFRMT_1280x720p120_16_9, 1280, 720, false, 1650, 370, 750, 30,
+	 89909, 119880, 148352, 119880, false},
+	{HDMI_VFRMT_1280x720p120_16_9, 1280, 720, false, 1650, 370, 750, 30,
+	 90000, 120000, 148500, 120000, false},
+
+	/* All 1440 H Active */
+	{HDMI_VFRMT_1440x576i50_4_3, 1440, 576, true,  1728, 288, 625, 24,
+	 15625, 50000, 27000, 50000, true},
+	{HDMI_VFRMT_720x288p50_4_3,  1440, 288, false, 1728, 288, 312, 24,
+	 15625, 50080, 27000, 50000, true},
+	{HDMI_VFRMT_720x288p50_4_3,  1440, 288, false, 1728, 288, 313, 25,
+	 15625, 49920, 27000, 50000, true},
+	{HDMI_VFRMT_720x288p50_4_3,  1440, 288, false, 1728, 288, 314, 26,
+	 15625, 49761, 27000, 50000, true},
+	{HDMI_VFRMT_1440x576p50_4_3, 1440, 576, false, 1728, 288, 625, 49,
+	 31250, 50000, 54000, 50000, true},
+	{HDMI_VFRMT_1440x480i60_4_3, 1440, 480, true,  1716, 276, 525, 22,
+	 15734, 59940, 27000, 59940, true},
+	{HDMI_VFRMT_1440x240p60_4_3, 1440, 240, false, 1716, 276, 262, 22,
+	 15734, 60054, 27000, 59940, true},
+	{HDMI_VFRMT_1440x240p60_4_3, 1440, 240, false, 1716, 276, 263, 23,
+	 15734, 59826, 27000, 59940, true},
+	{HDMI_VFRMT_1440x480p60_4_3, 1440, 480, false, 1716, 276, 525, 45,
+	 31469, 59940, 54000, 59940, true},
+	{HDMI_VFRMT_1440x480i60_4_3, 1440, 480, true,  1716, 276, 525, 22,
+	 15750, 60000, 27027, 60000, true},
+	{HDMI_VFRMT_1440x240p60_4_3, 1440, 240, false, 1716, 276, 262, 22,
+	 15750, 60115, 27027, 60000, true},
+	{HDMI_VFRMT_1440x240p60_4_3, 1440, 240, false, 1716, 276, 263, 23,
+	 15750, 59886, 27027, 60000, true},
+	{HDMI_VFRMT_1440x480p60_4_3, 1440, 480, false, 1716, 276, 525, 45,
+	 31500, 60000, 54054, 60000, true},
+	{HDMI_VFRMT_1440x576i100_4_3, 1440, 576, true,  1728, 288, 625, 24,
+	 31250, 100000, 54000, 100000, true},
+	{HDMI_VFRMT_1440x480i120_4_3, 1440, 480, true,  1716, 276, 525, 22,
+	 31469, 119880, 54000, 119880, true},
+	{HDMI_VFRMT_1440x480i120_4_3, 1440, 480, true,  1716, 276, 525, 22,
+	 31500, 120000, 54054, 120000, true},
+	{HDMI_VFRMT_1440x576i200_4_3, 1440, 576, true,  1728, 288, 625, 24,
+	 62500, 200000, 108000, 200000, true},
+	{HDMI_VFRMT_1440x480i240_4_3, 1440, 480, true,  1716, 276, 525, 22,
+	 62937, 239760, 108000, 239760, true},
+	{HDMI_VFRMT_1440x480i240_4_3, 1440, 480, true,  1716, 276, 525, 22,
+	 63000, 240000, 108108, 240000, true},
+
+	/* All 1920 H Active */
+	{HDMI_VFRMT_1920x1080p60_16_9, 1920, 1080, false, 2200, 280, 1125,
+	 45, 67433, 59940, 148352, 59940, false},
+	{HDMI_VFRMT_1920x1080p60_16_9, 1920, 1080, false, 2200, 280, 1125,
+	 45, 67500, 60000, 148500, 60000, false},
+	{HDMI_VFRMT_1920x1080p50_16_9, 1920, 1080, false, 2640, 720, 1125,
+	 45, 56250, 50000, 148500, 50000, false},
+	{HDMI_VFRMT_1920x1080p24_16_9, 1920, 1080, false, 2750, 830, 1125,
+	 45, 26973, 23976, 74176, 24000, false},
+	{HDMI_VFRMT_1920x1080p24_16_9, 1920, 1080, false, 2750, 830, 1125,
+	 45, 27000, 24000, 74250, 24000, false},
+	{HDMI_VFRMT_1920x1080p25_16_9, 1920, 1080, false, 2640, 720, 1125,
+	 45, 28125, 25000, 74250, 25000, false},
+	{HDMI_VFRMT_1920x1080p30_16_9, 1920, 1080, false, 2200, 280, 1125,
+	 45, 33716, 29970, 74176, 30000, false},
+	{HDMI_VFRMT_1920x1080p30_16_9, 1920, 1080, false, 2200, 280, 1125,
+	 45, 33750, 30000, 74250, 30000, false},
+	{HDMI_VFRMT_1920x1080i50_16_9, 1920, 1080, true,  2304, 384, 1250,
+	 85, 31250, 50000, 72000, 50000, false},
+	{HDMI_VFRMT_1920x1080i60_16_9, 1920, 1080, true,  2200, 280, 1125,
+	 22, 33716, 59940, 74176, 59940, false},
+	{HDMI_VFRMT_1920x1080i60_16_9, 1920, 1080, true,  2200, 280, 1125,
+	 22, 33750, 60000, 74250, 60000, false},
+	{HDMI_VFRMT_1920x1080i100_16_9, 1920, 1080, true,  2640, 720, 1125,
+	 22, 56250, 100000, 148500, 100000, false},
+	{HDMI_VFRMT_1920x1080i120_16_9, 1920, 1080, true,  2200, 280, 1125,
+	 22, 67432, 119880, 148352, 119880, false},
+	{HDMI_VFRMT_1920x1080i120_16_9, 1920, 1080, true,  2200, 280, 1125,
+	 22, 67500, 120000, 148500, 120000, false},
+
+	/* All 2560 H Active */
+	{HDMI_VFRMT_2560x1600p60_16_9, 2560, 1600, false, 2720, 160, 1646,
+	 46, 98700, 60000, 268500, 60000, false},
+
+	/* All 2880 H Active */
+	{HDMI_VFRMT_2880x576i50_4_3, 2880, 576, true,  3456, 576, 625, 24,
+	 15625, 50000, 54000, 50000, true},
+	{HDMI_VFRMT_2880x288p50_4_3, 2880, 288, false, 3456, 576, 312, 24,
+	 15625, 50080, 54000, 50000, true},
+	{HDMI_VFRMT_2880x288p50_4_3, 2880, 288, false, 3456, 576, 313, 25,
+	 15625, 49920, 54000, 50000, true},
+	{HDMI_VFRMT_2880x288p50_4_3, 2880, 288, false, 3456, 576, 314, 26,
+	 15625, 49761, 54000, 50000, true},
+	{HDMI_VFRMT_2880x576p50_4_3, 2880, 576, false, 3456, 576, 625, 49,
+	 31250, 50000, 108000, 50000, true},
+	{HDMI_VFRMT_2880x480i60_4_3, 2880, 480, true,  3432, 552, 525, 22,
+	 15734, 59940, 54000, 59940, true},
+	{HDMI_VFRMT_2880x240p60_4_3, 2880, 240, false, 3432, 552, 262, 22,
+	 15734, 60054, 54000, 59940, true},
+	{HDMI_VFRMT_2880x240p60_4_3, 2880, 240, false, 3432, 552, 263, 23,
+	 15734, 59826, 54000, 59940, true},
+	{HDMI_VFRMT_2880x480p60_4_3, 2880, 480, false, 3432, 552, 525, 45,
+	 31469, 59940, 108000, 59940, true},
+	{HDMI_VFRMT_2880x480i60_4_3, 2880, 480, true,  3432, 552, 525, 22,
+	 15750, 60000, 54054, 60000, true},
+	{HDMI_VFRMT_2880x240p60_4_3, 2880, 240, false, 3432, 552, 262, 22,
+	 15750, 60115, 54054, 60000, true},
+	{HDMI_VFRMT_2880x240p60_4_3, 2880, 240, false, 3432, 552, 263, 23,
+	 15750, 59886, 54054, 60000, true},
+	{HDMI_VFRMT_2880x480p60_4_3, 2880, 480, false, 3432, 552, 525, 45,
+	 31500, 60000, 108108, 60000, true},
+};
+
+static ssize_t hdmi_edid_sysfs_rda_modes(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	int i;
+	struct hdmi_edid_ctrl *edid_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_EDID);
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	buf[0] = 0;
+	if (edid_ctrl->sink_data.num_of_elements) {
+		u32 *video_mode = edid_ctrl->sink_data.disp_mode_list;
+		for (i = 0; i < edid_ctrl->sink_data.num_of_elements;
+			++i, ++video_mode) {
+			/* skip modes the driver cannot drive */
+			if (!hdmi_get_supported_mode(*video_mode))
+				continue;
+			if (ret > 0)
+				ret += snprintf(buf+ret, PAGE_SIZE-ret, ",%d",
+					*video_mode);
+			else
+				ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d",
+					*video_mode);
+		}
+	} else {
+		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d",
+			edid_ctrl->video_resolution);
+	}
+
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+	ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
+
+	return ret;
+} /* hdmi_edid_sysfs_rda_modes */
+static DEVICE_ATTR(edid_modes, S_IRUGO, hdmi_edid_sysfs_rda_modes, NULL);
+
+static ssize_t hdmi_edid_sysfs_rda_physical_address(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_edid_ctrl *edid_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_EDID);
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", edid_ctrl->physical_address);
+	DEV_DBG("%s: '%d'\n", __func__, edid_ctrl->physical_address);
+
+	return ret;
+} /* hdmi_edid_sysfs_rda_physical_address */
+static DEVICE_ATTR(pa, S_IRUGO, hdmi_edid_sysfs_rda_physical_address, NULL);
+
+static ssize_t hdmi_edid_sysfs_rda_scan_info(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_edid_ctrl *edid_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_EDID);
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "%d, %d, %d\n", edid_ctrl->pt_scan_info,
+		edid_ctrl->it_scan_info, edid_ctrl->ce_scan_info);
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+
+	return ret;
+} /* hdmi_edid_sysfs_rda_scan_info */
+static DEVICE_ATTR(scan_info, S_IRUGO, hdmi_edid_sysfs_rda_scan_info, NULL);
+
+static ssize_t hdmi_edid_sysfs_rda_3d_modes(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	int i;
+	char buff_3d[128];
+	struct hdmi_edid_ctrl *edid_ctrl =
+		hdmi_get_featuredata_from_sysfs_dev(dev, HDMI_TX_FEAT_EDID);
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	buf[0] = 0;
+	if (edid_ctrl->sink_data.num_of_elements) {
+		u32 *video_mode = edid_ctrl->sink_data.disp_mode_list;
+		u32 *video_3d_mode = edid_ctrl->sink_data.disp_3d_mode_list;
+
+		for (i = 0; i < edid_ctrl->sink_data.num_of_elements; ++i) {
+			/*
+			 * Do not clobber the running buffer offset with the
+			 * 3D string length; ret > 0 alone decides whether a
+			 * separating comma is needed.
+			 */
+			hdmi_get_video_3d_fmt_2string(*video_3d_mode++,
+				buff_3d);
+			if (ret > 0)
+				ret += snprintf(buf+ret, PAGE_SIZE-ret,
+					",%d=%s", *video_mode++,
+					buff_3d);
+			else
+				ret += snprintf(buf+ret, PAGE_SIZE-ret,
+					"%d=%s", *video_mode++,
+					buff_3d);
+		}
+	} else {
+		ret += snprintf(buf+ret, PAGE_SIZE-ret, "%d",
+			edid_ctrl->video_resolution);
+	}
+
+	DEV_DBG("%s: '%s'\n", __func__, buf);
+	ret += snprintf(buf+ret, PAGE_SIZE-ret, "\n");
+
+	return ret;
+} /* hdmi_edid_sysfs_rda_3d_modes */
+static DEVICE_ATTR(edid_3d_modes, S_IRUGO, hdmi_edid_sysfs_rda_3d_modes, NULL);
+
+static struct attribute *hdmi_edid_fs_attrs[] = {
+	&dev_attr_edid_modes.attr,
+	&dev_attr_pa.attr,
+	&dev_attr_scan_info.attr,
+	&dev_attr_edid_3d_modes.attr,
+	NULL,
+};
+
+static struct attribute_group hdmi_edid_fs_attrs_group = {
+	.attrs = hdmi_edid_fs_attrs,
+};
+
+static int hdmi_edid_read_block(struct hdmi_edid_ctrl *edid_ctrl, int block,
+	u8 *edid_buf)
+{
+	const u8 *b = edid_buf;
+	u32 ndx, check_sum, print_len;
+	int block_size = 0x80;
+	int i, status;
+	struct hdmi_tx_ddc_data ddc_data;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	do {
+		DEV_DBG("EDID: reading block(%d) with block-size=%d\n",
+			block, block_size);
+		for (i = 0; i < 0x80; i += block_size) {
+			memset(&ddc_data, 0, sizeof(ddc_data));
+			ddc_data.dev_addr    = 0xA0;
+			ddc_data.offset      = block*0x80 + i;
+			ddc_data.data_buf    = edid_buf+i;
+			ddc_data.data_len    = block_size;
+			ddc_data.request_len = block_size;
+			ddc_data.retry       = 1;
+			ddc_data.what        = "EDID";
+			ddc_data.no_align    = false;
+
+			/* Read EDID twice with 32-bit alignment too */
+			if (block < 2)
+				status = hdmi_ddc_read(
+					edid_ctrl->init_data.ddc_ctrl,
+					&ddc_data);
+			else
+				status = hdmi_ddc_read_seg(
+					edid_ctrl->init_data.ddc_ctrl,
+					&ddc_data);
+			if (status)
+				break;
+		}
+
+		block_size /= 2;
+	} while (status && (block_size >= 16));
+
+	if (status)
+		goto error;
+
+	/*
+	 * Verify the block checksum: the final byte of each 128-byte EDID
+	 * block is chosen so that all 128 bytes sum to 0 modulo 256
+	 */
+	check_sum = 0;
+	for (ndx = 0; ndx < 0x80; ++ndx)
+		check_sum += edid_buf[ndx];
+
+	if (check_sum & 0xFF) {
+		DEV_ERR("%s: failed CHECKSUM (read:%x, expected:%x)\n",
+			__func__, (u8)edid_buf[0x7F], (u8)check_sum);
+		for (ndx = 0; ndx < 0x100; ndx += 4)
+			DEV_DBG("EDID[%02x-%02x] %02x %02x %02x %02x\n",
+				ndx, ndx+3,
+				b[ndx+0], b[ndx+1], b[ndx+2], b[ndx+3]);
+		status = -EPROTO;
+		goto error;
+	}
+
+	print_len = 0x80;
+	for (ndx = 0; ndx < print_len; ndx += 4)
+		DEV_DBG("EDID[%02x-%02x] %02x %02x %02x %02x\n",
+			ndx, ndx+3,
+			b[ndx+0], b[ndx+1], b[ndx+2], b[ndx+3]);
+
+error:
+	return status;
+} /* hdmi_edid_read_block */
+
+static const u8 *hdmi_edid_find_block(const u8 *in_buf, u32 start_offset,
+	u8 type, u8 *len)
+{
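+	/*
+	 * Each CEA-861 data block starts with a header byte: bits 7:5 hold
+	 * the tag code and bits 4:0 the payload length, so e.g. a header of
+	 * 0x43 introduces a video data block (tag 2) of 3 bytes.
+	 */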
+	/* the start of data block collection, start of Video Data Block */
+	u32 offset = start_offset;
+	u32 end_dbc_offset = in_buf[2];
+
+	*len = 0;
+
+	/*
+	 * * edid buffer 1, byte 2 being 4 means no non-DTD/data block
+	 *   collection present.
+	 * * edid buffer 1, byte 2 being 0 means no non-DTD/data block
+	 *   collection present and no DTD data present.
+	 */
+	if ((end_dbc_offset == 0) || (end_dbc_offset == 4)) {
+		DEV_WARN("EDID: no DTD or non-DTD data present\n");
+		return NULL;
+	}
+
+	while (offset < end_dbc_offset) {
+		u8 block_len = in_buf[offset] & 0x1F;
+		if ((in_buf[offset] >> 5) == type) {
+			*len = block_len;
+			DEV_DBG("%s: EDID: block=%d found @ 0x%x w/ len=%d\n",
+				__func__, type, offset, block_len);
+
+			return in_buf + offset;
+		}
+		offset += 1 + block_len;
+	}
+	DEV_WARN("%s: EDID: type=%d block not found in EDID block\n",
+		__func__, type);
+
+	return NULL;
+} /* hdmi_edid_find_block */
+
+static void hdmi_edid_extract_extended_data_blocks(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 len = 0;
+	u32 start_offset = DBC_START_OFFSET;
+	u8 const *etag = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* A tag code of 7 identifies extended data blocks */
+	etag = hdmi_edid_find_block(in_buf, start_offset, 7, &len);
+
+	while (etag != NULL) {
+		/* The extended data block should at least be 2 bytes long */
+		if (len < 2) {
+			DEV_DBG("%s: data block of len < 2 bytes. Ignor...\n",
+				__func__);
+		} else {
+			/*
+			 * The second byte of the extended data block has the
+			 * extended tag code
+			 */
+			switch (etag[1]) {
+			case 0:
+				/* Video Capability Data Block */
+				DEV_DBG("%s: EDID: VCDB=%02X %02X\n", __func__,
+					etag[1], etag[2]);
+
+				/*
+				 * Check if the sink specifies underscan
+				 * support for:
+				 * BITS 5:4 - preferred video format
+				 * BITS 3:2 - IT video format
+				 * BITS 1:0 - CE video format
+				 */
+				edid_ctrl->pt_scan_info =
+					(etag[2] & (BIT(4) | BIT(5))) >> 4;
+				edid_ctrl->it_scan_info =
+					(etag[2] & (BIT(3) | BIT(2))) >> 2;
+				edid_ctrl->ce_scan_info =
+					etag[2] & (BIT(1) | BIT(0));
+				DEV_DBG("%s: Scan Info (pt|it|ce): (%d|%d|%d)",
+					__func__,
+					edid_ctrl->pt_scan_info,
+					edid_ctrl->it_scan_info,
+					edid_ctrl->ce_scan_info);
+				break;
+			default:
+				DEV_DBG("%s: Tag Code %d not supported\n",
+					__func__, etag[1]);
+				break;
+			}
+		}
+
+		/* There could be more than one extended data block */
+		start_offset = etag - in_buf + len + 1;
+		etag = hdmi_edid_find_block(in_buf, start_offset, 7, &len);
+	}
+} /* hdmi_edid_extract_extended_data_blocks */
+
+static void hdmi_edid_extract_3d_present(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len, offset;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET, 3, &len);
+
+	edid_ctrl->present_3d = 0;
+	if (vsd == NULL || len < 9) {
+		DEV_DBG("%s: blk-id 3 not found or not long enough\n",
+			__func__);
+		return;
+	}
+
+	offset = HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd);
+	DEV_DBG("%s: EDID: 3D present @ 0x%x = %02x\n", __func__,
+		offset, vsd[offset]);
+
+	if (vsd[offset] >> 7) { /* 3D format indication present */
+		DEV_INFO("%s: EDID: 3D present, 3D-len=%d\n", __func__,
+			vsd[offset+1] & 0x1F);
+		edid_ctrl->present_3d = 1;
+	}
+} /* hdmi_edid_extract_3d_present */
+
+static void hdmi_edid_extract_audio_data_blocks(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 len, cnt = 0;
+	const u8 *adb = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	adb = hdmi_edid_find_block(in_buf, DBC_START_OFFSET, 1, &len);
+	if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE))
+		return;
+
+	memcpy(edid_ctrl->audio_data_block, adb + 1, len);
+	edid_ctrl->adb_size = len;
+
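+	/*
+	 * Each short audio descriptor is 3 bytes: byte 0 carries the format
+	 * (bits 6:3) and channel count minus one (bits 2:0), byte 1 the
+	 * supported sample rates and byte 2 the bit depths or max bit rate.
+	 */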
+	while (len >= 3 && cnt < 16) {
+		DEV_DBG("%s: ch=%d fmt=%d sampling=0x%02x bitdepth=0x%02x\n",
+			__func__, (adb[1]&0x7)+1, adb[1]>>3, adb[2], adb[3]);
+
+		cnt++;
+		len -= 3;
+		adb += 3;
+	}
+} /* hdmi_edid_extract_audio_data_blocks */
+
+static void hdmi_edid_extract_speaker_allocation_data(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 len;
+	const u8 *sadb = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	sadb = hdmi_edid_find_block(in_buf, DBC_START_OFFSET, 4, &len);
+	if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE))
+		return;
+
+	memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
+	edid_ctrl->sadb_size = len;
+
+	DEV_DBG("%s: EDID: speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+		__func__, sadb[1],
+		(sadb[1] & BIT(0)) ? "FL/FR," : "",
+		(sadb[1] & BIT(1)) ? "LFE," : "",
+		(sadb[1] & BIT(2)) ? "FC," : "",
+		(sadb[1] & BIT(3)) ? "RL/RR," : "",
+		(sadb[1] & BIT(4)) ? "RC," : "",
+		(sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+		(sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+} /* hdmi_edid_extract_speaker_allocation_data */
+
+static void hdmi_edid_extract_latency_fields(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET, 3, &len);
+
+	if (vsd == NULL || len < 12 || !(vsd[8] & BIT(7))) {
+		edid_ctrl->video_latency = (u16)-1;
+		edid_ctrl->audio_latency = (u16)-1;
+		DEV_DBG("%s: EDID: No audio/video latency present\n", __func__);
+	} else {
+		edid_ctrl->video_latency = vsd[9];
+		edid_ctrl->audio_latency = vsd[10];
+		DEV_DBG("%s: EDID: video-latency=%04x, audio-latency=%04x\n",
+			__func__, edid_ctrl->video_latency,
+			edid_ctrl->audio_latency);
+	}
+} /* hdmi_edid_extract_latency_fields */
+
+static u32 hdmi_edid_extract_ieee_reg_id(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *in_buf)
+{
+	u8 len;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return 0;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET, 3, &len);
+	if (vsd == NULL)
+		return 0;
+
+	DEV_DBG("%s: EDID: VSD PhyAddr=%04x, MaxTMDS=%dMHz\n", __func__,
+		((u32)vsd[4] << 8) + (u32)vsd[5], (u32)vsd[7] * 5);
+
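+	/*
+	 * Bytes 4-5 of the VSDB carry the CEC physical address as four
+	 * nibbles a.b.c.d, e.g. 0x10 0x00 decodes to 1.0.0.0.
+	 */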
+	edid_ctrl->physical_address = ((u16)vsd[4] << 8) + (u16)vsd[5];
+
+	return ((u32)vsd[3] << 16) + ((u32)vsd[2] << 8) + (u32)vsd[1];
+} /* hdmi_edid_extract_ieee_reg_id */
+
+static void hdmi_edid_extract_vendor_id(const u8 *in_buf,
+	char *vendor_id)
+{
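+	/*
+	 * Bytes 8-9 of the EDID pack the three-letter manufacturer ID as
+	 * three 5-bit codes ('A' = 1), big-endian: 0x4C 0x2D is "SAM".
+	 */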
+	u32 id_codes = ((u32)in_buf[8] << 8) + in_buf[9];
+
+	vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
+	vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
+	vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
+	vendor_id[3] = 0;
+} /* hdmi_edid_extract_vendor_id */
+
+static u32 hdmi_edid_check_header(const u8 *edid_buf)
+{
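+	/*
+	 * Every EDID base block begins with the fixed 8-byte pattern
+	 * 00 FF FF FF FF FF FF 00
+	 */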
+	return (edid_buf[0] == 0x00) && (edid_buf[1] == 0xff)
+		&& (edid_buf[2] == 0xff) && (edid_buf[3] == 0xff)
+		&& (edid_buf[4] == 0xff) && (edid_buf[5] == 0xff)
+		&& (edid_buf[6] == 0xff) && (edid_buf[7] == 0x00);
+} /* hdmi_edid_check_header */
+
+static void hdmi_edid_detail_desc(const u8 *data_buf, u32 *disp_mode)
+{
+	u32	aspect_ratio_4_3    = false;
+	u32	interlaced          = false;
+	u32	active_h            = 0;
+	u32	active_v            = 0;
+	u32	blank_h             = 0;
+	u32	blank_v             = 0;
+	u32	ndx                 = 0;
+	u32	max_num_of_elements = 0;
+	u32	img_size_h          = 0;
+	u32	img_size_v          = 0;
+
+	/*
+	 * * See VESA Spec
+	 * * EDID_TIMING_DESC_UPPER_H_NIBBLE[0x4]: Relative Offset to the
+	 *   EDID detailed timing descriptors - Upper 4 bit for each H
+	 *   active/blank field
+	 * * EDID_TIMING_DESC_H_ACTIVE[0x2]: Relative Offset to the EDID
+	 *   detailed timing descriptors - H active
+	 */
+	active_h = ((((u32)data_buf[0x4] >> 0x4) & 0xF) << 8)
+		| data_buf[0x2];
+
+	/*
+	 * EDID_TIMING_DESC_H_BLANK[0x3]: Relative Offset to the EDID detailed
+	 *   timing descriptors - H blank
+	 */
+	blank_h = (((u32)data_buf[0x4] & 0xF) << 8)
+		| data_buf[0x3];
+
+	/*
+	 * * EDID_TIMING_DESC_UPPER_V_NIBBLE[0x7]: Relative Offset to the
+	 *   EDID detailed timing descriptors - Upper 4 bit for each V
+	 *   active/blank field
+	 * * EDID_TIMING_DESC_V_ACTIVE[0x5]: Relative Offset to the EDID
+	 *   detailed timing descriptors - V active
+	 */
+	active_v = ((((u32)data_buf[0x7] >> 0x4) & 0xF) << 8)
+		| data_buf[0x5];
+
+	/*
+	 * EDID_TIMING_DESC_V_BLANK[0x6]: Relative Offset to the EDID
+	 * detailed timing descriptors - V blank
+	 */
+	blank_v = (((u32)data_buf[0x7] & 0xF) << 8)
+		| data_buf[0x6];
+
+	/*
+	 * * EDID_TIMING_DESC_IMAGE_SIZE_UPPER_NIBBLE[0xE]: Relative Offset
+	 *   to the EDID detailed timing descriptors - Image Size upper
+	 *   nibble V and H
+	 * * EDID_TIMING_DESC_H_IMAGE_SIZE[0xC]: Relative Offset to the EDID
+	 *   detailed timing descriptors - H image size
+	 * * EDID_TIMING_DESC_V_IMAGE_SIZE[0xD]: Relative Offset to the EDID
+	 *   detailed timing descriptors - V image size
+	 */
+	img_size_h = ((((u32)data_buf[0xE] >> 0x4) & 0xF) << 8)
+		| data_buf[0xC];
+	img_size_v = (((u32)data_buf[0xE] & 0xF) << 8)
+		| data_buf[0xD];
+
+	/*
+	 * Treat the aspect ratio as 4:3 if the image size is within the
+	 * specified tolerance, rather than requiring an exact match
+	 */
+	aspect_ratio_4_3 = (abs(img_size_h * 3 - img_size_v * 4) < 5) ? 1 : 0;
+
+	max_num_of_elements = ARRAY_SIZE(hdmi_edid_disp_mode_lut);
+
+	/*
+	 * EDID_TIMING_DESC_INTERLACE[0x11:7]: Relative Offset to the EDID
+	 * detailed timing descriptors - Interlace flag
+	 */
+	DEV_DBG("%s: Interlaced mode byte data_buf[0x11]=[%x]\n", __func__,
+		data_buf[0x11]);
+
+	/*
+	 * CEA 861-D: interlaced bit is bit[7] of byte[0x11]
+	 */
+	interlaced = (data_buf[0x11] & 0x80) >> 7;
+
+	DEV_DBG("%s: A[%ux%u] B[%ux%u] V[%ux%u] %s\n", __func__,
+		active_h, active_v, blank_h, blank_v, img_size_h, img_size_v,
+		interlaced ? "i" : "p");
+
+	*disp_mode = HDMI_VFRMT_FORCE_32BIT;
+	while (ndx < max_num_of_elements) {
+		const struct hdmi_edid_video_mode_property_type *edid =
+			hdmi_edid_disp_mode_lut + ndx;
+
+		if ((interlaced    == edid->interlaced)    &&
+			(active_h  == edid->active_h)      &&
+			(blank_h   == edid->total_blank_h) &&
+			(blank_v   == edid->total_blank_v) &&
+			((active_v == edid->active_v) ||
+			(active_v  == (edid->active_v + 1)))) {
+			if (edid->aspect_ratio_4_3 && !aspect_ratio_4_3)
+				/* Aspect ratio 16:9 */
+				*disp_mode = edid->video_code + 1;
+			else
+				/* Aspect ratio 4:3 */
+				*disp_mode = edid->video_code;
+
+			DEV_DBG("%s: mode found:%d\n", __func__, *disp_mode);
+			break;
+		}
+		++ndx;
+	}
+	if (ndx == max_num_of_elements)
+		DEV_INFO("%s: *no mode* found\n", __func__);
+} /* hdmi_edid_detail_desc */
+
+static void hdmi_edid_add_sink_3d_format(struct hdmi_edid_sink_data *sink_data,
+	u32 video_format, u32 video_3d_format)
+{
+	char string[128];
+	u32 added = false;
+	int i;
+
+	for (i = 0; i < sink_data->num_of_elements; ++i) {
+		if (sink_data->disp_mode_list[i] == video_format) {
+			sink_data->disp_3d_mode_list[i] |= video_3d_format;
+			added = true;
+			break;
+		}
+	}
+
+	hdmi_get_video_3d_fmt_2string(video_3d_format, string);
+
+	DEV_DBG("%s: EDID[3D]: format: %d [%s], %s %s\n", __func__,
+		video_format, msm_hdmi_mode_2string(video_format),
+		string, added ? "added" : "NOT added");
+} /* hdmi_edid_add_sink_3d_format */
+
+static void hdmi_edid_add_sink_video_format(
+	struct hdmi_edid_sink_data *sink_data, u32 video_format)
+{
+	const struct msm_hdmi_mode_timing_info *timing =
+		hdmi_get_supported_mode(video_format);
+	u32 supported = timing != NULL;
+
+	if (video_format >= HDMI_VFRMT_MAX) {
+		DEV_ERR("%s: video format: %s is not supported\n", __func__,
+			msm_hdmi_mode_2string(video_format));
+		return;
+	}
+
+	DEV_DBG("%s: EDID: format: %d [%s], %s\n", __func__,
+		video_format, msm_hdmi_mode_2string(video_format),
+		supported ? "Supported" : "Not-Supported");
+
+	if (supported) {
+		/* todo: MHL */
+		sink_data->disp_mode_list[sink_data->num_of_elements++] =
+			video_format;
+	}
+} /* hdmi_edid_add_sink_video_format */
+
+static void hdmi_edid_get_display_vsd_3d_mode(const u8 *data_buf,
+	struct hdmi_edid_sink_data *sink_data, u32 num_of_cea_blocks)
+{
+	u8 len, offset, present_multi_3d, hdmi_vic_len, hdmi_3d_len;
+	u16 structure_all, structure_mask;
+	const u8 *vsd = num_of_cea_blocks ?
+		hdmi_edid_find_block(data_buf+0x80, DBC_START_OFFSET,
+				3, &len) : NULL;
+	int i;
+
+	if (vsd == NULL) {
+		DEV_DBG("%s: VSDB not found\n", __func__);
+		return;
+	}
+
+	offset = HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd);
+	present_multi_3d = (vsd[offset] & 0x60) >> 5;
+
+	offset += 1;
+	hdmi_vic_len = (vsd[offset] >> 5) & 0x7;
+	hdmi_3d_len = vsd[offset] & 0x1F;
+	DEV_DBG("%s: EDID[3D]: HDMI_VIC_LEN = %d, HDMI_3D_LEN = %d\n", __func__,
+		hdmi_vic_len, hdmi_3d_len);
+
+	offset += (hdmi_vic_len + 1);
+	if (present_multi_3d == 1 || present_multi_3d == 2) {
+		DEV_DBG("%s: EDID[3D]: multi 3D present (%d)\n", __func__,
+			present_multi_3d);
+		/* 3d_structure_all */
+		structure_all = (vsd[offset] << 8) | vsd[offset + 1];
+		offset += 2;
+		hdmi_3d_len -= 2;
+		if (present_multi_3d == 2) {
+			/* 3d_structure_mask */
+			structure_mask = (vsd[offset] << 8) | vsd[offset + 1];
+			offset += 2;
+			hdmi_3d_len -= 2;
+		} else
+			structure_mask = 0xffff;
+
+		i = 0;
+		while (i < 16) {
+			if (i >= sink_data->disp_multi_3d_mode_list_cnt)
+				break;
+
+			if (!(structure_mask & BIT(i))) {
+				++i;
+				continue;
+			}
+
+			/* BIT0: FRAME PACKING */
+			if (structure_all & BIT(0))
+				hdmi_edid_add_sink_3d_format(sink_data,
+					sink_data->
+					disp_multi_3d_mode_list[i],
+					FRAME_PACKING);
+
+			/* BIT6: TOP AND BOTTOM */
+			if (structure_all & BIT(6))
+				hdmi_edid_add_sink_3d_format(sink_data,
+					sink_data->
+					disp_multi_3d_mode_list[i],
+					TOP_AND_BOTTOM);
+
+			/* BIT8: SIDE BY SIDE HALF */
+			if (structure_all & BIT(8))
+				hdmi_edid_add_sink_3d_format(sink_data,
+					sink_data->
+					disp_multi_3d_mode_list[i],
+					SIDE_BY_SIDE_HALF);
+
+			++i;
+		}
+	}
+
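+	/*
+	 * The remaining entries are 3D_Structure_X bytes: the high nibble is
+	 * the 2D_VIC_order index into the first 16 SVDs and the low nibble
+	 * the 3D structure; values >= 8 are followed by a 3D_Detail_X byte.
+	 */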
+	i = 0;
+	while (hdmi_3d_len > 0) {
+		DEV_DBG("%s: EDID: 3D_Structure_%d @ 0x%x: %02x\n",
+			__func__, i + 1, offset, vsd[offset]);
+
+		if ((vsd[offset] >> 4) >=
+			sink_data->disp_multi_3d_mode_list_cnt) {
+			if ((vsd[offset] & 0x0F) >= 8) {
+				offset += 1;
+				hdmi_3d_len -= 1;
+				DEV_DBG("%s:EDID:3D_Detail_%d @ 0x%x: %02x\n",
+					__func__, i + 1, offset, vsd[offset]);
+			}
+			i += 1;
+			offset += 1;
+			hdmi_3d_len -= 1;
+			continue;
+		}
+
+		switch (vsd[offset] & 0x0F) {
+		case 0:
+			/* 0000b: FRAME PACKING */
+			hdmi_edid_add_sink_3d_format(sink_data,
+				sink_data->
+				disp_multi_3d_mode_list[vsd[offset] >> 4],
+				FRAME_PACKING);
+			break;
+		case 6:
+			/* 0110b: TOP AND BOTTOM */
+			hdmi_edid_add_sink_3d_format(sink_data,
+				sink_data->
+				disp_multi_3d_mode_list[vsd[offset] >> 4],
+				TOP_AND_BOTTOM);
+			break;
+		case 8:
+			/* 1000b: SIDE BY SIDE HALF */
+			hdmi_edid_add_sink_3d_format(sink_data,
+				sink_data->
+				disp_multi_3d_mode_list[vsd[offset] >> 4],
+				SIDE_BY_SIDE_HALF);
+			break;
+		}
+		if ((vsd[offset] & 0x0F) >= 8) {
+			offset += 1;
+			hdmi_3d_len -= 1;
+			DEV_DBG("%s: EDID[3D]: 3D_Detail_%d @ 0x%x: %02x\n",
+				__func__, i + 1, offset,
+				vsd[offset]);
+		}
+		i += 1;
+		offset += 1;
+		hdmi_3d_len -= 1;
+	}
+} /* hdmi_edid_get_display_vsd_3d_mode */
+
+static void hdmi_edid_get_extended_video_formats(
+	struct hdmi_edid_ctrl *edid_ctrl, const u8 *in_buf)
+{
+	u8 db_len, offset, i;
+	u8 hdmi_vic_len;
+	u32 video_format;
+	const u8 *vsd = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vsd = hdmi_edid_find_block(in_buf, DBC_START_OFFSET, 3, &db_len);
+
+	if (vsd == NULL || db_len < 9) {
+		DEV_DBG("%s: blk-id 3 not found or not long enough\n",
+			__func__);
+		return;
+	}
+
+	/* check if HDMI_Video_present flag is set or not */
+	if (!(vsd[8] & BIT(5))) {
+		DEV_DBG("%s: extended vfmts are not supported by the sink.\n",
+			__func__);
+		return;
+	}
+
+	offset = HDMI_VSDB_3D_EVF_DATA_OFFSET(vsd);
+
+	hdmi_vic_len = vsd[offset + 1] >> 5;
+	if (hdmi_vic_len) {
+		DEV_DBG("%s: EDID: EVFRMT @ 0x%x of block 3, len = %02x\n",
+			__func__, offset, hdmi_vic_len);
+
+		for (i = 0; i < hdmi_vic_len; i++) {
+			video_format = HDMI_VFRMT_END + vsd[offset + 2 + i];
+			hdmi_edid_add_sink_video_format(&edid_ctrl->sink_data,
+				video_format);
+		}
+	}
+} /* hdmi_edid_get_extended_video_formats */
+
+static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl,
+	const u8 *data_buf, u32 num_of_cea_blocks)
+{
+	u8 i = 0;
+	u32 video_format = HDMI_VFRMT_640x480p60_4_3;
+	u32 has480p = false;
+	u8 len;
+	const u8 *edid_blk0 = NULL;
+	const u8 *edid_blk1 = NULL;
+	const u8 *svd = NULL;
+	u32 has60hz_mode = false;
+	u32 has50hz_mode = false;
+	struct hdmi_edid_sink_data *sink_data = NULL;
+
+	if (!edid_ctrl || !data_buf) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	edid_blk0 = &data_buf[0x0];
+	edid_blk1 = &data_buf[0x80];
+	svd = num_of_cea_blocks ?
+		hdmi_edid_find_block(data_buf+0x80, DBC_START_OFFSET, 2,
+			&len) : NULL;
+
+	sink_data = &edid_ctrl->sink_data;
+
+	sink_data->num_of_elements = 0;
+	sink_data->disp_multi_3d_mode_list_cnt = 0;
+	if (svd != NULL) {
+		++svd;
+		for (i = 0; i < len; ++i, ++svd) {
+			/*
+			 * The video identification code is the lower 7 bits
+			 * of each short video descriptor and maps directly
+			 * onto the driver's video format enumeration
+			 */
+			video_format = (*svd & 0x7F);
+			hdmi_edid_add_sink_video_format(sink_data,
+				video_format);
+			/* Make a note of the preferred video format */
+			if (i == 0)
+				sink_data->preferred_video_format =
+					video_format;
+
+			if (i < 16) {
+				sink_data->disp_multi_3d_mode_list[i]
+					= video_format;
+				sink_data->disp_multi_3d_mode_list_cnt++;
+			}
+
+			if (video_format <= HDMI_VFRMT_1920x1080p60_16_9 ||
+				video_format == HDMI_VFRMT_2880x480p60_4_3 ||
+				video_format == HDMI_VFRMT_2880x480p60_16_9)
+				has60hz_mode = true;
+
+			if ((video_format >= HDMI_VFRMT_720x576p50_4_3 &&
+				video_format <= HDMI_VFRMT_1920x1080p50_16_9) ||
+				video_format == HDMI_VFRMT_2880x576p50_4_3 ||
+				video_format == HDMI_VFRMT_2880x576p50_16_9 ||
+				video_format == HDMI_VFRMT_1920x1250i50_16_9)
+				has50hz_mode = true;
+
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+		}
+	} else if (!num_of_cea_blocks) {
+		/* Detailed timing descriptors */
+		u32 desc_offset = 0;
+		/*
+		 * * Maximum 4 timing descriptor in block 0 - No CEA
+		 *   extension in this case
+		 * * EDID_FIRST_TIMING_DESC[0x36] - 1st detailed timing
+		 *   descriptor
+		 * * EDID_DETAIL_TIMING_DESC_BLCK_SZ[0x12] - Each detailed
+		 *   timing descriptor has block size of 18
+		 */
+		while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
+			hdmi_edid_detail_desc(edid_blk0+0x36+desc_offset,
+				&video_format);
+
+			DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+				__func__, __LINE__,
+				msm_hdmi_mode_2string(video_format));
+
+			hdmi_edid_add_sink_video_format(sink_data,
+				video_format);
+
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+
+			/* Make a note of the preferred video format */
+			if (i == 0) {
+				sink_data->preferred_video_format =
+					video_format;
+			}
+			desc_offset += 0x12;
+			++i;
+		}
+	} else if (1 == num_of_cea_blocks) {
+		u32 desc_offset = 0;
+
+		/*
+		 * Read from both block 0 and block 1
+		 * Read EDID block[0] as above
+		 */
+		while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
+			hdmi_edid_detail_desc(edid_blk0+0x36+desc_offset,
+				&video_format);
+
+			DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+				__func__, __LINE__,
+				msm_hdmi_mode_2string(video_format));
+
+			hdmi_edid_add_sink_video_format(sink_data,
+				video_format);
+
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+
+			/* Make a note of the preferred video format */
+			if (i == 0) {
+				sink_data->preferred_video_format =
+					video_format;
+			}
+			desc_offset += 0x12;
+			++i;
+		}
+
+		/*
+		 * * Parse block 1 - CEA extension byte offset of first
+		 *   detailed timing generation - offset is relevant to
+		 *   the offset of block 1
+		 * * EDID_CEA_EXTENSION_FIRST_DESC[0x82]: Offset to CEA
+		 *   extension first timing desc - indicate the offset of
+		 *   the first detailed timing descriptor
+		 * * EDID_BLOCK_SIZE = 0x80  Each page size in the EDID ROM
+		 */
+		desc_offset = edid_blk1[0x02];
+		while (0 != edid_blk1[desc_offset]) {
+			hdmi_edid_detail_desc(edid_blk1+desc_offset,
+				&video_format);
+
+			DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
+				__func__, __LINE__,
+				msm_hdmi_mode_2string(video_format));
+
+			hdmi_edid_add_sink_video_format(sink_data,
+				video_format);
+			if (video_format == HDMI_VFRMT_640x480p60_4_3)
+				has480p = true;
+
+			/* Make a note of the preferred video format */
+			if (i == 0) {
+				sink_data->preferred_video_format =
+					video_format;
+			}
+			desc_offset += 0x12;
+			++i;
+		}
+	}
+
+	hdmi_edid_get_extended_video_formats(edid_ctrl, data_buf+0x80);
+
+	/* mandatory 3D formats */
+	if (edid_ctrl->present_3d) {
+		if (has60hz_mode) {
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080p24_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1280x720p60_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080i60_16_9,
+				SIDE_BY_SIDE_HALF);
+		}
+
+		if (has50hz_mode) {
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080p24_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1280x720p50_16_9,
+				FRAME_PACKING | TOP_AND_BOTTOM);
+			hdmi_edid_add_sink_3d_format(sink_data,
+				HDMI_VFRMT_1920x1080i50_16_9,
+				SIDE_BY_SIDE_HALF);
+		}
+
+		/* 3d format described in Vendor Specific Data */
+		hdmi_edid_get_display_vsd_3d_mode(data_buf, sink_data,
+			num_of_cea_blocks);
+	}
+
+	/*
+	 * Need to add default 640 by 480 timings, in case not described
+	 * in the EDID structure.
+	 * All DTV sink devices should support this mode
+	 */
+	if (!has480p)
+		hdmi_edid_add_sink_video_format(sink_data,
+			HDMI_VFRMT_640x480p60_4_3);
+} /* hdmi_edid_get_display_mode */
+
+int hdmi_edid_read(void *input)
+{
+	/* EDID_BLOCK_SIZE[0x80] Each page size in the EDID ROM */
+	u8 edid_buf[0x80 * 4];
+	u32 cea_extension_ver = 0;
+	u32 num_of_cea_blocks = 0;
+	u32 ieee_reg_id = 0;
+	u32 i = 1;
+	int status = 0;
+	char vendor_id[5];
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	edid_ctrl->pt_scan_info = 0;
+	edid_ctrl->it_scan_info = 0;
+	edid_ctrl->ce_scan_info = 0;
+	edid_ctrl->present_3d = 0;
+	memset(&edid_ctrl->sink_data, 0, sizeof(edid_ctrl->sink_data));
+	memset(edid_buf, 0, sizeof(edid_buf));
+	memset(edid_ctrl->audio_data_block, 0,
+		sizeof(edid_ctrl->audio_data_block));
+	memset(edid_ctrl->spkr_alloc_data_block, 0,
+		sizeof(edid_ctrl->spkr_alloc_data_block));
+	edid_ctrl->adb_size = 0;
+	edid_ctrl->sadb_size = 0;
+
+	status = hdmi_edid_read_block(edid_ctrl, 0, edid_buf);
+	if (status || !hdmi_edid_check_header(edid_buf)) {
+		if (!status)
+			status = -EPROTO;
+		DEV_ERR("%s: blk0 fail:%d[%02x%02x%02x%02x%02x%02x%02x%02x]\n",
+			__func__, status,
+			edid_buf[0], edid_buf[1], edid_buf[2], edid_buf[3],
+			edid_buf[4], edid_buf[5], edid_buf[6], edid_buf[7]);
+		goto error;
+	}
+	hdmi_edid_extract_vendor_id(edid_buf, vendor_id);
+
+	/* EDID_CEA_EXTENSION_FLAG[0x7E] - CEA extension byte */
+	num_of_cea_blocks = edid_buf[0x7E];
+	DEV_DBG("%s: No. of CEA blocks is [%u]\n", __func__,
+		num_of_cea_blocks);
+	/* Find out any CEA extension blocks following block 0 */
+	switch (num_of_cea_blocks) {
+	case 0: /* No CEA extension */
+		edid_ctrl->sink_mode = false;
+		DEV_DBG("HDMI DVI mode: %s\n",
+			edid_ctrl->sink_mode ? "no" : "yes");
+		break;
+	case 1: /* Read block 1 */
+		status = hdmi_edid_read_block(edid_ctrl, 1, &edid_buf[0x80]);
+		if (status) {
+			DEV_ERR("%s: ddc read block(1) failed: %d\n", __func__,
+				status);
+			goto error;
+		}
+		if (edid_buf[0x80] != 2)
+			num_of_cea_blocks = 0;
+		if (num_of_cea_blocks) {
+			ieee_reg_id =
+				hdmi_edid_extract_ieee_reg_id(edid_ctrl,
+					edid_buf+0x80);
+			if (ieee_reg_id == 0x0c03)
+				edid_ctrl->sink_mode = true;
+			else
+				edid_ctrl->sink_mode = false;
+
+			hdmi_edid_extract_latency_fields(edid_ctrl,
+				edid_buf+0x80);
+			hdmi_edid_extract_speaker_allocation_data(
+				edid_ctrl, edid_buf+0x80);
+			hdmi_edid_extract_audio_data_blocks(edid_ctrl,
+				edid_buf+0x80);
+			hdmi_edid_extract_3d_present(edid_ctrl,
+				edid_buf+0x80);
+			hdmi_edid_extract_extended_data_blocks(edid_ctrl,
+				edid_buf+0x80);
+		}
+		break;
+	case 2:
+	case 3:
+	case 4:
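+		/*
+		 * Only two 128-byte halves of edid_buf are reused here: odd
+		 * numbered blocks land at offset 0x80 while even numbered
+		 * blocks overwrite offset 0x00.
+		 */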
+		for (i = 1; i <= num_of_cea_blocks; i++) {
+			if (!(i % 2)) {
+				status = hdmi_edid_read_block(
+					edid_ctrl, i, edid_buf+0x00);
+				if (status) {
+					DEV_ERR("%s: read blk(%d) failed:%d\n",
+						__func__, i, status);
+					goto error;
+				}
+			} else {
+				status = hdmi_edid_read_block(
+					edid_ctrl, i, edid_buf+0x80);
+				if (status) {
+					DEV_ERR("%s: read blk(%d) failed:%d\n",
+						__func__, i, status);
+					goto error;
+				}
+			}
+		}
+		break;
+	default:
+		DEV_ERR("%s: ddc read failed, not supported multi-blocks: %d\n",
+			__func__, num_of_cea_blocks);
+		status = -EPROTO;
+		goto error;
+	}
+
+	if (num_of_cea_blocks) {
+		/* EDID_CEA_EXTENSION_VERSION[0x81]: Offset to CEA extension
+		 * version number - v1,v2,v3 (v1 is seldom, v2 is obsolete,
+		 * v3 most common) */
+		cea_extension_ver = edid_buf[0x81];
+	}
+
+	/* EDID_VERSION[0x12] - EDID Version */
+	/* EDID_REVISION[0x13] - EDID Revision */
+	DEV_INFO("%s: V=%d.%d #CEABlks=%d[V%d] ID=%s IEEE=%04x Ext=0x%02x\n",
+		__func__, edid_buf[0x12], edid_buf[0x13],
+		num_of_cea_blocks, cea_extension_ver, vendor_id, ieee_reg_id,
+		edid_buf[0x80]);
+
+	hdmi_edid_get_display_mode(edid_ctrl, edid_buf, num_of_cea_blocks);
+
+	return 0;
+
+error:
+	edid_ctrl->sink_data.num_of_elements = 1;
+	edid_ctrl->sink_data.disp_mode_list[0] = edid_ctrl->video_resolution;
+
+	return status;
+} /* hdmi_edid_read */
+
+/*
+ * If the sink specified support for both underscan/overscan then, by default,
+ * set the underscan bit. Only checking underscan support for preferred
+ * format and cea formats.
+ */
+u8 hdmi_edid_get_sink_scaninfo(void *input, u32 resolution)
+{
+	u8 scaninfo = 0;
+	int use_ce_scan_info = true;
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	if (resolution == edid_ctrl->sink_data.preferred_video_format) {
+		use_ce_scan_info = false;
+		switch (edid_ctrl->pt_scan_info) {
+		case 0:
+			/*
+			 * Need to use the info specified for the corresponding
+			 * IT or CE format
+			 */
+			DEV_DBG("%s: No underscan info for preferred V fmt\n",
+				__func__);
+			use_ce_scan_info = true;
+			break;
+		case 3:
+			DEV_DBG("%s: Set underscan bit for preferred V fmt\n",
+				__func__);
+			scaninfo = BIT(1);
+			break;
+		default:
+			DEV_DBG("%s: Underscan not set for preferred V fmt\n",
+				__func__);
+			break;
+		}
+	}
+
+	if (use_ce_scan_info) {
+		if (3 == edid_ctrl->ce_scan_info) {
+			DEV_DBG("%s: Setting underscan bit for CE video fmt\n",
+				__func__);
+			scaninfo |= BIT(1);
+		} else {
+			DEV_DBG("%s: Not setting underscan bit for CE V fmt\n",
+				__func__);
+		}
+	}
+
+end:
+	return scaninfo;
+} /* hdmi_edid_get_sink_scaninfo */
+
+u32 hdmi_edid_get_sink_mode(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return 0;
+	}
+
+	return edid_ctrl->sink_mode;
+} /* hdmi_edid_get_sink_mode */
+
+int hdmi_edid_get_audio_blk(void *input, struct msm_hdmi_audio_edid_blk *blk)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl || !blk) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	blk->audio_data_blk = edid_ctrl->audio_data_block;
+	blk->audio_data_blk_size = edid_ctrl->adb_size;
+
+	blk->spk_alloc_data_blk = edid_ctrl->spkr_alloc_data_block;
+	blk->spk_alloc_data_blk_size = edid_ctrl->sadb_size;
+
+	return 0;
+} /* hdmi_edid_get_audio_blk */
+
+void hdmi_edid_set_video_resolution(void *input, u32 resolution)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	edid_ctrl->video_resolution = resolution;
+
+	if (1 == edid_ctrl->sink_data.num_of_elements)
+		edid_ctrl->sink_data.disp_mode_list[0] = resolution;
+} /* hdmi_edid_set_video_resolution */
+
+void hdmi_edid_deinit(void *input)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+
+	if (edid_ctrl) {
+		sysfs_remove_group(edid_ctrl->init_data.sysfs_kobj,
+			&hdmi_edid_fs_attrs_group);
+		kfree(edid_ctrl);
+	}
+} /* hdmi_edid_deinit */
+
+void *hdmi_edid_init(struct hdmi_edid_init_data *init_data)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = NULL;
+
+	if (!init_data || !init_data->io ||
+		!init_data->mutex || !init_data->sysfs_kobj ||
+		!init_data->ddc_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto error;
+	}
+
+	edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
+	if (!edid_ctrl) {
+		DEV_ERR("%s: Out of memory\n", __func__);
+		goto error;
+	}
+
+	edid_ctrl->init_data = *init_data;
+	edid_ctrl->sink_mode = false;
+
+	if (sysfs_create_group(init_data->sysfs_kobj,
+		&hdmi_edid_fs_attrs_group)) {
+		DEV_ERR("%s: EDID sysfs create failed\n", __func__);
+		kfree(edid_ctrl);
+		edid_ctrl = NULL;
+	}
+
+error:
+	return (void *)edid_ctrl;
+} /* hdmi_edid_init */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_edid.h b/drivers/video/msm/mdss/mdss_hdmi_edid.h
new file mode 100644
index 0000000..e8d1b7c
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_edid.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HDMI_EDID_H__
+#define __HDMI_EDID_H__
+
+#include <mach/msm_hdmi_audio_codec.h>
+#include "mdss_hdmi_util.h"
+
+struct hdmi_edid_init_data {
+	struct dss_io_data *io;
+	struct mutex *mutex;
+	struct kobject *sysfs_kobj;
+
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+};
+
+int hdmi_edid_read(void *edid_ctrl);
+u8 hdmi_edid_get_sink_scaninfo(void *edid_ctrl, u32 resolution);
+u32 hdmi_edid_get_sink_mode(void *edid_ctrl);
+int hdmi_edid_get_audio_blk(void *edid_ctrl,
+	struct msm_hdmi_audio_edid_blk *blk);
+void hdmi_edid_set_video_resolution(void *edid_ctrl, u32 resolution);
+void hdmi_edid_deinit(void *edid_ctrl);
+void *hdmi_edid_init(struct hdmi_edid_init_data *init_data);
+
+#endif /* __HDMI_EDID_H__ */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_hdcp.c b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
new file mode 100644
index 0000000..2e20787
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_hdcp.c
@@ -0,0 +1,1153 @@
+/* Copyright (c) 2010-2012 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "mdss_hdmi_hdcp.h"
+
+#define HDCP_STATE_NAME (hdcp_state_name(hdcp_ctrl->hdcp_state))
+
+/* HDCP Keys state based on HDMI_HDCP_LINK0_STATUS:KEYS_STATE */
+#define HDCP_KEYS_STATE_NO_KEYS		0
+#define HDCP_KEYS_STATE_NOT_CHECKED	1
+#define HDCP_KEYS_STATE_CHECKING	2
+#define HDCP_KEYS_STATE_VALID		3
+#define HDCP_KEYS_STATE_AKSV_NOT_VALID	4
+#define HDCP_KEYS_STATE_CHKSUM_MISMATCH	5
+#define HDCP_KEYS_STATE_PROD_AKSV	6
+#define HDCP_KEYS_STATE_RESERVED	7
+
+struct hdmi_hdcp_ctrl {
+	enum hdmi_hdcp_state hdcp_state;
+	struct delayed_work hdcp_auth_work;
+	struct work_struct hdcp_int_work;
+	struct completion r0_checked;
+	struct hdmi_hdcp_init_data init_data;
+};
+
+const char *hdcp_state_name(enum hdmi_hdcp_state hdcp_state)
+{
+	switch (hdcp_state) {
+	case HDCP_STATE_INACTIVE:	return "HDCP_STATE_INACTIVE";
+	case HDCP_STATE_AUTHENTICATING:	return "HDCP_STATE_AUTHENTICATING";
+	case HDCP_STATE_AUTHENTICATED:	return "HDCP_STATE_AUTHENTICATED";
+	case HDCP_STATE_AUTH_FAIL:	return "HDCP_STATE_AUTH_FAIL";
+	default:			return "???";
+	}
+} /* hdcp_state_name */
+
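+/*
+ * Count the set bits in a byte array. An HDCP KSV is valid only if it
+ * contains exactly 20 ones (and thus 20 zeros) in its 40 bits.
+ */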
+static int hdmi_hdcp_count_one(u8 *array, u8 len)
+{
+	int i, j, count = 0;
+
+	for (i = 0; i < len; i++)
+		for (j = 0; j < 8; j++)
+			count += (((array[i] >> j) & 0x1) ? 1 : 0);
+	return count;
+} /* hdmi_hdcp_count_one */
+
+static void reset_hdcp_ddc_failures(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int hdcp_ddc_ctrl1_reg;
+	int hdcp_ddc_status;
+	int failure;
+	int nack0;
+	struct dss_io_data *io;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	/* Check for any DDC transfer failures */
+	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+	failure = (hdcp_ddc_status >> 16) & 0x1;
+	nack0 = (hdcp_ddc_status >> 14) & 0x1;
+	DEV_DBG("%s: %s: On Entry: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+		__func__, HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+
+	if (failure == 0x1) {
+		/*
+		 * Indicates that the last HDCP HW DDC transfer failed.
+		 * This occurs when a transfer is attempted with HDCP DDC
+		 * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+		 * matches HDCP_DDC_RETRY_CNT.
+		 * Failure occurred, let's clear it.
+		 */
+		DEV_DBG("%s: %s: DDC failure detected.HDCP_DDC_STATUS=0x%08x\n",
+			 __func__, HDCP_STATE_NAME, hdcp_ddc_status);
+
+		/* First, Disable DDC */
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, BIT(0));
+
+		/* ACK the Failure to Clear it */
+		hdcp_ddc_ctrl1_reg = DSS_REG_R(io, HDMI_HDCP_DDC_CTRL_1);
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_1,
+			hdcp_ddc_ctrl1_reg | BIT(0));
+
+		/* Check if the FAILURE got Cleared */
+		hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+		hdcp_ddc_status = (hdcp_ddc_status >> 16) & BIT(0);
+		if (hdcp_ddc_status == 0x0)
+			DEV_DBG("%s: %s: HDCP DDC Failure cleared\n", __func__,
+				HDCP_STATE_NAME);
+		else
+			DEV_WARN("%s: %s: Unable to clear HDCP DDC Failure",
+				__func__, HDCP_STATE_NAME);
+
+		/* Re-Enable HDCP DDC */
+		DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, 0);
+	}
+
+	if (nack0 == 0x1) {
+		DEV_DBG("%s: %s: Before: HDMI_DDC_SW_STATUS=0x%08x\n", __func__,
+			HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+		/* Reset HDMI DDC software status */
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(3));
+		msleep(20);
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) & ~(BIT(3)));
+
+		/* Reset HDMI DDC Controller */
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(1));
+		msleep(20);
+		DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+			DSS_REG_R(io, HDMI_DDC_CTRL) & ~BIT(1));
+		DEV_DBG("%s: %s: After: HDMI_DDC_SW_STATUS=0x%08x\n", __func__,
+			HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+	}
+
+	hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+
+	failure = (hdcp_ddc_status >> 16) & BIT(0);
+	nack0 = (hdcp_ddc_status >> 14) & BIT(0);
+	DEV_DBG("%s: %s: On Exit: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+		__func__, HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+} /* reset_hdcp_ddc_failures */
+
+static int hdmi_hdcp_authentication_part1(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc;
+	u32 qfprom_aksv_lsb, qfprom_aksv_msb;
+	u32 link0_aksv_0, link0_aksv_1;
+	u32 link0_bksv_0, link0_bksv_1;
+	u32 link0_an_0, link0_an_1;
+	u32 timeout_count;
+	bool is_match;
+	bool stale_an = false;
+	struct dss_io_data *io;
+	u8 aksv[5], bksv[5];
+	u8 an[8];
+	u8 bcaps;
+	struct hdmi_tx_ddc_data ddc_data;
+	u32 link0_status, an_ready, keys_state;
+	u8 buf[0xFF];
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io ||
+		!hdcp_ctrl->init_data.qfprom_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (HDCP_STATE_AUTHENTICATING != hdcp_ctrl->hdcp_state) {
+		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	/* Fetch aksv from QFPROM, this info should be public. */
+	qfprom_aksv_lsb = DSS_REG_R(hdcp_ctrl->init_data.qfprom_io,
+		HDCP_KSV_LSB);
+	qfprom_aksv_msb = DSS_REG_R(hdcp_ctrl->init_data.qfprom_io,
+		HDCP_KSV_MSB);
+
+	aksv[0] =  qfprom_aksv_lsb        & 0xFF;
+	aksv[1] = (qfprom_aksv_lsb >> 8)  & 0xFF;
+	aksv[2] = (qfprom_aksv_lsb >> 16) & 0xFF;
+	aksv[3] = (qfprom_aksv_lsb >> 24) & 0xFF;
+	aksv[4] =  qfprom_aksv_msb        & 0xFF;
+
+	/* check there are 20 ones in AKSV */
+	if (hdmi_hdcp_count_one(aksv, 5) != 20) {
+		DEV_ERR("%s: %s: AKSV QFPROM doesn't have 20 1's, 20 0's\n",
+			__func__, HDCP_STATE_NAME);
+		DEV_ERR("%s: %s: QFPROM AKSV chk failed (AKSV=%02x%08x)\n",
+			__func__, HDCP_STATE_NAME, qfprom_aksv_msb,
+			qfprom_aksv_lsb);
+		rc = -EINVAL;
+		goto error;
+	}
+	DEV_DBG("%s: %s: AKSV=%02x%08x\n", __func__, HDCP_STATE_NAME,
+		qfprom_aksv_msb, qfprom_aksv_lsb);
+
+	/*
+	 * Write AKSV read from QFPROM to the HDCP registers.
+	 * This step is needed for HDCP authentication and must be
+	 * written before enabling HDCP.
+	 */
+	DSS_REG_W(io, HDMI_HDCP_SW_LOWER_AKSV, qfprom_aksv_lsb);
+	DSS_REG_W(io, HDMI_HDCP_SW_UPPER_AKSV, qfprom_aksv_msb);
+
+	/* Check to see if link0_Status has stale values for An ready bit */
+	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+	DEV_DBG("%s: %s: Before enabling cipher Link0_status=0x%08x\n",
+		__func__, HDCP_STATE_NAME, link0_status);
+	if (link0_status & (BIT(8) | BIT(9))) {
+		DEV_DBG("%s: %s: An ready even before enabling HDCP\n",
+		__func__, HDCP_STATE_NAME);
+		stale_an = true;
+	}
+
+	/*
+	 * Read BCAPS
+	 * We need to first try to read an HDCP register on the sink to see if
+	 * the sink is ready for HDCP authentication
+	 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x40;
+	ddc_data.data_buf = &bcaps;
+	ddc_data.data_len = 1;
+	ddc_data.request_len = 1;
+	ddc_data.retry = 5;
+	ddc_data.what = "Bcaps";
+	ddc_data.no_align = true;
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
+	if (rc) {
+		DEV_ERR("%s: %s: BCAPS read failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+	DEV_DBG("%s: %s: BCAPS=%02x\n", __func__, HDCP_STATE_NAME, bcaps);
+
+	/*
+	 * HDCP setup prior to enabling HDCP_CTRL.
+	 * Setup seed values for random number An.
+	 */
+	DSS_REG_W(io, HDMI_HDCP_ENTROPY_CTRL0, 0xB1FFB0FF);
+	DSS_REG_W(io, HDMI_HDCP_ENTROPY_CTRL1, 0xF00DFACE);
+
+	/* Disable the RngCipher state */
+	DSS_REG_W(io, HDMI_HDCP_DEBUG_CTRL,
+		DSS_REG_R(io, HDMI_HDCP_DEBUG_CTRL) & ~(BIT(2)));
+	DEV_DBG("%s: %s: HDCP_DEBUG_CTRL=0x%08x\n", __func__, HDCP_STATE_NAME,
+		DSS_REG_R(io, HDMI_HDCP_DEBUG_CTRL));
+
+	/* Ensure that all register writes are completed before
+	 * enabling HDCP cipher
+	 */
+	wmb();
+
+	/*
+	 * Enable HDCP
+	 * This needs to be done as early as possible in order for the
+	 * hardware to make An available to read
+	 */
+	DSS_REG_W(io, HDMI_HDCP_CTRL, BIT(0));
+
+	/* Clear any DDC failures from previous tries */
+	reset_hdcp_ddc_failures(hdcp_ctrl);
+
+	/* Write BCAPS to the hardware */
+	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA12, bcaps);
+
+	/*
+	 * If we had stale values for the An ready bit, it should most
+	 * likely be cleared now after enabling HDCP cipher
+	 */
+	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+	DEV_DBG("%s: %s: After enabling HDCP Link0_Status=0x%08x\n",
+		__func__, HDCP_STATE_NAME, link0_status);
+	if (!(link0_status & (BIT(8) | BIT(9)))) {
+		DEV_DBG("%s: %s: An not ready after enabling HDCP\n",
+			__func__, HDCP_STATE_NAME);
+		stale_an = false;
+	}
+
+	/* Wait for HDCP keys to be checked and validated */
+	timeout_count = 100;
+	keys_state = (link0_status >> 28) & 0x7;
+	while ((keys_state != HDCP_KEYS_STATE_VALID) &&
+		timeout_count--) {
+		link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+		keys_state = (link0_status >> 28) & 0x7;
+		DEV_DBG("%s: %s: Keys not ready(%d). s=%d\n, l0=%0x08x",
+			__func__, HDCP_STATE_NAME, timeout_count,
+			keys_state, link0_status);
+		msleep(20);
+	}
+
+	if (!timeout_count) {
+		DEV_ERR("%s: %s: Invalid Keys State: %d\n", __func__,
+			HDCP_STATE_NAME, keys_state);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * 1.1_Features turned off by default.
+	 * No need to write AInfo since 1.1_Features is disabled.
+	 */
+	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA4, 0);
+
+	/* Wait for An0 and An1 bit to be ready */
+	timeout_count = 100;
+	do {
+		link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+		an_ready = (link0_status & BIT(8)) && (link0_status & BIT(9));
+		if (!an_ready) {
+			DEV_DBG("%s: %s: An not ready(%d). l0_status=0x%08x\n",
+				__func__, HDCP_STATE_NAME, timeout_count,
+				link0_status);
+			msleep(20);
+		}
+	} while (!an_ready && timeout_count--);
+
+	if (!timeout_count) {
+		rc = -ETIMEDOUT;
+		DEV_ERR("%s: %s: timedout, An0=%ld, An1=%ld\n", __func__,
+			HDCP_STATE_NAME, (link0_status & BIT(8)) >> 8,
+			(link0_status & BIT(9)) >> 9);
+		goto error;
+	}
+
+	/*
+	 * In cases where An_ready bits had stale values, it would be
+	 * better to delay reading of An to avoid any potential of this
+	 * read being blocked
+	 */
+	if (stale_an) {
+		msleep(200);
+		stale_an = false;
+	}
+
+	/* Read An0 and An1 */
+	link0_an_0 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA5);
+	link0_an_1 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA6);
+
+	/* Read AKSV */
+	link0_aksv_0 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA3);
+	link0_aksv_1 = DSS_REG_R(io, HDMI_HDCP_RCVPORT_DATA4);
+
+	/* Copy An and AKSV to byte arrays for transmission */
+	aksv[0] =  link0_aksv_0        & 0xFF;
+	aksv[1] = (link0_aksv_0 >> 8)  & 0xFF;
+	aksv[2] = (link0_aksv_0 >> 16) & 0xFF;
+	aksv[3] = (link0_aksv_0 >> 24) & 0xFF;
+	aksv[4] =  link0_aksv_1        & 0xFF;
+
+	an[0] =  link0_an_0        & 0xFF;
+	an[1] = (link0_an_0 >> 8)  & 0xFF;
+	an[2] = (link0_an_0 >> 16) & 0xFF;
+	an[3] = (link0_an_0 >> 24) & 0xFF;
+	an[4] =  link0_an_1        & 0xFF;
+	an[5] = (link0_an_1 >> 8)  & 0xFF;
+	an[6] = (link0_an_1 >> 16) & 0xFF;
+	an[7] = (link0_an_1 >> 24) & 0xFF;
+
+	/* Write An to offset 0x18 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x18;
+	ddc_data.data_buf = an;
+	ddc_data.data_len = 8;
+	ddc_data.what = "An";
+	rc = hdmi_ddc_write(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
+	if (rc) {
+		DEV_ERR("%s: %s: An write failed\n", __func__, HDCP_STATE_NAME);
+		goto error;
+	}
+
+	/* Write AKSV to offset 0x10 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x10;
+	ddc_data.data_buf = aksv;
+	ddc_data.data_len = 5;
+	ddc_data.what = "Aksv";
+	rc = hdmi_ddc_write(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
+	if (rc) {
+		DEV_ERR("%s: %s: AKSV write failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+	DEV_DBG("%s: %s: Link0-AKSV=%02x%08x\n", __func__,
+		HDCP_STATE_NAME, link0_aksv_1 & 0xFF, link0_aksv_0);
+
+	/* Read BKSV at offset 0x00 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x00;
+	ddc_data.data_buf = bksv;
+	ddc_data.data_len = 5;
+	ddc_data.request_len = 5;
+	ddc_data.retry = 5;
+	ddc_data.what = "Bksv";
+	ddc_data.no_align = true;
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
+	if (rc) {
+		DEV_ERR("%s: %s: BKSV read failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+
+	/* check there are 20 ones in BKSV */
+	if (hdmi_hdcp_count_one(bksv, 5) != 20) {
+		DEV_ERR("%s: %s: BKSV doesn't have 20 1's and 20 0's\n",
+			__func__, HDCP_STATE_NAME);
+		DEV_ERR("%s: %s: BKSV chk fail. BKSV=%02x%02x%02x%02x%02x\n",
+			__func__, HDCP_STATE_NAME, bksv[4], bksv[3], bksv[2],
+			bksv[1], bksv[0]);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	link0_bksv_0 = bksv[3];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
+	link0_bksv_1 = bksv[4];
+	DEV_DBG("%s: %s: BKSV=%02x%08x\n", __func__, HDCP_STATE_NAME,
+		link0_bksv_1, link0_bksv_0);
+
+	/* Write BKSV read from sink to HDCP registers */
+	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA0, link0_bksv_0);
+	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA1, link0_bksv_1);
+
+	/* Enable HDCP interrupts and ack/clear any stale interrupts */
+	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0xE6);
+
+	/*
+	 * HDCP Compliance Test case 1A-01:
+	 * Wait here at least 100ms before reading R0'
+	 */
+	msleep(125);
+
+	/* Read R0' at offset 0x08 */
+	memset(buf, 0, sizeof(buf));
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x08;
+	ddc_data.data_buf = buf;
+	ddc_data.data_len = 2;
+	ddc_data.request_len = 2;
+	ddc_data.retry = 5;
+	ddc_data.what = "R0'";
+	ddc_data.no_align = true;
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
+	if (rc) {
+		DEV_ERR("%s: %s: R0' read failed\n", __func__, HDCP_STATE_NAME);
+		goto error;
+	}
+	DEV_DBG("%s: %s: R0'=%02x%02x\n", __func__, HDCP_STATE_NAME,
+		buf[1], buf[0]);
+
+	/* Write R0' to HDCP registers and check to see if it is a match */
+	INIT_COMPLETION(hdcp_ctrl->r0_checked);
+	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA2_0, (((u32)buf[1]) << 8) | buf[0]);
+	timeout_count = wait_for_completion_interruptible_timeout(
+		&hdcp_ctrl->r0_checked, HZ*2);
+	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+	is_match = link0_status & BIT(12);
+	if (!is_match) {
+		DEV_DBG("%s: %s: Link0_Status=0x%08x\n", __func__,
+			HDCP_STATE_NAME, link0_status);
+		if (!timeout_count) {
+			DEV_ERR("%s: %s: Timeout. No R0 mtch. R0'=%02x%02x\n",
+				__func__, HDCP_STATE_NAME, buf[1], buf[0]);
+			rc = -ETIMEDOUT;
+			goto error;
+		} else {
+			DEV_ERR("%s: %s: R0 mismatch. R0'=%02x%02x\n", __func__,
+				HDCP_STATE_NAME, buf[1], buf[0]);
+			rc = -EINVAL;
+			goto error;
+		}
+	} else {
+		DEV_DBG("%s: %s: R0 matches\n", __func__, HDCP_STATE_NAME);
+	}
+
+error:
+	if (rc) {
+		DEV_ERR("%s: %s: Authentication Part I failed\n", __func__,
+			HDCP_STATE_NAME);
+	} else {
+		/* Enable HDCP Encryption */
+		DSS_REG_W(io, HDMI_HDCP_CTRL, BIT(0) | BIT(8));
+		DEV_INFO("%s: %s: Authentication Part I successful\n",
+			__func__, HDCP_STATE_NAME);
+	}
+	return rc;
+} /* hdmi_hdcp_authentication_part1 */
+
+#define READ_WRITE_V_H(off, name, reg) \
+do { \
+	ddc_data.offset = (off); \
+	memset(what, 0, sizeof(what)); \
+	snprintf(what, sizeof(what), "%s", (name)); \
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data); \
+	if (rc) { \
+		DEV_ERR("%s: %s: Read %s failed\n", __func__, HDCP_STATE_NAME, \
+			what); \
+		goto error; \
+	} \
+	DEV_DBG("%s: %s: %s: buf[0]=%x, buf[1]=%x, buf[2]=%x, buf[3]=%x\n", \
+			__func__, HDCP_STATE_NAME, what, buf[0], buf[1], \
+			buf[2], buf[3]); \
+	DSS_REG_W(io, (reg), \
+			(buf[3] << 24 | buf[2] << 16 | buf[1] << 8 | buf[0])); \
+} while (0)
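+
+/*
+ * READ_WRITE_V_H() expects rc, what, buf, ddc_data, io and hdcp_ctrl to
+ * be in scope at the call site: each invocation reads one 32-bit word of
+ * V' from the sink over DDC and mirrors it into the given HDCP
+ * receive-port register, where the HW compares it against its own SHA-1
+ * computation (see the V_MATCHES wait in authentication part II).
+ */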
+
+static int hdmi_hdcp_transfer_v_h(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	char what[20];
+	int rc = 0;
+	u8 buf[4];
+	struct hdmi_tx_ddc_data ddc_data;
+	struct dss_io_data *io;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.data_buf = buf;
+	ddc_data.data_len = 4;
+	ddc_data.request_len = 4;
+	ddc_data.retry = 5;
+	ddc_data.what = what;
+	ddc_data.no_align = true;
+
+	/* Read V'.H0 (4 bytes) at offset 0x20 */
+	READ_WRITE_V_H(0x20, "V' H0", HDMI_HDCP_RCVPORT_DATA7);
+
+	/* Read V'.H1 (4 bytes) at offset 0x24 */
+	READ_WRITE_V_H(0x24, "V' H1", HDMI_HDCP_RCVPORT_DATA8);
+
+	/* Read V'.H2 (4 bytes) at offset 0x28 */
+	READ_WRITE_V_H(0x28, "V' H2", HDMI_HDCP_RCVPORT_DATA9);
+
+	/* Read V'.H3 (4 bytes) at offset 0x2C */
+	READ_WRITE_V_H(0x2C, "V' H3", HDMI_HDCP_RCVPORT_DATA10);
+
+	/* Read V'.H4 (4 bytes) at offset 0x30 */
+	READ_WRITE_V_H(0x30, "V' H4", HDMI_HDCP_RCVPORT_DATA11);
+
+error:
+	return rc;
+}
+
+static int hdmi_hdcp_authentication_part2(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+	int rc, cnt, i;
+	struct hdmi_tx_ddc_data ddc_data;
+	u32 timeout_count, down_stream_devices;
+	u8 buf[0xFF];
+	u8 ksv_fifo[5 * 127];
+	u8 bcaps;
+	u16 bstatus, max_devs_exceeded, max_cascade_exceeded;
+	u32 link0_status;
+	u32 ksv_bytes;
+	struct dss_io_data *io;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (HDCP_STATE_AUTHENTICATING != hdcp_ctrl->hdcp_state) {
+		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	memset(buf, 0, sizeof(buf));
+	memset(ksv_fifo, 0, sizeof(ksv_fifo));
+
+	/* Read BCAPS at offset 0x40 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x40;
+	ddc_data.data_buf = &bcaps;
+	ddc_data.data_len = 1;
+	ddc_data.request_len = 1;
+	ddc_data.retry = 5;
+	ddc_data.what = "Bcaps";
+	ddc_data.no_align = false;
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
+	if (rc) {
+		DEV_ERR("%s: %s: BCAPS read failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+	DEV_DBG("%s: %s: BCAPS=%02x (%s)\n", __func__, HDCP_STATE_NAME, bcaps,
+		(bcaps & BIT(6)) ? "repeater" : "no repeater");
+
+	/* if REPEATER (Bit 6), perform Part2 Authentication */
+	if (!(bcaps & BIT(6))) {
+		DEV_INFO("%s: %s: auth part II skipped, no repeater\n",
+			__func__, HDCP_STATE_NAME);
+		return 0;
+	}
+
+	/* Wait until READY bit is set in BCAPS */
+	timeout_count = 50;
+	while (!(bcaps && BIT(5)) && timeout_count) {
+		msleep(100);
+		timeout_count--;
+		/* Read BCAPS at offset 0x40 */
+		memset(&ddc_data, 0, sizeof(ddc_data));
+		ddc_data.dev_addr = 0x74;
+		ddc_data.offset = 0x40;
+		ddc_data.data_buf = &bcaps;
+		ddc_data.data_len = 1;
+		ddc_data.request_len = 1;
+		ddc_data.retry = 5;
+		ddc_data.what = "Bcaps";
+		ddc_data.no_align = false;
+		rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
+		if (rc) {
+			DEV_ERR("%s: %s: BCAPS read failed\n", __func__,
+				HDCP_STATE_NAME);
+			goto error;
+		}
+	}
+
+	/* Read BSTATUS at offset 0x41 */
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x41;
+	ddc_data.data_buf = buf;
+	ddc_data.data_len = 2;
+	ddc_data.request_len = 2;
+	ddc_data.retry = 5;
+	ddc_data.what = "Bstatuss";
+	ddc_data.no_align = false;
+	rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
+	if (rc) {
+		DEV_ERR("%s: %s: BSTATUS read failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+	bstatus = buf[1];
+	bstatus = (bstatus << 8) | buf[0];
+
+	/* Write BSTATUS and BCAPS to HDCP registers */
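+	/* RCVPORT_DATA12 packs BCAPS into bits [7:0] and BSTATUS into bits [23:8] */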
+	DSS_REG_W(io, HDMI_HDCP_RCVPORT_DATA12, bcaps | (bstatus << 8));
+
+	down_stream_devices = bstatus & 0x7F;
+	if (down_stream_devices == 0) {
+		/*
+		 * If no downstream devices are attached to the repeater
+		 * then part II fails.
+		 * todo: The other approach would be to continue PART II.
+		 */
+		DEV_ERR("%s: %s: No downstream devices\n", __func__,
+			HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-05:
+	 * Check whether the no. of devices connected to the repeater
+	 * exceeds max_devices_connected, reported in bit 7 of Bstatus.
+	 */
+	max_devs_exceeded = (bstatus & BIT(7)) >> 7;
+	if (max_devs_exceeded == 0x01) {
+		DEV_ERR("%s: %s: no. of devs connected exceeds max allowed",
+			__func__, HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance 1B-06:
+	 * Check whether the cascade depth of the repeater
+	 * exceeds max_cascade_connected, reported in bit 11 of Bstatus.
+	 */
+	max_cascade_exceeded = (bstatus & BIT(11)) >> 11;
+	if (max_cascade_exceeded == 0x01) {
+		DEV_ERR("%s: %s: no. of cascade conn exceeds max allowed",
+			__func__, HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/*
+	 * Read the KSV FIFO over DDC.
+	 * The Key Selection Vector FIFO is used to pull downstream KSVs
+	 * from HDCP repeaters.
+	 * All bytes (DEVICE_COUNT * 5) must be read in a single,
+	 * auto-incrementing access.
+	 * All bytes read as 0x00 for HDCP receivers that are not
+	 * HDCP repeaters (REPEATER == 0).
+	 */
+	ksv_bytes = 5 * down_stream_devices;
+	memset(&ddc_data, 0, sizeof(ddc_data));
+	ddc_data.dev_addr = 0x74;
+	ddc_data.offset = 0x43;
+	ddc_data.data_buf = ksv_fifo;
+	ddc_data.data_len = ksv_bytes;
+	ddc_data.request_len = ksv_bytes;
+	ddc_data.retry = 5;
+	ddc_data.what = "KSV FIFO";
+	ddc_data.no_align = true;
+	cnt = 0;
+	do {
+		rc = hdmi_ddc_read(hdcp_ctrl->init_data.ddc_ctrl, &ddc_data);
+		if (rc) {
+			DEV_ERR("%s: %s: KSV FIFO read failed\n", __func__,
+				HDCP_STATE_NAME);
+			/*
+			 * HDCP Compliance Test case 1B-01:
+			 * Wait and retry until all the KSV bytes can be
+			 * read from the KSV FIFO register.
+			 */
+			msleep(25);
+		} else {
+			break;
+		}
+		cnt++;
+	} while (cnt != 20);
+
+	if (cnt == 20)
+		goto error;
+
+	rc = hdmi_hdcp_transfer_v_h(hdcp_ctrl);
+	if (rc)
+		goto error;
+
+	/*
+	 * Write the KSV FIFO to HDCP_SHA_DATA.
+	 * This is done 1 byte at a time, starting with the LSB.
+	 * On the very last byte written, the HDCP_SHA_DATA_DONE bit[0]
+	 * is set.
+	 */
+
+	/* First, reset SHA engine */
+	DSS_REG_W(io, HDMI_HDCP_SHA_CTRL, 1);
+	/* Next, enable SHA engine, SEL=DIGA_HDCP */
+	DSS_REG_W(io, HDMI_HDCP_SHA_CTRL, 0);
+
+	for (i = 0; i < ksv_bytes - 1; i++) {
+		/* Write KSV byte and do not set DONE bit[0] */
+		DSS_REG_W_ND(io, HDMI_HDCP_SHA_DATA, ksv_fifo[i] << 16);
+
+		/*
+		 * Once 64 bytes have been written, we need to poll for
+		 * HDCP_SHA_BLOCK_DONE before writing any further
+		 */
+		if (i && !((i + 1) % 64)) {
+			timeout_count = 100;
+			while (!(DSS_REG_R(io, HDMI_HDCP_SHA_STATUS) & BIT(0))
+				&& (--timeout_count)) {
+				DEV_DBG("%s: %s: Wrote 64 bytes KVS FIFO\n",
+					__func__, HDCP_STATE_NAME);
+				DEV_DBG("%s: %s: HDCP_SHA_STATUS=%08x\n",
+					__func__, HDCP_STATE_NAME,
+					DSS_REG_R(io, HDMI_HDCP_SHA_STATUS));
+				msleep(20);
+			}
+			if (!timeout_count) {
+				rc = -ETIMEDOUT;
+				DEV_ERR("%s: %s: Write KSV FIFO timedout",
+					__func__, HDCP_STATE_NAME);
+				goto error;
+			}
+		}
+
+	}
+
+	/* Write 1 to DONE bit[0] along with the last KSV byte */
+	DSS_REG_W_ND(io, HDMI_HDCP_SHA_DATA,
+			(ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
+
+	/* Now wait for HDCP_SHA_COMP_DONE */
+	timeout_count = 100;
+	while ((0x10 != (DSS_REG_R(io, HDMI_HDCP_SHA_STATUS)
+		& 0xFFFFFF10)) && --timeout_count)
+		msleep(20);
+	if (!timeout_count) {
+		rc = -ETIMEDOUT;
+		DEV_ERR("%s: %s: SHA computation timedout", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+
+	/* Wait for V_MATCHES */
+	timeout_count = 100;
+	link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+	while (((link0_status & BIT(20)) != BIT(20)) && --timeout_count) {
+		DEV_DBG("%s: %s: Waiting for V_MATCHES(%d). l0_status=0x%08x\n",
+			__func__, HDCP_STATE_NAME, timeout_count, link0_status);
+		msleep(20);
+		link0_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+	}
+	if (!timeout_count) {
+		rc = -ETIMEDOUT;
+		DEV_ERR("%s: %s: HDCP V Match timedout", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+
+error:
+	if (rc)
+		DEV_ERR("%s: %s: Authentication Part II failed\n", __func__,
+			HDCP_STATE_NAME);
+	else
+		DEV_INFO("%s: %s: Authentication Part II successful\n",
+			__func__, HDCP_STATE_NAME);
+	return rc;
+} /* hdmi_hdcp_authentication_part2 */
+
+static void hdmi_hdcp_int_work(struct work_struct *work)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(work,
+		struct hdmi_hdcp_ctrl, hdcp_int_work);
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAIL;
+	mutex_unlock(hdcp_ctrl->init_data.mutex);
+
+	if (hdcp_ctrl->init_data.notify_status) {
+		hdcp_ctrl->init_data.notify_status(
+			hdcp_ctrl->init_data.cb_data,
+			hdcp_ctrl->hdcp_state);
+	}
+} /* hdmi_hdcp_int_work */
+
+static void hdmi_hdcp_auth_work(struct work_struct *work)
+{
+	int rc;
+	struct delayed_work *dw = to_delayed_work(work);
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = container_of(dw,
+		struct hdmi_hdcp_ctrl, hdcp_auth_work);
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (HDCP_STATE_AUTHENTICATING != hdcp_ctrl->hdcp_state) {
+		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		return;
+	}
+
+	rc = hdmi_hdcp_authentication_part1(hdcp_ctrl);
+	if (rc) {
+		DEV_DBG("%s: %s: HDCP Auth Part I failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+
+	rc = hdmi_hdcp_authentication_part2(hdcp_ctrl);
+	if (rc) {
+		DEV_DBG("%s: %s: HDCP Auth Part II failed\n", __func__,
+			HDCP_STATE_NAME);
+		goto error;
+	}
+
+error:
+	/*
+	 * Ensure that the state did not change during authentication.
+	 * If it did, it means that deauthenticate/reauthenticate was
+	 * called. In that case, this function need not notify HDMI Tx
+	 * of the result
+	 */
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
+		if (rc)
+			hdcp_ctrl->hdcp_state = HDCP_STATE_AUTH_FAIL;
+		else
+			hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATED;
+		mutex_unlock(hdcp_ctrl->init_data.mutex);
+
+		/* Notify HDMI Tx controller of the result */
+		DEV_DBG("%s: %s: Notifying HDMI Tx of auth result\n",
+			__func__, HDCP_STATE_NAME);
+		if (hdcp_ctrl->init_data.notify_status) {
+			hdcp_ctrl->init_data.notify_status(
+				hdcp_ctrl->init_data.cb_data,
+				hdcp_ctrl->hdcp_state);
+		}
+	} else {
+		DEV_DBG("%s: %s: HDCP state changed during authentication\n",
+			__func__, HDCP_STATE_NAME);
+		mutex_unlock(hdcp_ctrl->init_data.mutex);
+	}
+	return;
+} /* hdmi_hdcp_auth_work */
+
+int hdmi_hdcp_authenticate(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (HDCP_STATE_INACTIVE != hdcp_ctrl->hdcp_state) {
+		DEV_DBG("%s: %s: already active or activating. returning\n",
+			__func__, HDCP_STATE_NAME);
+		return 0;
+	}
+
+	DEV_DBG("%s: %s: Queuing work to start HDCP authentication", __func__,
+		HDCP_STATE_NAME);
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+	mutex_unlock(hdcp_ctrl->init_data.mutex);
+	queue_delayed_work(hdcp_ctrl->init_data.workq,
+		&hdcp_ctrl->hdcp_auth_work, 0);
+
+	return 0;
+} /* hdmi_hdcp_authenticate */
+
+int hdmi_hdcp_reauthenticate(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+	struct dss_io_data *io;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	if (HDCP_STATE_AUTH_FAIL != hdcp_ctrl->hdcp_state) {
+		DEV_DBG("%s: %s: invalid state. returning\n", __func__,
+			HDCP_STATE_NAME);
+		return 0;
+	}
+
+	/*
+	 * Disable HPD circuitry.
+	 * This is needed to reset the HDCP cipher engine so that when we
+	 * attempt a re-authentication, HW would clear the AN0_READY and
+	 * AN1_READY bits in HDMI_HDCP_LINK0_STATUS register
+	 */
+	DSS_REG_W(io, HDMI_HPD_CTRL, DSS_REG_R(hdcp_ctrl->init_data.core_io,
+		HDMI_HPD_CTRL) & ~BIT(28));
+
+	/* Disable HDCP interrupts */
+	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0);
+
+	DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));
+
+	/* Disable encryption and disable the HDCP block */
+	DSS_REG_W(io, HDMI_HDCP_CTRL, 0);
+
+	/* Enable HPD circuitry */
+	DSS_REG_W(hdcp_ctrl->init_data.core_io, HDMI_HPD_CTRL,
+		DSS_REG_R(hdcp_ctrl->init_data.core_io,
+		HDMI_HPD_CTRL) | BIT(28));
+
+	/* Restart authentication attempt */
+	DEV_DBG("%s: %s: Scheduling work to start HDCP authentication",
+		__func__, HDCP_STATE_NAME);
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	hdcp_ctrl->hdcp_state = HDCP_STATE_AUTHENTICATING;
+	mutex_unlock(hdcp_ctrl->init_data.mutex);
+	queue_delayed_work(hdcp_ctrl->init_data.workq,
+		&hdcp_ctrl->hdcp_auth_work, HZ/2);
+
+	return 0;
+} /* hdmi_hdcp_reauthenticate */
+
+void hdmi_hdcp_off(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+	struct dss_io_data *io;
+	int rc = 0;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	if (HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state) {
+		DEV_DBG("%s: %s: inactive. returning\n", __func__,
+			HDCP_STATE_NAME);
+		return;
+	}
+
+	/*
+	 * Need to set the state to inactive here so that any ongoing
+	 * reauth works will know that the HDCP session has been turned off
+	 */
+	mutex_lock(hdcp_ctrl->init_data.mutex);
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+	mutex_unlock(hdcp_ctrl->init_data.mutex);
+
+	/* Disable HDCP interrupts */
+	DSS_REG_W(io, HDMI_HDCP_INT_CTRL, 0);
+
+	/*
+	 * Cancel any pending auth/reauth attempts.
+	 * If one is ongoing, this will wait for it to finish.
+	 * No more reauthentication attempts will be scheduled since we
+	 * set the current state to inactive.
+	 */
+	rc = cancel_delayed_work_sync(&hdcp_ctrl->hdcp_auth_work);
+	if (rc)
+		DEV_DBG("%s: %s: Deleted hdcp auth work\n", __func__,
+			HDCP_STATE_NAME);
+	rc = cancel_work_sync(&hdcp_ctrl->hdcp_int_work);
+	if (rc)
+		DEV_DBG("%s: %s: Deleted hdcp int work\n", __func__,
+			HDCP_STATE_NAME);
+
+	DSS_REG_W(io, HDMI_HDCP_RESET, BIT(0));
+
+	/* Disable encryption and disable the HDCP block */
+	DSS_REG_W(io, HDMI_HDCP_CTRL, 0);
+
+	DEV_DBG("%s: %s: HDCP: Off\n", __func__, HDCP_STATE_NAME);
+} /* hdmi_hdcp_off */
+
+int hdmi_hdcp_isr(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+	int rc = 0;
+	struct dss_io_data *io;
+	u32 hdcp_int_val;
+
+	if (!hdcp_ctrl || !hdcp_ctrl->init_data.core_io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	io = hdcp_ctrl->init_data.core_io;
+
+	/* Ignore HDCP interrupts if HDCP is disabled */
+	if (HDCP_STATE_INACTIVE == hdcp_ctrl->hdcp_state)
+		return 0;
+
+	hdcp_int_val = DSS_REG_R(io, HDMI_HDCP_INT_CTRL);
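+	/*
+	 * In HDMI_HDCP_INT_CTRL, each status bit (0, 4, 8, 12) is paired
+	 * with an ACK bit directly above it (1, 5, 9, 13); writing the
+	 * register back with the ACK bit set clears that interrupt.
+	 */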
+	if (hdcp_int_val & BIT(0)) {
+		/* AUTH_SUCCESS_INT */
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(1)));
+		DEV_INFO("%s: %s: AUTH_SUCCESS_INT received\n", __func__,
+			HDCP_STATE_NAME);
+		if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state)
+			complete_all(&hdcp_ctrl->r0_checked);
+	}
+
+	if (hdcp_int_val & BIT(4)) {
+		/* AUTH_FAIL_INT */
+		u32 link_status = DSS_REG_R(io, HDMI_HDCP_LINK0_STATUS);
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(5)));
+		DEV_INFO("%s: %s: AUTH_FAIL_INT rcvd, LINK0_STATUS=0x%08x\n",
+			__func__, HDCP_STATE_NAME, link_status);
+		if (HDCP_STATE_AUTHENTICATED == hdcp_ctrl->hdcp_state) {
+			/* Inform HDMI Tx of the failure */
+			queue_work(hdcp_ctrl->init_data.workq,
+				&hdcp_ctrl->hdcp_int_work);
+			/* todo: print debug log with auth fail reason */
+		} else if (HDCP_STATE_AUTHENTICATING == hdcp_ctrl->hdcp_state) {
+			complete_all(&hdcp_ctrl->r0_checked);
+		}
+
+		/* Clear AUTH_FAIL_INFO as well */
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(7)));
+	}
+
+	if (hdcp_int_val & BIT(8)) {
+		/* DDC_XFER_REQ_INT */
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(9)));
+		DEV_INFO("%s: %s: DDC_XFER_REQ_INT received\n", __func__,
+			HDCP_STATE_NAME);
+	}
+
+	if (hdcp_int_val & BIT(12)) {
+		/* DDC_XFER_DONE_INT */
+		DSS_REG_W(io, HDMI_HDCP_INT_CTRL, (hdcp_int_val | BIT(13)));
+		DEV_INFO("%s: %s: DDC_XFER_DONE received\n", __func__,
+			HDCP_STATE_NAME);
+	}
+
+error:
+	return rc;
+} /* hdmi_hdcp_isr */
+
+void hdmi_hdcp_deinit(void *input)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = (struct hdmi_hdcp_ctrl *)input;
+
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	kfree(hdcp_ctrl);
+} /* hdmi_hdcp_deinit */
+
+void *hdmi_hdcp_init(struct hdmi_hdcp_init_data *init_data)
+{
+	struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
+
+	if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
+		!init_data->mutex || !init_data->ddc_ctrl ||
+		!init_data->notify_status || !init_data->workq ||
+		!init_data->cb_data) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto error;
+	}
+
+	hdcp_ctrl = kzalloc(sizeof(*hdcp_ctrl), GFP_KERNEL);
+	if (!hdcp_ctrl) {
+		DEV_ERR("%s: Out of memory\n", __func__);
+		goto error;
+	}
+
+	hdcp_ctrl->init_data = *init_data;
+
+	INIT_DELAYED_WORK(&hdcp_ctrl->hdcp_auth_work, hdmi_hdcp_auth_work);
+	INIT_WORK(&hdcp_ctrl->hdcp_int_work, hdmi_hdcp_int_work);
+
+	hdcp_ctrl->hdcp_state = HDCP_STATE_INACTIVE;
+	init_completion(&hdcp_ctrl->r0_checked);
+	DEV_DBG("%s: HDCP module initialized. HDCP_STATE=%s", __func__,
+		HDCP_STATE_NAME);
+
+error:
+	return (void *)hdcp_ctrl;
+} /* hdmi_hdcp_init */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_hdcp.h b/drivers/video/msm/mdss/mdss_hdmi_hdcp.h
new file mode 100644
index 0000000..d35b2a9
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_hdcp.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2012 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_HDCP_H__
+#define __MDSS_HDMI_HDCP_H__
+
+#include "mdss_hdmi_util.h"
+
+enum hdmi_hdcp_state {
+	HDCP_STATE_INACTIVE,
+	HDCP_STATE_AUTHENTICATING,
+	HDCP_STATE_AUTHENTICATED,
+	HDCP_STATE_AUTH_FAIL
+};
+
+struct hdmi_hdcp_init_data {
+	struct dss_io_data *core_io;
+	struct dss_io_data *qfprom_io;
+	struct mutex *mutex;
+	struct workqueue_struct *workq;
+	void *cb_data;
+	void (*notify_status)(void *cb_data, enum hdmi_hdcp_state status);
+
+	struct hdmi_tx_ddc_ctrl *ddc_ctrl;
+};
+
+const char *hdcp_state_name(enum hdmi_hdcp_state hdcp_state);
+void *hdmi_hdcp_init(struct hdmi_hdcp_init_data *init_data);
+void hdmi_hdcp_deinit(void *input);
+int hdmi_hdcp_isr(void *ptr);
+int hdmi_hdcp_reauthenticate(void *input);
+int hdmi_hdcp_authenticate(void *hdcp_ctrl);
+void hdmi_hdcp_off(void *hdcp_ctrl);
+#endif /* __MDSS_HDMI_HDCP_H__ */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_mhl.h b/drivers/video/msm/mdss/mdss_hdmi_mhl.h
new file mode 100644
index 0000000..8fef63e
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_mhl.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_HDMI_MHL_H__
+#define __MDSS_HDMI_MHL_H__
+
+#include <linux/platform_device.h>
+
+struct msm_hdmi_mhl_ops {
+	u8 (*tmds_enabled)(struct platform_device *pdev);
+	int (*set_mhl_max_pclk)(struct platform_device *pdev, u32 max_val);
+};
+
+int msm_hdmi_register_mhl(struct platform_device *pdev,
+			  struct msm_hdmi_mhl_ops *ops);
+
+#endif /* __MDSS_HDMI_MHL_H__ */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.c b/drivers/video/msm/mdss/mdss_hdmi_tx.c
new file mode 100644
index 0000000..e28a4e9
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.c
@@ -0,0 +1,3437 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/types.h>
+#include <mach/msm_hdmi_audio_codec.h>
+
+#define REG_DUMP 0
+
+#include "mdss_debug.h"
+#include "mdss_fb.h"
+#include "mdss_hdmi_cec.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss_hdmi_hdcp.h"
+#include "mdss_hdmi_tx.h"
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_hdmi_mhl.h"
+
+#define DRV_NAME "hdmi-tx"
+#define COMPATIBLE_NAME "qcom,hdmi-tx"
+
+#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
+
+/* HDMI PHY/PLL bit field macros */
+#define SW_RESET BIT(2)
+#define SW_RESET_PLL BIT(0)
+
+#define HPD_DISCONNECT_POLARITY 0
+#define HPD_CONNECT_POLARITY    1
+
+#define IFRAME_CHECKSUM_32(d)			\
+	((d & 0xff) + ((d >> 8) & 0xff) +	\
+	((d >> 16) & 0xff) + ((d >> 24) & 0xff))
+
+/* Enable HDCP by default */
+static bool hdcp_feature_on = true;
+
+/* Supported HDMI Audio channels */
+#define MSM_HDMI_AUDIO_CHANNEL_2	2
+#define MSM_HDMI_AUDIO_CHANNEL_4	4
+#define MSM_HDMI_AUDIO_CHANNEL_6	6
+#define MSM_HDMI_AUDIO_CHANNEL_8	8
+
+enum msm_hdmi_supported_audio_sample_rates {
+	AUDIO_SAMPLE_RATE_32KHZ,
+	AUDIO_SAMPLE_RATE_44_1KHZ,
+	AUDIO_SAMPLE_RATE_48KHZ,
+	AUDIO_SAMPLE_RATE_88_2KHZ,
+	AUDIO_SAMPLE_RATE_96KHZ,
+	AUDIO_SAMPLE_RATE_176_4KHZ,
+	AUDIO_SAMPLE_RATE_192KHZ,
+	AUDIO_SAMPLE_RATE_MAX
+};
+
+/* parameters for clock regeneration */
+struct hdmi_tx_audio_acr {
+	u32 n;
+	u32 cts;
+};
+
+struct hdmi_tx_audio_acr_arry {
+	u32 pclk;
+	struct hdmi_tx_audio_acr lut[AUDIO_SAMPLE_RATE_MAX];
+};
+
+static int hdmi_tx_sysfs_enable_hpd(struct hdmi_tx_ctrl *hdmi_ctrl, int on);
+static irqreturn_t hdmi_tx_isr(int irq, void *data);
+static void hdmi_tx_hpd_off(struct hdmi_tx_ctrl *hdmi_ctrl);
+
+struct mdss_hw hdmi_tx_hw = {
+	.hw_ndx = MDSS_HW_HDMI,
+	.ptr = NULL,
+	.irq_handler = hdmi_tx_isr,
+};
+
+struct dss_gpio hpd_gpio_config[] = {
+	{0, 1, COMPATIBLE_NAME "-hpd"},
+	{0, 1, COMPATIBLE_NAME "-ddc-clk"},
+	{0, 1, COMPATIBLE_NAME "-ddc-data"},
+	{0, 1, COMPATIBLE_NAME "-mux-en"},
+	{0, 0, COMPATIBLE_NAME "-mux-sel"}
+};
+
+struct dss_gpio core_gpio_config[] = {
+};
+
+struct dss_gpio cec_gpio_config[] = {
+	{0, 1, COMPATIBLE_NAME "-cec"}
+};
+
+const char *hdmi_pm_name(enum hdmi_tx_power_module_type module)
+{
+	switch (module) {
+	case HDMI_TX_HPD_PM:	return "HDMI_TX_HPD_PM";
+	case HDMI_TX_CORE_PM:	return "HDMI_TX_CORE_PM";
+	case HDMI_TX_CEC_PM:	return "HDMI_TX_CEC_PM";
+	default: return "???";
+	}
+} /* hdmi_pm_name */
+
+static u8 hdmi_tx_avi_iframe_lut[][20] = {
+	{0x10,	0x10,	0x10,	0x10,	0x10,	0x10,	0x10,	0x10,	0x10,
+	 0x10,	0x10,	0x10,	0x10,	0x10,	0x10,	0x10,	0x10,	0x10,
+	 0x10,	0x10}, /*00*/
+	{0x18,	0x18,	0x28,	0x28,	0x28,	0x28,	0x28,	0x28,	0x28,
+	 0x28,	0x28,	0x28,	0x28,	0x18,	0x28,	0x18,	0x28,	0x28,
+	 0x28,	0x28}, /*01*/
+	{0x00,	0x04,	0x04,	0x04,	0x04,	0x04,	0x04,	0x04,	0x04,
+	 0x04,	0x04,	0x04,	0x04,	0x88,	0x00,	0x04,	0x04,	0x04,
+	 0x04,	0x04}, /*02*/
+	{0x02,	0x06,	0x11,	0x15,	0x04,	0x13,	0x10,	0x05,	0x1F,
+	 0x14,	0x20,	0x22,	0x21,	0x01,	0x03,	0x11,	0x00,	0x00,
+	 0x00,	0x00}, /*03*/
+	{0x00,	0x01,	0x00,	0x01,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00}, /*04*/
+	{0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00}, /*05*/
+	{0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00}, /*06*/
+	{0xE1,	0xE1,	0x41,	0x41,	0xD1,	0xD1,	0x39,	0x39,	0x39,
+	 0x39,	0x39,	0x39,	0x39,	0xE1,	0xE1,	0x41,	0x71,	0x71,
+	 0x71,	0x71}, /*07*/
+	{0x01,	0x01,	0x02,	0x02,	0x02,	0x02,	0x04,	0x04,	0x04,
+	 0x04,	0x04,	0x04,	0x04,	0x01,	0x01,	0x02,	0x08,	0x08,
+	 0x08,	0x08}, /*08*/
+	{0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00}, /*09*/
+	{0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,	0x00,
+	 0x00,	0x00}, /*10*/
+	{0xD1,	0xD1,	0xD1,	0xD1,	0x01,	0x01,	0x81,	0x81,	0x81,
+	 0x81,	0x81,	0x81,	0x81,	0x81,	0xD1,	0xD1,	0x01,	0x01,
+	 0x01,	0x01}, /*11*/
+	{0x02,	0x02,	0x02,	0x02,	0x05,	0x05,	0x07,	0x07,	0x07,
+	 0x07,	0x07,	0x07,	0x07,	0x02,	0x02,	0x02,	0x0F,	0x0F,
+	 0x0F,	0x10}  /*12*/
+};
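+
+/*
+ * Each row of hdmi_tx_avi_iframe_lut above supplies one AVI InfoFrame
+ * data byte (row 0 -> data byte 01, ..., row 12 -> data byte 13), and
+ * each of the 20 columns corresponds to one video mode, selected by the
+ * switch statement in hdmi_tx_set_avi_infoframe().
+ */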
+
+/* Audio constants lookup table for hdmi_tx_audio_acr_setup */
+/* Valid pixel-clock rates: 25.2, 27, 27.03, 74.25, 148.5 and 297 MHz */
+static const struct hdmi_tx_audio_acr_arry hdmi_tx_audio_acr_lut[] = {
+	/*  25.200MHz  */
+	{25200, {{4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
+		{12288, 25200}, {25088, 28000}, {24576, 25200} } },
+	/*  27.000MHz  */
+	{27000, {{4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
+		{12288, 27000}, {25088, 30000}, {24576, 27000} } },
+	/*  27.027MHz */
+	{27030, {{4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
+		{12288, 27027}, {25088, 30030}, {24576, 27027} } },
+	/*  74.250MHz */
+	{74250, {{4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
+		{12288, 74250}, {25088, 82500}, {24576, 74250} } },
+	/* 148.500MHz */
+	{148500, {{4096, 148500}, {6272, 165000}, {6144, 148500},
+		{12544, 165000}, {12288, 148500}, {25088, 165000},
+		{24576, 148500} } },
+	/* 297.000MHz */
+	{297000, {{3072, 222750}, {4704, 247500}, {5120, 247500},
+		{9408, 247500}, {10240, 247500}, {18816, 247500},
+		{20480, 247500} } },
+};
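+
+/*
+ * Each {N, CTS} pair above satisfies the HDMI audio clock regeneration
+ * relation 128 * fs = f_tmds * N / CTS, with f_tmds taken as the pixel
+ * clock here. A quick sanity check for 48 kHz audio at 74.25 MHz:
+ * 74,250,000 * 6144 / 74,250 = 6,144,000 = 128 * 48,000.
+ */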
+
+const char *hdmi_tx_pm_name(enum hdmi_tx_power_module_type module)
+{
+	switch (module) {
+	case HDMI_TX_HPD_PM:	return "HDMI_TX_HPD_PM";
+	case HDMI_TX_CORE_PM:	return "HDMI_TX_CORE_PM";
+	case HDMI_TX_CEC_PM:	return "HDMI_TX_CEC_PM";
+	default: return "???";
+	}
+} /* hdmi_tx_pm_name */
+
+static const char *hdmi_tx_io_name(u32 type)
+{
+	switch (type) {
+	case HDMI_TX_CORE_IO:	return "core_physical";
+	case HDMI_TX_PHY_IO:	return "phy_physical";
+	case HDMI_TX_QFPROM_IO:	return "qfprom_physical";
+	default:		return NULL;
+	}
+} /* hdmi_tx_io_name */
+
+static int hdmi_tx_get_vic_from_panel_info(struct hdmi_tx_ctrl *hdmi_ctrl,
+	struct mdss_panel_info *pinfo)
+{
+	int new_vic = -1;
+	u32 h_total, v_total;
+	struct msm_hdmi_mode_timing_info timing;
+
+	if (!hdmi_ctrl || !pinfo) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (pinfo->vic) {
+		if (hdmi_get_supported_mode(pinfo->vic)) {
+			new_vic = pinfo->vic;
+			DEV_DBG("%s: %s is supported\n", __func__,
+				msm_hdmi_mode_2string(new_vic));
+		} else {
+			DEV_ERR("%s: invalid or not supported vic %d\n",
+				__func__, pinfo->vic);
+			return -EPERM;
+		}
+	} else {
+		timing.active_h = pinfo->xres;
+		timing.back_porch_h = pinfo->lcdc.h_back_porch;
+		timing.front_porch_h = pinfo->lcdc.h_front_porch;
+		timing.pulse_width_h = pinfo->lcdc.h_pulse_width;
+		h_total = timing.active_h + timing.back_porch_h +
+			timing.front_porch_h + timing.pulse_width_h;
+		DEV_DBG("%s: ah=%d bph=%d fph=%d pwh=%d ht=%d\n", __func__,
+			timing.active_h, timing.back_porch_h,
+			timing.front_porch_h, timing.pulse_width_h, h_total);
+
+		timing.active_v = pinfo->yres;
+		timing.back_porch_v = pinfo->lcdc.v_back_porch;
+		timing.front_porch_v = pinfo->lcdc.v_front_porch;
+		timing.pulse_width_v = pinfo->lcdc.v_pulse_width;
+		v_total = timing.active_v + timing.back_porch_v +
+			timing.front_porch_v + timing.pulse_width_v;
+		DEV_DBG("%s: av=%d bpv=%d fpv=%d pwv=%d vt=%d\n", __func__,
+			timing.active_v, timing.back_porch_v,
+			timing.front_porch_v, timing.pulse_width_v, v_total);
+
+		timing.pixel_freq = pinfo->clk_rate / 1000;
+		if (h_total && v_total) {
+			timing.refresh_rate = ((timing.pixel_freq * 1000) /
+				(h_total * v_total)) * 1000;
+		} else {
+			DEV_ERR("%s: cannot cal refresh rate\n", __func__);
+			return -EPERM;
+		}
+		DEV_DBG("%s: pixel_freq=%d refresh_rate=%d\n", __func__,
+			timing.pixel_freq, timing.refresh_rate);
+
+		new_vic = hdmi_get_video_id_code(&timing);
+	}
+
+	return new_vic;
+} /* hdmi_tx_get_vic_from_panel_info */
+
+static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_panel_data(
+	struct mdss_panel_data *mpd)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	if (mpd) {
+		hdmi_ctrl = container_of(mpd, struct hdmi_tx_ctrl, panel_data);
+		if (!hdmi_ctrl)
+			DEV_ERR("%s: hdmi_ctrl = NULL\n", __func__);
+	} else {
+		DEV_ERR("%s: mdss_panel_data = NULL\n", __func__);
+	}
+	return hdmi_ctrl;
+} /* hdmi_tx_get_drvdata_from_panel_data */
+
+static struct hdmi_tx_ctrl *hdmi_tx_get_drvdata_from_sysfs_dev(
+	struct device *device)
+{
+	struct msm_fb_data_type *mfd = NULL;
+	struct mdss_panel_data *panel_data = NULL;
+	struct fb_info *fbi = dev_get_drvdata(device);
+
+	if (fbi) {
+		mfd = (struct msm_fb_data_type *)fbi->par;
+		panel_data = dev_get_platdata(&mfd->pdev->dev);
+
+		return hdmi_tx_get_drvdata_from_panel_data(panel_data);
+	} else {
+		DEV_ERR("%s: fbi = NULL\n", __func__);
+		return NULL;
+	}
+} /* hdmi_tx_get_drvdata_from_sysfs_dev */
+
+/* todo: Fix this. Right now this is declared in hdmi_util.h */
+void *hdmi_get_featuredata_from_sysfs_dev(struct device *device,
+	u32 feature_type)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	if (!device || feature_type > HDMI_TX_FEAT_MAX) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(device);
+	if (hdmi_ctrl)
+		return hdmi_ctrl->feature_data[feature_type];
+	else
+		return NULL;
+} /* hdmi_get_featuredata_from_sysfs_dev */
+EXPORT_SYMBOL(hdmi_get_featuredata_from_sysfs_dev);
+
+static ssize_t hdmi_tx_sysfs_rda_connected(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hdmi_ctrl->mutex);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->hpd_state);
+	DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_state);
+	mutex_unlock(&hdmi_ctrl->mutex);
+
+	return ret;
+} /* hdmi_tx_sysfs_rda_connected */
+
+static ssize_t hdmi_tx_sysfs_rda_hpd(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->hpd_feature_on);
+	DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_feature_on);
+
+	return ret;
+} /* hdmi_tx_sysfs_rda_hpd */
+
+static ssize_t hdmi_tx_sysfs_wta_hpd(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	int hpd, rc = 0;
+	ssize_t ret = strnlen(buf, PAGE_SIZE);
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	DEV_DBG("%s:\n", __func__);
+	hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = kstrtoint(buf, 10, &hpd);
+	if (rc) {
+		DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+		return rc;
+	}
+
+	if (0 == hpd && hdmi_ctrl->hpd_feature_on) {
+		rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, false);
+	} else if (1 == hpd && !hdmi_ctrl->hpd_feature_on) {
+		rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+	} else {
+		DEV_DBG("%s: hpd is already '%s'. return\n", __func__,
+			hdmi_ctrl->hpd_feature_on ? "enabled" : "disabled");
+		return ret;
+	}
+
+	if (!rc) {
+		hdmi_ctrl->hpd_feature_on =
+			(~hdmi_ctrl->hpd_feature_on) & BIT(0);
+		DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->hpd_feature_on);
+	} else {
+		DEV_ERR("%s: failed to '%s' hpd. rc = %d\n", __func__,
+			hpd ? "enable" : "disable", rc);
+		ret = rc;
+	}
+
+	return ret;
+} /* hdmi_tx_sysfs_wta_hpd */
+
+static ssize_t hdmi_tx_sysfs_wta_vendor_name(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret;
+	u8 *s = (u8 *) buf;
+	u8 *d = NULL;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	d = hdmi_ctrl->spd_vendor_name;
+	ret = strnlen(buf, PAGE_SIZE);
+	ret = (ret > 8) ? 8 : ret;
+
+	memset(hdmi_ctrl->spd_vendor_name, 0, 8);
+	while (*s) {
+		if (*s & 0x60 && *s ^ 0x7f) {
+			*d = *s;
+		} else {
+			/* stop copying if control character found */
+			break;
+		}
+
+		if (++s > (u8 *) (buf + ret))
+			break;
+
+		d++;
+	}
+
+	DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_vendor_name);
+
+	return ret;
+} /* hdmi_tx_sysfs_wta_vendor_name */
+
+static ssize_t hdmi_tx_sysfs_rda_vendor_name(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "%s\n", hdmi_ctrl->spd_vendor_name);
+	DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_vendor_name);
+
+	return ret;
+} /* hdmi_tx_sysfs_rda_vendor_name */
+
+static ssize_t hdmi_tx_sysfs_wta_product_description(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	ssize_t ret;
+	u8 *s = (u8 *) buf;
+	u8 *d = NULL;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	d = hdmi_ctrl->spd_product_description;
+	ret = strnlen(buf, PAGE_SIZE);
+	ret = (ret > 16) ? 16 : ret;
+
+	memset(hdmi_ctrl->spd_product_description, 0, 16);
+	while (*s) {
+		if (*s & 0x60 && *s ^ 0x7f) {
+			*d = *s;
+		} else {
+			/* stop copying if control character found */
+			break;
+		}
+
+		if (++s > (u8 *) (buf + ret))
+			break;
+
+		d++;
+	}
+
+	DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_product_description);
+
+	return ret;
+} /* hdmi_tx_sysfs_wta_product_description */
+
+static ssize_t hdmi_tx_sysfs_rda_product_description(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = snprintf(buf, PAGE_SIZE, "%s\n",
+		hdmi_ctrl->spd_product_description);
+	DEV_DBG("%s: '%s'\n", __func__, hdmi_ctrl->spd_product_description);
+
+	return ret;
+} /* hdmi_tx_sysfs_rda_product_description */
+
+static DEVICE_ATTR(connected, S_IRUGO, hdmi_tx_sysfs_rda_connected, NULL);
+static DEVICE_ATTR(hpd, S_IRUGO | S_IWUSR, hdmi_tx_sysfs_rda_hpd,
+	hdmi_tx_sysfs_wta_hpd);
+static DEVICE_ATTR(vendor_name, S_IRUGO | S_IWUSR,
+	hdmi_tx_sysfs_rda_vendor_name, hdmi_tx_sysfs_wta_vendor_name);
+static DEVICE_ATTR(product_description, S_IRUGO | S_IWUSR,
+	hdmi_tx_sysfs_rda_product_description,
+	hdmi_tx_sysfs_wta_product_description);
+
+static struct attribute *hdmi_tx_fs_attrs[] = {
+	&dev_attr_connected.attr,
+	&dev_attr_hpd.attr,
+	&dev_attr_vendor_name.attr,
+	&dev_attr_product_description.attr,
+	NULL,
+};
+static struct attribute_group hdmi_tx_fs_attrs_group = {
+	.attrs = hdmi_tx_fs_attrs,
+};
+
+static int hdmi_tx_sysfs_create(struct hdmi_tx_ctrl *hdmi_ctrl,
+	struct fb_info *fbi)
+{
+	int rc;
+
+	if (!hdmi_ctrl || !fbi) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	rc = sysfs_create_group(&fbi->dev->kobj,
+		&hdmi_tx_fs_attrs_group);
+	if (rc) {
+		DEV_ERR("%s: failed, rc=%d\n", __func__, rc);
+		return rc;
+	}
+	hdmi_ctrl->kobj = &fbi->dev->kobj;
+	DEV_DBG("%s: sysfs group %p\n", __func__, hdmi_ctrl->kobj);
+
+	return 0;
+} /* hdmi_tx_sysfs_create */
+
+static void hdmi_tx_sysfs_remove(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	if (hdmi_ctrl->kobj)
+		sysfs_remove_group(hdmi_ctrl->kobj, &hdmi_tx_fs_attrs_group);
+	hdmi_ctrl->kobj = NULL;
+} /* hdmi_tx_sysfs_remove */
+
+static inline u32 hdmi_tx_is_dvi_mode(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	return hdmi_edid_get_sink_mode(
+		hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID]) ? 0 : 1;
+} /* hdmi_tx_is_dvi_mode */
+
+static inline void hdmi_tx_send_cable_notification(
+	struct hdmi_tx_ctrl *hdmi_ctrl, int val)
+{
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (!hdmi_ctrl->pdata.primary && (hdmi_ctrl->sdev.state != val))
+		switch_set_state(&hdmi_ctrl->sdev, val);
+} /* hdmi_tx_send_cable_notification */
+
+static inline void hdmi_tx_set_audio_switch_node(struct hdmi_tx_ctrl *hdmi_ctrl,
+	int val, bool force)
+{
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) &&
+		(force || (hdmi_ctrl->audio_sdev.state != val))) {
+		switch_set_state(&hdmi_ctrl->audio_sdev, val);
+		DEV_INFO("%s: hdmi_audio state switched to %d\n", __func__,
+			hdmi_ctrl->audio_sdev.state);
+	}
+} /* hdmi_tx_set_audio_switch_node */
+
+void hdmi_tx_hdcp_cb(void *ptr, enum hdmi_hdcp_state status)
+{
+	int rc = 0;
+	struct hdmi_tx_ctrl *hdmi_ctrl = (struct hdmi_tx_ctrl *)ptr;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	DEV_DBG("%s: HDCP status=%s hpd_state=%d\n", __func__,
+		hdcp_state_name(status), hdmi_ctrl->hpd_state);
+
+	switch (status) {
+	case HDCP_STATE_AUTHENTICATED:
+		if (hdmi_ctrl->hpd_state)
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1, false);
+		break;
+	case HDCP_STATE_AUTH_FAIL:
+		hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0, false);
+
+		if (hdmi_ctrl->hpd_state) {
+			DEV_DBG("%s: Reauthenticating\n", __func__);
+			rc = hdmi_hdcp_reauthenticate(
+				hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]);
+			if (rc)
+				DEV_ERR("%s: HDCP reauth failed. rc=%d\n",
+					__func__, rc);
+		} else {
+			DEV_DBG("%s: Not reauthenticating. Cable not conn\n",
+				__func__);
+		}
+
+		break;
+	case HDCP_STATE_AUTHENTICATING:
+	case HDCP_STATE_INACTIVE:
+	default:
+		/* do nothing */
+		break;
+	}
+}
+
+/* Enable HDMI features */
+static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct hdmi_edid_init_data edid_init_data;
+	struct hdmi_hdcp_init_data hdcp_init_data;
+	struct hdmi_cec_init_data cec_init_data;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Initialize EDID feature */
+	edid_init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	edid_init_data.mutex = &hdmi_ctrl->mutex;
+	edid_init_data.sysfs_kobj = hdmi_ctrl->kobj;
+	edid_init_data.ddc_ctrl = &hdmi_ctrl->ddc_ctrl;
+
+	hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID] =
+		hdmi_edid_init(&edid_init_data);
+	if (!hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID]) {
+		DEV_ERR("%s: hdmi_edid_init failed\n", __func__);
+		return -EPERM;
+	}
+	hdmi_edid_set_video_resolution(
+		hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID],
+		hdmi_ctrl->video_resolution);
+
+	/* Initialize HDCP feature */
+	if (hdmi_ctrl->present_hdcp) {
+		hdcp_init_data.core_io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+		hdcp_init_data.qfprom_io =
+			&hdmi_ctrl->pdata.io[HDMI_TX_QFPROM_IO];
+		hdcp_init_data.mutex = &hdmi_ctrl->mutex;
+		hdcp_init_data.ddc_ctrl = &hdmi_ctrl->ddc_ctrl;
+		hdcp_init_data.workq = hdmi_ctrl->workq;
+		hdcp_init_data.notify_status = hdmi_tx_hdcp_cb;
+		hdcp_init_data.cb_data = (void *)hdmi_ctrl;
+
+		hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP] =
+			hdmi_hdcp_init(&hdcp_init_data);
+		if (!hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]) {
+			DEV_ERR("%s: hdmi_hdcp_init failed\n", __func__);
+			hdmi_edid_deinit(hdmi_ctrl->feature_data[
+				HDMI_TX_FEAT_EDID]);
+			hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID] = NULL;
+			return -EPERM;
+		}
+
+		DEV_DBG("%s: HDCP feature initialized\n", __func__);
+	}
+
+	/* Initialize CEC feature */
+	cec_init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	cec_init_data.sysfs_kobj = hdmi_ctrl->kobj;
+	cec_init_data.workq = hdmi_ctrl->workq;
+
+	hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC] =
+		hdmi_cec_init(&cec_init_data);
+	if (!hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC])
+		DEV_WARN("%s: hdmi_cec_init failed\n", __func__);
+
+	return 0;
+} /* hdmi_tx_init_features */
+
+static inline u32 hdmi_tx_is_controller_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct dss_io_data *io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	return DSS_REG_R_ND(io, HDMI_CTRL) & BIT(0);
+} /* hdmi_tx_is_controller_on */
+
+static int hdmi_tx_init_panel_info(uint32_t resolution,
+	struct mdss_panel_info *pinfo)
+{
+	const struct msm_hdmi_mode_timing_info *timing =
+		hdmi_get_supported_mode(resolution);
+
+	if (!timing || !pinfo) {
+		DEV_ERR("%s: invalid input.\n", __func__);
+		return -EINVAL;
+	}
+
+	pinfo->xres = timing->active_h;
+	pinfo->yres = timing->active_v;
+	pinfo->clk_rate = timing->pixel_freq*1000;
+
+	pinfo->lcdc.h_back_porch = timing->back_porch_h;
+	pinfo->lcdc.h_front_porch = timing->front_porch_h;
+	pinfo->lcdc.h_pulse_width = timing->pulse_width_h;
+	pinfo->lcdc.v_back_porch = timing->back_porch_v;
+	pinfo->lcdc.v_front_porch = timing->front_porch_v;
+	pinfo->lcdc.v_pulse_width = timing->pulse_width_v;
+
+	pinfo->type = DTV_PANEL;
+	pinfo->pdest = DISPLAY_2;
+	pinfo->wait_cycle = 0;
+	pinfo->bpp = 24;
+	pinfo->fb_num = 1;
+
+	pinfo->lcdc.border_clr = 0; /* blk */
+	pinfo->lcdc.underflow_clr = 0xff; /* blue */
+	pinfo->lcdc.hsync_skew = 0;
+
+	return 0;
+} /* hdmi_tx_init_panel_info */
+
+/* Table tuned to indicate video formats supported by the MHL Tx */
+/* Valid pclk rates (MHz): 25.2, 27, 27.03, 74.25 */
+static void hdmi_tx_setup_mhl_video_mode_lut(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 i;
+	struct msm_hdmi_mode_timing_info *temp_timing;
+
+	if (!hdmi_ctrl->mhl_max_pclk) {
+		DEV_WARN("%s: mhl max pclk not set!\n", __func__);
+		return;
+	}
+	DEV_DBG("%s: max mode set to [%u]\n",
+		__func__, hdmi_ctrl->mhl_max_pclk);
+	for (i = 0; i < HDMI_VFRMT_MAX; i++) {
+		temp_timing =
+		(struct msm_hdmi_mode_timing_info *)hdmi_get_supported_mode(i);
+		if (!temp_timing)
+			continue;
+		/* formats that exceed max mhl line clk bw */
+		if (temp_timing->pixel_freq > hdmi_ctrl->mhl_max_pclk)
+			hdmi_del_supported_mode(i);
+	}
+} /* hdmi_tx_setup_mhl_video_mode_lut */
+
+static int hdmi_tx_read_sink_info(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int status;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!hdmi_tx_is_controller_on(hdmi_ctrl)) {
+		DEV_ERR("%s: failed: HDMI controller is off", __func__);
+		status = -ENXIO;
+		goto error;
+	}
+
+	hdmi_ddc_config(&hdmi_ctrl->ddc_ctrl);
+
+	status = hdmi_edid_read(hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID]);
+	if (!status)
+		DEV_DBG("%s: hdmi_edid_read success\n", __func__);
+	else
+		DEV_ERR("%s: hdmi_edid_read failed\n", __func__);
+
+error:
+	return status;
+} /* hdmi_tx_read_sink_info */
+
+static void hdmi_tx_hpd_int_work(struct work_struct *work)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	hdmi_ctrl = container_of(work, struct hdmi_tx_ctrl, hpd_int_work);
+	if (!hdmi_ctrl || !hdmi_ctrl->hpd_initialized) {
+		DEV_DBG("%s: invalid input\n", __func__);
+		return;
+	}
+
+	DEV_DBG("%s: Got HPD interrupt\n", __func__);
+
+	if (hdmi_ctrl->hpd_state) {
+		hdmi_tx_read_sink_info(hdmi_ctrl);
+		hdmi_tx_send_cable_notification(hdmi_ctrl, 1);
+		DEV_INFO("%s: sense cable CONNECTED: state switch to %d\n",
+			__func__, hdmi_ctrl->sdev.state);
+	} else {
+		hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+		DEV_INFO("%s: sense cable DISCONNECTED: state switch to %d\n",
+			__func__, hdmi_ctrl->sdev.state);
+	}
+
+	if (!completion_done(&hdmi_ctrl->hpd_done))
+		complete_all(&hdmi_ctrl->hpd_done);
+} /* hdmi_tx_hpd_int_work */
+
+static int hdmi_tx_check_capability(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 hdmi_disabled, hdcp_disabled;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_QFPROM_IO];
+	if (!io->base) {
+		DEV_ERR("%s: QFPROM io is not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	hdcp_disabled = DSS_REG_R_ND(io,
+		QFPROM_RAW_FEAT_CONFIG_ROW0_LSB) & BIT(31);
+
+	hdmi_disabled = DSS_REG_R_ND(io,
+		QFPROM_RAW_FEAT_CONFIG_ROW0_MSB) & BIT(0);
+
+	DEV_DBG("%s: Features <HDMI:%s, HDCP:%s>\n", __func__,
+		hdmi_disabled ? "OFF" : "ON", hdcp_disabled ? "OFF" : "ON");
+
+	if (hdmi_disabled) {
+		DEV_ERR("%s: HDMI disabled\n", __func__);
+		return -ENODEV;
+	}
+
+	if (hdcp_disabled) {
+		hdmi_ctrl->present_hdcp = 0;
+		DEV_WARN("%s: HDCP disabled\n", __func__);
+	} else {
+		hdmi_ctrl->present_hdcp = 1;
+		DEV_DBG("%s: Device is HDCP enabled\n", __func__);
+	}
+
+	return 0;
+} /* hdmi_tx_check_capability */
+
+static int hdmi_tx_set_video_fmt(struct hdmi_tx_ctrl *hdmi_ctrl,
+	struct mdss_panel_info *pinfo)
+{
+	int new_vic = -1;
+	const struct msm_hdmi_mode_timing_info *timing = NULL;
+
+	if (!hdmi_ctrl || !pinfo) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	new_vic = hdmi_tx_get_vic_from_panel_info(hdmi_ctrl, pinfo);
+	if ((new_vic < 0) || (new_vic > HDMI_VFRMT_MAX)) {
+		DEV_ERR("%s: invalid or not supported vic\n", __func__);
+		return -EPERM;
+	}
+
+	DEV_DBG("%s: switching from %s => %s", __func__,
+		msm_hdmi_mode_2string(hdmi_ctrl->video_resolution),
+		msm_hdmi_mode_2string(new_vic));
+
+	hdmi_ctrl->video_resolution = (u32)new_vic;
+
+	timing = hdmi_get_supported_mode(hdmi_ctrl->video_resolution);
+
+	/* todo: find a better way */
+	hdmi_ctrl->pdata.power_data[HDMI_TX_CORE_PM].clk_config[0].rate =
+		timing->pixel_freq * 1000;
+
+	hdmi_edid_set_video_resolution(
+		hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID],
+		hdmi_ctrl->video_resolution);
+
+	return 0;
+} /* hdmi_tx_set_video_fmt */
+
+static int hdmi_tx_video_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
+	int video_format)
+{
+	u32 total_v   = 0;
+	u32 total_h   = 0;
+	u32 start_h   = 0;
+	u32 end_h     = 0;
+	u32 start_v   = 0;
+	u32 end_v     = 0;
+	struct dss_io_data *io = NULL;
+
+	const struct msm_hdmi_mode_timing_info *timing =
+		hdmi_get_supported_mode(video_format);
+	if (timing == NULL) {
+		DEV_ERR("%s: video format not supported: %d\n", __func__,
+			video_format);
+		return -EPERM;
+	}
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return -EPERM;
+	}
+
+	total_h = timing->active_h + timing->front_porch_h +
+		timing->back_porch_h + timing->pulse_width_h - 1;
+	total_v = timing->active_v + timing->front_porch_v +
+		timing->back_porch_v + timing->pulse_width_v - 1;
+	if (((total_v << 16) & 0xE0000000) || (total_h & 0xFFFFE000)) {
+		DEV_ERR("%s: total v=%d or h=%d is larger than supported\n",
+			__func__, total_v, total_h);
+		return -EPERM;
+	}
+	DSS_REG_W(io, HDMI_TOTAL, (total_v << 16) | (total_h << 0));
+
+	start_h = timing->back_porch_h + timing->pulse_width_h;
+	end_h   = (total_h + 1) - timing->front_porch_h;
+	if (((end_h << 16) & 0xE0000000) || (start_h & 0xFFFFE000)) {
+		DEV_ERR("%s: end_h=%d or start_h=%d is larger than supported\n",
+			__func__, end_h, start_h);
+		return -EPERM;
+	}
+	DSS_REG_W(io, HDMI_ACTIVE_H, (end_h << 16) | (start_h << 0));
+
+	start_v = timing->back_porch_v + timing->pulse_width_v - 1;
+	end_v   = total_v - timing->front_porch_v;
+	if (((end_v << 16) & 0xE0000000) || (start_v & 0xFFFFE000)) {
+		DEV_ERR("%s: end_v=%d or start_v=%d is larger than supported\n",
+			__func__, end_v, start_v);
+		return -EPERM;
+	}
+	DSS_REG_W(io, HDMI_ACTIVE_V, (end_v << 16) | (start_v << 0));
+
+	if (timing->interlaced) {
+		DSS_REG_W(io, HDMI_V_TOTAL_F2, (total_v + 1) << 0);
+		DSS_REG_W(io, HDMI_ACTIVE_V_F2,
+			((end_v + 1) << 16) | ((start_v + 1) << 0));
+	} else {
+		DSS_REG_W(io, HDMI_V_TOTAL_F2, 0);
+		DSS_REG_W(io, HDMI_ACTIVE_V_F2, 0);
+	}
+
+	DSS_REG_W(io, HDMI_FRAME_CTRL,
+		((timing->interlaced << 31) & 0x80000000) |
+		((timing->active_low_h << 29) & 0x20000000) |
+		((timing->active_low_v << 28) & 0x10000000));
+
+	return 0;
+} /* hdmi_tx_video_setup */
+
+static void hdmi_tx_set_avi_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int i, mode = 0;
+	u8 avi_iframe[16]; /* two header + length + 13 data */
+	u8 checksum;
+	u32 sum, reg_val;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return;
+	}
+
+	switch (hdmi_ctrl->video_resolution) {
+	case HDMI_VFRMT_720x480p60_4_3:
+		mode = 0;
+		break;
+	case HDMI_VFRMT_720x480i60_16_9:
+		mode = 1;
+		break;
+	case HDMI_VFRMT_720x576p50_16_9:
+		mode = 2;
+		break;
+	case HDMI_VFRMT_720x576i50_16_9:
+		mode = 3;
+		break;
+	case HDMI_VFRMT_1280x720p60_16_9:
+		mode = 4;
+		break;
+	case HDMI_VFRMT_1280x720p50_16_9:
+		mode = 5;
+		break;
+	case HDMI_VFRMT_1920x1080p60_16_9:
+		mode = 6;
+		break;
+	case HDMI_VFRMT_1920x1080i60_16_9:
+		mode = 7;
+		break;
+	case HDMI_VFRMT_1920x1080p50_16_9:
+		mode = 8;
+		break;
+	case HDMI_VFRMT_1920x1080i50_16_9:
+		mode = 9;
+		break;
+	case HDMI_VFRMT_1920x1080p24_16_9:
+		mode = 10;
+		break;
+	case HDMI_VFRMT_1920x1080p30_16_9:
+		mode = 11;
+		break;
+	case HDMI_VFRMT_1920x1080p25_16_9:
+		mode = 12;
+		break;
+	case HDMI_VFRMT_640x480p60_4_3:
+		mode = 13;
+		break;
+	case HDMI_VFRMT_720x480p60_16_9:
+		mode = 14;
+		break;
+	case HDMI_VFRMT_720x576p50_4_3:
+		mode = 15;
+		break;
+	case HDMI_VFRMT_3840x2160p30_16_9:
+		mode = 16;
+		break;
+	case HDMI_VFRMT_3840x2160p25_16_9:
+		mode = 17;
+		break;
+	case HDMI_VFRMT_3840x2160p24_16_9:
+		mode = 18;
+		break;
+	case HDMI_VFRMT_4096x2160p24_16_9:
+		mode = 19;
+		break;
+	default:
+		DEV_INFO("%s: mode %d not supported\n", __func__,
+			hdmi_ctrl->video_resolution);
+		return;
+	}
+
+	/* InfoFrame Type = 82 */
+	avi_iframe[0]  = 0x82;
+	/* Version = 2 */
+	avi_iframe[1]  = 2;
+	/* Length of AVI InfoFrame = 13 */
+	avi_iframe[2]  = 13;
+
+	/* Data Byte 01: 0 Y1 Y0 A0 B1 B0 S1 S0 */
+	avi_iframe[3]  = hdmi_tx_avi_iframe_lut[0][mode];
+	avi_iframe[3] |= hdmi_edid_get_sink_scaninfo(
+		hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID],
+		hdmi_ctrl->video_resolution);
+
+	/* Data Byte 02: C1 C0 M1 M0 R3 R2 R1 R0 */
+	avi_iframe[4]  = hdmi_tx_avi_iframe_lut[1][mode];
+	/* Data Byte 03: ITC EC2 EC1 EC0 Q1 Q0 SC1 SC0 */
+	avi_iframe[5]  = hdmi_tx_avi_iframe_lut[2][mode];
+	/* Data Byte 04: 0 VIC6 VIC5 VIC4 VIC3 VIC2 VIC1 VIC0 */
+	avi_iframe[6]  = hdmi_tx_avi_iframe_lut[3][mode];
+	/* Data Byte 05: 0 0 0 0 PR3 PR2 PR1 PR0 */
+	avi_iframe[7]  = hdmi_tx_avi_iframe_lut[4][mode];
+	/* Data Byte 06: LSB Line No of End of Top Bar */
+	avi_iframe[8]  = hdmi_tx_avi_iframe_lut[5][mode];
+	/* Data Byte 07: MSB Line No of End of Top Bar */
+	avi_iframe[9]  = hdmi_tx_avi_iframe_lut[6][mode];
+	/* Data Byte 08: LSB Line No of Start of Bottom Bar */
+	avi_iframe[10] = hdmi_tx_avi_iframe_lut[7][mode];
+	/* Data Byte 09: MSB Line No of Start of Bottom Bar */
+	avi_iframe[11] = hdmi_tx_avi_iframe_lut[8][mode];
+	/* Data Byte 10: LSB Pixel Number of End of Left Bar */
+	avi_iframe[12] = hdmi_tx_avi_iframe_lut[9][mode];
+	/* Data Byte 11: MSB Pixel Number of End of Left Bar */
+	avi_iframe[13] = hdmi_tx_avi_iframe_lut[10][mode];
+	/* Data Byte 12: LSB Pixel Number of Start of Right Bar */
+	avi_iframe[14] = hdmi_tx_avi_iframe_lut[11][mode];
+	/* Data Byte 13: MSB Pixel Number of Start of Right Bar */
+	avi_iframe[15] = hdmi_tx_avi_iframe_lut[12][mode];
+
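+	/*
+	 * InfoFrame checksum: all packet bytes, including the checksum
+	 * byte itself, must sum to 0 mod 256, hence 256 minus the byte
+	 * sum of header and payload.
+	 */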
+	sum = 0;
+	for (i = 0; i < 16; i++)
+		sum += avi_iframe[i];
+	sum &= 0xFF;
+	sum = 256 - sum;
+	checksum = (u8) sum;
+
+	reg_val = avi_iframe[5];
+	reg_val = reg_val << 8 | avi_iframe[4];
+	reg_val = reg_val << 8 | avi_iframe[3];
+	reg_val = reg_val << 8 | checksum;
+	DSS_REG_W(io, HDMI_AVI_INFO0, reg_val);
+
+	reg_val = avi_iframe[9];
+	reg_val = reg_val << 8 | avi_iframe[8];
+	reg_val = reg_val << 8 | avi_iframe[7];
+	reg_val = reg_val << 8 | avi_iframe[6];
+	DSS_REG_W(io, HDMI_AVI_INFO1, reg_val);
+
+	reg_val = avi_iframe[13];
+	reg_val = reg_val << 8 | avi_iframe[12];
+	reg_val = reg_val << 8 | avi_iframe[11];
+	reg_val = reg_val << 8 | avi_iframe[10];
+	DSS_REG_W(io, HDMI_AVI_INFO2, reg_val);
+
+	reg_val = avi_iframe[1];
+	reg_val = reg_val << 16 | avi_iframe[15];
+	reg_val = reg_val << 8 | avi_iframe[14];
+	DSS_REG_W(io, HDMI_AVI_INFO3, reg_val);
+
+	/* AVI InfoFrame enable (every frame) */
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL0,
+		DSS_REG_R(io, HDMI_INFOFRAME_CTRL0) | BIT(1) | BIT(0));
+} /* hdmi_tx_set_avi_infoframe */
+
+/* todo: add 3D support */
+static void hdmi_tx_set_vendor_specific_infoframe(
+	struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int i;
+	u8 vs_iframe[9]; /* two header + length + 6 data */
+	u32 sum, reg_val;
+	u32 hdmi_vic, hdmi_video_format;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return;
+	}
+
+	/* HDMI Spec 1.4a Table 8-10 */
+	vs_iframe[0] = 0x81; /* type */
+	vs_iframe[1] = 0x1;  /* version */
+	vs_iframe[2] = 0x8;  /* length */
+
+	vs_iframe[3] = 0x0; /* PB0: checksum */
+
+	/* PB1..PB3: 24 Bit IEEE Registration Code 00_0C_03 */
+	vs_iframe[4] = 0x03;
+	vs_iframe[5] = 0x0C;
+	vs_iframe[6] = 0x00;
+
+	hdmi_video_format = 0x1;
+	switch (hdmi_ctrl->video_resolution) {
+	case HDMI_VFRMT_3840x2160p30_16_9:
+		hdmi_vic = 0x1;
+		break;
+	case HDMI_VFRMT_3840x2160p25_16_9:
+		hdmi_vic = 0x2;
+		break;
+	case HDMI_VFRMT_3840x2160p24_16_9:
+		hdmi_vic = 0x3;
+		break;
+	case HDMI_VFRMT_4096x2160p24_16_9:
+		hdmi_vic = 0x4;
+		break;
+	default:
+		hdmi_video_format = 0x0;
+		hdmi_vic = 0x0;
+	}
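+	/*
+	 * 4Kx2K modes are carried as HDMI_VIC in the vendor specific
+	 * InfoFrame (HDMI 1.4 extended resolution format) rather than in
+	 * the AVI InfoFrame VIC field.
+	 */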
+
+	/* PB4: HDMI Video Format[7:5],  Reserved[4:0] */
+	vs_iframe[7] = (hdmi_video_format << 5) & 0xE0;
+
+	/* PB5: HDMI_VIC or 3D_Structure[7:4], Reserved[3:0] */
+	vs_iframe[8] = hdmi_vic;
+
+	/* compute checksum */
+	sum = 0;
+	for (i = 0; i < 9; i++)
+		sum += vs_iframe[i];
+
+	sum &= 0xFF;
+	sum = 256 - sum;
+	vs_iframe[3] = (u8)sum;
+
+	reg_val = (hdmi_vic << 16) | (vs_iframe[3] << 8) |
+		(hdmi_video_format << 5) | vs_iframe[2];
+	DSS_REG_W(io, HDMI_VENSPEC_INFO0, reg_val);
+
+	/* vendor specific info-frame enable (every frame) */
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL0,
+		DSS_REG_R(io, HDMI_INFOFRAME_CTRL0) | BIT(13) | BIT(12));
+} /* hdmi_tx_set_vendor_specific_infoframe */
+
+static void hdmi_tx_set_spd_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 packet_header  = 0;
+	u32 check_sum      = 0;
+	u32 packet_payload = 0;
+	u32 packet_control = 0;
+
+	u8 *vendor_name = NULL;
+	u8 *product_description = NULL;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return;
+	}
+
+	vendor_name = hdmi_ctrl->spd_vendor_name;
+	product_description = hdmi_ctrl->spd_product_description;
+
+	/* Setup Packet header and payload */
+	/*
+	 * 0x83 InfoFrame Type Code
+	 * 0x01 InfoFrame Version Number
+	 * 0x19 Length of Source Product Description InfoFrame
+	 */
+	packet_header  = 0x83 | (0x01 << 8) | (0x19 << 16);
+	DSS_REG_W(io, HDMI_GENERIC1_HDR, packet_header);
+	check_sum += IFRAME_CHECKSUM_32(packet_header);
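+	/*
+	 * IFRAME_CHECKSUM_32() is assumed to byte-sum the four bytes of each
+	 * 32-bit word; the running check_sum is folded into GENERIC1_0 below
+	 * so that the packet byte sum is zero modulo 256.
+	 */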
+
+	packet_payload = (vendor_name[3] & 0x7f)
+		| ((vendor_name[4] & 0x7f) << 8)
+		| ((vendor_name[5] & 0x7f) << 16)
+		| ((vendor_name[6] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_1, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	/* Product Description (7-bit ASCII code) */
+	packet_payload = (vendor_name[7] & 0x7f)
+		| ((product_description[0] & 0x7f) << 8)
+		| ((product_description[1] & 0x7f) << 16)
+		| ((product_description[2] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_2, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	packet_payload = (product_description[3] & 0x7f)
+		| ((product_description[4] & 0x7f) << 8)
+		| ((product_description[5] & 0x7f) << 16)
+		| ((product_description[6] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_3, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	packet_payload = (product_description[7] & 0x7f)
+		| ((product_description[8] & 0x7f) << 8)
+		| ((product_description[9] & 0x7f) << 16)
+		| ((product_description[10] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_4, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	packet_payload = (product_description[11] & 0x7f)
+		| ((product_description[12] & 0x7f) << 8)
+		| ((product_description[13] & 0x7f) << 16)
+		| ((product_description[14] & 0x7f) << 24);
+	DSS_REG_W(io, HDMI_GENERIC1_5, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	/*
+	 * Source Device Information
+	 * 00h unknown
+	 * 01h Digital STB
+	 * 02h DVD
+	 * 03h D-VHS
+	 * 04h HDD Video
+	 * 05h DVC
+	 * 06h DSC
+	 * 07h Video CD
+	 * 08h Game
+	 * 09h PC general
+	 */
+	packet_payload = (product_description[15] & 0x7f) | 0x00 << 8;
+	DSS_REG_W(io, HDMI_GENERIC1_6, packet_payload);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+
+	/* Vendor Name (7bit ASCII code) */
+	packet_payload = ((vendor_name[0] & 0x7f) << 8)
+		| ((vendor_name[1] & 0x7f) << 16)
+		| ((vendor_name[2] & 0x7f) << 24);
+	check_sum += IFRAME_CHECKSUM_32(packet_payload);
+	packet_payload |= ((0x100 - (0xff & check_sum)) & 0xff);
+	DSS_REG_W(io, HDMI_GENERIC1_0, packet_payload);
+
+	/*
+	 * GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND
+	 * Setup HDMI TX generic packet control
+	 * Enable this packet to transmit every frame
+	 * Enable HDMI TX engine to transmit Generic packet 1
+	 */
+	packet_control = DSS_REG_R_ND(io, HDMI_GEN_PKT_CTRL);
+	packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4));
+	DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
+} /* hdmi_tx_set_spd_infoframe */
+
+static void hdmi_tx_set_mode(struct hdmi_tx_ctrl *hdmi_ctrl, u32 power_on)
+{
+	struct dss_io_data *io = NULL;
+	/* Defaults: Disable block, HDMI mode */
+	u32 reg_val = BIT(1);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return;
+	}
+
+	if (power_on) {
+		/* Enable the block */
+		reg_val |= BIT(0);
+
+		/* HDMI Encryption, if HDCP is enabled */
+		if (hdmi_ctrl->hdcp_feature_on && hdmi_ctrl->present_hdcp)
+			reg_val |= BIT(2);
+
+		/* Set transmission mode to DVI based on EDID info */
+		if (hdmi_edid_get_sink_mode(
+			hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID]) == 0)
+			reg_val &= ~BIT(1); /* DVI mode */
+	}
+
+	DSS_REG_W(io, HDMI_CTRL, reg_val);
+
+	DEV_DBG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+		power_on ? "Enable" : "Disable", reg_val);
+} /* hdmi_tx_set_mode */
+
+static int hdmi_tx_config_power(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module, int config)
+{
+	int rc = 0;
+	struct dss_module_power *power_data = NULL;
+
+	if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) {
+		DEV_ERR("%s: Error: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	power_data = &hdmi_ctrl->pdata.power_data[module];
+	if (!power_data) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (config) {
+		rc = msm_dss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to config %s vreg. Err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto exit;
+		}
+
+		rc = msm_dss_get_clk(&hdmi_ctrl->pdev->dev,
+			power_data->clk_config, power_data->num_clk);
+		if (rc) {
+			DEV_ERR("%s: Failed to get %s clk. Err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+
+			msm_dss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 0);
+		}
+	} else {
+		msm_dss_put_clk(power_data->clk_config, power_data->num_clk);
+
+		rc = msm_dss_config_vreg(&hdmi_ctrl->pdev->dev,
+			power_data->vreg_config, power_data->num_vreg, 0);
+		if (rc)
+			DEV_ERR("%s: Fail to deconfig %s vreg. Err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+	}
+
+exit:
+	return rc;
+} /* hdmi_tx_config_power */
+
+static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl,
+	enum hdmi_tx_power_module_type module, int enable)
+{
+	int rc = 0;
+	struct dss_module_power *power_data = NULL;
+
+	if (!hdmi_ctrl || module >= HDMI_TX_MAX_PM) {
+		DEV_ERR("%s: Error: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	power_data = &hdmi_ctrl->pdata.power_data[module];
+	if (!power_data) {
+		DEV_ERR("%s: Error: invalid power data\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (enable) {
+		rc = msm_dss_enable_vreg(power_data->vreg_config,
+			power_data->num_vreg, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable %s vreg. Error=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto error;
+		}
+
+		rc = msm_dss_enable_gpio(power_data->gpio_config,
+			power_data->num_gpio, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable %s gpio. Error=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto disable_vreg;
+		}
+
+		rc = msm_dss_clk_set_rate(power_data->clk_config,
+			power_data->num_clk);
+		if (rc) {
+			DEV_ERR("%s: failed to set clks rate for %s. err=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto disable_gpio;
+		}
+
+		rc = msm_dss_enable_clk(power_data->clk_config,
+			power_data->num_clk, 1);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable clks for %s. Error=%d\n",
+				__func__, hdmi_tx_pm_name(module), rc);
+			goto disable_gpio;
+		}
+	} else {
+		msm_dss_enable_clk(power_data->clk_config,
+			power_data->num_clk, 0);
+		msm_dss_clk_set_rate(power_data->clk_config,
+			power_data->num_clk);
+		msm_dss_enable_gpio(power_data->gpio_config,
+			power_data->num_gpio, 0);
+		msm_dss_enable_vreg(power_data->vreg_config,
+			power_data->num_vreg, 0);
+	}
+
+	return rc;
+
+disable_gpio:
+	msm_dss_enable_gpio(power_data->gpio_config, power_data->num_gpio, 0);
+disable_vreg:
+	msm_dss_enable_vreg(power_data->vreg_config, power_data->num_vreg, 0);
+error:
+	return rc;
+} /* hdmi_tx_enable_power */
+
+static void hdmi_tx_core_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CEC_PM, 0);
+	hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CORE_PM, 0);
+} /* hdmi_tx_core_off */
+
+static int hdmi_tx_core_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CORE_PM, 1);
+	if (rc) {
+		DEV_ERR("%s: core hdmi_tx_enable_power failed rc = %d\n",
+			__func__, rc);
+		return rc;
+	}
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CEC_PM, 1);
+	if (rc) {
+		DEV_ERR("%s: cec hdmi_tx_enable_power failed rc = %d\n",
+			__func__, rc);
+		goto disable_core_power;
+	}
+
+	return rc;
+disable_core_power:
+	hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_CORE_PM, 0);
+	return rc;
+} /* hdmi_tx_core_on */
+
+static void hdmi_tx_phy_reset(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	unsigned int phy_reset_polarity = 0x0;
+	unsigned int pll_reset_polarity = 0x0;
+	unsigned int val;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not initialized\n", __func__);
+		return;
+	}
+
+	val = DSS_REG_R_ND(io, HDMI_PHY_CTRL);
+
+	phy_reset_polarity = val >> 3 & 0x1;
+	pll_reset_polarity = val >> 1 & 0x1;
+
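+	/*
+	 * Bits 3 and 1 of HDMI_PHY_CTRL give the active polarity of the PHY
+	 * and PLL software resets; assert each reset in its active state,
+	 * then deassert it.
+	 */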
+	if (phy_reset_polarity == 0)
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET);
+	else
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET));
+
+	if (pll_reset_polarity == 0)
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET_PLL);
+	else
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET_PLL));
+
+	if (phy_reset_polarity == 0)
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET));
+	else
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET);
+
+	if (pll_reset_polarity == 0)
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val & (~SW_RESET_PLL));
+	else
+		DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET_PLL);
+} /* hdmi_tx_phy_reset */
+
+static void hdmi_tx_init_phy(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_PHY_IO];
+	if (!io->base) {
+		DEV_ERR("%s: phy io is not initialized\n", __func__);
+		return;
+	}
+
+	DSS_REG_W_ND(io, HDMI_PHY_ANA_CFG0, 0x1B);
+	DSS_REG_W_ND(io, HDMI_PHY_ANA_CFG1, 0xF2);
+	DSS_REG_W_ND(io, HDMI_PHY_BIST_CFG0, 0x0);
+	DSS_REG_W_ND(io, HDMI_PHY_BIST_PATN0, 0x0);
+	DSS_REG_W_ND(io, HDMI_PHY_BIST_PATN1, 0x0);
+	DSS_REG_W_ND(io, HDMI_PHY_BIST_PATN2, 0x0);
+	DSS_REG_W_ND(io, HDMI_PHY_BIST_PATN3, 0x0);
+
+	DSS_REG_W_ND(io, HDMI_PHY_PD_CTRL1, 0x20);
+} /* hdmi_tx_init_phy */
+
+static void hdmi_tx_powerdown_phy(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_PHY_IO];
+	if (!io->base) {
+		DEV_ERR("%s: phy io is not initialized\n", __func__);
+		return;
+	}
+
+	DSS_REG_W_ND(io, HDMI_PHY_PD_CTRL0, 0x7F);
+} /* hdmi_tx_powerdown_phy */
+
+static int hdmi_tx_audio_acr_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
+	bool enabled, int num_of_channels)
+{
+	/* Read first before writing */
+	u32 acr_pck_ctrl_reg;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	acr_pck_ctrl_reg = DSS_REG_R(io, HDMI_ACR_PKT_CTRL);
+
+	if (enabled) {
+		const struct msm_hdmi_mode_timing_info *timing =
+			hdmi_get_supported_mode(hdmi_ctrl->video_resolution);
+		const struct hdmi_tx_audio_acr_arry *audio_acr =
+			&hdmi_tx_audio_acr_lut[0];
+		const int lut_size = sizeof(hdmi_tx_audio_acr_lut)
+			/ sizeof(*hdmi_tx_audio_acr_lut);
+		u32 i, n, cts, layout, multiplier, aud_pck_ctrl_2_reg;
+
+		if (timing == NULL) {
+			DEV_WARN("%s: video format %d not supported\n",
+				__func__, hdmi_ctrl->video_resolution);
+			return -EPERM;
+		}
+
+		for (i = 0; i < lut_size; i++) {
+			audio_acr = &hdmi_tx_audio_acr_lut[i];
+			if (audio_acr->pclk == timing->pixel_freq)
+				break;
+		}
+		if (i >= lut_size) {
+			DEV_WARN("%s: pixel clk %d not supported\n", __func__,
+				timing->pixel_freq);
+			return -EPERM;
+		}
+
+		n = audio_acr->lut[hdmi_ctrl->audio_sample_rate].n;
+		cts = audio_acr->lut[hdmi_ctrl->audio_sample_rate].cts;
+		layout = (MSM_HDMI_AUDIO_CHANNEL_2 == num_of_channels) ? 0 : 1;
+
+		if (
+		(AUDIO_SAMPLE_RATE_192KHZ == hdmi_ctrl->audio_sample_rate) ||
+		(AUDIO_SAMPLE_RATE_176_4KHZ == hdmi_ctrl->audio_sample_rate)) {
+			multiplier = 4;
+			n >>= 2; /* divide N by 4 and use multiplier */
+		} else if (
+		(AUDIO_SAMPLE_RATE_96KHZ == hdmi_ctrl->audio_sample_rate) ||
+		(AUDIO_SAMPLE_RATE_88_2KHZ == hdmi_ctrl->audio_sample_rate)) {
+			multiplier = 2;
+			n >>= 1; /* divide N by 2 and use multiplier */
+		} else {
+			multiplier = 1;
+		}
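+		/*
+		 * The sink regenerates the audio clock from N and CTS as
+		 * 128*fs = f_TMDS * N / CTS (HDMI 1.4, section 7.2), so both
+		 * values are looked up per pixel clock and sample rate.
+		 */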
+		DEV_DBG("%s: n=%u, cts=%u, layout=%u\n", __func__, n, cts,
+			layout);
+
+		/* AUDIO_PRIORITY | SOURCE */
+		acr_pck_ctrl_reg |= 0x80000100;
+		/* N_MULTIPLE(multiplier) */
+		acr_pck_ctrl_reg |= (multiplier & 7) << 16;
+
+		if ((AUDIO_SAMPLE_RATE_48KHZ == hdmi_ctrl->audio_sample_rate) ||
+		(AUDIO_SAMPLE_RATE_96KHZ == hdmi_ctrl->audio_sample_rate) ||
+		(AUDIO_SAMPLE_RATE_192KHZ == hdmi_ctrl->audio_sample_rate)) {
+			/* SELECT(3) */
+			acr_pck_ctrl_reg |= 3 << 4;
+			/* CTS_48 */
+			cts <<= 12;
+
+			/* CTS: need to determine how many fractional bits */
+			DSS_REG_W(io, HDMI_ACR_48_0, cts);
+			/* N */
+			DSS_REG_W(io, HDMI_ACR_48_1, n);
+		} else if (
+		(AUDIO_SAMPLE_RATE_44_1KHZ == hdmi_ctrl->audio_sample_rate) ||
+		(AUDIO_SAMPLE_RATE_88_2KHZ == hdmi_ctrl->audio_sample_rate) ||
+		(AUDIO_SAMPLE_RATE_176_4KHZ == hdmi_ctrl->audio_sample_rate)) {
+			/* SELECT(2) */
+			acr_pck_ctrl_reg |= 2 << 4;
+			/* CTS_44 */
+			cts <<= 12;
+
+			/* CTS: need to determine how many fractional bits */
+			DSS_REG_W(io, HDMI_ACR_44_0, cts);
+			/* N */
+			DSS_REG_W(io, HDMI_ACR_44_1, n);
+		} else {	/* default to 32k */
+			/* SELECT(1) */
+			acr_pck_ctrl_reg |= 1 << 4;
+			/* CTS_32 */
+			cts <<= 12;
+
+			/* CTS: need to determine how many fractional bits */
+			DSS_REG_W(io, HDMI_ACR_32_0, cts);
+			/* N */
+			DSS_REG_W(io, HDMI_ACR_32_1, n);
+		}
+		/* Payload layout depends on number of audio channels */
+		/* LAYOUT_SEL(layout) */
+		aud_pck_ctrl_2_reg = 1 | (layout << 1);
+		/* override | layout */
+		DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+		/* SEND | CONT */
+		acr_pck_ctrl_reg |= 0x00000003;
+	} else {
+		/* ~(SEND | CONT) */
+		acr_pck_ctrl_reg &= ~0x00000003;
+	}
+	DSS_REG_W(io, HDMI_ACR_PKT_CTRL, acr_pck_ctrl_reg);
+
+	return 0;
+} /* hdmi_tx_audio_acr_setup */
+
+static int hdmi_tx_audio_iframe_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
+	bool enabled, u32 num_of_channels, u32 channel_allocation,
+	u32 level_shift, bool down_mix)
+{
+	struct dss_io_data *io = NULL;
+
+	u32 channel_count = 1; /* Default to 2 channels -> Table 17 in CEA-861-D */
+	u32 check_sum, audio_info_0_reg, audio_info_1_reg;
+	u32 audio_info_ctrl_reg;
+	u32 aud_pck_ctrl_2_reg;
+	u32 layout;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not initialized\n", __func__);
+		return -EINVAL;
+	}
+
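+	/*
+	 * Audio sample packet layout 0 carries 2 channels; layout 1 carries
+	 * up to 8 channels (HDMI 1.4, section 5.3.4).
+	 */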
+	layout = (MSM_HDMI_AUDIO_CHANNEL_2 == num_of_channels) ? 0 : 1;
+	aud_pck_ctrl_2_reg = 1 | (layout << 1);
+	DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+	/*
+	 * Please see table 20 Audio InfoFrame in HDMI spec
+	 * FL  = front left
+	 * FC  = front Center
+	 * FR  = front right
+	 * FLC = front left center
+	 * FRC = front right center
+	 * RL  = rear left
+	 * RC  = rear center
+	 * RR  = rear right
+	 * RLC = rear left center
+	 * RRC = rear right center
+	 * LFE = low frequency effect
+	 */
+
+	/* Read first then write because it is bundled with other controls */
+	audio_info_ctrl_reg = DSS_REG_R(io, HDMI_INFOFRAME_CTRL0);
+
+	if (enabled) {
+		switch (num_of_channels) {
+		case MSM_HDMI_AUDIO_CHANNEL_2:
+			break;
+		case MSM_HDMI_AUDIO_CHANNEL_4:
+			channel_count = 3;
+			break;
+		case MSM_HDMI_AUDIO_CHANNEL_6:
+			channel_count = 5;
+			break;
+		case MSM_HDMI_AUDIO_CHANNEL_8:
+			channel_count = 7;
+			break;
+		default:
+			DEV_ERR("%s: Unsupported num_of_channels = %u\n",
+				__func__, num_of_channels);
+			return -EINVAL;
+		}
+
+		/* Program the Channel-Speaker allocation */
+		audio_info_1_reg = 0;
+		/* CA(channel_allocation) */
+		audio_info_1_reg |= channel_allocation & 0xff;
+		/* Program the Level shifter */
+		audio_info_1_reg |= (level_shift << 11) & 0x00007800;
+		/* Program the Down-mix Inhibit Flag */
+		audio_info_1_reg |= (down_mix << 15) & 0x00008000;
+
+		DSS_REG_W(io, HDMI_AUDIO_INFO1, audio_info_1_reg);
+
+		/*
+		 * Calculate CheckSum: Sum of all the bytes in the
+		 * Audio Info Packet (See table 8.4 in HDMI spec)
+		 */
+		check_sum = 0;
+		/* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_TYPE[0x84] */
+		check_sum += 0x84;
+		/* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_VERSION[0x01] */
+		check_sum += 1;
+		/* HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH[0x0A] */
+		check_sum += 0x0A;
+		check_sum += channel_count;
+		check_sum += channel_allocation;
+		/* See Table 8.5 in HDMI spec */
+		check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+		check_sum &= 0xFF;
+		check_sum = (u8) (256 - check_sum);
+
+		audio_info_0_reg = 0;
+		/* CHECKSUM(check_sum) */
+		audio_info_0_reg |= check_sum & 0xff;
+		/* CC(channel_count) */
+		audio_info_0_reg |= (channel_count << 8) & 0x00000700;
+
+		DSS_REG_W(io, HDMI_AUDIO_INFO0, audio_info_0_reg);
+
+		/*
+		 * Set these flags
+		 * AUDIO_INFO_UPDATE |
+		 * AUDIO_INFO_SOURCE |
+		 * AUDIO_INFO_CONT   |
+		 * AUDIO_INFO_SEND
+		 */
+		audio_info_ctrl_reg |= 0x000000F0;
+	} else {
+		/*
+		 * Clear these flags
+		 * ~(AUDIO_INFO_UPDATE |
+		 *   AUDIO_INFO_SOURCE |
+		 *   AUDIO_INFO_CONT   |
+		 *   AUDIO_INFO_SEND)
+		 */
+		audio_info_ctrl_reg &= ~0x000000F0;
+	}
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
+
+	dss_reg_dump(io->base, io->len,
+		enabled ? "HDMI-AUDIO-ON: " : "HDMI-AUDIO-OFF: ", REG_DUMP);
+
+	return 0;
+} /* hdmi_tx_audio_iframe_setup */
+
+static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
+	u32 num_of_channels, u32 channel_allocation, u32 level_shift,
+	bool down_mix)
+{
+	int rc = 0;
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	if (hdmi_ctrl->panel_power_on) {
+		rc = hdmi_tx_audio_iframe_setup(hdmi_ctrl, true,
+			num_of_channels, channel_allocation, level_shift,
+			down_mix);
+		if (rc)
+			DEV_ERR("%s: hdmi_tx_audio_iframe_setup failed.rc=%d\n",
+				__func__, rc);
+	} else {
+		DEV_ERR("%s: Error. panel is not on.\n", __func__);
+		rc = -EPERM;
+	}
+
+	return rc;
+} /* hdmi_tx_audio_info_setup */
+
+static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev,
+	struct msm_hdmi_audio_edid_blk *blk)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!hdmi_ctrl->audio_sdev.state) {
+		DEV_ERR("%s: failed. HDMI is not connected/ready for audio\n",
+			__func__);
+		return -EPERM;
+	}
+
+	return hdmi_edid_get_audio_blk(
+		hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID], blk);
+} /* hdmi_tx_get_audio_edid_blk */
+
+static u8 hdmi_tx_tmds_enabled(struct platform_device *pdev)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		/* u8 return type cannot carry -ENODEV; report TMDS off */
+		return 0;
+	}
+
+	/* status of tmds */
+	return (hdmi_ctrl->timing_gen_on == true);
+}
+
+static int hdmi_tx_set_mhl_max_pclk(struct platform_device *pdev, u32 max_val)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+	if (max_val) {
+		hdmi_ctrl->mhl_max_pclk = max_val;
+		hdmi_tx_setup_mhl_video_mode_lut(hdmi_ctrl);
+	} else {
+		DEV_ERR("%s: invalid max pclk val\n", __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int msm_hdmi_register_mhl(struct platform_device *pdev,
+		struct msm_hdmi_mhl_ops *ops)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid pdev\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!ops) {
+		DEV_ERR("%s: invalid ops\n", __func__);
+		return -EINVAL;
+	}
+
+	ops->tmds_enabled = hdmi_tx_tmds_enabled;
+	ops->set_mhl_max_pclk = hdmi_tx_set_mhl_max_pclk;
+	return 0;
+}
+
+int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+	struct msm_hdmi_audio_codec_ops *ops)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+
+	if (!hdmi_ctrl || !ops) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -ENODEV;
+	}
+
+	ops->audio_info_setup = hdmi_tx_audio_info_setup;
+	ops->get_audio_edid_blk = hdmi_tx_get_audio_edid_blk;
+
+	return 0;
+} /* msm_hdmi_register_audio_codec */
+EXPORT_SYMBOL(msm_hdmi_register_audio_codec);
+
+static int hdmi_tx_audio_setup(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+	const int channels = MSM_HDMI_AUDIO_CHANNEL_2;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = hdmi_tx_audio_acr_setup(hdmi_ctrl, true, channels);
+	if (rc) {
+		DEV_ERR("%s: hdmi_tx_audio_acr_setup failed. rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = hdmi_tx_audio_iframe_setup(hdmi_ctrl, true, channels, 0, 0, false);
+	if (rc) {
+		DEV_ERR("%s: hdmi_tx_audio_iframe_setup failed. rc=%d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	DEV_INFO("HDMI Audio: Enabled\n");
+
+	return 0;
+} /* hdmi_tx_audio_setup */
+
+static void hdmi_tx_audio_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 i, status, sleep_us, timeout_us, timeout_sec = 15;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not initialized\n", __func__);
+		return;
+	}
+
+	/*
+	 * Poll until the audio engine has been turned off by the QDSP,
+	 * resending the off notification every second for up to 15 seconds.
+	 */
+	for (i = 0; i < timeout_sec; i++) {
+		sleep_us = 5000; /* Maximum time to sleep between two reads */
+		timeout_us = 1000 * 1000; /* Total time for condition to meet */
+
+		if (readl_poll_timeout((io->base + HDMI_AUDIO_CFG),
+			status, ((status & BIT(0)) == 0),
+			sleep_us, timeout_us)) {
+
+			DEV_ERR("%s: audio still on after %d sec. try again\n",
+				__func__, i+1);
+
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0, true);
+			continue;
+		}
+		break;
+	}
+	if (i == timeout_sec)
+		DEV_ERR("%s: Error: cannot turn off audio engine\n", __func__);
+
+	if (hdmi_tx_audio_iframe_setup(hdmi_ctrl, false, 0, 0, 0, false))
+		DEV_ERR("%s: hdmi_tx_audio_iframe_setup failed.\n", __func__);
+
+	if (hdmi_tx_audio_acr_setup(hdmi_ctrl, false, 0))
+		DEV_ERR("%s: hdmi_tx_audio_acr_setup failed.\n", __func__);
+
+	DEV_INFO("HDMI Audio: Disabled\n");
+} /* hdmi_tx_audio_off */
+
+static int hdmi_tx_start(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io is not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_tx_set_mode(hdmi_ctrl, false);
+	hdmi_tx_init_phy(hdmi_ctrl);
+	DSS_REG_W(io, HDMI_USEC_REFTIMER, 0x0001001B);
+
+	hdmi_tx_set_mode(hdmi_ctrl, true);
+
+	rc = hdmi_tx_video_setup(hdmi_ctrl, hdmi_ctrl->video_resolution);
+	if (rc) {
+		DEV_ERR("%s: hdmi_tx_video_setup failed. rc=%d\n",
+			__func__, rc);
+		hdmi_tx_set_mode(hdmi_ctrl, false);
+		return rc;
+	}
+
+	if (!hdmi_tx_is_dvi_mode(hdmi_ctrl)) {
+		rc = hdmi_tx_audio_setup(hdmi_ctrl);
+		if (rc) {
+			DEV_ERR("%s: hdmi_tx_audio_setup failed. rc=%d\n",
+				__func__, rc);
+			hdmi_tx_set_mode(hdmi_ctrl, false);
+			return rc;
+		}
+
+		if (!hdmi_ctrl->hdcp_feature_on || !hdmi_ctrl->present_hdcp)
+			hdmi_tx_set_audio_switch_node(hdmi_ctrl, 1, false);
+
+		hdmi_tx_set_avi_infoframe(hdmi_ctrl);
+		hdmi_tx_set_vendor_specific_infoframe(hdmi_ctrl);
+		hdmi_tx_set_spd_infoframe(hdmi_ctrl);
+	}
+
+	/* todo: CEC */
+
+	DEV_INFO("%s: HDMI Core: Initialized\n", __func__);
+
+	return rc;
+} /* hdmi_tx_start */
+
+static void hdmi_tx_hpd_polarity_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
+	bool polarity)
+{
+	struct dss_io_data *io = NULL;
+	u32 cable_sense;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io is not initialized\n", __func__);
+		return;
+	}
+
+	if (polarity)
+		DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(2) | BIT(1));
+	else
+		DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(2));
+
+	cable_sense = (DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(1)) >> 1;
+	DEV_DBG("%s: listen = %s, sense = %s\n", __func__,
+		polarity ? "connect" : "disconnect",
+		cable_sense ? "connect" : "disconnect");
+
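+	/*
+	 * If the current sense already matches the polarity being listened
+	 * for, the edge has been missed; retrigger the HPD circuit so the
+	 * interrupt fires again.
+	 */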
+	if (cable_sense == polarity) {
+		u32 reg_val = DSS_REG_R(io, HDMI_HPD_CTRL);
+
+		/* Toggle HPD circuit to trigger HPD sense */
+		DSS_REG_W(io, HDMI_HPD_CTRL, reg_val & ~BIT(28));
+		DSS_REG_W(io, HDMI_HPD_CTRL, reg_val | BIT(28));
+	}
+} /* hdmi_tx_hpd_polarity_setup */
+
+static void hdmi_tx_power_off_work(struct work_struct *work)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+	struct dss_io_data *io = NULL;
+
+	hdmi_ctrl = container_of(work, struct hdmi_tx_ctrl, power_off_work);
+	if (!hdmi_ctrl) {
+		DEV_DBG("%s: invalid input\n", __func__);
+		return;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: Core io is not initialized\n", __func__);
+		return;
+	}
+
+	if (hdmi_ctrl->hdcp_feature_on && hdmi_ctrl->present_hdcp) {
+		DEV_DBG("%s: Turning off HDCP\n", __func__);
+		hdmi_hdcp_off(hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]);
+	}
+
+	if (!hdmi_tx_is_dvi_mode(hdmi_ctrl)) {
+		hdmi_tx_set_audio_switch_node(hdmi_ctrl, 0, false);
+		hdmi_tx_audio_off(hdmi_ctrl);
+	}
+
+	hdmi_tx_powerdown_phy(hdmi_ctrl);
+
+	/*
+	 * this is needed to avoid pll lock failure due to
+	 * clk framework's rate caching.
+	 */
+	hdmi_ctrl->pdata.power_data[HDMI_TX_CORE_PM].clk_config[0].rate = 0;
+
+	hdmi_tx_core_off(hdmi_ctrl);
+
+	if (hdmi_ctrl->hpd_off_pending) {
+		hdmi_tx_hpd_off(hdmi_ctrl);
+		hdmi_ctrl->hpd_off_pending = false;
+	}
+
+	hdmi_cec_deconfig(hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]);
+
+	mutex_lock(&hdmi_ctrl->mutex);
+	hdmi_ctrl->panel_power_on = false;
+	mutex_unlock(&hdmi_ctrl->mutex);
+
+	DEV_INFO("%s: HDMI Core: OFF\n", __func__);
+} /* hdmi_tx_power_off_work */
+
+static int hdmi_tx_power_off(struct mdss_panel_data *panel_data)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_panel_data(panel_data);
+
+	if (!hdmi_ctrl || !hdmi_ctrl->panel_power_on) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Queue work item to handle power down sequence.
+	 * This is needed since we need to wait for the audio engine
+	 * to shutdown first before we shutdown the HDMI core.
+	 */
+	DEV_DBG("%s: Queuing work to power off HDMI core\n", __func__);
+	queue_work(hdmi_ctrl->workq, &hdmi_ctrl->power_off_work);
+
+	return 0;
+} /* hdmi_tx_power_off */
+
+static int hdmi_tx_power_on(struct mdss_panel_data *panel_data)
+{
+	u32 timeout;
+	int rc = 0;
+	struct dss_io_data *io = NULL;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_panel_data(panel_data);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io is not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!hdmi_ctrl->hpd_initialized) {
+		DEV_ERR("%s: HDMI on is not possible w/o cable detection.\n",
+			__func__);
+		return -EPERM;
+	}
+
+	/* If a power down is already underway, wait for it to finish */
+	flush_work_sync(&hdmi_ctrl->power_off_work);
+
+	if (hdmi_ctrl->pdata.primary) {
+		timeout = wait_for_completion_interruptible_timeout(
+			&hdmi_ctrl->hpd_done, HZ);
+		if (!timeout) {
+			DEV_ERR("%s: cable connection hasn't happened yet\n",
+				__func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	rc = hdmi_tx_set_video_fmt(hdmi_ctrl, &panel_data->panel_info);
+	if (rc) {
+		DEV_ERR("%s: cannot set video_fmt.rc=%d\n", __func__, rc);
+		return rc;
+	}
+
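+	/*
+	 * Latch the HDCP enable flag at power-on so a runtime change of the
+	 * (assumed file-scope) hdcp_feature_on parameter cannot take effect
+	 * mid-session.
+	 */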
+	hdmi_ctrl->hdcp_feature_on = hdcp_feature_on;
+
+	DEV_INFO("power: ON (%s)\n", msm_hdmi_mode_2string(
+		hdmi_ctrl->video_resolution));
+
+	rc = hdmi_tx_core_on(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: hdmi_tx_core_on failed\n", __func__);
+		return rc;
+	}
+
+	mutex_lock(&hdmi_ctrl->mutex);
+	hdmi_ctrl->panel_power_on = true;
+	mutex_unlock(&hdmi_ctrl->mutex);
+
+	hdmi_cec_config(hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]);
+
+	if (hdmi_ctrl->hpd_state) {
+		DEV_DBG("%s: Turning HDMI on\n", __func__);
+		rc = hdmi_tx_start(hdmi_ctrl);
+		if (rc) {
+			DEV_ERR("%s: hdmi_tx_start failed. rc=%d\n",
+				__func__, rc);
+			hdmi_tx_power_off(panel_data);
+			return rc;
+		}
+	}
+
+	dss_reg_dump(io->base, io->len, "HDMI-ON: ", REG_DUMP);
+
+	DEV_INFO("%s: HDMI=%s DVI=%s\n", __func__,
+		hdmi_tx_is_controller_on(hdmi_ctrl) ? "ON" : "OFF",
+		hdmi_tx_is_dvi_mode(hdmi_ctrl) ? "ON" : "OFF");
+
+	hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_DISCONNECT_POLARITY);
+
+	return 0;
+} /* hdmi_tx_power_on */
+
+static void hdmi_tx_hpd_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (!hdmi_ctrl->hpd_initialized) {
+		DEV_DBG("%s: HPD is already OFF, returning\n", __func__);
+		return;
+	}
+
+	mdss_disable_irq(&hdmi_tx_hw);
+
+	hdmi_tx_set_mode(hdmi_ctrl, false);
+
+	rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, 0);
+	if (rc)
+		DEV_INFO("%s: Failed to disable hpd power. Error=%d\n",
+			__func__, rc);
+
+	hdmi_ctrl->hpd_state = false;
+	hdmi_ctrl->hpd_initialized = false;
+} /* hdmi_tx_hpd_off */
+
+static int hdmi_tx_hpd_on(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	u32 reg_val;
+	int rc = 0;
+	struct dss_io_data *io = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_ERR("%s: core io not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	if (hdmi_ctrl->hpd_initialized) {
+		DEV_DBG("%s: HPD is already ON\n", __func__);
+	} else {
+		rc = hdmi_tx_enable_power(hdmi_ctrl, HDMI_TX_HPD_PM, true);
+		if (rc) {
+			DEV_ERR("%s: Failed to enable hpd power. rc=%d\n",
+				__func__, rc);
+			return rc;
+		}
+
+		dss_reg_dump(io->base, io->len, "HDMI-INIT: ", REG_DUMP);
+
+		hdmi_tx_set_mode(hdmi_ctrl, false);
+		hdmi_tx_phy_reset(hdmi_ctrl);
+		hdmi_tx_set_mode(hdmi_ctrl, true);
+
+		DSS_REG_W(io, HDMI_USEC_REFTIMER, 0x0001001B);
+
+		mdss_enable_irq(&hdmi_tx_hw);
+
+		hdmi_ctrl->hpd_initialized = true;
+
+		DEV_INFO("%s: HDMI HW version = 0x%x\n", __func__,
+			DSS_REG_R_ND(&hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO],
+				HDMI_VERSION));
+
+		/* set timeout to 4.1ms (max) for hardware debounce */
+		reg_val = DSS_REG_R(io, HDMI_HPD_CTRL) | 0x1FFF;
+
+		/* Turn on HPD HW circuit */
+		DSS_REG_W(io, HDMI_HPD_CTRL, reg_val | BIT(28));
+
+		hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_CONNECT_POLARITY);
+	}
+
+	return rc;
+} /* hdmi_tx_hpd_on */
+
+static int hdmi_tx_sysfs_enable_hpd(struct hdmi_tx_ctrl *hdmi_ctrl, int on)
+{
+	int rc = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	DEV_INFO("%s: %d\n", __func__, on);
+	if (on) {
+		rc = hdmi_tx_hpd_on(hdmi_ctrl);
+	} else {
+		/* If power down is already underway, wait for it to finish */
+		flush_work_sync(&hdmi_ctrl->power_off_work);
+
+		if (!hdmi_ctrl->panel_power_on)
+			hdmi_tx_hpd_off(hdmi_ctrl);
+		else
+			hdmi_ctrl->hpd_off_pending = true;
+
+		hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+		DEV_DBG("%s: HDMI state switch to %d\n", __func__,
+			hdmi_ctrl->sdev.state);
+	}
+
+	return rc;
+} /* hdmi_tx_sysfs_enable_hpd */
+
+static irqreturn_t hdmi_tx_isr(int irq, void *data)
+{
+	struct dss_io_data *io = NULL;
+	struct hdmi_tx_ctrl *hdmi_ctrl = (struct hdmi_tx_ctrl *)data;
+
+	if (!hdmi_ctrl || !hdmi_ctrl->hpd_initialized) {
+		DEV_WARN("%s: invalid input data, ISR ignored\n", __func__);
+		return IRQ_HANDLED;
+	}
+
+	io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+	if (!io->base) {
+		DEV_WARN("%s: core io not initialized, ISR ignored\n",
+			__func__);
+		return IRQ_HANDLED;
+	}
+
+	if (DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(0)) {
+		hdmi_ctrl->hpd_state =
+			(DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(1)) >> 1;
+
+		/*
+		 * Ack the current hpd interrupt and stop listening to
+		 * new hpd interrupt.
+		 */
+		DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(0));
+		queue_work(hdmi_ctrl->workq, &hdmi_ctrl->hpd_int_work);
+	}
+
+	if (hdmi_ddc_isr(&hdmi_ctrl->ddc_ctrl))
+		DEV_ERR("%s: hdmi_ddc_isr failed\n", __func__);
+
+	if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC])
+		if (hdmi_cec_isr(hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]))
+			DEV_ERR("%s: hdmi_cec_isr failed\n", __func__);
+
+	if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP])
+		if (hdmi_hdcp_isr(hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]))
+			DEV_ERR("%s: hdmi_hdcp_isr failed\n", __func__);
+
+	return IRQ_HANDLED;
+} /* hdmi_tx_isr */
+
+static void hdmi_tx_dev_deinit(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]) {
+		hdmi_cec_deinit(hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC]);
+		hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC] = NULL;
+	}
+
+	if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]) {
+		hdmi_hdcp_deinit(hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]);
+		hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP] = NULL;
+	}
+
+	if (hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID]) {
+		hdmi_edid_deinit(hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID]);
+		hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID] = NULL;
+	}
+
+	switch_dev_unregister(&hdmi_ctrl->audio_sdev);
+	switch_dev_unregister(&hdmi_ctrl->sdev);
+	if (hdmi_ctrl->workq)
+		destroy_workqueue(hdmi_ctrl->workq);
+	mutex_destroy(&hdmi_ctrl->mutex);
+
+	hdmi_tx_hw.ptr = NULL;
+} /* hdmi_tx_dev_deinit */
+
+static int hdmi_tx_dev_init(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+	struct hdmi_tx_platform_data *pdata = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &hdmi_ctrl->pdata;
+
+	rc = hdmi_tx_check_capability(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: no HDMI device\n", __func__);
+		goto fail_no_hdmi;
+	}
+
+	/* irq enable/disable will be handled in hpd on/off */
+	hdmi_tx_hw.ptr = (void *)hdmi_ctrl;
+
+	hdmi_setup_video_mode_lut();
+	mutex_init(&hdmi_ctrl->mutex);
+	hdmi_ctrl->workq = create_workqueue("hdmi_tx_workq");
+	if (!hdmi_ctrl->workq) {
+		DEV_ERR("%s: hdmi_tx_workq creation failed.\n", __func__);
+		rc = -EPERM;
+		goto fail_create_workq;
+	}
+
+	hdmi_ctrl->ddc_ctrl.io = &pdata->io[HDMI_TX_CORE_IO];
+	init_completion(&hdmi_ctrl->ddc_ctrl.ddc_sw_done);
+
+	hdmi_ctrl->panel_power_on = false;
+	hdmi_ctrl->panel_suspend = false;
+
+	hdmi_ctrl->hpd_state = false;
+	hdmi_ctrl->hpd_initialized = false;
+	hdmi_ctrl->hpd_off_pending = false;
+	init_completion(&hdmi_ctrl->hpd_done);
+	INIT_WORK(&hdmi_ctrl->hpd_int_work, hdmi_tx_hpd_int_work);
+
+	INIT_WORK(&hdmi_ctrl->power_off_work, hdmi_tx_power_off_work);
+
+	hdmi_ctrl->audio_sample_rate = AUDIO_SAMPLE_RATE_48KHZ;
+
+	hdmi_ctrl->sdev.name = "hdmi";
+	if (switch_dev_register(&hdmi_ctrl->sdev) < 0) {
+		DEV_ERR("%s: Hdmi switch registration failed\n", __func__);
+		rc = -ENODEV;
+		goto fail_create_workq;
+	}
+
+	hdmi_ctrl->audio_sdev.name = "hdmi_audio";
+	if (switch_dev_register(&hdmi_ctrl->audio_sdev) < 0) {
+		DEV_ERR("%s: hdmi_audio switch registration failed\n",
+			__func__);
+		rc = -ENODEV;
+		goto fail_audio_switch_dev;
+	}
+
+	return 0;
+
+fail_audio_switch_dev:
+	switch_dev_unregister(&hdmi_ctrl->sdev);
+fail_create_workq:
+	if (hdmi_ctrl->workq)
+		destroy_workqueue(hdmi_ctrl->workq);
+	mutex_destroy(&hdmi_ctrl->mutex);
+fail_no_hdmi:
+	return rc;
+} /* hdmi_tx_dev_init */
+
+static int hdmi_tx_panel_event_handler(struct mdss_panel_data *panel_data,
+	int event, void *arg)
+{
+	int rc = 0, new_vic = -1;
+	struct hdmi_tx_ctrl *hdmi_ctrl =
+		hdmi_tx_get_drvdata_from_panel_data(panel_data);
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	DEV_DBG("%s: event = %d suspend=%d, hpd_feature=%d\n", __func__,
+		event, hdmi_ctrl->panel_suspend, hdmi_ctrl->hpd_feature_on);
+
+	switch (event) {
+	case MDSS_EVENT_FB_REGISTERED:
+		rc = hdmi_tx_sysfs_create(hdmi_ctrl, arg);
+		if (rc) {
+			DEV_ERR("%s: hdmi_tx_sysfs_create failed.rc=%d\n",
+					__func__, rc);
+			return rc;
+		}
+		rc = hdmi_tx_init_features(hdmi_ctrl);
+		if (rc) {
+			DEV_ERR("%s: init_features failed.rc=%d\n",
+					__func__, rc);
+			hdmi_tx_sysfs_remove(hdmi_ctrl);
+			return rc;
+		}
+
+		if (hdmi_ctrl->pdata.primary) {
+			INIT_COMPLETION(hdmi_ctrl->hpd_done);
+			rc = hdmi_tx_sysfs_enable_hpd(hdmi_ctrl, true);
+			if (rc) {
+				DEV_ERR("%s: hpd_enable failed. rc=%d\n",
+					__func__, rc);
+				hdmi_tx_sysfs_remove(hdmi_ctrl);
+				return rc;
+			} else {
+				hdmi_ctrl->hpd_feature_on = true;
+			}
+		}
+
+		break;
+
+	case MDSS_EVENT_CHECK_PARAMS:
+		new_vic = hdmi_tx_get_vic_from_panel_info(hdmi_ctrl,
+			(struct mdss_panel_info *)arg);
+		if ((new_vic < 0) || (new_vic > HDMI_VFRMT_MAX)) {
+			DEV_ERR("%s: invalid or not supported vic\n", __func__);
+			return -EPERM;
+		}
+
+		/*
+		 * return value of 1 lets mdss know that panel
+		 * needs a reconfig due to new resolution and
+		 * it will issue close and open subsequently.
+		 */
+		if (new_vic != hdmi_ctrl->video_resolution)
+			rc = 1;
+		else
+			DEV_DBG("%s: no res change.\n", __func__);
+		break;
+
+	case MDSS_EVENT_RESUME:
+		/* If a suspend is already underway, wait for it to finish */
+		if (hdmi_ctrl->panel_suspend && hdmi_ctrl->panel_power_on)
+			flush_work(&hdmi_ctrl->power_off_work);
+
+		if (hdmi_ctrl->hpd_feature_on) {
+			INIT_COMPLETION(hdmi_ctrl->hpd_done);
+
+			rc = hdmi_tx_hpd_on(hdmi_ctrl);
+			if (rc)
+				DEV_ERR("%s: hdmi_tx_hpd_on failed. rc=%d\n",
+					__func__, rc);
+		}
+		break;
+
+	case MDSS_EVENT_RESET:
+		if (hdmi_ctrl->panel_suspend) {
+			u32 timeout;
+			hdmi_ctrl->panel_suspend = false;
+
+			timeout = wait_for_completion_interruptible_timeout(
+				&hdmi_ctrl->hpd_done, HZ/10);
+			if (!timeout && !hdmi_ctrl->hpd_state) {
+				DEV_INFO("%s: cable removed during suspend\n",
+					__func__);
+				hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+				rc = -EPERM;
+			} else {
+				DEV_DBG("%s: cable present after resume\n",
+					__func__);
+			}
+		}
+		break;
+
+	case MDSS_EVENT_UNBLANK:
+		rc = hdmi_tx_power_on(panel_data);
+		if (rc)
+			DEV_ERR("%s: hdmi_tx_power_on failed. rc=%d\n",
+				__func__, rc);
+		break;
+
+	case MDSS_EVENT_PANEL_ON:
+		if (hdmi_ctrl->hdcp_feature_on && hdmi_ctrl->present_hdcp) {
+			DEV_DBG("%s: Starting HDCP authentication\n", __func__);
+			rc = hdmi_hdcp_authenticate(
+				hdmi_ctrl->feature_data[HDMI_TX_FEAT_HDCP]);
+			if (rc)
+				DEV_ERR("%s: hdcp auth failed. rc=%d\n",
+					__func__, rc);
+		}
+		hdmi_ctrl->timing_gen_on = true;
+		break;
+
+	case MDSS_EVENT_SUSPEND:
+		if (!hdmi_ctrl->panel_power_on) {
+			if (hdmi_ctrl->hpd_feature_on)
+				hdmi_tx_hpd_off(hdmi_ctrl);
+
+			hdmi_ctrl->panel_suspend = false;
+		} else {
+			hdmi_ctrl->hpd_off_pending = true;
+			hdmi_ctrl->panel_suspend = true;
+		}
+		break;
+
+	case MDSS_EVENT_BLANK:
+		if (hdmi_ctrl->panel_power_on) {
+			rc = hdmi_tx_power_off(panel_data);
+			if (rc)
+				DEV_ERR("%s: hdmi_tx_power_off failed.rc=%d\n",
+					__func__, rc);
+
+		} else {
+			DEV_DBG("%s: hdmi is already powered off\n", __func__);
+		}
+		break;
+
+	case MDSS_EVENT_PANEL_OFF:
+		hdmi_ctrl->timing_gen_on = false;
+		break;
+
+	case MDSS_EVENT_CLOSE:
+		if (hdmi_ctrl->hpd_feature_on)
+			hdmi_tx_hpd_polarity_setup(hdmi_ctrl,
+				HPD_CONNECT_POLARITY);
+		break;
+	}
+
+	return rc;
+} /* hdmi_tx_panel_event_handler */
+
+static int hdmi_tx_register_panel(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int rc = 0;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi_ctrl->panel_data.event_handler = hdmi_tx_panel_event_handler;
+
+	hdmi_ctrl->video_resolution = DEFAULT_VIDEO_RESOLUTION;
+	rc = hdmi_tx_init_panel_info(hdmi_ctrl->video_resolution,
+		&hdmi_ctrl->panel_data.panel_info);
+	if (rc) {
+		DEV_ERR("%s: hdmi_init_panel_info failed\n", __func__);
+		return rc;
+	}
+
+	rc = mdss_register_panel(hdmi_ctrl->pdev, &hdmi_ctrl->panel_data);
+	if (rc) {
+		DEV_ERR("%s: FAILED: to register HDMI panel\n", __func__);
+		return rc;
+	}
+
+	return rc;
+} /* hdmi_tx_register_panel */
+
+static void hdmi_tx_deinit_resource(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int i;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* VREG & CLK */
+	for (i = HDMI_TX_MAX_PM - 1; i >= 0; i--) {
+		if (hdmi_tx_config_power(hdmi_ctrl, i, 0))
+			DEV_ERR("%s: '%s' power deconfig fail\n",
+				__func__, hdmi_tx_pm_name(i));
+	}
+
+	/* IO */
+	for (i = HDMI_TX_MAX_IO - 1; i >= 0; i--)
+		msm_dss_iounmap(&hdmi_ctrl->pdata.io[i]);
+} /* hdmi_tx_deinit_resource */
+
+static int hdmi_tx_init_resource(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+	int i, rc = 0;
+	struct hdmi_tx_platform_data *pdata = NULL;
+
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	pdata = &hdmi_ctrl->pdata;
+
+	/* IO */
+	for (i = 0; i < HDMI_TX_MAX_IO; i++) {
+		rc = msm_dss_ioremap_byname(hdmi_ctrl->pdev, &pdata->io[i],
+			hdmi_tx_io_name(i));
+		if (rc) {
+			DEV_ERR("%s: '%s' remap failed\n", __func__,
+				hdmi_tx_io_name(i));
+			goto error;
+		}
+		DEV_INFO("%s: '%s': start = 0x%x, len=0x%x\n", __func__,
+			hdmi_tx_io_name(i), (u32)pdata->io[i].base,
+			pdata->io[i].len);
+	}
+
+	/* VREG & CLK */
+	for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+		rc = hdmi_tx_config_power(hdmi_ctrl, i, 1);
+		if (rc) {
+			DEV_ERR("%s: '%s' power config failed.rc=%d\n",
+				__func__, hdmi_tx_pm_name(i), rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	hdmi_tx_deinit_resource(hdmi_ctrl);
+	return rc;
+} /* hdmi_tx_init_resource */
+
+static void hdmi_tx_put_dt_clk_data(struct device *dev,
+	struct dss_module_power *module_power)
+{
+	if (!module_power) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (module_power->clk_config) {
+		devm_kfree(dev, module_power->clk_config);
+		module_power->clk_config = NULL;
+	}
+	module_power->num_clk = 0;
+} /* hdmi_tx_put_dt_clk_data */
+
+/* todo: once clk are moved to device tree then change this implementation */
+static int hdmi_tx_get_dt_clk_data(struct device *dev,
+	struct dss_module_power *mp, u32 module_type)
+{
+	int rc = 0;
+
+	if (!dev || !mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+
+	switch (module_type) {
+	case HDMI_TX_HPD_PM:
+		mp->num_clk = 2;
+		mp->clk_config = devm_kzalloc(dev, sizeof(struct dss_clk) *
+			mp->num_clk, GFP_KERNEL);
+		if (!mp->clk_config) {
+			DEV_ERR("%s: can't alloc '%s' clk mem\n", __func__,
+				hdmi_tx_pm_name(module_type));
+			rc = -ENOMEM;
+			goto error;
+		}
+
+		snprintf(mp->clk_config[0].clk_name, 32, "%s", "iface_clk");
+		mp->clk_config[0].type = DSS_CLK_AHB;
+		mp->clk_config[0].rate = 0;
+
+		snprintf(mp->clk_config[1].clk_name, 32, "%s", "core_clk");
+		mp->clk_config[1].type = DSS_CLK_OTHER;
+		mp->clk_config[1].rate = 19200000;
+		break;
+
+	case HDMI_TX_CORE_PM:
+		mp->num_clk = 2;
+		mp->clk_config = devm_kzalloc(dev, sizeof(struct dss_clk) *
+			mp->num_clk, GFP_KERNEL);
+		if (!mp->clk_config) {
+			DEV_ERR("%s: can't alloc '%s' clk mem\n", __func__,
+				hdmi_tx_pm_name(module_type));
+			rc = -ENOMEM;
+			goto error;
+		}
+
+		snprintf(mp->clk_config[0].clk_name, 32, "%s", "extp_clk");
+		mp->clk_config[0].type = DSS_CLK_PCLK;
+		/* This rate will be overwritten when core is powered on */
+		mp->clk_config[0].rate = 148500000;
+
+		snprintf(mp->clk_config[1].clk_name, 32, "%s", "alt_iface_clk");
+		mp->clk_config[1].type = DSS_CLK_AHB;
+		mp->clk_config[1].rate = 0;
+		break;
+
+	case HDMI_TX_CEC_PM:
+		mp->num_clk = 0;
+		DEV_DBG("%s: no clk\n", __func__);
+		break;
+
+	default:
+		DEV_ERR("%s: invalid module type=%d\n", __func__,
+			module_type);
+		return -EINVAL;
+	}
+
+	return rc;
+
+error:
+	if (mp->clk_config) {
+		devm_kfree(dev, mp->clk_config);
+		mp->clk_config = NULL;
+	}
+	mp->num_clk = 0;
+
+	return rc;
+} /* hdmi_tx_get_dt_clk_data */
+
+static void hdmi_tx_put_dt_vreg_data(struct device *dev,
+	struct dss_module_power *module_power)
+{
+	if (!module_power) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (module_power->vreg_config) {
+		devm_kfree(dev, module_power->vreg_config);
+		module_power->vreg_config = NULL;
+	}
+	module_power->num_vreg = 0;
+} /* hdmi_tx_put_dt_vreg_data */
+
+static int hdmi_tx_get_dt_vreg_data(struct device *dev,
+	struct dss_module_power *mp, u32 module_type)
+{
+	int i, j, rc = 0;
+	int dt_vreg_total = 0, mod_vreg_total = 0;
+	u32 ndx_mask = 0;
+	u32 *val_array = NULL;
+	const char *mod_name = NULL;
+	struct device_node *of_node = NULL;
+	char prop_name[32];
+
+	if (!dev || !mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	switch (module_type) {
+	case HDMI_TX_HPD_PM:
+		mod_name = "hpd";
+		break;
+	case HDMI_TX_CORE_PM:
+		mod_name = "core";
+		break;
+	case HDMI_TX_CEC_PM:
+		mod_name = "cec";
+		break;
+	default:
+		DEV_ERR("%s: invalid module type=%d\n", __func__,
+			module_type);
+		return -EINVAL;
+	}
+
+	DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+
+	of_node = dev->of_node;
+
+	memset(prop_name, 0, sizeof(prop_name));
+	snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME, "supply-names");
+	dt_vreg_total = of_property_count_strings(of_node, prop_name);
+	if (dt_vreg_total < 0) {
+		DEV_ERR("%s: vreg not found. rc=%d\n", __func__,
+			dt_vreg_total);
+		rc = dt_vreg_total;
+		goto error;
+	}
+
+	/* count how many vreg for particular hdmi module */
+	for (i = 0; i < dt_vreg_total; i++) {
+		const char *st = NULL;
+		memset(prop_name, 0, sizeof(prop_name));
+		snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
+			"supply-names");
+		rc = of_property_read_string_index(of_node,
+			prop_name, i, &st);
+		if (rc) {
+			DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto error;
+		}
+
+		if (strnstr(st, mod_name, strlen(st))) {
+			ndx_mask |= BIT(i);
+			mod_vreg_total++;
+		}
+	}
+
+	if (mod_vreg_total > 0) {
+		mp->num_vreg = mod_vreg_total;
+		mp->vreg_config = devm_kzalloc(dev, sizeof(struct dss_vreg) *
+			mod_vreg_total, GFP_KERNEL);
+		if (!mp->vreg_config) {
+			DEV_ERR("%s: can't alloc '%s' vreg mem\n", __func__,
+				hdmi_tx_pm_name(module_type));
+			rc = -ENOMEM;
+			goto error;
+		}
+	} else {
+		DEV_DBG("%s: no vreg\n", __func__);
+		return 0;
+	}
+
+	val_array = devm_kzalloc(dev, sizeof(u32) * dt_vreg_total, GFP_KERNEL);
+	if (!val_array) {
+		DEV_ERR("%s: can't allocate vreg scratch mem\n", __func__);
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	for (i = 0, j = 0; (i < dt_vreg_total) && (j < mod_vreg_total); i++) {
+		const char *st = NULL;
+
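+		/*
+		 * ndx_mask was built with one bit per DT supply entry; it is
+		 * shifted right once per iteration so bit 0 always says
+		 * whether entry i belongs to this module.
+		 */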
+		if (!(ndx_mask & BIT(0))) {
+			ndx_mask >>= 1;
+			continue;
+		}
+
+		/* vreg-name */
+		memset(prop_name, 0, sizeof(prop_name));
+		snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
+			"supply-names");
+		rc = of_property_read_string_index(of_node,
+			prop_name, i, &st);
+		if (rc) {
+			DEV_ERR("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto error;
+		}
+		snprintf(mp->vreg_config[j].vreg_name, 32, "%s", st);
+
+		/* vreg-type */
+		memset(prop_name, 0, sizeof(prop_name));
+		snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
+			"supply-type");
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			prop_name, val_array, dt_vreg_total);
+		if (rc) {
+			DEV_ERR("%s: error read '%s' vreg type. rc=%d\n",
+				__func__, hdmi_tx_pm_name(module_type), rc);
+			goto error;
+		}
+		mp->vreg_config[j].type = val_array[i];
+
+		/* vreg-min-voltage */
+		memset(prop_name, 0, sizeof(prop_name));
+		snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
+			"min-voltage-level");
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			prop_name, val_array,
+			dt_vreg_total);
+		if (rc) {
+			DEV_ERR("%s: error read '%s' min volt. rc=%d\n",
+				__func__, hdmi_tx_pm_name(module_type), rc);
+			goto error;
+		}
+		mp->vreg_config[j].min_voltage = val_array[i];
+
+		/* vreg-max-voltage */
+		memset(prop_name, 0, sizeof(prop_name));
+		snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
+			"max-voltage-level");
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			prop_name, val_array,
+			dt_vreg_total);
+		if (rc) {
+			DEV_ERR("%s: error read '%s' max volt. rc=%d\n",
+				__func__, hdmi_tx_pm_name(module_type), rc);
+			goto error;
+		}
+		mp->vreg_config[j].max_voltage = val_array[i];
+
+		/* vreg-op-mode */
+		memset(prop_name, 0, sizeof(prop_name));
+		snprintf(prop_name, 32, "%s-%s", COMPATIBLE_NAME,
+			"op-mode");
+		memset(val_array, 0, sizeof(u32) * dt_vreg_total);
+		rc = of_property_read_u32_array(of_node,
+			prop_name, val_array,
+			dt_vreg_total);
+		if (rc) {
+			DEV_ERR("%s: error read '%s' op mode. rc=%d\n",
+				__func__, hdmi_tx_pm_name(module_type), rc);
+			goto error;
+		}
+		mp->vreg_config[j].optimum_voltage = val_array[i];
+
+		DEV_DBG("%s: %s type=%d, min=%d, max=%d, op=%d\n",
+			__func__, mp->vreg_config[j].vreg_name,
+			mp->vreg_config[j].type,
+			mp->vreg_config[j].min_voltage,
+			mp->vreg_config[j].max_voltage,
+			mp->vreg_config[j].optimum_voltage);
+
+		ndx_mask >>= 1;
+		j++;
+	}
+
+	devm_kfree(dev, val_array);
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+
+	if (val_array)
+		devm_kfree(dev, val_array);
+	return rc;
+} /* hdmi_tx_get_dt_vreg_data */
+
+static void hdmi_tx_put_dt_gpio_data(struct device *dev,
+	struct dss_module_power *module_power)
+{
+	if (!module_power) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	if (module_power->gpio_config) {
+		devm_kfree(dev, module_power->gpio_config);
+		module_power->gpio_config = NULL;
+	}
+	module_power->num_gpio = 0;
+} /* hdmi_tx_put_dt_gpio_data */
+
+static int hdmi_tx_get_dt_gpio_data(struct device *dev,
+	struct dss_module_power *mp, u32 module_type)
+{
+	int i, j;
+	int mp_gpio_cnt = 0, gpio_list_size = 0;
+	struct dss_gpio *gpio_list = NULL;
+	struct device_node *of_node = NULL;
+
+	DEV_DBG("%s: module: '%s'\n", __func__, hdmi_tx_pm_name(module_type));
+
+	if (!dev || !mp) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+
+	switch (module_type) {
+	case HDMI_TX_HPD_PM:
+		gpio_list_size = ARRAY_SIZE(hpd_gpio_config);
+		gpio_list = hpd_gpio_config;
+		break;
+	case HDMI_TX_CORE_PM:
+		gpio_list_size = ARRAY_SIZE(core_gpio_config);
+		gpio_list = core_gpio_config;
+		break;
+	case HDMI_TX_CEC_PM:
+		gpio_list_size = ARRAY_SIZE(cec_gpio_config);
+		gpio_list = cec_gpio_config;
+		break;
+	default:
+		DEV_ERR("%s: invalid module type=%d\n", __func__,
+			module_type);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < gpio_list_size; i++)
+		if (of_find_property(of_node, gpio_list[i].gpio_name, NULL))
+			mp_gpio_cnt++;
+
+	if (!mp_gpio_cnt) {
+		DEV_DBG("%s: no gpio\n", __func__);
+		return 0;
+	}
+
+	DEV_DBG("%s: mp_gpio_cnt = %d\n", __func__, mp_gpio_cnt);
+	mp->num_gpio = mp_gpio_cnt;
+
+	mp->gpio_config = devm_kzalloc(dev, sizeof(struct dss_gpio) *
+		mp_gpio_cnt, GFP_KERNEL);
+	if (!mp->gpio_config) {
+		DEV_ERR("%s: can't alloc '%s' gpio mem\n", __func__,
+			hdmi_tx_pm_name(module_type));
+
+		mp->num_gpio = 0;
+		return -ENOMEM;
+	}
+
+	for (i = 0, j = 0; i < gpio_list_size; i++) {
+		int gpio = of_get_named_gpio(of_node,
+			gpio_list[i].gpio_name, 0);
+		if (gpio < 0) {
+			DEV_DBG("%s: no gpio named %s\n", __func__,
+				gpio_list[i].gpio_name);
+			continue;
+		}
+		memcpy(&mp->gpio_config[j], &gpio_list[i],
+			sizeof(struct dss_gpio));
+
+		mp->gpio_config[j].gpio = (unsigned)gpio;
+
+		DEV_DBG("%s: gpio num=%d, name=%s, value=%d\n",
+			__func__, mp->gpio_config[j].gpio,
+			mp->gpio_config[j].gpio_name,
+			mp->gpio_config[j].value);
+		j++;
+	}
+
+	return 0;
+} /* hdmi_tx_get_dt_gpio_data */
+
+static void hdmi_tx_put_dt_data(struct device *dev,
+	struct hdmi_tx_platform_data *pdata)
+{
+	int i;
+	if (!dev || !pdata) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	for (i = HDMI_TX_MAX_PM - 1; i >= 0; i--)
+		hdmi_tx_put_dt_clk_data(dev, &pdata->power_data[i]);
+
+	for (i = HDMI_TX_MAX_PM - 1; i >= 0; i--)
+		hdmi_tx_put_dt_vreg_data(dev, &pdata->power_data[i]);
+
+	for (i = HDMI_TX_MAX_PM - 1; i >= 0; i--)
+		hdmi_tx_put_dt_gpio_data(dev, &pdata->power_data[i]);
+} /* hdmi_tx_put_dt_data */
+
+static int hdmi_tx_get_dt_data(struct platform_device *pdev,
+	struct hdmi_tx_platform_data *pdata)
+{
+	int i, rc = 0;
+	struct device_node *of_node = NULL;
+
+	if (!pdev || !pdata) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_u32(of_node, "cell-index", &pdev->id);
+	if (rc) {
+		DEV_ERR("%s: dev id from dt not found.rc=%d\n",
+			__func__, rc);
+		goto error;
+	}
+	DEV_DBG("%s: id=%d\n", __func__, pdev->id);
+
+	/* GPIO */
+	for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+		rc = hdmi_tx_get_dt_gpio_data(&pdev->dev,
+			&pdata->power_data[i], i);
+		if (rc) {
+			DEV_ERR("%s: '%s' get_dt_gpio_data failed.rc=%d\n",
+				__func__, hdmi_tx_pm_name(i), rc);
+			goto error;
+		}
+	}
+
+	/* VREG */
+	for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+		rc = hdmi_tx_get_dt_vreg_data(&pdev->dev,
+			&pdata->power_data[i], i);
+		if (rc) {
+			DEV_ERR("%s: '%s' get_dt_vreg_data failed. rc=%d\n",
+				__func__, hdmi_tx_pm_name(i), rc);
+			goto error;
+		}
+	}
+
+	/* CLK */
+	for (i = 0; i < HDMI_TX_MAX_PM; i++) {
+		rc = hdmi_tx_get_dt_clk_data(&pdev->dev,
+			&pdata->power_data[i], i);
+		if (rc) {
+			DEV_ERR("%s: '%s' get_dt_clk_data failed. rc=%d\n",
+				__func__, hdmi_tx_pm_name(i), rc);
+			goto error;
+		}
+	}
+
+	if (of_find_property(pdev->dev.of_node, "qcom,primary_panel", NULL)) {
+		u32 tmp = 0;
+		of_property_read_u32(pdev->dev.of_node, "qcom,primary_panel",
+			&tmp);
+		pdata->primary = tmp ? true : false;
+	}
+
+	return rc;
+
+error:
+	hdmi_tx_put_dt_data(&pdev->dev, pdata);
+	return rc;
+} /* hdmi_tx_get_dt_data */
+
+static int __devinit hdmi_tx_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct device_node *of_node = pdev->dev.of_node;
+	struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+	if (!of_node) {
+		DEV_ERR("%s: FAILED: of_node not found\n", __func__);
+		rc = -ENODEV;
+		return rc;
+	}
+
+	hdmi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*hdmi_ctrl), GFP_KERNEL);
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: FAILED: cannot alloc hdmi tx ctrl\n", __func__);
+		rc = -ENOMEM;
+		goto failed_no_mem;
+	}
+
+	platform_set_drvdata(pdev, hdmi_ctrl);
+	hdmi_ctrl->pdev = pdev;
+
+	rc = hdmi_tx_get_dt_data(pdev, &hdmi_ctrl->pdata);
+	if (rc) {
+		DEV_ERR("%s: FAILED: parsing device tree data. rc=%d\n",
+			__func__, rc);
+		goto failed_dt_data;
+	}
+
+	rc = hdmi_tx_init_resource(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: FAILED: resource init. rc=%d\n",
+			__func__, rc);
+		goto failed_res_init;
+	}
+
+	rc = hdmi_tx_dev_init(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: FAILED: hdmi_tx_dev_init. rc=%d\n", __func__, rc);
+		goto failed_dev_init;
+	}
+
+	rc = hdmi_tx_register_panel(hdmi_ctrl);
+	if (rc) {
+		DEV_ERR("%s: FAILED: register_panel. rc=%d\n", __func__, rc);
+		goto failed_reg_panel;
+	}
+
+	rc = of_platform_populate(of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		DEV_ERR("%s: Failed to add child devices. rc=%d\n",
+			__func__, rc);
+		goto failed_reg_panel;
+	}
+	DEV_DBG("%s: Added child devices.\n", __func__);
+
+	if (mdss_debug_register_base("hdmi",
+			hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO].base,
+			hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO].len))
+		DEV_WARN("%s: hdmi_tx debugfs register failed\n", __func__);
+
+	return rc;
+
+failed_reg_panel:
+	hdmi_tx_dev_deinit(hdmi_ctrl);
+failed_dev_init:
+	hdmi_tx_deinit_resource(hdmi_ctrl);
+failed_res_init:
+	hdmi_tx_put_dt_data(&pdev->dev, &hdmi_ctrl->pdata);
+failed_dt_data:
+	devm_kfree(&pdev->dev, hdmi_ctrl);
+failed_no_mem:
+	return rc;
+} /* hdmi_tx_probe */
+
+static int __devexit hdmi_tx_remove(struct platform_device *pdev)
+{
+	struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
+	if (!hdmi_ctrl) {
+		DEV_ERR("%s: no driver data\n", __func__);
+		return -ENODEV;
+	}
+
+	hdmi_tx_sysfs_remove(hdmi_ctrl);
+	hdmi_tx_dev_deinit(hdmi_ctrl);
+	hdmi_tx_deinit_resource(hdmi_ctrl);
+	hdmi_tx_put_dt_data(&pdev->dev, &hdmi_ctrl->pdata);
+	devm_kfree(&hdmi_ctrl->pdev->dev, hdmi_ctrl);
+
+	return 0;
+} /* hdmi_tx_remove */
+
+static const struct of_device_id hdmi_tx_dt_match[] = {
+	{.compatible = COMPATIBLE_NAME,},
+	{},
+};
+MODULE_DEVICE_TABLE(of, hdmi_tx_dt_match);
+
+static struct platform_driver this_driver = {
+	.probe = hdmi_tx_probe,
+	.remove = hdmi_tx_remove,
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = hdmi_tx_dt_match,
+	},
+};
+
+static int __init hdmi_tx_drv_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&this_driver);
+	if (rc)
+		DEV_ERR("%s: FAILED: rc=%d\n", __func__, rc);
+
+	return rc;
+} /* hdmi_tx_drv_init */
+
+static void __exit hdmi_tx_drv_exit(void)
+{
+	platform_driver_unregister(&this_driver);
+} /* hdmi_tx_drv_exit */
+
+static int set_hdcp_feature_on(const char *val, const struct kernel_param *kp)
+{
+	int rc = 0;
+
+	rc = param_set_bool(val, kp);
+	if (!rc)
+		pr_debug("%s: HDCP feature = %d\n", __func__, hdcp_feature_on);
+
+	return rc;
+}
+
+static const struct kernel_param_ops hdcp_feature_on_param_ops = {
+	.set = set_hdcp_feature_on,
+	.get = param_get_bool,
+};
+
+module_param_cb(hdcp, &hdcp_feature_on_param_ops, &hdcp_feature_on,
+	S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(hdcp, "Enable or Disable HDCP");
+
+module_init(hdmi_tx_drv_init);
+module_exit(hdmi_tx_drv_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.3");
+MODULE_DESCRIPTION("HDMI MSM TX driver");
diff --git a/drivers/video/msm/mdss/mdss_hdmi_tx.h b/drivers/video/msm/mdss/mdss_hdmi_tx.h
new file mode 100644
index 0000000..8d9a477
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_tx.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_TX_H__
+#define __MDSS_HDMI_TX_H__
+
+#include <linux/switch.h>
+#include "mdss_hdmi_util.h"
+
+enum hdmi_tx_io_type {
+	HDMI_TX_CORE_IO,
+	HDMI_TX_PHY_IO,
+	HDMI_TX_QFPROM_IO,
+	HDMI_TX_MAX_IO
+};
+
+enum hdmi_tx_power_module_type {
+	HDMI_TX_HPD_PM,
+	HDMI_TX_CORE_PM,
+	HDMI_TX_CEC_PM,
+	HDMI_TX_MAX_PM
+};
+
+/* Data filled from device tree */
+struct hdmi_tx_platform_data {
+	bool primary;
+	struct dss_io_data io[HDMI_TX_MAX_IO];
+	struct dss_module_power power_data[HDMI_TX_MAX_PM];
+};
+
+struct hdmi_tx_ctrl {
+	struct platform_device *pdev;
+	struct hdmi_tx_platform_data pdata;
+	struct mdss_panel_data panel_data;
+
+	int audio_sample_rate;
+
+	struct mutex mutex;
+	struct kobject *kobj;
+	struct switch_dev sdev;
+	struct switch_dev audio_sdev;
+	struct workqueue_struct *workq;
+
+	uint32_t video_resolution;
+
+	u32 panel_power_on;
+	u32 panel_suspend;
+
+	u32 hpd_state;
+	u32 hpd_off_pending;
+	u32 hpd_feature_on;
+	u32 hpd_initialized;
+	u8  timing_gen_on;
+	u32 mhl_max_pclk;
+	struct completion hpd_done;
+	struct work_struct hpd_int_work;
+
+	struct work_struct power_off_work;
+
+	bool hdcp_feature_on;
+	u32 present_hdcp;
+
+	u8 spd_vendor_name[8];
+	u8 spd_product_description[16];
+
+	struct hdmi_tx_ddc_ctrl ddc_ctrl;
+
+	void *feature_data[HDMI_TX_FEAT_MAX];
+};
+
+#endif /* __MDSS_HDMI_TX_H__ */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_util.c b/drivers/video/msm/mdss/mdss_hdmi_util.c
new file mode 100644
index 0000000..0c8b0f8
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_util.c
@@ -0,0 +1,775 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <mach/board.h>
+#include "mdss_hdmi_util.h"
+
+static struct msm_hdmi_mode_timing_info
+	hdmi_supported_video_mode_lut[HDMI_VFRMT_MAX];
+
+void hdmi_del_supported_mode(u32 mode)
+{
+	struct msm_hdmi_mode_timing_info *ret = NULL;
+	DEV_DBG("%s: removing %s\n", __func__,
+		 msm_hdmi_mode_2string(mode));
+	if (mode >= HDMI_VFRMT_MAX)
+		return;
+	ret = &hdmi_supported_video_mode_lut[mode];
+	if (ret->supported)
+		ret->supported = false;
+}
+
+const struct msm_hdmi_mode_timing_info *hdmi_get_supported_mode(u32 mode)
+{
+	const struct msm_hdmi_mode_timing_info *ret = NULL;
+
+	if (mode >= HDMI_VFRMT_MAX)
+		return NULL;
+
+	ret = &hdmi_supported_video_mode_lut[mode];
+
+	if (ret == NULL || !ret->supported)
+		return NULL;
+
+	return ret;
+} /* hdmi_get_supported_mode */
+
+int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in)
+{
+	int i, vic = -1;
+
+	if (!timing_in) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		goto exit;
+	}
+
+	/* active_low_h, active_low_v and interlaced are not checked against */
+	for (i = 0; i < HDMI_VFRMT_MAX; i++) {
+		struct msm_hdmi_mode_timing_info *supported_timing =
+			&hdmi_supported_video_mode_lut[i];
+
+		if (!supported_timing->supported)
+			continue;
+		if (timing_in->active_h != supported_timing->active_h)
+			continue;
+		if (timing_in->front_porch_h != supported_timing->front_porch_h)
+			continue;
+		if (timing_in->pulse_width_h != supported_timing->pulse_width_h)
+			continue;
+		if (timing_in->back_porch_h != supported_timing->back_porch_h)
+			continue;
+		if (timing_in->active_v != supported_timing->active_v)
+			continue;
+		if (timing_in->front_porch_v != supported_timing->front_porch_v)
+			continue;
+		if (timing_in->pulse_width_v != supported_timing->pulse_width_v)
+			continue;
+		if (timing_in->back_porch_v != supported_timing->back_porch_v)
+			continue;
+		if (timing_in->pixel_freq != supported_timing->pixel_freq)
+			continue;
+		if (timing_in->refresh_rate != supported_timing->refresh_rate)
+			continue;
+
+		vic = (int)supported_timing->video_format;
+		break;
+	}
+
+	if (vic < 0)
+		DEV_ERR("%s: requested timing is not supported\n", __func__);
+
+exit:
+	DEV_DBG("%s: vic = %d timing = %s\n", __func__, vic,
+		msm_hdmi_mode_2string((u32)vic));
+
+	return vic;
+} /* hdmi_get_video_id_code */
+
+/* Table indicating the video format supported by the HDMI TX Core */
+/* Valid pclk rates (MHz): 25.2, 27, 27.03, 74.25, 148.5, 268.5, 297 */
+void hdmi_setup_video_mode_lut(void)
+{
+	MSM_HDMI_MODES_INIT_TIMINGS(hdmi_supported_video_mode_lut);
+
+	/* Add all supported CEA modes to the lut */
+	MSM_HDMI_MODES_SET_SUPP_TIMINGS(
+		hdmi_supported_video_mode_lut, MSM_HDMI_MODES_CEA);
+
+	/* Add all supported extended hdmi modes to the lut */
+	MSM_HDMI_MODES_SET_SUPP_TIMINGS(
+		hdmi_supported_video_mode_lut, MSM_HDMI_MODES_XTND);
+
+	/* Add any other specific DVI timings (DVI modes, etc.) */
+	MSM_HDMI_MODES_SET_TIMING(hdmi_supported_video_mode_lut,
+		HDMI_VFRMT_2560x1600p60_16_9);
+} /* hdmi_setup_video_mode_lut */
+
+const char *hdmi_get_single_video_3d_fmt_2string(u32 format)
+{
+	switch (format) {
+	case TOP_AND_BOTTOM:	return "TAB";
+	case FRAME_PACKING:	return "FP";
+	case SIDE_BY_SIDE_HALF: return "SSH";
+	}
+	return "";
+} /* hdmi_get_single_video_3d_fmt_2string */
+
+ssize_t hdmi_get_video_3d_fmt_2string(u32 format, char *buf)
+{
+	ssize_t ret, len = 0;
+	ret = snprintf(buf, PAGE_SIZE, "%s",
+		hdmi_get_single_video_3d_fmt_2string(
+			format & FRAME_PACKING));
+	len += ret;
+
+	if (len && (format & TOP_AND_BOTTOM))
+		ret = snprintf(buf + len, PAGE_SIZE - len, ":%s",
+			hdmi_get_single_video_3d_fmt_2string(
+				format & TOP_AND_BOTTOM));
+	else
+		ret = snprintf(buf + len, PAGE_SIZE - len, "%s",
+			hdmi_get_single_video_3d_fmt_2string(
+				format & TOP_AND_BOTTOM));
+	len += ret;
+
+	if (len && (format & SIDE_BY_SIDE_HALF))
+		ret = snprintf(buf + len, PAGE_SIZE - len, ":%s",
+			hdmi_get_single_video_3d_fmt_2string(
+				format & SIDE_BY_SIDE_HALF));
+	else
+		ret = snprintf(buf + len, PAGE_SIZE - len, "%s",
+			hdmi_get_single_video_3d_fmt_2string(
+				format & SIDE_BY_SIDE_HALF));
+	len += ret;
+
+	return len;
+} /* hdmi_get_video_3d_fmt_2string */
+
+static void hdmi_ddc_print_data(struct hdmi_tx_ddc_data *ddc_data,
+	const char *caller)
+{
+	if (!ddc_data) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	DEV_DBG("%s: buf=%p, d_len=0x%x, d_addr=0x%x, no_align=%d\n",
+		caller, ddc_data->data_buf, ddc_data->data_len,
+		ddc_data->dev_addr, ddc_data->no_align);
+	DEV_DBG("%s: offset=0x%x, req_len=0x%x, retry=%d, what=%s\n",
+		caller, ddc_data->offset, ddc_data->request_len,
+		ddc_data->retry, ddc_data->what);
+} /* hdmi_ddc_print_data */
+
+static int hdmi_ddc_clear_irq(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+	char *what)
+{
+	u32 reg_val, time_out_count;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	/* clear pending and enable interrupt */
+	time_out_count = 0xFFFF;
+	do {
+		--time_out_count;
+		/* Clear and Enable DDC interrupt */
+		DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL,
+			BIT(2) | BIT(1));
+		reg_val = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL);
+	} while ((reg_val & BIT(0)) && time_out_count);
+
+	if (!time_out_count) {
+		DEV_ERR("%s[%s]: timed out\n", __func__, what);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+} /* hdmi_ddc_clear_irq */
+
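+/*
+ * Standard DDC read: queue the device address, register offset and read
+ * address in the hardware buffer, run an address-write transaction
+ * followed by a repeated-START read, then copy the data back out.
+ * Retries on timeout and on NACK.
+ */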
+static int hdmi_ddc_read_retry(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+	struct hdmi_tx_ddc_data *ddc_data)
+{
+	u32 reg_val, ndx, time_out_count;
+	int status = 0;
+	int log_retry_fail;
+
+	if (!ddc_ctrl || !ddc_ctrl->io || !ddc_data) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		DEV_ERR("%s[%s]: invalid buf\n", __func__, ddc_data->what);
+		goto error;
+	}
+
+	hdmi_ddc_print_data(ddc_data, __func__);
+
+	log_retry_fail = ddc_data->retry != 1;
+again:
+	status = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+	if (status)
+		goto error;
+
+	/* Clear the LSB so dev_addr is the slave write address */
+	ddc_data->dev_addr &= 0xFE;
+
+	/*
+	 * 1. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #1
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = linkAddress (primary link address and writing)
+	 *    INDEX = 0x0 (initial offset into buffer)
+	 *    INDEX_WRITE = 0x1 (setting initial offset)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		BIT(31) | (ddc_data->dev_addr << 8));
+
+	/*
+	 * 2. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #2
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = offsetAddress
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA, ddc_data->offset << 8);
+
+	/*
+	 * 3. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #3
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = linkAddress + 1 (primary link address 0x74 and reading)
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		(ddc_data->dev_addr | BIT(0)) << 8);
+
+	/* Data setup is complete, now setup the transaction characteristics */
+
+	/*
+	 * 4. Write to HDMI_I2C_TRANSACTION0 with the following fields set in
+	 *    order to handle characteristics of portion #1 and portion #2
+	 *    RW0 = 0x0 (write)
+	 *    START0 = 0x1 (insert START bit)
+	 *    STOP0 = 0x0 (do NOT insert STOP bit)
+	 *    CNT0 = 0x1 (single byte transaction excluding address)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_TRANS0, BIT(12) | BIT(16));
+
+	/*
+	 * 5. Write to HDMI_I2C_TRANSACTION1 with the following fields set in
+	 *    order to handle characteristics of portion #3
+	 *    RW1 = 0x1 (read)
+	 *    START1 = 0x1 (insert START bit)
+	 *    STOP1 = 0x1 (insert STOP bit)
+	 *    CNT1 = data_len   (it's 128 (0x80) for a blk read)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_TRANS1,
+		BIT(0) | BIT(12) | BIT(13) | (ddc_data->request_len << 16));
+
+	/* Trigger the I2C transfer */
+
+	/*
+	 * 6. Write to HDMI_I2C_CONTROL to kick off the hardware.
+	 *    Note that NOTHING has been transmitted on the DDC lines up to this
+	 *    point.
+	 *    TRANSACTION_CNT = 0x1 (execute transaction0 followed by
+	 *    transaction1)
+	 *    GO = 0x1 (kicks off hardware)
+	 */
+	INIT_COMPLETION(ddc_ctrl->ddc_sw_done);
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, BIT(0) | BIT(20));
+
+	time_out_count = wait_for_completion_interruptible_timeout(
+		&ddc_ctrl->ddc_sw_done, HZ/2);
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL, BIT(1));
+	if (!time_out_count) {
+		if (ddc_data->retry-- > 0) {
+			DEV_INFO("%s: timed out, retry=%d\n", __func__,
+				ddc_data->retry);
+			goto again;
+		}
+		status = -ETIMEDOUT;
+		DEV_ERR("%s: timed out (7), Int Ctrl=%08x\n", __func__,
+			DSS_REG_R(ddc_ctrl->io, HDMI_DDC_INT_CTRL));
+		DEV_ERR("%s: DDC SW Status=%08x, HW Status=%08x\n",
+			__func__,
+			DSS_REG_R(ddc_ctrl->io, HDMI_DDC_SW_STATUS),
+			DSS_REG_R(ddc_ctrl->io, HDMI_DDC_HW_STATUS));
+		goto error;
+	}
+
+	/* Read DDC status */
+	reg_val = DSS_REG_R(ddc_ctrl->io, HDMI_DDC_SW_STATUS);
+	reg_val &= BIT(12) | BIT(13) | BIT(14) | BIT(15);
+
+	/* Check if any NACK occurred */
+	if (reg_val) {
+		/* SW_STATUS_RESET */
+		DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, BIT(3));
+
+		if (ddc_data->retry == 1)
+			/* SOFT_RESET */
+			DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, BIT(1));
+
+		if (ddc_data->retry-- > 0) {
+			DEV_DBG("%s(%s): failed NACK=0x%08x, retry=%d\n",
+				__func__, ddc_data->what, reg_val,
+				ddc_data->retry);
+			DEV_DBG("%s: daddr=0x%02x,off=0x%02x,len=%d\n",
+				__func__, ddc_data->dev_addr,
+				ddc_data->offset, ddc_data->data_len);
+			goto again;
+		}
+		status = -EIO;
+		if (log_retry_fail) {
+			DEV_ERR("%s(%s): failed NACK=0x%08x\n",
+				__func__, ddc_data->what, reg_val);
+			DEV_ERR("%s: daddr=0x%02x,off=0x%02x,len=%d\n",
+				__func__, ddc_data->dev_addr,
+				ddc_data->offset, ddc_data->data_len);
+		}
+		goto error;
+	}
+
+	/*
+	 * 8. ALL data is now available and waiting in the DDC buffer.
+	 *    Read HDMI_I2C_DATA with the following fields set
+	 *    RW = 0x1 (read)
+	 *    DATA = BCAPS (this is field where data is pulled from)
+	 *    INDEX = 0x3 (where the data has been placed in buffer by hardware)
+	 *    INDEX_WRITE = 0x1 (explicitly define offset)
+	 */
+	/* Set the buffer index before reading back the data */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		BIT(0) | (3 << 16) | BIT(31));
+
+	/* Discard first byte */
+	DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+	for (ndx = 0; ndx < ddc_data->data_len; ++ndx) {
+		reg_val = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+		ddc_data->data_buf[ndx] = (u8)((reg_val & 0x0000FF00) >> 8);
+	}
+
+	DEV_DBG("%s[%s] success\n", __func__, ddc_data->what);
+
+error:
+	return status;
+} /* hdmi_ddc_read_retry */
+
+void hdmi_ddc_config(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	/* Configure Pre-Scale multiplier & Threshold */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_SPEED, (10 << 16) | (2 << 0));
+
+	/*
+	 * Setting 31:24 bits : Time units to wait before timeout
+	 * when clock is being stalled by external sink device
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_SETUP, 0xFF000000);
+
+	/* Enable the reference timer with a 19 microsecond tick */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_REF, (1 << 16) | (19 << 0));
+} /* hdmi_ddc_config */
+
+int hdmi_ddc_isr(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+{
+	u32 ddc_int_ctrl;
+
+	if (!ddc_ctrl || !ddc_ctrl->io) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	ddc_int_ctrl = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL);
+	if ((ddc_int_ctrl & BIT(2)) && (ddc_int_ctrl & BIT(0))) {
+		/* SW_DONE interrupt occurred, clear it */
+		DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL,
+			ddc_int_ctrl | BIT(1));
+		complete(&ddc_ctrl->ddc_sw_done);
+	}
+
+	DEV_DBG("%s: ddc_int_ctrl=%04x\n", __func__, ddc_int_ctrl);
+
+	return 0;
+} /* hdmi_ddc_isr */
+
+int hdmi_ddc_read(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+	struct hdmi_tx_ddc_data *ddc_data)
+{
+	int rc = 0;
+
+	if (!ddc_ctrl || !ddc_data) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = hdmi_ddc_read_retry(ddc_ctrl, ddc_data);
+	if (!rc)
+		return rc;
+
+	if (ddc_data->no_align) {
+		rc = hdmi_ddc_read_retry(ddc_ctrl, ddc_data);
+	} else {
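+		/* retry with request_len rounded up to a 32 byte boundary */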
+		ddc_data->request_len = 32 * ((ddc_data->data_len + 31) / 32);
+		rc = hdmi_ddc_read_retry(ddc_ctrl, ddc_data);
+	}
+
+	return rc;
+} /* hdmi_ddc_read */
+
+int hdmi_ddc_read_seg(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+	struct hdmi_tx_ddc_data *ddc_data)
+{
+	int status = 0;
+	u32 reg_val, ndx, time_out_count;
+	int log_retry_fail;
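+	/* E-DDC segment pointer: slave 0x60, segment 1 = EDID bytes 256-511 */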
+	int seg_addr = 0x60, seg_num = 0x01;
+
+	if (!ddc_ctrl || !ddc_ctrl->io || !ddc_data) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		DEV_ERR("%s[%s]: invalid buf\n", __func__, ddc_data->what);
+		goto error;
+	}
+
+	log_retry_fail = ddc_data->retry != 1;
+
+again:
+	status = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+	if (status)
+		goto error;
+
+	/* Clear the LSB so dev_addr is the slave write address */
+	ddc_data->dev_addr &= 0xFE;
+
+	/*
+	 * 1. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #1
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = segment pointer slave address (0x60, writing)
+	 *    INDEX = 0x0 (initial offset into buffer)
+	 *    INDEX_WRITE = 0x1 (setting initial offset)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA, BIT(31) | (seg_addr << 8));
+
+	/*
+	 * 2. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #2
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = segment number (selects the 256 byte EDID block)
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA, seg_num << 8);
+
+	/*
+	 * 3. Write to HDMI_I2C_DATA three more times to queue portions #3,
+	 *    #4 and #5
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = linkAddress (primary link address and writing), then
+	 *           offsetAddress, then linkAddress + 1 (primary link
+	 *           address and reading)
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA, ddc_data->dev_addr << 8);
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA, ddc_data->offset << 8);
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		(ddc_data->dev_addr | BIT(0)) << 8);
+
+	/* Data setup is complete, now setup the transaction characteristics */
+
+	/*
+	 * 4. Write to HDMI_I2C_TRANSACTION0 with the following fields set in
+	 *    order to handle characteristics of portion #1 and portion #2
+	 *    RW0 = 0x0 (write)
+	 *    START0 = 0x1 (insert START bit)
+	 *    STOP0 = 0x0 (do NOT insert STOP bit)
+	 *    CNT0 = 0x1 (single byte transaction excluding address)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_TRANS0, BIT(12) | BIT(16));
+
+	/*
+	 * 5. Write to HDMI_I2C_TRANSACTION1 with the following fields set in
+	 *    order to handle characteristics of portion #3 and portion #4
+	 *    RW1 = 0x0 (write)
+	 *    START1 = 0x1 (insert START bit)
+	 *    STOP1 = 0x0 (do NOT insert STOP bit)
+	 *    CNT1 = 0x1 (single byte transaction excluding address)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_TRANS1, BIT(12) | BIT(16));
+
+	/*
+	 * 6. Write to HDMI_I2C_TRANSACTION2 with the following fields set in
+	 *    order to handle characteristics of portion #5
+	 *    RW2 = 0x1 (read)
+	 *    START2 = 0x1 (insert START bit)
+	 *    STOP2 = 0x1 (insert STOP bit)
+	 *    CNT2 = data_len   (it's 128 (0x80) for a blk read)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_TRANS2,
+		BIT(0) | BIT(12) | BIT(13) | (ddc_data->request_len << 16));
+
+	/* Trigger the I2C transfer */
+
+	/*
+	 * 7. Write to HDMI_I2C_CONTROL to kick off the hardware.
+	 *    Note that NOTHING has been transmitted on the DDC lines up to this
+	 *    point.
+	 *    TRANSACTION_CNT = 0x2 (execute transaction0, transaction1 and
+	 *    transaction2)
+	 *    GO = 0x1 (kicks off hardware)
+	 */
+	INIT_COMPLETION(ddc_ctrl->ddc_sw_done);
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, BIT(0) | BIT(21));
+
+	time_out_count = wait_for_completion_interruptible_timeout(
+		&ddc_ctrl->ddc_sw_done, HZ/2);
+
+	reg_val = DSS_REG_R(ddc_ctrl->io, HDMI_DDC_INT_CTRL);
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL, reg_val & (~BIT(2)));
+	if (!time_out_count) {
+		if (ddc_data->retry-- > 0) {
+			DEV_INFO("%s: timed out, retry=%d\n", __func__,
+				ddc_data->retry);
+			goto again;
+		}
+		status = -ETIMEDOUT;
+		DEV_ERR("%s: timed out, Int Ctrl=%08x\n", __func__,
+			DSS_REG_R(ddc_ctrl->io, HDMI_DDC_INT_CTRL));
+		DEV_ERR("%s: DDC SW Status=%08x, HW Status=%08x\n",
+			__func__,
+			DSS_REG_R(ddc_ctrl->io, HDMI_DDC_SW_STATUS),
+			DSS_REG_R(ddc_ctrl->io, HDMI_DDC_HW_STATUS));
+		goto error;
+	}
+
+	/* Read DDC status */
+	reg_val = DSS_REG_R(ddc_ctrl->io, HDMI_DDC_SW_STATUS);
+	reg_val &= BIT(12) | BIT(13) | BIT(14) | BIT(15);
+
+	/* Check if any NACK occurred */
+	if (reg_val) {
+		/* SW_STATUS_RESET */
+		DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, BIT(3));
+		if (ddc_data->retry == 1)
+			/* SOFT_RESET */
+			DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, BIT(1));
+		if (ddc_data->retry-- > 0) {
+			DEV_DBG("%s(%s): failed NACK=0x%08x, retry=%d\n",
+				__func__, ddc_data->what, reg_val,
+				ddc_data->retry);
+			DEV_DBG("%s: daddr=0x%02x,off=0x%02x,len=%d\n",
+				__func__, ddc_data->dev_addr,
+				ddc_data->offset, ddc_data->data_len);
+			goto again;
+		}
+		status = -EIO;
+		if (log_retry_fail) {
+			DEV_ERR("%s(%s): failed NACK=0x%08x\n",
+				__func__, ddc_data->what, reg_val);
+			DEV_ERR("%s: daddr=0x%02x,off=0x%02x,len=%d\n",
+				__func__, ddc_data->dev_addr,
+				ddc_data->offset, ddc_data->data_len);
+		}
+		goto error;
+	}
+
+	/*
+	 * 8. ALL data is now available and waiting in the DDC buffer.
+	 *    Read HDMI_I2C_DATA with the following fields set
+	 *    RW = 0x1 (read)
+	 *    DATA = BCAPS (this is field where data is pulled from)
+	 *    INDEX = 0x5 (where the data has been placed in buffer by hardware)
+	 *    INDEX_WRITE = 0x1 (explicitly define offset)
+	 */
+	/* Set the buffer index before reading back the data */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		BIT(0) | (5 << 16) | BIT(31));
+
+	/* Discard first byte */
+	DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+
+	for (ndx = 0; ndx < ddc_data->data_len; ++ndx) {
+		reg_val = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_DATA);
+		ddc_data->data_buf[ndx] = (u8) ((reg_val & 0x0000FF00) >> 8);
+	}
+
+	DEV_DBG("%s[%s] success\n", __func__, ddc_data->what);
+
+error:
+	return status;
+} /* hdmi_ddc_read_seg */
+
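+/*
+ * DDC write: queue the device address, register offset and payload in
+ * the hardware buffer, then run an address-write transaction followed
+ * by the data transaction. Retries on timeout and on NACK.
+ */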
+int hdmi_ddc_write(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
+	struct hdmi_tx_ddc_data *ddc_data)
+{
+	u32 reg_val, ndx;
+	int status = 0, retry = 10;
+	u32 time_out_count;
+
+	if (!ddc_ctrl || !ddc_ctrl->io || !ddc_data) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		DEV_ERR("%s[%s]: invalid buf\n", __func__, ddc_data->what);
+		goto error;
+	}
+
+again:
+	status = hdmi_ddc_clear_irq(ddc_ctrl, ddc_data->what);
+	if (status)
+		goto error;
+
+	/* Clear the LSB so dev_addr is the slave write address */
+	ddc_data->dev_addr &= 0xFE;
+
+	/*
+	 * 1. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #1
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = linkAddress (primary link address and writing)
+	 *    INDEX = 0x0 (initial offset into buffer)
+	 *    INDEX_WRITE = 0x1 (setting initial offset)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+		BIT(31) | (ddc_data->dev_addr << 8));
+
+	/*
+	 * 2. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #2
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = offsetAddress
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA, ddc_data->offset << 8);
+
+	/*
+	 * 3. Write to HDMI_I2C_DATA with the following fields set in order to
+	 *    handle portion #3
+	 *    DATA_RW = 0x0 (write)
+	 *    DATA = data_buf[ndx]
+	 *    INDEX = 0x0
+	 *    INDEX_WRITE = 0x0 (auto-increment by hardware)
+	 */
+	for (ndx = 0; ndx < ddc_data->data_len; ++ndx)
+		DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_DATA,
+			((u32)ddc_data->data_buf[ndx]) << 8);
+
+	/* Data setup is complete, now setup the transaction characteristics */
+
+	/*
+	 * 4. Write to HDMI_I2C_TRANSACTION0 with the following fields set in
+	 *    order to handle characteristics of portion #1 and portion #2
+	 *    RW0 = 0x0 (write)
+	 *    START0 = 0x1 (insert START bit)
+	 *    STOP0 = 0x0 (do NOT insert STOP bit)
+	 *    CNT0 = 0x1 (single byte transaction excluding address)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_TRANS0, BIT(12) | BIT(16));
+
+	/*
+	 * 5. Write to HDMI_I2C_TRANSACTION1 with the following fields set in
+	 *    order to handle characteristics of portion #3
+	 *    RW1 = 0x0 (write)
+	 *    START1 = 0x0 (do NOT insert START bit)
+	 *    STOP1 = 0x1 (insert STOP bit)
+	 *    CNT1 = data_len - 1 (byte count for the second transaction,
+	 *    excluding the first byte which is usually the address)
+	 */
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_TRANS1,
+		BIT(13) | ((ddc_data->data_len-1) << 16));
+
+	/* Trigger the I2C transfer */
+	/*
+	 * 6. Write to HDMI_I2C_CONTROL to kick off the hardware.
+	 *    Note that NOTHING has been transmitted on the DDC lines up to this
+	 *    point.
+	 *    TRANSACTION_CNT = 0x1 (execute transaction0 followed by
+	 *    transaction1)
+	 *    GO = 0x1 (kicks off hardware)
+	 */
+	INIT_COMPLETION(ddc_ctrl->ddc_sw_done);
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, BIT(0) | BIT(20));
+
+	time_out_count = wait_for_completion_interruptible_timeout(
+		&ddc_ctrl->ddc_sw_done, HZ/2);
+
+	reg_val = DSS_REG_R(ddc_ctrl->io, HDMI_DDC_INT_CTRL);
+	DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_INT_CTRL, reg_val & (~BIT(2)));
+	if (!time_out_count) {
+		if (retry-- > 0) {
+			DEV_INFO("%s[%s]: timed out, retry=%d\n", __func__,
+				ddc_data->what, retry);
+			goto again;
+		}
+		status = -ETIMEDOUT;
+		DEV_ERR("%s[%s]: timed out, Int Ctrl=%08x\n",
+			__func__, ddc_data->what,
+			DSS_REG_R(ddc_ctrl->io, HDMI_DDC_INT_CTRL));
+		DEV_ERR("%s: DDC SW Status=%08x, HW Status=%08x\n",
+			__func__,
+			DSS_REG_R(ddc_ctrl->io, HDMI_DDC_SW_STATUS),
+			DSS_REG_R(ddc_ctrl->io, HDMI_DDC_HW_STATUS));
+		goto error;
+	}
+
+	/* Read DDC status */
+	reg_val = DSS_REG_R_ND(ddc_ctrl->io, HDMI_DDC_SW_STATUS);
+	reg_val &= BIT(12) | BIT(13) | BIT(14) | BIT(15);
+
+	/* Check if any NACK occurred */
+	if (reg_val) {
+		if (retry > 1)
+			/* SW_STATUS_RESET */
+			DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, BIT(3));
+		else
+			/* SOFT_RESET */
+			DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, BIT(1));
+
+		if (retry-- > 0) {
+			DEV_DBG("%s[%s]: failed NACK=%08x, retry=%d\n",
+				__func__, ddc_data->what, reg_val, retry);
+			msleep(100);
+			goto again;
+		}
+		status = -EIO;
+		DEV_ERR("%s[%s]: failed NACK: %08x\n", __func__,
+			ddc_data->what, reg_val);
+		goto error;
+	}
+
+	DEV_DBG("%s[%s] success\n", __func__, ddc_data->what);
+
+error:
+	return status;
+} /* hdmi_ddc_write */
diff --git a/drivers/video/msm/mdss/mdss_hdmi_util.h b/drivers/video/msm/mdss/mdss_hdmi_util.h
new file mode 100644
index 0000000..e99e549
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_hdmi_util.h
@@ -0,0 +1,271 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __HDMI_UTIL_H__
+#define __HDMI_UTIL_H__
+#include "mdss_io_util.h"
+#include "video/msm_hdmi_modes.h"
+
+/* HDMI_TX Registers */
+#define HDMI_CTRL                        (0x00000000)
+#define HDMI_TEST_PATTERN                (0x00000010)
+#define HDMI_RANDOM_PATTERN              (0x00000014)
+#define HDMI_PKT_BLK_CTRL                (0x00000018)
+#define HDMI_STATUS                      (0x0000001C)
+#define HDMI_AUDIO_PKT_CTRL              (0x00000020)
+#define HDMI_ACR_PKT_CTRL                (0x00000024)
+#define HDMI_VBI_PKT_CTRL                (0x00000028)
+#define HDMI_INFOFRAME_CTRL0             (0x0000002C)
+#define HDMI_INFOFRAME_CTRL1             (0x00000030)
+#define HDMI_GEN_PKT_CTRL                (0x00000034)
+#define HDMI_ACP                         (0x0000003C)
+#define HDMI_GC                          (0x00000040)
+#define HDMI_AUDIO_PKT_CTRL2             (0x00000044)
+#define HDMI_ISRC1_0                     (0x00000048)
+#define HDMI_ISRC1_1                     (0x0000004C)
+#define HDMI_ISRC1_2                     (0x00000050)
+#define HDMI_ISRC1_3                     (0x00000054)
+#define HDMI_ISRC1_4                     (0x00000058)
+#define HDMI_ISRC2_0                     (0x0000005C)
+#define HDMI_ISRC2_1                     (0x00000060)
+#define HDMI_ISRC2_2                     (0x00000064)
+#define HDMI_ISRC2_3                     (0x00000068)
+#define HDMI_AVI_INFO0                   (0x0000006C)
+#define HDMI_AVI_INFO1                   (0x00000070)
+#define HDMI_AVI_INFO2                   (0x00000074)
+#define HDMI_AVI_INFO3                   (0x00000078)
+#define HDMI_MPEG_INFO0                  (0x0000007C)
+#define HDMI_MPEG_INFO1                  (0x00000080)
+#define HDMI_GENERIC0_HDR                (0x00000084)
+#define HDMI_GENERIC0_0                  (0x00000088)
+#define HDMI_GENERIC0_1                  (0x0000008C)
+#define HDMI_GENERIC0_2                  (0x00000090)
+#define HDMI_GENERIC0_3                  (0x00000094)
+#define HDMI_GENERIC0_4                  (0x00000098)
+#define HDMI_GENERIC0_5                  (0x0000009C)
+#define HDMI_GENERIC0_6                  (0x000000A0)
+#define HDMI_GENERIC1_HDR                (0x000000A4)
+#define HDMI_GENERIC1_0                  (0x000000A8)
+#define HDMI_GENERIC1_1                  (0x000000AC)
+#define HDMI_GENERIC1_2                  (0x000000B0)
+#define HDMI_GENERIC1_3                  (0x000000B4)
+#define HDMI_GENERIC1_4                  (0x000000B8)
+#define HDMI_GENERIC1_5                  (0x000000BC)
+#define HDMI_GENERIC1_6                  (0x000000C0)
+#define HDMI_ACR_32_0                    (0x000000C4)
+#define HDMI_ACR_32_1                    (0x000000C8)
+#define HDMI_ACR_44_0                    (0x000000CC)
+#define HDMI_ACR_44_1                    (0x000000D0)
+#define HDMI_ACR_48_0                    (0x000000D4)
+#define HDMI_ACR_48_1                    (0x000000D8)
+#define HDMI_ACR_STATUS_0                (0x000000DC)
+#define HDMI_ACR_STATUS_1                (0x000000E0)
+#define HDMI_AUDIO_INFO0                 (0x000000E4)
+#define HDMI_AUDIO_INFO1                 (0x000000E8)
+#define HDMI_CS_60958_0                  (0x000000EC)
+#define HDMI_CS_60958_1                  (0x000000F0)
+#define HDMI_RAMP_CTRL0                  (0x000000F8)
+#define HDMI_RAMP_CTRL1                  (0x000000FC)
+#define HDMI_RAMP_CTRL2                  (0x00000100)
+#define HDMI_RAMP_CTRL3                  (0x00000104)
+#define HDMI_CS_60958_2                  (0x00000108)
+#define HDMI_HDCP_CTRL                   (0x00000110)
+#define HDMI_HDCP_DEBUG_CTRL             (0x00000114)
+#define HDMI_HDCP_INT_CTRL               (0x00000118)
+#define HDMI_HDCP_LINK0_STATUS           (0x0000011C)
+#define HDMI_HDCP_DDC_CTRL_0             (0x00000120)
+#define HDMI_HDCP_DDC_CTRL_1             (0x00000124)
+#define HDMI_HDCP_DDC_STATUS             (0x00000128)
+#define HDMI_HDCP_ENTROPY_CTRL0          (0x0000012C)
+#define HDMI_HDCP_RESET                  (0x00000130)
+#define HDMI_HDCP_RCVPORT_DATA0          (0x00000134)
+#define HDMI_HDCP_RCVPORT_DATA1          (0x00000138)
+#define HDMI_HDCP_RCVPORT_DATA2_0        (0x0000013C)
+#define HDMI_HDCP_RCVPORT_DATA2_1        (0x00000140)
+#define HDMI_HDCP_RCVPORT_DATA3          (0x00000144)
+#define HDMI_HDCP_RCVPORT_DATA4          (0x00000148)
+#define HDMI_HDCP_RCVPORT_DATA5          (0x0000014C)
+#define HDMI_HDCP_RCVPORT_DATA6          (0x00000150)
+#define HDMI_HDCP_RCVPORT_DATA7          (0x00000154)
+#define HDMI_HDCP_RCVPORT_DATA8          (0x00000158)
+#define HDMI_HDCP_RCVPORT_DATA9          (0x0000015C)
+#define HDMI_HDCP_RCVPORT_DATA10         (0x00000160)
+#define HDMI_HDCP_RCVPORT_DATA11         (0x00000164)
+#define HDMI_HDCP_RCVPORT_DATA12         (0x00000168)
+#define HDMI_VENSPEC_INFO0               (0x0000016C)
+#define HDMI_VENSPEC_INFO1               (0x00000170)
+#define HDMI_VENSPEC_INFO2               (0x00000174)
+#define HDMI_VENSPEC_INFO3               (0x00000178)
+#define HDMI_VENSPEC_INFO4               (0x0000017C)
+#define HDMI_VENSPEC_INFO5               (0x00000180)
+#define HDMI_VENSPEC_INFO6               (0x00000184)
+#define HDMI_HDCP_DEBUG                  (0x00000194)
+#define HDMI_TMDS_CTRL_CHAR              (0x0000019C)
+#define HDMI_TMDS_CTRL_SEL               (0x000001A4)
+#define HDMI_TMDS_SYNCCHAR01             (0x000001A8)
+#define HDMI_TMDS_SYNCCHAR23             (0x000001AC)
+#define HDMI_TMDS_DEBUG                  (0x000001B4)
+#define HDMI_TMDS_CTL_BITS               (0x000001B8)
+#define HDMI_TMDS_DCBAL_CTRL             (0x000001BC)
+#define HDMI_TMDS_DCBAL_CHAR             (0x000001C0)
+#define HDMI_TMDS_CTL01_GEN              (0x000001C8)
+#define HDMI_TMDS_CTL23_GEN              (0x000001CC)
+#define HDMI_AUDIO_CFG                   (0x000001D0)
+#define HDMI_DEBUG                       (0x00000204)
+#define HDMI_USEC_REFTIMER               (0x00000208)
+#define HDMI_DDC_CTRL                    (0x0000020C)
+#define HDMI_DDC_ARBITRATION             (0x00000210)
+#define HDMI_DDC_INT_CTRL                (0x00000214)
+#define HDMI_DDC_SW_STATUS               (0x00000218)
+#define HDMI_DDC_HW_STATUS               (0x0000021C)
+#define HDMI_DDC_SPEED                   (0x00000220)
+#define HDMI_DDC_SETUP                   (0x00000224)
+#define HDMI_DDC_TRANS0                  (0x00000228)
+#define HDMI_DDC_TRANS1                  (0x0000022C)
+#define HDMI_DDC_TRANS2                  (0x00000230)
+#define HDMI_DDC_TRANS3                  (0x00000234)
+#define HDMI_DDC_DATA                    (0x00000238)
+#define HDMI_HDCP_SHA_CTRL               (0x0000023C)
+#define HDMI_HDCP_SHA_STATUS             (0x00000240)
+#define HDMI_HDCP_SHA_DATA               (0x00000244)
+#define HDMI_HDCP_SHA_DBG_M0_0           (0x00000248)
+#define HDMI_HDCP_SHA_DBG_M0_1           (0x0000024C)
+#define HDMI_HPD_INT_STATUS              (0x00000250)
+#define HDMI_HPD_INT_CTRL                (0x00000254)
+#define HDMI_HPD_CTRL                    (0x00000258)
+#define HDMI_HDCP_ENTROPY_CTRL1          (0x0000025C)
+#define HDMI_HDCP_SW_UPPER_AN            (0x00000260)
+#define HDMI_HDCP_SW_LOWER_AN            (0x00000264)
+#define HDMI_CRC_CTRL                    (0x00000268)
+#define HDMI_VID_CRC                     (0x0000026C)
+#define HDMI_AUD_CRC                     (0x00000270)
+#define HDMI_VBI_CRC                     (0x00000274)
+#define HDMI_DDC_REF                     (0x0000027C)
+#define HDMI_HDCP_SW_UPPER_AKSV          (0x00000284)
+#define HDMI_HDCP_SW_LOWER_AKSV          (0x00000288)
+#define HDMI_CEC_CTRL                    (0x0000028C)
+#define HDMI_CEC_WR_DATA                 (0x00000290)
+#define HDMI_CEC_RETRANSMIT              (0x00000294)
+#define HDMI_CEC_STATUS                  (0x00000298)
+#define HDMI_CEC_INT                     (0x0000029C)
+#define HDMI_CEC_ADDR                    (0x000002A0)
+#define HDMI_CEC_TIME                    (0x000002A4)
+#define HDMI_CEC_REFTIMER                (0x000002A8)
+#define HDMI_CEC_RD_DATA                 (0x000002AC)
+#define HDMI_CEC_RD_FILTER               (0x000002B0)
+#define HDMI_ACTIVE_H                    (0x000002B4)
+#define HDMI_ACTIVE_V                    (0x000002B8)
+#define HDMI_ACTIVE_V_F2                 (0x000002BC)
+#define HDMI_TOTAL                       (0x000002C0)
+#define HDMI_V_TOTAL_F2                  (0x000002C4)
+#define HDMI_FRAME_CTRL                  (0x000002C8)
+#define HDMI_AUD_INT                     (0x000002CC)
+#define HDMI_DEBUG_BUS_CTRL              (0x000002D0)
+#define HDMI_PHY_CTRL                    (0x000002D4)
+#define HDMI_CEC_WR_RANGE                (0x000002DC)
+#define HDMI_CEC_RD_RANGE                (0x000002E0)
+#define HDMI_VERSION                     (0x000002E4)
+#define HDMI_BIST_ENABLE                 (0x000002F4)
+#define HDMI_TIMING_ENGINE_EN            (0x000002F8)
+#define HDMI_INTF_CONFIG                 (0x000002FC)
+#define HDMI_HSYNC_CTL                   (0x00000300)
+#define HDMI_VSYNC_PERIOD_F0             (0x00000304)
+#define HDMI_VSYNC_PERIOD_F1             (0x00000308)
+#define HDMI_VSYNC_PULSE_WIDTH_F0        (0x0000030C)
+#define HDMI_VSYNC_PULSE_WIDTH_F1        (0x00000310)
+#define HDMI_DISPLAY_V_START_F0          (0x00000314)
+#define HDMI_DISPLAY_V_START_F1          (0x00000318)
+#define HDMI_DISPLAY_V_END_F0            (0x0000031C)
+#define HDMI_DISPLAY_V_END_F1            (0x00000320)
+#define HDMI_ACTIVE_V_START_F0           (0x00000324)
+#define HDMI_ACTIVE_V_START_F1           (0x00000328)
+#define HDMI_ACTIVE_V_END_F0             (0x0000032C)
+#define HDMI_ACTIVE_V_END_F1             (0x00000330)
+#define HDMI_DISPLAY_HCTL                (0x00000334)
+#define HDMI_ACTIVE_HCTL                 (0x00000338)
+#define HDMI_HSYNC_SKEW                  (0x0000033C)
+#define HDMI_POLARITY_CTL                (0x00000340)
+#define HDMI_TPG_MAIN_CONTROL            (0x00000344)
+#define HDMI_TPG_VIDEO_CONFIG            (0x00000348)
+#define HDMI_TPG_COMPONENT_LIMITS        (0x0000034C)
+#define HDMI_TPG_RECTANGLE               (0x00000350)
+#define HDMI_TPG_INITIAL_VALUE           (0x00000354)
+#define HDMI_TPG_BLK_WHT_PATTERN_FRAMES  (0x00000358)
+#define HDMI_TPG_RGB_MAPPING             (0x0000035C)
+#define HDMI_CEC_COMPL_CTL               (0x00000360)
+#define HDMI_CEC_RD_START_RANGE          (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE          (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO          (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG         (0x00000370)
+
+/* HDMI PHY Registers */
+#define HDMI_PHY_ANA_CFG0                (0x00000000)
+#define HDMI_PHY_ANA_CFG1                (0x00000004)
+#define HDMI_PHY_PD_CTRL0                (0x00000010)
+#define HDMI_PHY_PD_CTRL1                (0x00000014)
+#define HDMI_PHY_BIST_CFG0               (0x00000034)
+#define HDMI_PHY_BIST_PATN0              (0x0000003C)
+#define HDMI_PHY_BIST_PATN1              (0x00000040)
+#define HDMI_PHY_BIST_PATN2              (0x00000044)
+#define HDMI_PHY_BIST_PATN3              (0x00000048)
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB  (0x000000F8)
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB  (0x000000FC)
+#define HDCP_KSV_LSB                     (0x000060D8)
+#define HDCP_KSV_MSB                     (0x000060DC)
+
+#define TOP_AND_BOTTOM		0x10
+#define FRAME_PACKING		0x20
+#define SIDE_BY_SIDE_HALF	0x40
+
+enum hdmi_tx_feature_type {
+	HDMI_TX_FEAT_EDID,
+	HDMI_TX_FEAT_HDCP,
+	HDMI_TX_FEAT_CEC,
+	HDMI_TX_FEAT_MAX,
+};
+
+struct hdmi_tx_ddc_ctrl {
+	struct dss_io_data *io;
+	struct completion ddc_sw_done;
+};
+
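+/* parameters for a single DDC (I2C over HDMI) transaction */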
+struct hdmi_tx_ddc_data {
+	char *what;
+	u8 *data_buf;
+	u32 data_len;
+	u32 dev_addr;
+	u32 offset;
+	u32 request_len;
+	u32 no_align;
+	int retry;
+};
+
+/* video timing related utility routines */
+void hdmi_setup_video_mode_lut(void);
+int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in);
+const struct msm_hdmi_mode_timing_info *hdmi_get_supported_mode(u32 mode);
+void hdmi_del_supported_mode(u32 mode);
+ssize_t hdmi_get_video_3d_fmt_2string(u32 format, char *buf);
+
+/* todo: Fix this. Right now this is defined in mdss_hdmi_tx.c */
+void *hdmi_get_featuredata_from_sysfs_dev(struct device *device, u32 type);
+
+/* DDC */
+void hdmi_ddc_config(struct hdmi_tx_ddc_ctrl *);
+int hdmi_ddc_isr(struct hdmi_tx_ddc_ctrl *);
+int hdmi_ddc_write(struct hdmi_tx_ddc_ctrl *, struct hdmi_tx_ddc_data *);
+int hdmi_ddc_read_seg(struct hdmi_tx_ddc_ctrl *, struct hdmi_tx_ddc_data *);
+int hdmi_ddc_read(struct hdmi_tx_ddc_ctrl *, struct hdmi_tx_ddc_data *);
+
+#endif /* __HDMI_UTIL_H__ */
diff --git a/drivers/video/msm/mdss/mdss_io_util.c b/drivers/video/msm/mdss/mdss_io_util.c
new file mode 100644
index 0000000..c38eaa4
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_io_util.c
@@ -0,0 +1,451 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include "mdss_io_util.h"
+
+#define MAX_I2C_CMDS  16
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+	u32 in_val;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	if (offset >= io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	writel_relaxed(value, io->base + offset);
+	if (debug) {
+		in_val = readl_relaxed(io->base + offset);
+		DEV_DBG("[%08x] => %08x [%08x]\n", (u32)(io->base + offset),
+			value, in_val);
+	}
+} /* dss_reg_w */
+
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
+{
+	u32 value;
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	if (offset >= io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	value = readl_relaxed(io->base + offset);
+	if (debug)
+		DEV_DBG("[%08x] <= %08x\n", (u32)(io->base + offset), value);
+
+	return value;
+} /* dss_reg_r */
+
+void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+	u32 debug)
+{
+	if (debug)
+		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+			(void *)base, length, false);
+} /* dss_reg_dump */
+
+static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
+	unsigned int type, const char *name)
+{
+	struct resource *res = NULL;
+
+	res = platform_get_resource_byname(pdev, type, name);
+	if (!res)
+		DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+	return res;
+} /* msm_dss_get_res_byname */
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name)
+{
+	struct resource *res = NULL;
+
+	if (!pdev || !io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -ENODEV;
+	}
+
+	io_data->len = resource_size(res);
+	io_data->base = ioremap(res->start, io_data->len);
+	if (!io_data->base) {
+		DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -EIO;
+	}
+
+	return 0;
+} /* msm_dss_ioremap_byname */
+
+void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+	if (!io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	if (io_data->base) {
+		iounmap(io_data->base);
+		io_data->base = NULL;
+	}
+	io_data->len = 0;
+} /* msm_dss_iounmap */
+
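+/*
+ * config = true: get each regulator and, for LDOs, apply the voltage and
+ * optimum-mode settings, unwinding in reverse order on failure.
+ * config = false: release the regulators in reverse order.
+ */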
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config)
+{
+	int i = 0, rc = 0;
+	struct dss_vreg *curr_vreg = NULL;
+
+	if (config) {
+		for (i = 0; i < num_vreg; i++) {
+			curr_vreg = &in_vreg[i];
+			curr_vreg->vreg = regulator_get(dev,
+				curr_vreg->vreg_name);
+			rc = PTR_RET(curr_vreg->vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+					 __builtin_return_address(0), __func__,
+					 curr_vreg->vreg_name, rc);
+				curr_vreg->vreg = NULL;
+				goto vreg_get_fail;
+			}
+			if (curr_vreg->type == DSS_REG_LDO) {
+				rc = regulator_set_voltage(
+					curr_vreg->vreg,
+					curr_vreg->min_voltage,
+					curr_vreg->max_voltage);
+				if (rc < 0) {
+					DEV_ERR("%pS->%s: %s set vltg fail\n",
+						__builtin_return_address(0),
+						__func__,
+						curr_vreg->vreg_name);
+					goto vreg_set_voltage_fail;
+				}
+				if (curr_vreg->optimum_voltage >= 0) {
+					rc = regulator_set_optimum_mode(
+						curr_vreg->vreg,
+						curr_vreg->optimum_voltage);
+					if (rc < 0) {
+						DEV_ERR(
+						"%pS->%s: %s set opt m fail\n",
+						__builtin_return_address(0),
+						__func__,
+						curr_vreg->vreg_name);
+						goto vreg_set_opt_mode_fail;
+					}
+				}
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			curr_vreg = &in_vreg[i];
+			if (curr_vreg->vreg) {
+				if (curr_vreg->type == DSS_REG_LDO) {
+					if (curr_vreg->optimum_voltage >= 0) {
+						regulator_set_optimum_mode(
+							curr_vreg->vreg, 0);
+					}
+					regulator_set_voltage(curr_vreg->vreg,
+						0, curr_vreg->max_voltage);
+				}
+				regulator_put(curr_vreg->vreg);
+				curr_vreg->vreg = NULL;
+			}
+		}
+	}
+	return 0;
+
+vreg_unconfig:
+	if (curr_vreg->type == DSS_REG_LDO)
+		regulator_set_optimum_mode(curr_vreg->vreg, 0);
+
+vreg_set_opt_mode_fail:
+	if (curr_vreg->type == DSS_REG_LDO)
+		regulator_set_voltage(curr_vreg->vreg, 0,
+			curr_vreg->max_voltage);
+
+vreg_set_voltage_fail:
+	regulator_put(curr_vreg->vreg);
+	curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+	for (i--; i >= 0; i--) {
+		curr_vreg = &in_vreg[i];
+		goto vreg_unconfig;
+	}
+	return rc;
+} /* msm_dss_config_vreg */
+
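+/* enable regulators in list order; disable and unwind in reverse order */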
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
+{
+	int i = 0, rc = 0;
+	if (enable) {
+		for (i = 0; i < num_vreg; i++) {
+			rc = PTR_RET(in_vreg[i].vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name, rc);
+				goto disable_vreg;
+			}
+			rc = regulator_enable(in_vreg[i].vreg);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto disable_vreg;
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--)
+			if (regulator_is_enabled(in_vreg[i].vreg))
+				regulator_disable(in_vreg[i].vreg);
+	}
+	return rc;
+
+disable_vreg:
+	for (i--; i >= 0; i--)
+		regulator_disable(in_vreg[i].vreg);
+	return rc;
+} /* msm_dss_enable_vreg */
+
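+/* request each gpio and drive its configured value; free in reverse order */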
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
+{
+	int i = 0, rc = 0;
+	if (enable) {
+		for (i = 0; i < num_gpio; i++) {
+			DEV_DBG("%pS->%s: %s enable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+
+			rc = gpio_request(in_gpio[i].gpio,
+				in_gpio[i].gpio_name);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_gpio[i].gpio_name);
+				goto disable_gpio;
+			}
+			gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
+		}
+	} else {
+		for (i = num_gpio-1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: %s disable\n",
+				__builtin_return_address(0), __func__,
+				in_gpio[i].gpio_name);
+
+			gpio_free(in_gpio[i].gpio);
+		}
+	}
+	return rc;
+
+disable_gpio:
+	for (i--; i >= 0; i--)
+		gpio_free(in_gpio[i].gpio);
+	return rc;
+} /* msm_dss_enable_gpio */
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+	int i;
+
+	for (i = num_clk - 1; i >= 0; i--) {
+		if (clk_arry[i].clk)
+			clk_put(clk_arry[i].clk);
+		clk_arry[i].clk = NULL;
+	}
+} /* msm_dss_put_clk */
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_RET(clk_arry[i].clk);
+		if (rc) {
+			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	msm_dss_put_clk(clk_arry, num_clk);
+
+	return rc;
+} /* msm_dss_get_clk */
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < num_clk; i++) {
+		if (clk_arry[i].clk) {
+			if (DSS_CLK_AHB != clk_arry[i].type) {
+				DEV_DBG("%pS->%s: '%s' rate %lu\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name,
+					clk_arry[i].rate);
+				rc = clk_set_rate(clk_arry[i].clk,
+					clk_arry[i].rate);
+				if (rc) {
+					DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+					break;
+				}
+			}
+		} else {
+			DEV_ERR("%pS->%s: '%s' is not available\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			rc = -EPERM;
+			break;
+		}
+	}
+
+	return rc;
+} /* msm_dss_clk_set_rate */
+
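+/*
+ * prepare and enable clocks in list order; if one fails, the clocks
+ * already enabled are disabled again before returning the error.
+ */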
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+	int i, rc = 0;
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			DEV_DBG("%pS->%s: enable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+			} else {
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+
+			if (rc) {
+				msm_dss_enable_clk(&clk_arry[i],
+					i, false);
+				break;
+			}
+		}
+	} else {
+		for (i = num_clk - 1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: disable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+} /* msm_dss_enable_clk */
+
+
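+/* slave_addr is the 8-bit write address; shift right for the 7-bit address */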
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *read_buf)
+{
+	struct i2c_msg msgs[2];
+	int ret = -1;
+
+	pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].buf = &reg_offset;
+	msgs[0].len = 1;
+
+	msgs[1].addr = slave_addr >> 1;
+	msgs[1].flags = I2C_M_RD;
+	msgs[1].buf = read_buf;
+	msgs[1].len = 1;
+
+	ret = i2c_transfer(client->adapter, msgs, 2);
+	if (ret != 2) {
+		pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+		return -EACCES;
+	}
+	pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+	return 0;
+}
+
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value)
+{
+	struct i2c_msg msgs[1];
+	uint8_t data[2];
+	int status = -EACCES;
+
+	pr_debug("%s: writing to slave_addr=[%x] and offset=[%x]\n",
+		 __func__, slave_addr, reg_offset);
+
+	data[0] = reg_offset;
+	data[1] = *value;
+
+	msgs[0].addr = slave_addr >> 1;
+	msgs[0].flags = 0;
+	msgs[0].len = 2;
+	msgs[0].buf = data;
+
+	status = i2c_transfer(client->adapter, msgs, 1);
+	if (status < 1) {
+		pr_err("I2C WRITE FAILED=[%d]\n", status);
+		return -EACCES;
+	}
+	pr_debug("%s: I2C write status=%x\n", __func__, status);
+	return status;
+}
diff --git a/drivers/video/msm/mdss/mdss_io_util.h b/drivers/video/msm/mdss/mdss_io_util.h
new file mode 100644
index 0000000..0ae62a3
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_io_util.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_IO_UTIL_H__
+#define __MDSS_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...)   pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...)   pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...)  pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...)  pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...)   pr_err(fmt, ##args)
+
+struct dss_io_data {
+	u32 len;
+	void __iomem *base;
+};
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug);
+void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val)  dss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val)     dss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset)       dss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset)          dss_reg_r(io, offset, true)
+
+enum dss_vreg_type {
+	DSS_REG_LDO,
+	DSS_REG_VS,
+};
+
+struct dss_vreg {
+	struct regulator *vreg; /* vreg handle */
+	char vreg_name[32];
+	enum dss_vreg_type type;
+	int min_voltage;
+	int max_voltage;
+	int optimum_voltage;
+};
+
+struct dss_gpio {
+	unsigned gpio;
+	unsigned value;
+	char gpio_name[32];
+};
+
+enum dss_clk_type {
+	DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+	DSS_CLK_PCLK,
+	DSS_CLK_OTHER,
+};
+
+struct dss_clk {
+	struct clk *clk; /* clk handle */
+	char clk_name[32];
+	enum dss_clk_type type;
+	unsigned long rate;
+};
+
+struct dss_module_power {
+	unsigned num_vreg;
+	struct dss_vreg *vreg_config;
+	unsigned num_gpio;
+	struct dss_gpio *gpio_config;
+	unsigned num_clk;
+	struct dss_clk *clk_config;
+};
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+	struct dss_io_data *io_data, const char *name);
+void msm_dss_iounmap(struct dss_io_data *io_data);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable);
+int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+	int num_vreg, int config);
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+
+int mdss_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+		       uint8_t reg_offset, uint8_t *read_buf);
+int mdss_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+			uint8_t reg_offset, uint8_t *value);
+
+#endif /* __MDSS_IO_UTIL_H__ */
diff --git a/drivers/video/msm/mdss/mdss_mdp.c b/drivers/video/msm/mdss/mdss_mdp.c
index 4bfbc9d..8be64b2 100644
--- a/drivers/video/msm/mdss/mdss_mdp.c
+++ b/drivers/video/msm/mdss/mdss_mdp.c
@@ -1,7 +1,7 @@
 /*
  * MDSS MDP Interface (used by framebuffer core)
  *
- * Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
  * Copyright (C) 2007 Google Incorporated
  *
  * This software is licensed under the terms of the GNU General Public
@@ -24,7 +24,10 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iommu.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/memory_alloc.h>
@@ -41,38 +44,29 @@
 #include <mach/hardware.h>
 #include <mach/msm_bus.h>
 #include <mach/msm_bus_board.h>
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
+#include <mach/memory.h>
+#include <mach/msm_memtypes.h>
 
 #include "mdss.h"
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
+#include "mdss_debug.h"
 
-unsigned char *mdss_reg_base;
+struct mdss_data_type *mdss_res;
+struct msm_mdp_interface mdp5 = {
+	.init_fnc = mdss_mdp_overlay_init,
+	.fb_mem_alloc_fnc = mdss_mdp_alloc_fb_mem,
+	.panel_register_done = mdss_panel_register_done,
+	.fb_stride = mdss_mdp_fb_stride,
+};
 
-struct mdss_res_type *mdss_res;
-static struct msm_panel_common_pdata *mdp_pdata;
+#define IB_QUOTA 800000000
+#define AB_QUOTA 800000000
 
 static DEFINE_SPINLOCK(mdp_lock);
 static DEFINE_MUTEX(mdp_clk_lock);
-static DEFINE_MUTEX(mdp_suspend_mutex);
-
-u32 mdss_mdp_pipe_type_map[MDSS_MDP_MAX_SSPP] = {
-	MDSS_MDP_PIPE_TYPE_VIG,
-	MDSS_MDP_PIPE_TYPE_VIG,
-	MDSS_MDP_PIPE_TYPE_VIG,
-	MDSS_MDP_PIPE_TYPE_RGB,
-	MDSS_MDP_PIPE_TYPE_RGB,
-	MDSS_MDP_PIPE_TYPE_RGB,
-	MDSS_MDP_PIPE_TYPE_DMA,
-	MDSS_MDP_PIPE_TYPE_DMA,
-};
-
-u32 mdss_mdp_mixer_type_map[MDSS_MDP_MAX_LAYERMIXER] = {
-	MDSS_MDP_MIXER_TYPE_INTF,
-	MDSS_MDP_MIXER_TYPE_INTF,
-	MDSS_MDP_MIXER_TYPE_INTF,
-	MDSS_MDP_MIXER_TYPE_WRITEBACK,
-	MDSS_MDP_MIXER_TYPE_WRITEBACK,
-};
 
 #define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)		\
 	{						\
@@ -94,35 +88,128 @@
 	.name = "mdss_mdp",
 };
 
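+/*
+ * Static IOVA layout: the unsecure domain spans [128K, 1G) (iova
+ * 0..128K is left unmapped) and the secure domain spans [1G, 2G).
+ */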
+struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
+	[MDSS_IOMMU_DOMAIN_UNSECURE] = {
+		.client_name = "mdp_ns",
+		.ctx_name = "mdp_0",
+		.partitions = {
+			{
+				.start = SZ_128K,
+				.size = SZ_1G - SZ_128K,
+			},
+		},
+		.npartitions = 1,
+	},
+	[MDSS_IOMMU_DOMAIN_SECURE] = {
+		.client_name = "mdp_secure",
+		.ctx_name = "mdp_1",
+		.partitions = {
+			{
+				.start = SZ_1G,
+				.size = SZ_1G,
+			},
+		},
+		.npartitions = 1,
+	},
+};
+
 struct mdss_hw mdss_mdp_hw = {
 	.hw_ndx = MDSS_HW_MDP,
+	.ptr = NULL,
 	.irq_handler = mdss_mdp_isr,
 };
 
 static DEFINE_SPINLOCK(mdss_lock);
 struct mdss_hw *mdss_irq_handlers[MDSS_MAX_HW_BLK];
 
+static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
+static int mdss_mdp_parse_dt(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
+				      char *prop_name, u32 *offsets, int len);
+static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
+				       char *prop_name);
+static int mdss_mdp_parse_dt_smp(struct platform_device *pdev);
+static int mdss_mdp_parse_dt_misc(struct platform_device *pdev);
+
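+/*
+ * fb0 is backed by a physically contiguous allocation that is also
+ * mapped into the unsecure MDP IOMMU domain (iova kept in mfd->iova);
+ * other fb indices get no backing memory here and are handled by the
+ * overlay path instead.
+ */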
+int mdss_mdp_alloc_fb_mem(struct msm_fb_data_type *mfd)
+{
+	int dom;
+	void *virt = NULL;
+	unsigned long phys = 0;
+	size_t size;
+	u32 yres = mfd->fbi->var.yres_virtual;
+
+	size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);
+
+	if (mfd->index == 0) {
+		virt = allocate_contiguous_memory(size, MEMTYPE_EBI1, SZ_1M, 0);
+		if (!virt) {
+			pr_err("unable to alloc fbmem size=%zu\n", size);
+			return -ENOMEM;
+		}
+		phys = memory_pool_node_paddr(virt);
+		dom = mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
+		msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K, 0,
+					&mfd->iova);
+
+		pr_debug("allocating %zu bytes at %p (%lx phys) for fb %d\n",
+			size, virt, phys, mfd->index);
+	} else {
+		size = 0;
+	}
+
+	mfd->fbi->screen_base = virt;
+	mfd->fbi->fix.smem_start = phys;
+	mfd->fbi->fix.smem_len = size;
+
+	return 0;
+}
+
+u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+	/*
+	 * The adreno GPU hardware requires that the pitch be aligned to
+	 * 32 pixels for color buffers, so for the cases where the GPU
+	 * is writing directly to fb0, the framebuffer pitch also needs
+	 * to be 32-pixel aligned.
+	 */
+
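+	/* e.g. fb0 at xres=1080, bpp=4: ALIGN(1080, 32) = 1088 -> 4352 byte stride */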
+	if (fb_index == 0)
+		return ALIGN(xres, 32) * bpp;
+	else
+		return xres * bpp;
+}
+
 static inline int mdss_irq_dispatch(u32 hw_ndx, int irq, void *ptr)
 {
 	struct mdss_hw *hw;
+	int rc = -ENODEV;
 
 	spin_lock(&mdss_lock);
 	hw = mdss_irq_handlers[hw_ndx];
-	spin_unlock(&mdss_lock);
 	if (hw)
-		return hw->irq_handler(irq, ptr);
+		rc = hw->irq_handler(irq, hw->ptr);
+	spin_unlock(&mdss_lock);
 
-	return -ENODEV;
+	return rc;
 }
 
 static irqreturn_t mdss_irq_handler(int irq, void *ptr)
 {
+	struct mdss_data_type *mdata = ptr;
 	u32 intr = MDSS_MDP_REG_READ(MDSS_REG_HW_INTR_STATUS);
 
-	mdss_res->irq_buzy = true;
+	if (!mdata)
+		return IRQ_NONE;
 
-	if (intr & MDSS_INTR_MDP)
+	mdata->irq_buzy = true;
+
+	if (intr & MDSS_INTR_MDP) {
+		spin_lock(&mdp_lock);
 		mdss_irq_dispatch(MDSS_HW_MDP, irq, ptr);
+		spin_unlock(&mdp_lock);
+	}
 
 	if (intr & MDSS_INTR_DSI0)
 		mdss_irq_dispatch(MDSS_HW_DSI0, irq, ptr);
@@ -136,7 +223,7 @@
 	if (intr & MDSS_INTR_HDMI)
 		mdss_irq_dispatch(MDSS_HW_HDMI, irq, ptr);
 
-	mdss_res->irq_buzy = false;
+	mdata->irq_buzy = false;
 
 	return IRQ_HANDLED;
 }
@@ -186,20 +273,22 @@
 
 	spin_lock_irqsave(&mdss_lock, irq_flags);
 	if (!(mdss_res->irq_mask & ndx_bit)) {
-		pr_warn("MDSS HW ndx=%d is NOT set, mask=%x\n",
-			hw->hw_ndx, mdss_res->mdp_irq_mask);
+		pr_warn("MDSS HW ndx=%d is NOT set, mask=%x, hist mask=%x\n",
+			hw->hw_ndx, mdss_res->mdp_irq_mask,
+			mdss_res->mdp_hist_irq_mask);
 	} else {
 		mdss_irq_handlers[hw->hw_ndx] = NULL;
 		mdss_res->irq_mask &= ~ndx_bit;
 		if (mdss_res->irq_mask == 0) {
 			mdss_res->irq_ena = false;
-			disable_irq(mdss_res->irq);
+			disable_irq_nosync(mdss_res->irq);
 		}
 	}
 	spin_unlock_irqrestore(&mdss_lock, irq_flags);
 }
 EXPORT_SYMBOL(mdss_disable_irq);
 
+/* called from interrupt context */
 void mdss_disable_irq_nosync(struct mdss_hw *hw)
 {
 	u32 ndx_bit;
@@ -212,10 +301,10 @@
 	pr_debug("Disable HW=%d irq ena=%d mask=%x\n", hw->hw_ndx,
 			mdss_res->irq_ena, mdss_res->irq_mask);
 
-	spin_lock(&mdss_lock);
 	if (!(mdss_res->irq_mask & ndx_bit)) {
-		pr_warn("MDSS HW ndx=%d is NOT set, mask=%x\n",
-			hw->hw_ndx, mdss_res->mdp_irq_mask);
+		pr_warn("MDSS HW ndx=%d is NOT set, mask=%x, hist mask=%x\n",
+			hw->hw_ndx, mdss_res->mdp_irq_mask,
+			mdss_res->mdp_hist_irq_mask);
 	} else {
 		mdss_irq_handlers[hw->hw_ndx] = NULL;
 		mdss_res->irq_mask &= ~ndx_bit;
@@ -224,13 +313,12 @@
 			disable_irq_nosync(mdss_res->irq);
 		}
 	}
-	spin_unlock(&mdss_lock);
 }
 EXPORT_SYMBOL(mdss_disable_irq_nosync);
 
-static int mdss_mdp_bus_scale_register(void)
+static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
 {
-	if (!mdss_res->bus_hdl) {
+	if (!mdata->bus_hdl) {
 		struct msm_bus_scale_pdata *bus_pdata = &mdp_bus_scale_table;
 		int i;
 
@@ -239,26 +327,26 @@
 			mdp_bus_usecases[i].vectors = &mdp_bus_vectors[i];
 		}
 
-		mdss_res->bus_hdl = msm_bus_scale_register_client(bus_pdata);
-		if (!mdss_res->bus_hdl) {
+		mdata->bus_hdl = msm_bus_scale_register_client(bus_pdata);
+		if (!mdata->bus_hdl) {
 			pr_err("not able to get bus scale\n");
 			return -ENOMEM;
 		}
 
-		pr_debug("register bus_hdl=%x\n", mdss_res->bus_hdl);
+		pr_debug("register bus_hdl=%x\n", mdata->bus_hdl);
 	}
 	return 0;
 }
 
-static void mdss_mdp_bus_scale_unregister(void)
+static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
 {
-	pr_debug("unregister bus_hdl=%x\n", mdss_res->bus_hdl);
+	pr_debug("unregister bus_hdl=%x\n", mdata->bus_hdl);
 
-	if (mdss_res->bus_hdl)
-		msm_bus_scale_unregister_client(mdss_res->bus_hdl);
+	if (mdata->bus_hdl)
+		msm_bus_scale_unregister_client(mdata->bus_hdl);
 }
 
-int mdss_mdp_bus_scale_set_quota(u32 ab_quota, u32 ib_quota)
+int mdss_mdp_bus_scale_set_quota(u64 ab_quota, u64 ib_quota)
 {
 	static int current_bus_idx;
 	int bus_idx;
@@ -276,6 +364,10 @@
 
 		bus_idx = (current_bus_idx % (num_cases - 1)) + 1;
 
+		/* aligning to avoid performing updates for small changes */
+		ab_quota = ALIGN(ab_quota, SZ_64M);
+		ib_quota = ALIGN(ib_quota, SZ_64M);
+
 		vect = mdp_bus_scale_table.usecase[current_bus_idx].vectors;
 		if ((ab_quota == vect->ab) && (ib_quota == vect->ib)) {
 			pr_debug("skip bus scaling, no change in vectors\n");
@@ -301,6 +393,21 @@
 	return 1 << (intr_type + intf_num);
 }
 
+/* function assumes that mdp is clocked to access hw registers */
+void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
+		u32 intr_type, u32 intf_num)
+{
+	unsigned long irq_flags;
+	u32 irq;
+
+	irq = mdss_mdp_irq_mask(intr_type, intf_num);
+
+	pr_debug("clearing mdp irq mask=%x\n", irq);
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	writel_relaxed(irq, mdata->mdp_base + MDSS_MDP_REG_INTR_CLEAR);
+	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+}
+
 int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num)
 {
 	u32 irq;
@@ -327,6 +434,29 @@
 
 	return ret;
 }
+int mdss_mdp_hist_irq_enable(u32 irq)
+{
+	unsigned long irq_flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	if (mdss_res->mdp_hist_irq_mask & irq) {
+		pr_warn("MDSS MDP Hist IRQ-0x%x is already set, mask=%x\n",
+				irq, mdss_res->mdp_hist_irq_mask);
+		ret = -EBUSY;
+	} else {
+		pr_debug("MDP IRQ mask old=%x new=%x\n",
+				mdss_res->mdp_hist_irq_mask, irq);
+		mdss_res->mdp_hist_irq_mask |= irq;
+		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_CLEAR, irq);
+		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_EN,
+				mdss_res->mdp_hist_irq_mask);
+		mdss_enable_irq(&mdss_mdp_hw);
+	}
+	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+
+	return ret;
+}
 
 void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num)
 {
@@ -341,20 +471,42 @@
 				irq, mdss_res->mdp_irq_mask);
 	} else {
 		mdss_res->mdp_irq_mask &= ~irq;
+
 		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
 				mdss_res->mdp_irq_mask);
-		mdss_disable_irq(&mdss_mdp_hw);
+		if ((mdss_res->mdp_irq_mask == 0) &&
+			(mdss_res->mdp_hist_irq_mask == 0))
+			mdss_disable_irq(&mdss_mdp_hw);
 	}
 	spin_unlock_irqrestore(&mdp_lock, irq_flags);
 }
 
+void mdss_mdp_hist_irq_disable(u32 irq)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&mdp_lock, irq_flags);
+	if (!(mdss_res->mdp_hist_irq_mask & irq)) {
+		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
+				irq, mdss_res->mdp_hist_irq_mask);
+	} else {
+		mdss_res->mdp_hist_irq_mask &= ~irq;
+		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_EN,
+				mdss_res->mdp_hist_irq_mask);
+		if ((mdss_res->mdp_irq_mask == 0) &&
+			(mdss_res->mdp_hist_irq_mask == 0))
+			mdss_disable_irq(&mdss_mdp_hw);
+	}
+	spin_unlock_irqrestore(&mdp_lock, irq_flags);
+}
+
+/* called from interrupt context */
 void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num)
 {
 	u32 irq;
 
 	irq = mdss_mdp_irq_mask(intr_type, intf_num);
 
-	spin_lock(&mdp_lock);
 	if (!(mdss_res->mdp_irq_mask & irq)) {
 		pr_warn("MDSS MDP IRQ-%x is NOT set, mask=%x\n",
 				irq, mdss_res->mdp_irq_mask);
@@ -362,9 +514,10 @@
 		mdss_res->mdp_irq_mask &= ~irq;
 		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_EN,
 				mdss_res->mdp_irq_mask);
-		mdss_disable_irq_nosync(&mdss_mdp_hw);
+		if ((mdss_res->mdp_irq_mask == 0) &&
+			(mdss_res->mdp_hist_irq_mask == 0))
+			mdss_disable_irq_nosync(&mdss_mdp_hw);
 	}
-	spin_unlock(&mdp_lock);
 }
 
 static inline struct clk *mdss_mdp_get_clk(u32 clk_idx)
@@ -404,13 +557,21 @@
 	return ret;
 }
 
-void mdss_mdp_set_clk_rate(unsigned long min_clk_rate)
+void mdss_mdp_set_clk_rate(unsigned long rate)
 {
+	struct mdss_data_type *mdata = mdss_res;
 	unsigned long clk_rate;
 	struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_SRC);
+	unsigned long min_clk_rate;
+
+	min_clk_rate = max(rate, mdata->min_mdp_clk);
+
 	if (clk) {
 		mutex_lock(&mdp_clk_lock);
-		clk_rate = clk_round_rate(clk, min_clk_rate);
+		if (min_clk_rate < mdata->max_mdp_clk_rate)
+			clk_rate = clk_round_rate(clk, min_clk_rate);
+		else
+			clk_rate = mdata->max_mdp_clk_rate;
 		if (IS_ERR_VALUE(clk_rate)) {
 			pr_err("unable to round rate err=%ld\n", clk_rate);
 		} else if (clk_rate != clk_get_rate(clk)) {
@@ -437,15 +598,22 @@
 	return clk_rate;
 }
 
-static void mdss_mdp_clk_ctrl_update(int enable)
+static void mdss_mdp_clk_ctrl_update(struct mdss_data_type *mdata)
 {
-	if (mdss_res->clk_ena == enable)
-		return;
-
-	pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
+	int enable;
 
 	mutex_lock(&mdp_clk_lock);
-	mdss_res->clk_ena = enable;
+	enable = atomic_read(&mdata->clk_ref) > 0;
+	if (mdata->clk_ena == enable) {
+		mutex_unlock(&mdp_clk_lock);
+		return;
+	}
+	mdata->clk_ena = enable;
+
+	if (enable)
+		pm_runtime_get_sync(&mdata->pdev->dev);
+
+	pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
 	mb();
 
 	mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
@@ -453,76 +621,51 @@
 
 	mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
 	mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
-	if (mdss_res->vsync_ena)
+	if (mdata->vsync_ena)
 		mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
 
+	if (!enable)
+		pm_runtime_put(&mdata->pdev->dev);
+
 	mutex_unlock(&mdp_clk_lock);
 }
 
 static void mdss_mdp_clk_ctrl_workqueue_handler(struct work_struct *work)
 {
-	mdss_mdp_clk_ctrl(MDP_BLOCK_MASTER_OFF, false);
+	struct mdss_data_type *mdata;
+
+	mdata = container_of(work, struct mdss_data_type, clk_ctrl_worker);
+	mdss_mdp_clk_ctrl_update(mdata);
 }
 
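+/*
+ * clk_ref counts active MDP clock users: the first MDP_BLOCK_POWER_ON
+ * turns the clocks on, the last MDP_BLOCK_POWER_OFF turns them off.
+ * ISR callers may only drop a reference; the final disable is punted
+ * to clk_ctrl_wq because clock and runtime-PM calls can sleep.
+ */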
 void mdss_mdp_clk_ctrl(int enable, int isr)
 {
-	static atomic_t clk_ref = ATOMIC_INIT(0);
-	static DEFINE_MUTEX(clk_ctrl_lock);
-	int force_off = 0;
+	struct mdss_data_type *mdata = mdss_res;
 
-	pr_debug("clk enable=%d isr=%d clk_ref=%d\n", enable, isr,
-			atomic_read(&clk_ref));
-	/*
-	 * It is assumed that if isr = TRUE then start = OFF
-	 * if start = ON when isr = TRUE it could happen that the usercontext
-	 * could turn off the clocks while the interrupt is updating the
-	 * power to ON
-	 */
-	WARN_ON(isr == true && enable);
+	pr_debug("clk enable=%d isr=%d ref=%d\n", enable, isr,
+			atomic_read(&mdata->clk_ref));
 
 	if (enable == MDP_BLOCK_POWER_ON) {
-		atomic_inc(&clk_ref);
-	} else if (!atomic_add_unless(&clk_ref, -1, 0)) {
-		if (enable == MDP_BLOCK_MASTER_OFF) {
-			pr_debug("master power-off req\n");
-			force_off = 1;
-		} else {
-			WARN(1, "too many mdp clock off call\n");
-		}
-	}
+		BUG_ON(isr);
 
-	WARN_ON(enable == MDP_BLOCK_MASTER_OFF && !force_off);
-
-	if (isr) {
-		/* if it's power off send workqueue to turn off clocks */
-		if (mdss_res->clk_ena && !atomic_read(&clk_ref))
-			queue_delayed_work(mdss_res->clk_ctrl_wq,
-					   &mdss_res->clk_ctrl_worker,
-					   mdss_res->timeout);
+		if (atomic_inc_return(&mdata->clk_ref) == 1)
+			mdss_mdp_clk_ctrl_update(mdata);
 	} else {
-		mutex_lock(&clk_ctrl_lock);
-		if (delayed_work_pending(&mdss_res->clk_ctrl_worker))
-			cancel_delayed_work(&mdss_res->clk_ctrl_worker);
+		BUG_ON(atomic_read(&mdata->clk_ref) == 0);
 
-		if (atomic_read(&clk_ref)) {
-			mdss_mdp_clk_ctrl_update(true);
-		} else if (mdss_res->clk_ena) {
-			mutex_lock(&mdp_suspend_mutex);
-			if (force_off || mdss_res->suspend) {
-				mdss_mdp_clk_ctrl_update(false);
-			} else {
-				/* send workqueue to turn off mdp power */
-				queue_delayed_work(mdss_res->clk_ctrl_wq,
-						   &mdss_res->clk_ctrl_worker,
-						   mdss_res->timeout);
-			}
-			mutex_unlock(&mdp_suspend_mutex);
+		if (atomic_dec_and_test(&mdata->clk_ref)) {
+			if (isr)
+				queue_work(mdata->clk_ctrl_wq,
+						&mdata->clk_ctrl_worker);
+			else
+				mdss_mdp_clk_ctrl_update(mdata);
 		}
-		mutex_unlock(&clk_ctrl_lock);
 	}
 }
 
-static inline int mdss_mdp_irq_clk_register(struct platform_device *pdev,
+static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata,
 					    char *clk_name, int clk_idx)
 {
 	struct clk *tmp;
@@ -531,272 +674,1006 @@
 		return -EINVAL;
 	}
 
-
-	tmp = clk_get(&pdev->dev, clk_name);
+	tmp = devm_clk_get(&mdata->pdev->dev, clk_name);
 	if (IS_ERR(tmp)) {
 		pr_err("unable to get clk: %s\n", clk_name);
 		return PTR_ERR(tmp);
 	}
 
-	mdss_res->mdp_clk[clk_idx] = tmp;
+	mdata->mdp_clk[clk_idx] = tmp;
 	return 0;
 }
 
-static int mdss_mdp_irq_clk_setup(struct platform_device *pdev)
+static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
 {
 	int ret;
-	int i;
 
-	ret = request_irq(mdss_res->irq, mdss_irq_handler, IRQF_DISABLED,
-			  "MDSS", 0);
+	ret = of_property_read_u32(mdata->pdev->dev.of_node,
+			"qcom,max-clk-rate", &mdata->max_mdp_clk_rate);
+	if (ret) {
+		pr_err("failed to get max mdp clock rate\n");
+		return ret;
+	}
+
+	pr_debug("max mdp clk rate=%d\n", mdata->max_mdp_clk_rate);
+
+	ret = devm_request_irq(&mdata->pdev->dev, mdata->irq, mdss_irq_handler,
+			 IRQF_DISABLED, "MDSS", mdata);
 	if (ret) {
 		pr_err("mdp request_irq() failed!\n");
 		return ret;
 	}
-	disable_irq(mdss_res->irq);
+	disable_irq(mdata->irq);
 
-	mdss_res->fs = regulator_get(&pdev->dev, "vdd");
-	if (IS_ERR_OR_NULL(mdss_res->fs)) {
-		mdss_res->fs = NULL;
+	mdata->fs = devm_regulator_get(&mdata->pdev->dev, "vdd");
+	if (IS_ERR_OR_NULL(mdata->fs)) {
+		mdata->fs = NULL;
 		pr_err("unable to get gdsc regulator\n");
-		goto error;
+		return -EINVAL;
 	}
-	regulator_enable(mdss_res->fs);
-	mdss_res->fs_ena = true;
+	mdata->fs_ena = false;
 
-	if (mdss_mdp_irq_clk_register(pdev, "bus_clk", MDSS_CLK_AXI) ||
-	    mdss_mdp_irq_clk_register(pdev, "iface_clk", MDSS_CLK_AHB) ||
-	    mdss_mdp_irq_clk_register(pdev, "core_clk_src", MDSS_CLK_MDP_SRC) ||
-	    mdss_mdp_irq_clk_register(pdev, "core_clk", MDSS_CLK_MDP_CORE) ||
-	    mdss_mdp_irq_clk_register(pdev, "lut_clk", MDSS_CLK_MDP_LUT) ||
-	    mdss_mdp_irq_clk_register(pdev, "vsync_clk", MDSS_CLK_MDP_VSYNC))
-		goto error;
+	if (mdss_mdp_irq_clk_register(mdata, "bus_clk", MDSS_CLK_AXI) ||
+	    mdss_mdp_irq_clk_register(mdata, "iface_clk", MDSS_CLK_AHB) ||
+	    mdss_mdp_irq_clk_register(mdata, "core_clk_src",
+				      MDSS_CLK_MDP_SRC) ||
+	    mdss_mdp_irq_clk_register(mdata, "core_clk",
+				      MDSS_CLK_MDP_CORE) ||
+	    mdss_mdp_irq_clk_register(mdata, "lut_clk", MDSS_CLK_MDP_LUT) ||
+	    mdss_mdp_irq_clk_register(mdata, "vsync_clk", MDSS_CLK_MDP_VSYNC))
+		return -EINVAL;
 
 	mdss_mdp_set_clk_rate(MDP_CLK_DEFAULT_RATE);
 	pr_debug("mdp clk rate=%ld\n", mdss_mdp_get_clk_rate(MDSS_CLK_MDP_SRC));
 
 	return 0;
-error:
-	for (i = 0; i < MDSS_MAX_CLK; i++) {
-		if (mdss_res->mdp_clk[i])
-			clk_put(mdss_res->mdp_clk[i]);
+}
+
+static int mdss_iommu_fault_handler(struct iommu_domain *domain,
+		struct device *dev, unsigned long iova, int flags, void *token)
+{
+	pr_err("MDP IOMMU page fault: iova 0x%lx\n", iova);
+	return 0;
+}
+
+int mdss_iommu_attach(struct mdss_data_type *mdata)
+{
+	struct iommu_domain *domain;
+	struct mdss_iommu_map_type *iomap;
+	int i;
+
+	if (mdata->iommu_attached) {
+		pr_debug("mdp iommu already attached\n");
+		return 0;
 	}
-	if (mdss_res->fs)
-		regulator_put(mdss_res->fs);
-	if (mdss_res->irq)
-		free_irq(mdss_res->irq, 0);
 
-	return -EINVAL;
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		iomap = mdata->iommu_map + i;
 
-}
+		domain = msm_get_iommu_domain(iomap->domain_idx);
+		if (!domain) {
+			WARN(1, "could not attach iommu client %s to ctx %s\n",
+				iomap->client_name, iomap->ctx_name);
+			continue;
+		}
+		iommu_attach_device(domain, iomap->ctx);
+	}
 
-static struct msm_panel_common_pdata *mdss_mdp_populate_pdata(
-	struct device *dev)
-{
-	struct msm_panel_common_pdata *pdata;
-
-	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
-		dev_err(dev, "could not allocate memory for pdata\n");
-	return pdata;
-}
-
-static u32 mdss_mdp_res_init(struct platform_device *pdev)
-{
-	u32 rc;
-
-	rc = mdss_mdp_irq_clk_setup(pdev);
-	if (rc)
-		return rc;
-
-	mdss_res->clk_ctrl_wq = create_singlethread_workqueue("mdp_clk_wq");
-	INIT_DELAYED_WORK(&mdss_res->clk_ctrl_worker,
-			  mdss_mdp_clk_ctrl_workqueue_handler);
-
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-	mdss_res->rev = MDSS_MDP_REG_READ(MDSS_REG_HW_VERSION);
-	mdss_res->mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
-
-	mdss_res->smp_mb_cnt = MDSS_MDP_SMP_MMB_BLOCKS;
-	mdss_res->smp_mb_size = MDSS_MDP_SMP_MMB_SIZE;
-	mdss_res->pipe_type_map = mdss_mdp_pipe_type_map;
-	mdss_res->mixer_type_map = mdss_mdp_mixer_type_map;
-
-	pr_info("mdss_revision=%x\n", mdss_res->rev);
-	pr_info("mdp_hw_revision=%x\n", mdss_res->mdp_rev);
-
-	mdss_res->res_init = true;
-	mdss_res->timeout = HZ/20;
-	mdss_res->clk_ena = false;
-	mdss_res->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
-	mdss_res->suspend = false;
-	mdss_res->prim_ptype = NO_PANEL;
-	mdss_res->irq_ena = false;
+	mdata->iommu_attached = true;
 
 	return 0;
 }
 
-static int mdss_mdp_probe(struct platform_device *pdev)
+int mdss_iommu_dettach(struct mdss_data_type *mdata)
 {
-	struct resource *mdss_mdp_mres;
-	struct resource *mdss_mdp_ires;
-	resource_size_t size;
+	struct iommu_domain *domain;
+	struct mdss_iommu_map_type *iomap;
+	int i;
+
+	if (!mdata->iommu_attached) {
+		pr_debug("mdp iommu already detached\n");
+		return 0;
+	}
+
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		iomap = mdata->iommu_map + i;
+
+		domain = msm_get_iommu_domain(iomap->domain_idx);
+		if (!domain) {
+			pr_err("unable to get iommu domain(%d)\n",
+				iomap->domain_idx);
+			continue;
+		}
+		iommu_detach_device(domain, iomap->ctx);
+	}
+
+	mdata->iommu_attached = false;
+
+	return 0;
+}
+
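+/*
+ * Register an IOVA domain for each mdss_iommu_map entry and install a
+ * page fault handler; the context banks are attached separately through
+ * mdss_iommu_attach().
+ */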
+int mdss_iommu_init(struct mdss_data_type *mdata)
+{
+	struct msm_iova_layout layout;
+	struct iommu_domain *domain;
+	struct mdss_iommu_map_type *iomap;
+	int i;
+
+	if (mdata->iommu_map) {
+		pr_warn("iommu already initialized\n");
+		return 0;
+	}
+
+	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+		iomap = &mdss_iommu_map[i];
+
+		layout.client_name = iomap->client_name;
+		layout.partitions = iomap->partitions;
+		layout.npartitions = iomap->npartitions;
+		layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);
+
+		iomap->domain_idx = msm_register_domain(&layout);
+		if (IS_ERR_VALUE(iomap->domain_idx))
+			return -EINVAL;
+
+		domain = msm_get_iommu_domain(iomap->domain_idx);
+		if (!domain) {
+			pr_err("unable to get iommu domain(%d)\n",
+				iomap->domain_idx);
+			return -EINVAL;
+		}
+		iommu_set_fault_handler(domain, mdss_iommu_fault_handler, NULL);
+
+		iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
+		if (!iomap->ctx) {
+			pr_warn("unable to get iommu ctx(%s)\n",
+				iomap->ctx_name);
+			return -EINVAL;
+		}
+	}
+
+	mdata->iommu_map = mdss_iommu_map;
+
+	return 0;
+}
+
+static int mdss_mdp_debug_init(struct mdss_data_type *mdata)
+{
 	int rc;
 
-	if (!mdss_res) {
-		mdss_res = devm_kzalloc(&pdev->dev, sizeof(*mdss_res),
-				GFP_KERNEL);
-		if (mdss_res == NULL)
-			return -ENOMEM;
-	}
+	rc = mdss_debugfs_init(mdata);
+	if (rc)
+		return rc;
 
-	if (pdev->dev.of_node) {
-		pdev->id = 0;
-		mdp_pdata = mdss_mdp_populate_pdata(&pdev->dev);
-		mdss_mdp_mres = platform_get_resource(pdev,
-						IORESOURCE_MEM, 0);
-		mdss_mdp_ires = platform_get_resource(pdev,
-						IORESOURCE_IRQ, 0);
-		if (!mdss_mdp_mres || !mdss_mdp_ires) {
-			pr_err("unable to get the MDSS resources");
-			rc = -ENOMEM;
-			goto probe_done;
-		}
-		mdss_reg_base = ioremap(mdss_mdp_mres->start,
-					resource_size(mdss_mdp_mres));
+	mdss_debug_register_base(NULL, mdata->mdp_base, mdata->mdp_reg_size);
 
-		pr_info("MDP HW Base phy_Address=0x%x virt=0x%x\n",
-			(int) mdss_mdp_mres->start,
-			(int) mdss_reg_base);
+	return 0;
+}
 
-		mdss_res->irq = mdss_mdp_ires->start;
-	} else if ((pdev->id == 0) && (pdev->num_resources > 0)) {
-		mdp_pdata = pdev->dev.platform_data;
+int mdss_hw_init(struct mdss_data_type *mdata)
+{
+	int i, j;
+	char *offset;
 
-		size =  resource_size(&pdev->resource[0]);
-		mdss_reg_base = ioremap(pdev->resource[0].start, size);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+	mdata->mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
+	pr_info_once("MDP Rev=%x\n", mdata->mdp_rev);
 
-		pr_info("MDP HW Base phy_Address=0x%x virt=0x%x\n",
-			(int) pdev->resource[0].start,
-			(int) mdss_reg_base);
+	if (mdata->hw_settings) {
+		struct mdss_hw_settings *hws = mdata->hw_settings;
 
-		mdss_res->irq = platform_get_irq(pdev, 0);
-		if (mdss_res->irq < 0) {
-			pr_err("can not get mdp irq\n");
-			rc = -ENOMEM;
-			goto probe_done;
+		while (hws->reg) {
+			writel_relaxed(hws->val, hws->reg);
+			hws++;
 		}
 	}
 
-	if (unlikely(!mdss_reg_base)) {
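+	/*
+	 * preload each DSPP histogram LUT with an identity ramp and latch
+	 * it via the swap register
+	 */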
+	for (i = 0; i < mdata->nmixers_intf; i++) {
+		offset = mdata->mixer_intf[i].dspp_base +
+				MDSS_MDP_REG_DSPP_HIST_LUT_BASE;
+		for (j = 0; j < ENHIST_LUT_ENTRIES; j++)
+			writel_relaxed(j, offset);
+
+		/* swap */
+		writel_relaxed(i, offset + 4);
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	pr_debug("MDP hw init done\n");
+
+	return 0;
+}
+
+static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
+{
+	u32 rc = 0;
+
+	if (mdata->res_init) {
+		pr_err("mdss resources already initialized\n");
+		return -EPERM;
+	}
+
+	mdata->res_init = true;
+	mdata->clk_ena = false;
+	mdata->irq_mask = MDSS_MDP_DEFAULT_INTR_MASK;
+	mdata->irq_ena = false;
+
+	rc = mdss_mdp_irq_clk_setup(mdata);
+	if (rc)
+		return rc;
+
+	mdata->clk_ctrl_wq = create_singlethread_workqueue("mdp_clk_wq");
+	INIT_WORK(&mdata->clk_ctrl_worker, mdss_mdp_clk_ctrl_workqueue_handler);
+
+	mdata->iclient = msm_ion_client_create(-1, mdata->pdev->name);
+	if (IS_ERR_OR_NULL(mdata->iclient)) {
+		pr_err("msm_ion_client_create() returned error (%p)\n",
+				mdata->iclient);
+		mdata->iclient = NULL;
+	}
+
+	rc = mdss_iommu_init(mdata);
+
+	return rc;
+}
+
+void mdss_mdp_footswitch_ctrl_splash(int on)
+{
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	if (mdata != NULL) {
+		if (on) {
+			pr_debug("Enable MDP FS for splash.\n");
+			regulator_enable(mdata->fs);
+			mdss_hw_init(mdata);
+		} else {
+			pr_debug("Disable MDP FS for splash.\n");
+			regulator_disable(mdata->fs);
+		}
+	} else {
+		pr_warn("mdss mdata not initialized\n");
+	}
+}
+
+static int mdss_mdp_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int rc;
+	struct mdss_data_type *mdata;
+
+	if (!pdev->dev.of_node) {
+		pr_err("MDP driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	if (mdss_res) {
+		pr_err("MDP already initialized\n");
+		return -EINVAL;
+	}
+
+	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
+	if (mdata == NULL)
+		return -ENOMEM;
+
+	pdev->id = 0;
+	mdata->pdev = pdev;
+	platform_set_drvdata(pdev, mdata);
+	mdss_res = mdata;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys");
+	if (!res) {
+		pr_err("unable to get MDP base address\n");
 		rc = -ENOMEM;
 		goto probe_done;
 	}
 
-	rc = mdss_mdp_res_init(pdev);
+	mdata->mdp_reg_size = resource_size(res);
+	mdata->mdp_base = devm_ioremap(&pdev->dev, res->start,
+				       mdata->mdp_reg_size);
+	if (unlikely(!mdata->mdp_base)) {
+		pr_err("unable to map MDP base\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+	pr_info("MDP HW Base phy_Address=0x%x virt=0x%x\n",
+		(int) res->start,
+		(int) mdata->mdp_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vbif_phys");
+	if (!res) {
+		pr_err("unable to get MDSS VBIF base address\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+
+	mdata->vbif_base = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (unlikely(!mdata->vbif_base)) {
+		pr_err("unable to map MDSS VBIF base\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+	pr_info("MDSS VBIF HW Base phy_Address=0x%x virt=0x%x\n",
+		(int) res->start,
+		(int) mdata->vbif_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		pr_err("unable to get MDSS irq\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+	mdata->irq = res->start;
+
+	/* populate hw iomem base info from device tree */
+	rc = mdss_mdp_parse_dt(pdev);
+	if (rc) {
+		pr_err("unable to parse device tree\n");
+		goto probe_done;
+	}
+
+	rc = mdss_mdp_res_init(mdata);
 	if (rc) {
 		pr_err("unable to initialize mdss mdp resources\n");
 		goto probe_done;
 	}
-	rc = mdss_mdp_bus_scale_register();
+	rc = mdss_mdp_pp_init(&pdev->dev);
+	if (rc) {
+		pr_err("unable to initialize mdss pp resources\n");
+		goto probe_done;
+	}
+	rc = mdss_mdp_bus_scale_register(mdata);
+	if (rc) {
+		pr_err("unable to register bus scaling\n");
+		goto probe_done;
+	}
+	mdss_mdp_bus_scale_set_quota(AB_QUOTA, IB_QUOTA);
+
+	rc = mdss_mdp_debug_init(mdata);
+	if (rc) {
+		pr_err("unable to initialize mdp debugging\n");
+		goto probe_done;
+	}
+
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	if (!pm_runtime_enabled(&pdev->dev))
+		mdss_mdp_footswitch_ctrl(mdata, true);
+
+	rc = mdss_fb_register_mdp_instance(&mdp5);
+	if (rc)
+		pr_err("unable to register mdp instance\n");
+
 probe_done:
 	if (IS_ERR_VALUE(rc)) {
-		if (mdss_res) {
-			devm_kfree(&pdev->dev, mdss_res);
-			mdss_res = NULL;
-		}
+		mdss_res = NULL;
+		mdss_mdp_pp_term(&pdev->dev);
 	}
 
 	return rc;
 }
 
-void mdss_mdp_footswitch_ctrl(int on)
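+/* expand raw big-endian <offset value> cell pairs from DT into hw_settings entries */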
+static void mdss_mdp_parse_dt_regs_array(const u32 *arr, char __iomem *hw_base,
+	struct mdss_hw_settings *hws, int count)
 {
-	mutex_lock(&mdp_suspend_mutex);
-	if (!mdss_res->suspend || mdss_res->eintf_ena || !mdss_res->fs) {
-		mutex_unlock(&mdp_suspend_mutex);
+	u32 len, reg;
+	int i;
+
+	if (!arr)
 		return;
-	}
 
-	if (on && !mdss_res->fs_ena) {
-		pr_debug("Enable MDP FS\n");
-		regulator_enable(mdss_res->fs);
-		mdss_res->fs_ena = true;
-	} else if (!on && mdss_res->fs_ena) {
-		pr_debug("Disable MDP FS\n");
-		regulator_disable(mdss_res->fs);
-		mdss_res->fs_ena = false;
+	for (i = 0, len = count * 2; i < len; i += 2) {
+		reg = be32_to_cpu(arr[i]);
+		hws->reg = hw_base + reg;
+		hws->val = be32_to_cpu(arr[i + 1]);
+		pr_debug("reg: 0x%04x=0x%08x\n", reg, hws->val);
+		hws++;
 	}
-	mutex_unlock(&mdp_suspend_mutex);
 }
 
-#ifdef CONFIG_PM
-static void mdss_mdp_suspend_sub(void)
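+/*
+ * Optional register overrides applied once in mdss_hw_init(). Example
+ * node content (hypothetical offsets/values, for illustration only):
+ *
+ *	qcom,vbif-settings = <0x0004 0x00000001>;
+ *	qcom,mdp-settings = <0x02e0 0x000000a9>;
+ */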
+int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
 {
-	cancel_delayed_work(&mdss_res->clk_ctrl_worker);
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	struct mdss_hw_settings *hws;
+	const u32 *vbif_arr, *mdp_arr;
+	int vbif_len, mdp_len;
 
-	flush_workqueue(mdss_res->clk_ctrl_wq);
-
-	mdss_mdp_clk_ctrl(MDP_BLOCK_MASTER_OFF, false);
-
-	mutex_lock(&mdp_suspend_mutex);
-	mdss_res->suspend = true;
-	mutex_unlock(&mdp_suspend_mutex);
-}
-
-static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
-{
-	int ret;
-	pr_debug("display suspend");
-
-	ret = mdss_fb_suspend_all();
-	if (IS_ERR_VALUE(ret)) {
-		pr_err("Unable to suspend all fb panels (%d)\n", ret);
-		return ret;
+	vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings",
+			&vbif_len);
+	if (!vbif_arr || (vbif_len & 1)) {
+		pr_warn("MDSS VBIF settings not found\n");
+		vbif_len = 0;
 	}
-	mdss_mdp_suspend_sub();
-	if (mdss_res->clk_ena) {
-		pr_err("MDP suspend failed\n");
-		return -EBUSY;
+	vbif_len /= 2 * sizeof(u32);
+
+	mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings",
+			&mdp_len);
+	if (!mdp_arr || (mdp_len & 1)) {
+		pr_warn("MDSS MDP settings not found\n");
+		mdp_len = 0;
 	}
-	mdss_mdp_footswitch_ctrl(false);
+	mdp_len /= 2 * sizeof(u32);
+
+	if ((mdp_len + vbif_len) == 0)
+		return 0;
+
+	hws = devm_kzalloc(&pdev->dev, sizeof(*hws) * (vbif_len + mdp_len + 1),
+			GFP_KERNEL);
+	if (!hws)
+		return -ENOMEM;
+
+	mdss_mdp_parse_dt_regs_array(vbif_arr, mdata->vbif_base, hws, vbif_len);
+	mdss_mdp_parse_dt_regs_array(mdp_arr, mdata->mdp_base,
+		hws + vbif_len, mdp_len);
+
+	mdata->hw_settings = hws;
 
 	return 0;
 }
 
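+/*
+ * Top-level parser for the qcom,mdss_mdp device tree node. Block offsets
+ * arrive as u32 arrays; a minimal node might carry (offsets illustrative
+ * only, not taken from any real target):
+ *
+ *	qcom,mdss-pipe-vig-off = <0x1200 0x1600 0x1a00>;
+ *	qcom,mdss-mixer-intf-off = <0x3200 0x3600 0x3a00>;
+ *	qcom,mdss-ctl-off = <0x600 0x700 0x800 0x900 0xa00>;
+ *	qcom,mdss-smp-data = <22 4096>;
+ */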
+static int mdss_mdp_parse_dt(struct platform_device *pdev)
+{
+	int rc;
+
+	rc = mdss_mdp_parse_dt_hw_settings(pdev);
+	if (rc) {
+		pr_err("Error in device tree : hw settings\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_pipe(pdev);
+	if (rc) {
+		pr_err("Error in device tree : pipes\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_mixer(pdev);
+	if (rc) {
+		pr_err("Error in device tree : mixers\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_ctl(pdev);
+	if (rc) {
+		pr_err("Error in device tree : ctl\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_video_intf(pdev);
+	if (rc) {
+		pr_err("Error in device tree : video intf\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_smp(pdev);
+	if (rc) {
+		pr_err("Error in device tree : smp\n");
+		return rc;
+	}
+
+	rc = mdss_mdp_parse_dt_misc(pdev);
+	if (rc) {
+		pr_err("Error in device tree : misc\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
+{
+	u32 npipes, off;
+	int rc = 0;
+	u32 nids = 0;
+	u32 *offsets = NULL, *ftch_id = NULL;
+
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	mdata->nvig_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-vig-off");
+	mdata->nrgb_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-rgb-off");
+	mdata->ndma_pipes = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pipe-dma-off");
+
+	nids  += mdss_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-pipe-vig-fetch-id");
+	nids  += mdss_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-pipe-rgb-fetch-id");
+	nids  += mdss_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-pipe-dma-fetch-id");
+
+	npipes = mdata->nvig_pipes + mdata->nrgb_pipes + mdata->ndma_pipes;
+
+	if (npipes != nids) {
+		pr_err("device tree err: unequal number of pipes and smp ids\n");
+		return -EINVAL;
+	}
+
+	offsets = kzalloc(sizeof(u32) * npipes, GFP_KERNEL);
+	if (!offsets) {
+		pr_err("no mem assigned: kzalloc fail\n");
+		return -ENOMEM;
+	}
+
+	ftch_id = kzalloc(sizeof(u32) * nids, GFP_KERNEL);
+	if (!ftch_id) {
+		pr_err("no mem assigned: kzalloc fail\n");
+		rc = -ENOMEM;
+		goto ftch_alloc_fail;
+	}
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-vig-fetch-id",
+		ftch_id, mdata->nvig_pipes);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-vig-off",
+		offsets, mdata->nvig_pipes);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_pipe_addr_setup(mdata, offsets, ftch_id,
+		MDSS_MDP_PIPE_TYPE_VIG, MDSS_MDP_SSPP_VIG0, mdata->nvig_pipes);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-rgb-fetch-id",
+		ftch_id + mdata->nvig_pipes, mdata->nrgb_pipes);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-rgb-off",
+		offsets + mdata->nvig_pipes, mdata->nrgb_pipes);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_pipe_addr_setup(mdata, offsets + mdata->nvig_pipes,
+		ftch_id + mdata->nvig_pipes, MDSS_MDP_PIPE_TYPE_RGB,
+		MDSS_MDP_SSPP_RGB0, mdata->nrgb_pipes);
+	if (rc)
+		goto parse_done;
+
+	off = mdata->nvig_pipes + mdata->nrgb_pipes;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-dma-fetch-id",
+		ftch_id + off, mdata->ndma_pipes);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-dma-off",
+		offsets + off, mdata->ndma_pipes);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_pipe_addr_setup(mdata, offsets + off, ftch_id + off,
+		MDSS_MDP_PIPE_TYPE_DMA, MDSS_MDP_SSPP_DMA0, mdata->ndma_pipes);
+	if (rc)
+		goto parse_done;
+
+parse_done:
+	kfree(ftch_id);
+ftch_alloc_fail:
+	kfree(offsets);
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev)
+{
+	u32 nmixers, ndspp, npingpong;
+	int rc = 0;
+	u32 *mixer_offsets = NULL, *dspp_offsets = NULL,
+	    *pingpong_offsets = NULL;
+
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	mdata->nmixers_intf = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-mixer-intf-off");
+	mdata->nmixers_wb = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-mixer-wb-off");
+	ndspp = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-dspp-off");
+	npingpong = mdss_mdp_parse_dt_prop_len(pdev,
+				"qcom,mdss-pingpong-off");
+	nmixers = mdata->nmixers_intf + mdata->nmixers_wb;
+
+	if (mdata->nmixers_intf != ndspp) {
+		pr_err("device tree err: unequal no of dspp and intf mixers\n");
+		return -EINVAL;
+	}
+
+	if (mdata->nmixers_intf != npingpong) {
+		pr_err("device tree err: unequal no of pingpong and intf mixers\n");
+		return -EINVAL;
+	}
+
+	mixer_offsets = kzalloc(sizeof(u32) * nmixers, GFP_KERNEL);
+	if (!mixer_offsets) {
+		pr_err("no mem assigned: kzalloc fail\n");
+		return -ENOMEM;
+	}
+
+	dspp_offsets = kzalloc(sizeof(u32) * ndspp, GFP_KERNEL);
+	if (!dspp_offsets) {
+		pr_err("no mem assigned: kzalloc fail\n");
+		rc = -ENOMEM;
+		goto dspp_alloc_fail;
+	}
+	pingpong_offsets = kzalloc(sizeof(u32) * npingpong, GFP_KERNEL);
+	if (!pingpong_offsets) {
+		pr_err("no mem assigned: kzalloc fail\n");
+		rc = -ENOMEM;
+		goto pingpong_alloc_fail;
+	}
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-intf-off",
+		mixer_offsets, mdata->nmixers_intf);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-mixer-wb-off",
+		mixer_offsets + mdata->nmixers_intf, mdata->nmixers_wb);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-dspp-off",
+		dspp_offsets, ndspp);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pingpong-off",
+		pingpong_offsets, npingpong);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets,
+			dspp_offsets, pingpong_offsets,
+			MDSS_MDP_MIXER_TYPE_INTF, mdata->nmixers_intf);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_mixer_addr_setup(mdata, mixer_offsets +
+			mdata->nmixers_intf, NULL, NULL,
+			MDSS_MDP_MIXER_TYPE_WRITEBACK, mdata->nmixers_wb);
+	if (rc)
+		goto parse_done;
+
+parse_done:
+	kfree(pingpong_offsets);
+pingpong_alloc_fail:
+	kfree(dspp_offsets);
+dspp_alloc_fail:
+	kfree(mixer_offsets);
+
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_ctl(struct platform_device *pdev)
+{
+	u32 nwb;
+	int rc = 0;
+	u32 *ctl_offsets = NULL, *wb_offsets = NULL;
+
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	mdata->nctl = mdss_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-ctl-off");
+	nwb = mdss_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-wb-off");
+
+	if (mdata->nctl != nwb) {
+		pr_err("device tree err: unequal number of ctl and wb\n");
+		rc = -EINVAL;
+		goto parse_done;
+	}
+
+	ctl_offsets = kzalloc(sizeof(u32) * mdata->nctl, GFP_KERNEL);
+	if (!ctl_offsets) {
+		pr_err("no more mem for ctl offsets\n");
+		return -ENOMEM;
+	}
+
+	wb_offsets = kzalloc(sizeof(u32) * nwb, GFP_KERNEL);
+	if (!wb_offsets) {
+		pr_err("no more mem for writeback offsets\n");
+		rc = -ENOMEM;
+		goto wb_alloc_fail;
+	}
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-ctl-off",
+		ctl_offsets, mdata->nctl);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-wb-off",
+		wb_offsets, nwb);
+	if (rc)
+		goto parse_done;
+
+	rc = mdss_mdp_ctl_addr_setup(mdata, ctl_offsets, wb_offsets,
+						 mdata->nctl);
+	if (rc)
+		goto parse_done;
+
+parse_done:
+	kfree(wb_offsets);
+wb_alloc_fail:
+	kfree(ctl_offsets);
+
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_video_intf(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 count;
+	u32 *offsets;
+	int rc;
+
+	count = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-intf-off");
+	if (count == 0)
+		return -EINVAL;
+
+	offsets = kzalloc(sizeof(u32) * count, GFP_KERNEL);
+	if (!offsets) {
+		pr_err("no mem assigned for video intf\n");
+		return -ENOMEM;
+	}
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-intf-off",
+			offsets, count);
+	if (rc)
+		goto parse_fail;
+
+	rc = mdss_mdp_video_addr_setup(mdata, offsets, count);
+	if (rc)
+		pr_err("unable to setup video interfaces\n");
+
+parse_fail:
+	kfree(offsets);
+
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_smp(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 num;
+	u32 data[2];
+	int rc;
+
+	num = mdss_mdp_parse_dt_prop_len(pdev, "qcom,mdss-smp-data");
+
+	if (num != 2)
+		return -EINVAL;
+
+	rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-smp-data", data, num);
+	if (rc)
+		return rc;
+
+	rc = mdss_mdp_smp_setup(mdata, data[0], data[1]);
+
+	if (rc)
+		pr_err("unable to setup smp data\n");
+
+	return rc;
+}
+
+static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	u32 data;
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
+		&data);
+	mdata->rot_block_size = (!rc ? data : 128);
+
+	return 0;
+}
+
+static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
+		char *prop_name, u32 *offsets, int len)
+{
+	int rc;
+	rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
+					offsets, len);
+	if (rc) {
+		pr_err("Error from prop %s : u32 array read\n", prop_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
+				      char *prop_name)
+{
+	int len = 0;
+
+	of_find_property(pdev->dev.of_node, prop_name, &len);
+
+	if (len < 1) {
+		pr_err("Error from prop %s : spec error in device tree\n",
+		       prop_name);
+		return 0;
+	}
+
+	len = len/sizeof(u32);
+
+	return len;
+}
+
+struct mdss_data_type *mdss_mdp_get_mdata(void)
+{
+	return mdss_res;
+}
+
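+/*
+ * GDSC (footswitch) control; on power-down the IOMMU is detached
+ * before the rail is cut.
+ */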
+static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
+{
+	if (!mdata->fs)
+		return;
+
+	if (on) {
+		pr_debug("Enable MDP FS\n");
+		if (!mdata->fs_ena)
+			regulator_enable(mdata->fs);
+		mdata->fs_ena = true;
+	} else {
+		pr_debug("Disable MDP FS\n");
+		mdss_iommu_dettach(mdata);
+		if (mdata->fs_ena)
+			regulator_disable(mdata->fs);
+		mdata->fs_ena = false;
+	}
+}
+
+static inline int mdss_mdp_suspend_sub(struct mdss_data_type *mdata)
+{
+	flush_workqueue(mdata->clk_ctrl_wq);
+
+	mdata->suspend_fs_ena = mdata->fs_ena;
+	mdss_mdp_footswitch_ctrl(mdata, false);
+
+	pr_debug("suspend done fs=%d\n", mdata->suspend_fs_ena);
+
+	return 0;
+}
+
+static inline int mdss_mdp_resume_sub(struct mdss_data_type *mdata)
+{
+	if (mdata->suspend_fs_ena)
+		mdss_mdp_footswitch_ctrl(mdata, true);
+
+	pr_debug("resume done fs=%d\n", mdata->suspend_fs_ena);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mdss_mdp_pm_suspend(struct device *dev)
+{
+	struct mdss_data_type *mdata;
+
+	mdata = dev_get_drvdata(dev);
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm suspend\n");
+
+	return mdss_mdp_suspend_sub(mdata);
+}
+
+static int mdss_mdp_pm_resume(struct device *dev)
+{
+	struct mdss_data_type *mdata;
+
+	mdata = dev_get_drvdata(dev);
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(dev, "display pm resume\n");
+
+	return mdss_mdp_resume_sub(mdata);
+}
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+static int mdss_mdp_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(&pdev->dev, "display suspend\n");
+
+	return mdss_mdp_suspend_sub(mdata);
+}
+
 static int mdss_mdp_resume(struct platform_device *pdev)
 {
-	int ret = 0;
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
 
-	pr_debug("resume display");
+	if (!mdata)
+		return -ENODEV;
 
-	mdss_mdp_footswitch_ctrl(true);
-	mutex_lock(&mdp_suspend_mutex);
-	mdss_res->suspend = false;
-	mutex_unlock(&mdp_suspend_mutex);
-	ret = mdss_fb_resume_all();
-	if (IS_ERR_VALUE(ret))
-		pr_err("Unable to resume all fb panels (%d)\n", ret);
-	return ret;
+	dev_dbg(&pdev->dev, "display resume\n");
+
+	return mdss_mdp_resume_sub(mdata);
 }
 #else
 #define mdss_mdp_suspend NULL
 #define mdss_mdp_resume NULL
 #endif
 
+#ifdef CONFIG_PM_RUNTIME
+static int mdss_mdp_runtime_resume(struct device *dev)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+
+	mdss_mdp_footswitch_ctrl(mdata, true);
+
+	return 0;
+}
+
+static int mdss_mdp_runtime_idle(struct device *dev)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	if (!mdata)
+		return -ENODEV;
+
+	dev_dbg(dev, "pm_runtime: idling...\n");
+
+	flush_workqueue(mdata->clk_ctrl_wq);
+
+	return 0;
+}
+
+static int mdss_mdp_runtime_suspend(struct device *dev)
+{
+	struct mdss_data_type *mdata = dev_get_drvdata(dev);
+	if (!mdata)
+		return -ENODEV;
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+
+	if (mdata->clk_ena) {
+		pr_err("MDP suspend failed\n");
+		return -EBUSY;
+	}
+	mdss_mdp_footswitch_ctrl(mdata, false);
+
+	return 0;
+}
+#endif
+
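+/*
+ * System sleep gates the footswitch around suspend/resume; runtime
+ * suspend refuses to cut power while MDP clocks are still on (-EBUSY),
+ * and runtime idle just drains the clock-control workqueue.
+ */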
+static const struct dev_pm_ops mdss_mdp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mdss_mdp_pm_suspend, mdss_mdp_pm_resume)
+	SET_RUNTIME_PM_OPS(mdss_mdp_runtime_suspend,
+			mdss_mdp_runtime_resume,
+			mdss_mdp_runtime_idle)
+};
+
 static int mdss_mdp_remove(struct platform_device *pdev)
 {
-	if (mdss_res->fs != NULL)
-		regulator_put(mdss_res->fs);
-	iounmap(mdss_reg_base);
+	struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+	if (!mdata)
+		return -ENODEV;
 	pm_runtime_disable(&pdev->dev);
-	mdss_mdp_bus_scale_unregister();
+	mdss_mdp_pp_term(&pdev->dev);
+	mdss_mdp_bus_scale_unregister(mdata);
+	mdss_debugfs_remove(mdata);
 	return 0;
 }
 
 static const struct of_device_id mdss_mdp_dt_match[] = {
 	{ .compatible = "qcom,mdss_mdp",},
+	{}
 };
 MODULE_DEVICE_TABLE(of, mdss_mdp_dt_match);
 
@@ -813,6 +1690,7 @@
 		 */
 		.name = "mdp",
 		.of_match_table = mdss_mdp_dt_match,
+		.pm = &mdss_mdp_pm_ops,
 	},
 };
 
diff --git a/drivers/video/msm/mdss/mdss_mdp.h b/drivers/video/msm/mdss/mdss_mdp.h
index e454d50..07b083a 100644
--- a/drivers/video/msm/mdss/mdss_mdp.h
+++ b/drivers/video/msm/mdss/mdss_mdp.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -26,20 +27,23 @@
 #define MDSS_MDP_CURSOR_HEIGHT 64
 #define MDSS_MDP_CURSOR_SIZE (MDSS_MDP_CURSOR_WIDTH*MDSS_MDP_CURSOR_WIDTH*4)
 
-#define MDP_CLK_DEFAULT_RATE	37500000
+#define MDP_CLK_DEFAULT_RATE	200000000
 #define PHASE_STEP_SHIFT	21
 #define MAX_MIXER_WIDTH		2048
-#define MAX_MIXER_HEIGHT	2048
+#define MAX_MIXER_HEIGHT	2400
 #define MAX_IMG_WIDTH		0x3FFF
 #define MAX_IMG_HEIGHT		0x3FFF
-#define MIN_DST_W		10
-#define MIN_DST_H		10
 #define MAX_DST_W		MAX_MIXER_WIDTH
 #define MAX_DST_H		MAX_MIXER_HEIGHT
 #define MAX_PLANES		4
 #define MAX_DOWNSCALE_RATIO	4
 #define MAX_UPSCALE_RATIO	20
 
+#define C3_ALPHA	3	/* alpha */
+#define C2_R_Cr		2	/* R/Cr */
+#define C1_B_Cb		1	/* B/Cb */
+#define C0_G_Y		0	/* G/luma */
+
 #ifdef MDSS_MDP_DEBUG_REG
 static inline void mdss_mdp_reg_write(u32 addr, u32 val)
 {
@@ -61,7 +65,6 @@
 #endif
 
 enum mdss_mdp_block_power_state {
-	MDP_BLOCK_MASTER_OFF = -1,
 	MDP_BLOCK_POWER_OFF = 0,
 	MDP_BLOCK_POWER_ON = 1,
 };
@@ -102,8 +105,13 @@
 	MDSS_MDP_MAX_CSC
 };
 
+struct mdss_mdp_ctl;
+typedef void (*mdp_vsync_handler_t)(struct mdss_mdp_ctl *, ktime_t);
+
 struct mdss_mdp_ctl {
 	u32 num;
+	char __iomem *base;
+	char __iomem *wb_base;
 	u32 ref_cnt;
 	int power_on;
 
@@ -114,6 +122,7 @@
 	u32 flush_bits;
 
 	u32 play_cnt;
+	u32 underrun_cnt;
 
 	u16 width;
 	u16 height;
@@ -121,18 +130,23 @@
 
 	u32 bus_ab_quota;
 	u32 bus_ib_quota;
-	u32 bus_quota;
 	u32 clk_rate;
+	u32 perf_changed;
 
+	struct mdss_data_type *mdata;
 	struct msm_fb_data_type *mfd;
 	struct mdss_mdp_mixer *mixer_left;
 	struct mdss_mdp_mixer *mixer_right;
 	struct mutex lock;
 
+	struct mdss_panel_data *panel_data;
+
 	int (*start_fnc) (struct mdss_mdp_ctl *ctl);
 	int (*stop_fnc) (struct mdss_mdp_ctl *ctl);
 	int (*prepare_fnc) (struct mdss_mdp_ctl *ctl, void *arg);
 	int (*display_fnc) (struct mdss_mdp_ctl *ctl, void *arg);
+	int (*wait_fnc) (struct mdss_mdp_ctl *ctl, void *arg);
+	int (*set_vsync_handler) (struct mdss_mdp_ctl *, mdp_vsync_handler_t);
 
 	void *priv_data;
 };
@@ -140,9 +154,11 @@
 struct mdss_mdp_mixer {
 	u32 num;
 	u32 ref_cnt;
+	char __iomem *base;
+	char __iomem *dspp_base;
+	char __iomem *pingpong_base;
 	u8 type;
 	u8 params_changed;
-
 	u16 width;
 	u16 height;
 	u8 cursor_enabled;
@@ -173,23 +189,8 @@
 	u8 bpp;
 	u8 alpha_enable;	/*  source has alpha */
 
-	/*
-	 * number of bits for source component,
-	 * 0 = 1 bit, 1 = 2 bits, 2 = 6 bits, 3 = 8 bits
-	 */
-	u8 a_bit;	/* component 3, alpha */
-	u8 r_bit;	/* component 2, R_Cr */
-	u8 b_bit;	/* component 1, B_Cb */
-	u8 g_bit;	/* component 0, G_lumz */
-
-	/*
-	 * unpack pattern
-	 * A = C3, R = C2, B = C1, G = C0
-	 */
-	u8 element3;
-	u8 element2;
-	u8 element1;
-	u8 element0;
+	u8 bits[MAX_PLANES];
+	u8 element[MAX_PLANES];
 };
 
 struct mdss_mdp_plane_sizes {
@@ -197,6 +198,8 @@
 	u32 plane_size[MAX_PLANES];
 	u32 total_size;
 	u32 ystride[MAX_PLANES];
+	u32 rau_cnt;
+	u32 rau_h[2];
 };
 
 struct mdss_mdp_img_data {
@@ -206,7 +209,6 @@
 	int p_need;
 	struct file *srcp_file;
 	struct ion_handle *srcp_ihdl;
-	struct ion_client *iclient;
 };
 
 struct mdss_mdp_data {
@@ -215,10 +217,31 @@
 	struct mdss_mdp_img_data p[MAX_PLANES];
 };
 
+struct pp_sts_type {
+	u32 pa_sts;
+	u32 pcc_sts;
+	u32 igc_sts;
+	u32 igc_tbl_idx;
+	u32 argc_sts;
+	u32 enhist_sts;
+	u32 dither_sts;
+	u32 gamut_sts;
+	u32 pgc_sts;
+	u32 sharp_sts;
+};
+
+struct mdss_pipe_pp_res {
+	u32 igc_c0_c1[IGC_LUT_ENTRIES];
+	u32 igc_c2[IGC_LUT_ENTRIES];
+	struct pp_sts_type pp_sts;
+};
+
 struct mdss_mdp_pipe {
 	u32 num;
 	u32 type;
 	u32 ndx;
+	char __iomem *base;
+	u32 ftch_id;
 	atomic_t ref_cnt;
 	u32 play_cnt;
 
@@ -236,16 +259,25 @@
 	u8 mixer_stage;
 	u8 is_fg;
 	u8 alpha;
+	u8 overfetch_disable;
 	u32 transp;
 
 	struct msm_fb_data_type *mfd;
 	struct mdss_mdp_mixer *mixer;
-	struct mutex lock;
 
 	struct mdp_overlay req_data;
 	u32 params_changed;
 
 	unsigned long smp[MAX_PLANES];
+
+	struct mdss_mdp_data back_buf;
+	struct mdss_mdp_data front_buf;
+
+	struct list_head used_list;
+	struct list_head cleanup_list;
+
+	struct mdp_overlay_pp_params pp_cfg;
+	struct mdss_pipe_pp_res pp_res;
 };
 
 struct mdss_mdp_writeback_arg {
@@ -254,39 +286,86 @@
 	void *priv_data;
 };
 
+struct mdss_overlay_private {
+	int vsync_pending;
+	ktime_t vsync_time;
+	struct completion vsync_comp;
+	spinlock_t vsync_lock;
+	int borderfill_enable;
+	int overlay_play_enable;
+	int hw_refresh;
+
+	struct mdss_data_type *mdata;
+	struct mutex ov_lock;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_wb *wb;
+	struct list_head overlay_list;
+	struct list_head pipes_used;
+	struct list_head pipes_cleanup;
+};
+
+#define is_vig_pipe(_pipe_id_) ((_pipe_id_) <= MDSS_MDP_SSPP_VIG2)
 static inline void mdss_mdp_ctl_write(struct mdss_mdp_ctl *ctl,
 				      u32 reg, u32 val)
 {
-	int offset = MDSS_MDP_REG_CTL_OFFSET(ctl->num);
-	MDSS_MDP_REG_WRITE(offset + reg, val);
+	writel_relaxed(val, ctl->base + reg);
 }
 
 static inline u32 mdss_mdp_ctl_read(struct mdss_mdp_ctl *ctl, u32 reg)
 {
-	int offset = MDSS_MDP_REG_CTL_OFFSET(ctl->num);
-	return MDSS_MDP_REG_READ(offset + reg);
+	return readl_relaxed(ctl->base + reg);
+}
+
+static inline void mdss_mdp_pingpong_write(struct mdss_mdp_mixer *mixer,
+				      u32 reg, u32 val)
+{
+	writel_relaxed(val, mixer->pingpong_base + reg);
+}
+
+static inline u32 mdss_mdp_pingpong_read(struct mdss_mdp_mixer *mixer, u32 reg)
+{
+	return readl_relaxed(mixer->pingpong_base + reg);
 }
 
 irqreturn_t mdss_mdp_isr(int irq, void *ptr);
+int mdss_iommu_attach(struct mdss_data_type *mdata);
+int mdss_mdp_copy_splash_screen(struct mdss_panel_data *pdata);
+void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
+		u32 intr_type, u32 intf_num);
 int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num);
 void mdss_mdp_irq_disable(u32 intr_type, u32 intf_num);
+int mdss_mdp_hist_irq_enable(u32 irq);
+void mdss_mdp_hist_irq_disable(u32 irq);
 void mdss_mdp_irq_disable_nosync(u32 intr_type, u32 intf_num);
 int mdss_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
 			       void (*fnc_ptr)(void *), void *arg);
 
-int mdss_mdp_bus_scale_set_quota(u32 ab_quota, u32 ib_quota);
+void mdss_mdp_footswitch_ctrl_splash(int on);
+int mdss_mdp_bus_scale_set_quota(u64 ab_quota, u64 ib_quota);
 void mdss_mdp_set_clk_rate(unsigned long min_clk_rate);
 unsigned long mdss_mdp_get_clk_rate(u32 clk_idx);
 int mdss_mdp_vsync_clk_enable(int enable);
 void mdss_mdp_clk_ctrl(int enable, int isr);
-void mdss_mdp_footswitch_ctrl(int on);
+struct mdss_data_type *mdss_mdp_get_mdata(void);
 
 int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
+int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en);
+int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
+		u32 *offsets,  u32 count);
 int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl);
 int mdss_mdp_writeback_start(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd);
 
-int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd);
-int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd);
+struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
+					struct msm_fb_data_type *mfd);
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
+		struct mdss_panel_data *pdata);
+int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg);
 
 struct mdss_mdp_mixer *mdss_mdp_wb_mixer_alloc(int rotator);
 int mdss_mdp_wb_mixer_destroy(struct mdss_mdp_mixer *mixer);
@@ -296,31 +375,100 @@
 int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe, int params_changed);
 int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe);
 int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg);
+int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl);
 
 int mdss_mdp_csc_setup(u32 block, u32 blk_idx, u32 tbl_idx, u32 csc_type);
-int mdss_mdp_dspp_setup(struct mdss_mdp_ctl *ctl, struct mdss_mdp_mixer *mixer);
+int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, u32 tbl_idx,
+				   struct mdp_csc_cfg *data);
 
-struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_pnum(u32 pnum);
-struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_locked(u32 type);
-struct mdss_mdp_pipe *mdss_mdp_pipe_get_locked(u32 ndx);
-int mdss_mdp_pipe_lock(struct mdss_mdp_pipe *pipe);
-void mdss_mdp_pipe_unlock(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_pp_init(struct device *dev);
+void mdss_mdp_pp_term(struct device *dev);
+
+int mdss_mdp_pp_resume(u32 mixer_num);
+
+int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_pipe_pp_setup(struct mdss_mdp_pipe *pipe, u32 *op);
+int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op);
+void mdss_mdp_pipe_sspp_term(struct mdss_mdp_pipe *pipe);
+int mdss_mdp_smp_setup(struct mdss_data_type *mdata, u32 cnt, u32 size);
+
+int mdss_hw_init(struct mdss_data_type *mdata);
+
+int mdss_mdp_pa_config(struct mdss_mdp_ctl *ctl,
+				struct mdp_pa_cfg_data *config,
+				u32 *copyback);
+int mdss_mdp_pcc_config(struct mdss_mdp_ctl *ctl,
+				struct mdp_pcc_cfg_data *cfg_ptr,
+				u32 *copyback);
+int mdss_mdp_igc_lut_config(struct mdss_mdp_ctl *ctl,
+				struct mdp_igc_lut_data *config,
+				u32 *copyback);
+int mdss_mdp_argc_config(struct mdss_mdp_ctl *ctl,
+				struct mdp_pgc_lut_data *config,
+				u32 *copyback);
+int mdss_mdp_hist_lut_config(struct mdss_mdp_ctl *ctl,
+				struct mdp_hist_lut_data *config,
+				u32 *copyback);
+int mdss_mdp_dither_config(struct mdss_mdp_ctl *ctl,
+				struct mdp_dither_cfg_data *config,
+				u32 *copyback);
+int mdss_mdp_gamut_config(struct mdss_mdp_ctl *ctl,
+				struct mdp_gamut_cfg_data *config,
+				u32 *copyback);
+
+int mdss_mdp_histogram_start(struct mdss_mdp_ctl *ctl,
+				struct mdp_histogram_start_req *req);
+int mdss_mdp_histogram_stop(struct mdss_mdp_ctl *ctl, u32 block);
+int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
+				struct mdp_histogram_data *hist,
+				u32 *hist_data_addr);
+void mdss_mdp_hist_intr_done(u32 isr);
+
+struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
+					  u32 type);
+struct mdss_mdp_pipe *mdss_mdp_pipe_get(struct mdss_data_type *mdata, u32 ndx);
+int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe);
+void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe);
+struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_dma(struct mdss_mdp_mixer *mixer);
+
+int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata, u32 *offsets,
+		u32 *ftch_y_id, u32 type, u32 num_base, u32 len);
+int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata, u32 *mixer_offsets,
+		u32 *dspp_offsets, u32 *pingpong_offsets, u32 type, u32 len);
+int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata, u32 *ctl_offsets,
+		u32 *wb_offsets, u32 len);
 
 int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe);
-int mdss_mdp_pipe_release_all(struct msm_fb_data_type *mfd);
 int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
 			     struct mdss_mdp_data *src_data);
 
 int mdss_mdp_data_check(struct mdss_mdp_data *data,
 			struct mdss_mdp_plane_sizes *ps);
 int mdss_mdp_get_plane_sizes(u32 format, u32 w, u32 h,
-			     struct mdss_mdp_plane_sizes *ps);
+			     struct mdss_mdp_plane_sizes *ps, u32 bwc_mode);
+int mdss_mdp_get_rau_strides(u32 w, u32 h, struct mdss_mdp_format_params *fmt,
+			       struct mdss_mdp_plane_sizes *ps);
 struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format);
 int mdss_mdp_put_img(struct mdss_mdp_img_data *data);
-int mdss_mdp_get_img(struct ion_client *iclient, struct msmfb_data *img,
-		     struct mdss_mdp_img_data *data);
+int mdss_mdp_get_img(struct msmfb_data *img, struct mdss_mdp_img_data *data);
+u32 mdss_get_panel_framerate(struct msm_fb_data_type *mfd);
 
-int mdss_mdp_wb_kickoff(struct mdss_mdp_ctl *ctl);
+int mdss_mdp_wb_kickoff(struct msm_fb_data_type *mfd);
 int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
 
+int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id);
+int mdss_mdp_alloc_fb_mem(struct msm_fb_data_type *mfd);
+u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp);
+
+int mdss_panel_register_done(struct mdss_panel_data *pdata);
+
+#define mfd_to_mdp5_data(mfd) (mfd->mdp.private1)
+#define mfd_to_mdata(mfd) (((struct mdss_overlay_private *)\
+				(mfd->mdp.private1))->mdata)
+#define mfd_to_ctl(mfd) (((struct mdss_overlay_private *)\
+				(mfd->mdp.private1))->ctl)
+#define mfd_to_wb(mfd) (((struct mdss_overlay_private *)\
+				(mfd->mdp.private1))->wb)
+
 #endif /* MDSS_MDP_H */
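
The mfd_to_*() accessors above are blind casts: they rely on mfd->mdp.private1
having been pointed at the mdss_overlay_private instance during overlay init,
and nothing checks the cast at compile time. A minimal stand-alone model of
that indirection (the struct names are abbreviated stand-ins, not the real
kernel types):

	#include <stdio.h>

	struct mdata { int nctl; };
	struct ovl_private { struct mdata *mdata; };	/* stands in for mdss_overlay_private */
	struct fb_data { struct { void *private1; } mdp; };	/* stands in for msm_fb_data_type */

	/* same shape as mfd_to_mdata(): cast private1, then dereference */
	#define mfd_to_mdata(mfd) (((struct ovl_private *)((mfd)->mdp.private1))->mdata)

	int main(void)
	{
		struct mdata md = { .nctl = 5 };
		struct ovl_private ovl = { .mdata = &md };
		struct fb_data mfd = { .mdp = { .private1 = &ovl } };

		printf("nctl=%d\n", mfd_to_mdata(&mfd)->nctl);
		return 0;
	}
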
diff --git a/drivers/video/msm/mdss/mdss_mdp_ctl.c b/drivers/video/msm/mdss/mdss_mdp_ctl.c
index fca8d08..1ced200 100644
--- a/drivers/video/msm/mdss/mdss_mdp_ctl.c
+++ b/drivers/video/msm/mdss/mdss_mdp_ctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,8 +20,10 @@
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 
-/* 1.10 bus fudge factor */
-#define MDSS_MDP_BUS_FUDGE_FACTOR(val) ALIGN((((val) * 11) / 10), SZ_16M)
+/* truncate at 1k */
+#define MDSS_MDP_BUS_FACTOR_SHIFT 10
+/* 1.5 bus fudge factor */
+#define MDSS_MDP_BUS_FUDGE_FACTOR(val) (((val) / 2) * 3)
 /* 1.25 clock fudge factor */
 #define MDSS_MDP_CLK_FUDGE_FACTOR(val) (((val) * 5) / 4)
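
The reworked quota bookkeeping keeps bandwidth in KiB (the "truncate at 1k"
shift) and only scales back up when talking to the bus driver; note that the
1.5x factor is integer math, so odd values lose a unit before tripling. A
stand-alone sketch of the round trip, using a made-up 1080x1920 RGBA layer at
60fps:

	#include <stdio.h>
	#include <stdint.h>

	#define BUS_FACTOR_SHIFT 10				/* quotas tracked in KiB */
	#define BUS_FUDGE_FACTOR(val) (((val) / 2) * 3)		/* 1.5x, rounds down first */

	int main(void)
	{
		uint64_t bytes = 60ULL * 1080 * 1920 * 4;	/* hypothetical layer */
		uint64_t ib_kib = bytes >> BUS_FACTOR_SHIFT;	/* as the mixers accumulate it */

		/* what mdss_mdp_ctl_perf_commit() hands to the bus driver */
		uint64_t ab = ib_kib << BUS_FACTOR_SHIFT;
		uint64_t ib = BUS_FUDGE_FACTOR(ib_kib) << BUS_FACTOR_SHIFT;

		printf("ab=%llu ib=%llu\n", (unsigned long long)ab,
		       (unsigned long long)ib);
		return 0;
	}
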
 
@@ -36,15 +38,21 @@
 #define MDSS_MDP_PERF_UPDATE_ALL -1
 
 static DEFINE_MUTEX(mdss_mdp_ctl_lock);
-static struct mdss_mdp_ctl mdss_mdp_ctl_list[MDSS_MDP_MAX_CTL];
-static struct mdss_mdp_mixer mdss_mdp_mixer_list[MDSS_MDP_MAX_LAYERMIXER];
 
-static int mdss_mdp_ctl_perf_commit(u32 flags)
+static int mdss_mdp_mixer_free(struct mdss_mdp_mixer *mixer);
+
+static inline void mdp_mixer_write(struct mdss_mdp_mixer *mixer,
+				   u32 reg, u32 val)
+{
+	writel_relaxed(val, mixer->base + reg);
+}
+
+static int mdss_mdp_ctl_perf_commit(struct mdss_data_type *mdata, u32 flags)
 {
 	struct mdss_mdp_ctl *ctl;
 	int cnum;
 	unsigned long clk_rate = 0;
-	u32 bus_ab_quota = 0, bus_ib_quota = 0;
+	u64 bus_ab_quota = 0, bus_ib_quota = 0;
 
 	if (!flags) {
 		pr_err("nothing to update\n");
@@ -52,8 +60,8 @@
 	}
 
 	mutex_lock(&mdss_mdp_ctl_lock);
-	for (cnum = 0; cnum < MDSS_MDP_MAX_CTL; cnum++) {
-		ctl = &mdss_mdp_ctl_list[cnum];
+	for (cnum = 0; cnum < mdata->nctl; cnum++) {
+		ctl = mdata->ctl_off + cnum;
 		if (ctl->power_on) {
 			bus_ab_quota += ctl->bus_ab_quota;
 			bus_ib_quota += ctl->bus_ib_quota;
@@ -63,8 +71,10 @@
 		}
 	}
 	if (flags & MDSS_MDP_PERF_UPDATE_BUS) {
-		bus_ab_quota = MDSS_MDP_BUS_FUDGE_FACTOR(bus_ab_quota);
+		bus_ab_quota = bus_ib_quota << MDSS_MDP_BUS_FACTOR_SHIFT;
 		bus_ib_quota = MDSS_MDP_BUS_FUDGE_FACTOR(bus_ib_quota);
+		bus_ib_quota <<= MDSS_MDP_BUS_FACTOR_SHIFT;
+
 		mdss_mdp_bus_scale_set_quota(bus_ab_quota, bus_ib_quota);
 	}
 	if (flags & MDSS_MDP_PERF_UPDATE_CLK) {
@@ -84,25 +94,40 @@
 	struct mdss_mdp_pipe *pipe;
 	const int fps = 60;
 	u32 quota, rate;
-	u32 v_total, v_active;
+	u32 v_total;
 	int i;
+	u32 max_clk_rate = 0, ab_total = 0, ib_total = 0;
 
 	*bus_ab_quota = 0;
 	*bus_ib_quota = 0;
 	*clk_rate = 0;
 
-	if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
-		struct mdss_panel_info *pinfo = &mixer->ctl->mfd->panel_info;
-		v_total = (pinfo->yres + pinfo->lcdc.v_back_porch +
-			pinfo->lcdc.v_front_porch + pinfo->lcdc.v_pulse_width);
-		v_active = pinfo->yres;
-	} else if (mixer->rotator_mode) {
+	if (mixer->rotator_mode) {
 		pipe = mixer->stage_pipe[0]; /* rotator pipe */
 		v_total = pipe->flags & MDP_ROT_90 ? pipe->dst.w : pipe->dst.h;
-		v_active = v_total;
 	} else {
-		v_total = mixer->height;
-		v_active = v_total;
+		bool is_writeback = false;
+		if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+			struct mdss_panel_info *pinfo;
+			pinfo = &mixer->ctl->panel_data->panel_info;
+			v_total = (pinfo->yres + pinfo->lcdc.v_back_porch +
+				   pinfo->lcdc.v_front_porch +
+				   pinfo->lcdc.v_pulse_width);
+
+			if (pinfo->type == WRITEBACK_PANEL)
+				is_writeback = true;
+		} else {
+			v_total = mixer->height;
+
+			is_writeback = true;
+		}
+		*clk_rate = mixer->width * v_total * fps;
+		if (is_writeback) {
+			/* perf for bus writeback */
+			*bus_ab_quota = fps * mixer->width * mixer->height * 3;
+			*bus_ab_quota >>= MDSS_MDP_BUS_FACTOR_SHIFT;
+			*bus_ib_quota = *bus_ab_quota;
+		}
 	}
 
 	for (i = 0; i < MDSS_MDP_MAX_STAGE; i++) {
@@ -110,6 +135,11 @@
 		pipe = mixer->stage_pipe[i];
 		if (pipe == NULL)
 			continue;
+		if (pipe->is_fg) {
+			ab_total = 0;
+			ib_total = 0;
+			max_clk_rate = 0;
+		}
 
 		quota = fps * pipe->src.w * pipe->src.h;
 		if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
@@ -117,36 +147,39 @@
 		else
 			quota *= pipe->src_fmt->bpp;
 
-		if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF)
-			quota = (quota / v_active) * v_total;
-		else
-			quota *= 2; /* bus read + write */
-
 		rate = pipe->dst.w;
-		if (pipe->src.h > pipe->dst.h) {
+		if (pipe->src.h > pipe->dst.h)
 			rate = (rate * pipe->src.h) / pipe->dst.h;
-			ib_quota = (quota / pipe->dst.h) * pipe->src.h;
-		} else {
-			ib_quota = quota;
-		}
-		rate *= v_total * fps;
-		if (mixer->rotator_mode)
-			rate /= 4; /* block mode fetch at 4 pix/clk */
 
-		*bus_ab_quota += quota;
-		*bus_ib_quota += ib_quota;
-		if (rate > *clk_rate)
-			*clk_rate = rate;
+		rate *= v_total * fps;
+		if (mixer->rotator_mode) {
+			rate /= 4; /* block mode fetch at 4 pix/clk */
+			quota *= 2; /* bus read + write */
+			ib_quota = quota;
+		} else {
+			ib_quota = (quota / pipe->dst.h) * v_total;
+		}
 		pr_debug("mixer=%d pnum=%d clk_rate=%u bus ab=%u ib=%u\n",
 			 mixer->num, pipe->num, rate, quota, ib_quota);
+
+		ab_total += quota >> MDSS_MDP_BUS_FACTOR_SHIFT;
+		ib_total += ib_quota >> MDSS_MDP_BUS_FACTOR_SHIFT;
+		if (rate > max_clk_rate)
+			max_clk_rate = rate;
 	}
 
+	*bus_ab_quota += ab_total;
+	*bus_ib_quota += ib_total;
+	if (max_clk_rate > *clk_rate)
+		*clk_rate = max_clk_rate;
+
 	pr_debug("final mixer=%d clk_rate=%u bus ab=%u ib=%u\n", mixer->num,
 		 *clk_rate, *bus_ab_quota, *bus_ib_quota);
 }
 
-static int mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl, u32 *flags)
+static int mdss_mdp_ctl_perf_update(struct mdss_mdp_ctl *ctl)
 {
 	int ret = MDSS_MDP_PERF_UPDATE_SKIP;
 	u32 clk_rate, ab_quota, ib_quota;
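
In the per-pipe loop above, the IB quota is the raw fetch rate scaled by
v_total / dst.h, because the whole source must be fetched during the
destination's active lines, and the clock demand grows with vertical
downscale. A rough stand-alone rerun of that arithmetic, with invented
timings (1280x720 source into a 1280x600 window, panel v_total of 1111 lines):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const uint32_t fps = 60, bpp = 4, v_total = 1111;
		uint32_t src_w = 1280, src_h = 720, dst_w = 1280, dst_h = 600;

		uint32_t quota = fps * src_w * src_h * bpp;

		uint32_t rate = dst_w;
		if (src_h > dst_h)			/* vertical downscale */
			rate = (rate * src_h) / dst_h;
		rate *= v_total * fps;

		uint32_t ib_quota = (quota / dst_h) * v_total;

		printf("quota=%u ib=%u clk=%u\n", quota, ib_quota, rate);
		return 0;
	}
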
@@ -167,9 +200,25 @@
 		total_ib_quota += ib_quota;
 		if (clk_rate > max_clk_rate)
 			max_clk_rate = clk_rate;
+
+		if (ctl->intf_type) {
+			struct mdss_panel_info *pinfo;
+
+			pinfo = &ctl->panel_data->panel_info;
+			clk_rate = (ctl->intf_type == MDSS_INTF_DSI) ?
+					pinfo->mipi.dsi_pclk_rate :
+					pinfo->clk_rate;
+
+			/* minimum clock rate due to inefficiency in 3dmux */
+			clk_rate = mult_frac(clk_rate >> 1, 9, 8);
+			if (clk_rate > max_clk_rate)
+				max_clk_rate = clk_rate;
+		}
 	}
 
-	*flags = 0;
+	/* request minimum bandwidth to have bus clock on when display is on */
+	if (total_ib_quota == 0)
+		total_ib_quota = SZ_16M >> MDSS_MDP_BUS_FACTOR_SHIFT;
 
 	if (max_clk_rate != ctl->clk_rate) {
 		if (max_clk_rate > ctl->clk_rate)
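
Two floors get added here: an idle IB vote of SZ_16M (pre-shifted into KiB) so
the bus clock stays up while the display is on, and a clock floor of 9/8 of
half the interface pixel clock to cover the 3dmux inefficiency. Worked numbers
for a hypothetical 150 MHz DSI pixel clock, with mult_frac() open-coded the
same way the kernel macro computes it:

	#include <stdio.h>
	#include <stdint.h>

	#define SZ_16M (16 * 1024 * 1024)
	#define BUS_FACTOR_SHIFT 10

	/* same rounding as the kernel's mult_frac() */
	static uint64_t mult_frac(uint64_t x, uint64_t n, uint64_t d)
	{
		return (x / d) * n + ((x % d) * n) / d;
	}

	int main(void)
	{
		uint64_t pclk = 150000000;	/* hypothetical DSI pixel clock */

		printf("3dmux clk floor = %llu Hz\n",
		       (unsigned long long)mult_frac(pclk >> 1, 9, 8));
		printf("idle ib vote = %u KiB\n", SZ_16M >> BUS_FACTOR_SHIFT);
		return 0;
	}
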
@@ -177,41 +226,41 @@
 		else
 			ret = MDSS_MDP_PERF_UPDATE_LATE;
 		ctl->clk_rate = max_clk_rate;
-		*flags |= MDSS_MDP_PERF_UPDATE_CLK;
+		ctl->perf_changed |= MDSS_MDP_PERF_UPDATE_CLK;
 	}
 
 	if ((total_ab_quota != ctl->bus_ab_quota) ||
 			(total_ib_quota != ctl->bus_ib_quota)) {
 		if (ret == MDSS_MDP_PERF_UPDATE_SKIP) {
-			if (total_ib_quota > ctl->bus_ib_quota)
+			if (total_ib_quota >= ctl->bus_ib_quota)
 				ret = MDSS_MDP_PERF_UPDATE_EARLY;
 			else
 				ret = MDSS_MDP_PERF_UPDATE_LATE;
 		}
 		ctl->bus_ab_quota = total_ab_quota;
 		ctl->bus_ib_quota = total_ib_quota;
-		*flags |= MDSS_MDP_PERF_UPDATE_BUS;
+		ctl->perf_changed |= MDSS_MDP_PERF_UPDATE_BUS;
 	}
 
 	return ret;
 }
 
-static struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(void)
+static struct mdss_mdp_ctl *mdss_mdp_ctl_alloc(struct mdss_data_type *mdata)
 {
 	struct mdss_mdp_ctl *ctl = NULL;
 	int cnum;
 
 	mutex_lock(&mdss_mdp_ctl_lock);
-	for (cnum = 0; cnum < MDSS_MDP_MAX_CTL; cnum++) {
-		if (mdss_mdp_ctl_list[cnum].ref_cnt == 0) {
-			ctl = &mdss_mdp_ctl_list[cnum];
-			ctl->num = cnum;
+	for (cnum = 0; cnum < mdata->nctl; cnum++) {
+		ctl = mdata->ctl_off + cnum;
+		if (ctl->ref_cnt == 0) {
 			ctl->ref_cnt++;
+			ctl->mdata = mdata;
 			mutex_init(&ctl->lock);
-
 			pr_debug("alloc ctl_num=%d\n", ctl->num);
 			break;
 		}
+		ctl = NULL;
 	}
 	mutex_unlock(&mdss_mdp_ctl_lock);
 
@@ -231,31 +280,78 @@
 	}
 
 	mutex_lock(&mdss_mdp_ctl_lock);
-	if (--ctl->ref_cnt == 0)
-		memset(ctl, 0, sizeof(*ctl));
+	ctl->ref_cnt--;
+	if (ctl->mixer_left) {
+		mdss_mdp_mixer_free(ctl->mixer_left);
+		ctl->mixer_left = NULL;
+	}
+	if (ctl->mixer_right) {
+		mdss_mdp_mixer_free(ctl->mixer_right);
+		ctl->mixer_right = NULL;
+	}
+	ctl->power_on = false;
+	ctl->start_fnc = NULL;
+	ctl->stop_fnc = NULL;
+	ctl->prepare_fnc = NULL;
+	ctl->display_fnc = NULL;
 	mutex_unlock(&mdss_mdp_ctl_lock);
 
 	return 0;
 }
 
-static struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(u32 type)
+static struct mdss_mdp_mixer *mdss_mdp_mixer_alloc(
+		struct mdss_mdp_ctl *ctl, u32 type, int mux)
 {
 	struct mdss_mdp_mixer *mixer = NULL;
-	int mnum;
+	u32 nmixers_intf;
+	u32 nmixers_wb;
+	u32 i;
+	u32 nmixers;
+	struct mdss_mdp_mixer *mixer_pool = NULL;
+
+	if (!ctl || !ctl->mdata)
+		return NULL;
 
 	mutex_lock(&mdss_mdp_ctl_lock);
-	for (mnum = 0; mnum < MDSS_MDP_MAX_LAYERMIXER; mnum++) {
-		if (type == mdss_res->mixer_type_map[mnum] &&
-		    mdss_mdp_mixer_list[mnum].ref_cnt == 0) {
-			mixer = &mdss_mdp_mixer_list[mnum];
-			mixer->num = mnum;
+	nmixers_intf = ctl->mdata->nmixers_intf;
+	nmixers_wb = ctl->mdata->nmixers_wb;
+
+	switch (type) {
+	case MDSS_MDP_MIXER_TYPE_INTF:
+		mixer_pool = ctl->mdata->mixer_intf;
+		nmixers = nmixers_intf;
+		break;
+
+	case MDSS_MDP_MIXER_TYPE_WRITEBACK:
+		mixer_pool = ctl->mdata->mixer_wb;
+		nmixers = nmixers_wb;
+		break;
+
+	default:
+		nmixers = 0;
+		pr_err("invalid pipe type %d\n", type);
+		break;
+	}
+
+	/*
+	 * Early MDP revisions only support muxing dual pipes on mixers 0 and
+	 * 1, so keep those mixers readily available by preferring mixer 2
+	 * when it is free and no mux is required.
+	 */
+	if (!mux && (ctl->mdata->mdp_rev == MDSS_MDP_HW_REV_100) &&
+			(type == MDSS_MDP_MIXER_TYPE_INTF) &&
+			(nmixers >= MDSS_MDP_INTF_LAYERMIXER2) &&
+			(mixer_pool[MDSS_MDP_INTF_LAYERMIXER2].ref_cnt == 0))
+		mixer_pool += MDSS_MDP_INTF_LAYERMIXER2;
+
+	for (i = 0; i < nmixers; i++) {
+		mixer = mixer_pool + i;
+		if (mixer->ref_cnt == 0) {
 			mixer->ref_cnt++;
 			mixer->params_changed++;
-			mixer->type = type;
-
-			pr_debug("mixer_num=%d\n", mixer->num);
+			mixer->ctl = ctl;
+			pr_debug("alloc mixer num%d\n", mixer->num);
 			break;
 		}
+		mixer = NULL;
 	}
 	mutex_unlock(&mdss_mdp_ctl_lock);
 
@@ -275,8 +371,7 @@
 	}
 
 	mutex_lock(&mdss_mdp_ctl_lock);
-	if (--mixer->ref_cnt == 0)
-		memset(mixer, 0, sizeof(*mixer));
+	mixer->ref_cnt--;
 	mutex_unlock(&mdss_mdp_ctl_lock);
 
 	return 0;
@@ -287,23 +382,22 @@
 	struct mdss_mdp_ctl *ctl = NULL;
 	struct mdss_mdp_mixer *mixer = NULL;
 
-	ctl = mdss_mdp_ctl_alloc();
-
+	ctl = mdss_mdp_ctl_alloc(mdss_res);
 	if (!ctl)
 		return NULL;
 
-	mixer = mdss_mdp_mixer_alloc(MDSS_MDP_MIXER_TYPE_WRITEBACK);
+	mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK, false);
 	if (!mixer)
 		goto error;
 
 	mixer->rotator_mode = rotator;
 
 	switch (mixer->num) {
-	case MDSS_MDP_LAYERMIXER3:
+	case MDSS_MDP_WB_LAYERMIXER0:
 		ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT0_MODE :
 			       MDSS_MDP_CTL_OP_WB0_MODE);
 		break;
-	case MDSS_MDP_LAYERMIXER4:
+	case MDSS_MDP_WB_LAYERMIXER1:
 		ctl->opmode = (rotator ? MDSS_MDP_CTL_OP_ROT1_MODE :
 			       MDSS_MDP_CTL_OP_WB1_MODE);
 		break;
@@ -313,7 +407,6 @@
 	}
 
 	ctl->mixer_left = mixer;
-	mixer->ctl = ctl;
 
 	ctl->start_fnc = mdss_mdp_writeback_start;
 	ctl->power_on = true;
@@ -345,65 +438,215 @@
 	mdss_mdp_mixer_free(mixer);
 	mdss_mdp_ctl_free(ctl);
 
+	mdss_mdp_ctl_perf_commit(ctl->mdata, MDSS_MDP_PERF_UPDATE_ALL);
+
 	return 0;
 }
 
-static int mdss_mdp_ctl_init(struct msm_fb_data_type *mfd)
+static inline int mdss_mdp_set_split_ctl(struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_ctl *split_ctl)
 {
-	struct mdss_mdp_ctl *ctl;
-	u32 width, height;
-
-	if (!mfd)
+	if (!ctl || !split_ctl)
 		return -ENODEV;
 
-	width = mfd->fbi->var.xres;
-	height = mfd->fbi->var.yres;
+	/*
+	 * Attach the split ctl's mixer as the right mixer of the original
+	 * ctl, so the original ctl works the same way as a dual pipe setup.
+	 */
+	ctl->mixer_right = split_ctl->mixer_left;
 
-	if (width > (2 * MAX_MIXER_WIDTH)) {
-		pr_err("unsupported resolution\n");
+	return 0;
+}
+
+static inline struct mdss_mdp_ctl *mdss_mdp_get_split_ctl(
+		struct mdss_mdp_ctl *ctl)
+{
+	if (ctl && ctl->mixer_right && (ctl->mixer_right->ctl != ctl))
+		return ctl->mixer_right->ctl;
+
+	return NULL;
+}
+
+static int mdss_mdp_ctl_fbc_enable(int enable,
+		struct mdss_mdp_mixer *mixer, struct mdss_panel_info *pdata)
+{
+	struct fbc_panel_info *fbc;
+	u32 mode = 0, budget_ctl = 0, lossy_mode = 0;
+
+	if (!pdata) {
+		pr_err("Invalid pdata\n");
 		return -EINVAL;
 	}
 
-	ctl = mdss_mdp_ctl_alloc();
+	fbc = &pdata->fbc;
 
-	if (!ctl) {
-		pr_err("unable to allocate ctl\n");
-		return -ENOMEM;
+	if (!fbc || !fbc->enabled) {
+		pr_err("Invalid FBC structure\n");
+		return -EINVAL;
 	}
 
-	ctl->mfd = mfd;
+	if (mixer->num == MDSS_MDP_INTF_LAYERMIXER0) {
+		pr_debug("Mixer supports FBC.\n");
+	} else {
+		pr_debug("Mixer doesn't support FBC.\n");
+		return -EINVAL;
+	}
+
+	if (enable) {
+		mode = ((pdata->xres) << 16) | ((fbc->comp_mode) << 8) |
+			((fbc->qerr_enable) << 7) | ((fbc->cd_bias) << 4) |
+			((fbc->pat_enable) << 3) | ((fbc->vlc_enable) << 2) |
+			((fbc->bflc_enable) << 1) | enable;
+
+		budget_ctl = ((fbc->line_x_budget) << 12) |
+			((fbc->block_x_budget) << 8) | fbc->block_budget;
+
+		lossy_mode = ((fbc->lossless_mode_thd) << 16) |
+			((fbc->lossy_mode_thd) << 8) |
+			((fbc->lossy_rgb_thd) << 3) | fbc->lossy_mode_idx;
+	}
+
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_MODE, mode);
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_BUDGET_CTL,
+			budget_ctl);
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_FBC_LOSSY_MODE,
+			lossy_mode);
+
+	return 0;
+}
+
+int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *split_ctl;
+	u32 width, height;
+
+	if (!ctl || !ctl->panel_data) {
+		pr_err("invalid ctl handle\n");
+		return -ENODEV;
+	}
+
+	split_ctl = mdss_mdp_get_split_ctl(ctl);
+
+	width = ctl->panel_data->panel_info.xres;
+	height = ctl->panel_data->panel_info.yres;
+
+	if ((split_ctl && (width > MAX_MIXER_WIDTH)) ||
+			(width > (2 * MAX_MIXER_WIDTH))) {
+		pr_err("Unsupported panel resolution: %dx%d\n", width, height);
+		return -ENOTSUPP;
+	}
+
 	ctl->width = width;
 	ctl->height = height;
-	ctl->dst_format = mfd->panel_info.out_format;
 
-	ctl->mixer_left = mdss_mdp_mixer_alloc(MDSS_MDP_MIXER_TYPE_INTF);
 	if (!ctl->mixer_left) {
-		pr_err("unable to allocate layer mixer\n");
-		mdss_mdp_ctl_free(ctl);
-		return -ENOMEM;
+		ctl->mixer_left =
+			mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
+					(width > MAX_MIXER_WIDTH));
+		if (!ctl->mixer_left) {
+			pr_err("unable to allocate layer mixer\n");
+			return -ENOMEM;
+		}
 	}
 
-	ctl->mixer_left->width = MIN(width, MAX_MIXER_WIDTH);
+	if (width > MAX_MIXER_WIDTH)
+		width /= 2;
+
+	ctl->mixer_left->width = width;
 	ctl->mixer_left->height = height;
-	ctl->mixer_left->ctl = ctl;
 
-	width -= ctl->mixer_left->width;
+	if (split_ctl) {
+		pr_debug("split display detected\n");
+		return 0;
+	}
 
-	if (width) {
-		ctl->mixer_right =
-		mdss_mdp_mixer_alloc(MDSS_MDP_MIXER_TYPE_INTF);
-		if (!ctl->mixer_right) {
-			pr_err("unable to allocate layer mixer\n");
-			mdss_mdp_mixer_free(ctl->mixer_left);
-			mdss_mdp_ctl_free(ctl);
-			return -ENOMEM;
+	if (width < ctl->width) {
+		if (ctl->mixer_right == NULL) {
+			ctl->mixer_right = mdss_mdp_mixer_alloc(ctl,
+					MDSS_MDP_MIXER_TYPE_INTF, true);
+			if (!ctl->mixer_right) {
+				pr_err("unable to allocate right mixer\n");
+				if (ctl->mixer_left)
+					mdss_mdp_mixer_free(ctl->mixer_left);
+				return -ENOMEM;
+			}
 		}
 		ctl->mixer_right->width = width;
 		ctl->mixer_right->height = height;
-		ctl->mixer_right->ctl = ctl;
+	} else if (ctl->mixer_right) {
+		mdss_mdp_mixer_free(ctl->mixer_right);
+		ctl->mixer_right = NULL;
 	}
 
-	switch (mfd->panel_info.type) {
+	if (ctl->mixer_right) {
+		ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+			       MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+	} else {
+		ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+				  MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
+	}
+
+	return 0;
+}
+
+static int mdss_mdp_ctl_setup_wfd(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_data_type *mdata = ctl->mdata;
+	struct mdss_mdp_mixer *mixer;
+	int mixer_type;
+
+	/* if WB2 is supported, try to allocate it first */
+	if (mdata->nmixers_intf >= MDSS_MDP_INTF_LAYERMIXER2)
+		mixer_type = MDSS_MDP_MIXER_TYPE_INTF;
+	else
+		mixer_type = MDSS_MDP_MIXER_TYPE_WRITEBACK;
+
+	mixer = mdss_mdp_mixer_alloc(ctl, mixer_type, false);
+	if (!mixer && mixer_type == MDSS_MDP_MIXER_TYPE_INTF)
+		mixer = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_WRITEBACK,
+				false);
+
+	if (!mixer) {
+		pr_err("Unable to allocate writeback mixer\n");
+		return -ENOMEM;
+	}
+
+	if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
+		ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE;
+	} else {
+		switch (mixer->num) {
+		case MDSS_MDP_WB_LAYERMIXER0:
+			ctl->opmode = MDSS_MDP_CTL_OP_WB0_MODE;
+			break;
+		case MDSS_MDP_WB_LAYERMIXER1:
+			ctl->opmode = MDSS_MDP_CTL_OP_WB1_MODE;
+			break;
+		default:
+			pr_err("Incorrect writeback config num=%d\n",
+					mixer->num);
+			mdss_mdp_mixer_free(mixer);
+			return -EINVAL;
+		}
+	}
+	ctl->mixer_left = mixer;
+
+	return 0;
+}
+
+struct mdss_mdp_ctl *mdss_mdp_ctl_init(struct mdss_panel_data *pdata,
+				       struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_ctl *ctl;
+	int ret = 0;
+
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	ctl = mdss_mdp_ctl_alloc(mdata);
+	if (!ctl) {
+		pr_err("unable to allocate ctl\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	ctl->mfd = mfd;
+	ctl->panel_data = pdata;
+
+	switch (pdata->panel_info.type) {
 	case EDP_PANEL:
 		ctl->intf_num = MDSS_MDP_INTF0;
 		ctl->intf_type = MDSS_INTF_EDP;
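
mdss_mdp_ctl_fbc_enable() earlier in this hunk assembles the panel's
compression parameters into three pingpong registers field by field. A
stand-alone repack of the PP_FBC_MODE word (field positions copied from the
function, the panel values are invented):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* hypothetical FBC panel settings */
		uint32_t xres = 720, comp_mode = 1, qerr_enable = 1, cd_bias = 0;
		uint32_t pat_enable = 1, vlc_enable = 1, bflc_enable = 1, enable = 1;

		uint32_t mode = (xres << 16) | (comp_mode << 8) |
				(qerr_enable << 7) | (cd_bias << 4) |
				(pat_enable << 3) | (vlc_enable << 2) |
				(bflc_enable << 1) | enable;

		printf("PP_FBC_MODE = 0x%08x\n", mode);
		return 0;
	}
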
@@ -411,7 +654,7 @@
 		ctl->start_fnc = mdss_mdp_video_start;
 		break;
 	case MIPI_VIDEO_PANEL:
-		if (mfd->panel_info.pdest == DISPLAY_1)
+		if (pdata->panel_info.pdest == DISPLAY_1)
 			ctl->intf_num = MDSS_MDP_INTF1;
 		else
 			ctl->intf_num = MDSS_MDP_INTF2;
@@ -419,6 +662,15 @@
 		ctl->opmode = MDSS_MDP_CTL_OP_VIDEO_MODE;
 		ctl->start_fnc = mdss_mdp_video_start;
 		break;
+	case MIPI_CMD_PANEL:
+		if (pdata->panel_info.pdest == DISPLAY_1)
+			ctl->intf_num = MDSS_MDP_INTF1;
+		else
+			ctl->intf_num = MDSS_MDP_INTF2;
+		ctl->intf_type = MDSS_INTF_DSI;
+		ctl->opmode = MDSS_MDP_CTL_OP_CMD_MODE;
+		ctl->start_fnc = mdss_mdp_cmd_start;
+		break;
 	case DTV_PANEL:
 		ctl->intf_num = MDSS_MDP_INTF3;
 		ctl->intf_type = MDSS_INTF_HDMI;
@@ -427,93 +679,207 @@
 		break;
 	case WRITEBACK_PANEL:
 		ctl->intf_num = MDSS_MDP_NO_INTF;
-		ctl->opmode = MDSS_MDP_CTL_OP_WFD_MODE;
 		ctl->start_fnc = mdss_mdp_writeback_start;
+		ret = mdss_mdp_ctl_setup_wfd(ctl);
+		if (ret)
+			goto ctl_init_fail;
 		break;
 	default:
-		pr_err("unsupported panel type (%d)\n", mfd->panel_info.type);
-		mdss_mdp_ctl_free(ctl);
-		return -EINVAL;
-
+		pr_err("unsupported panel type (%d)\n", pdata->panel_info.type);
+		ret = -EINVAL;
+		goto ctl_init_fail;
 	}
 
 	ctl->opmode |= (ctl->intf_num << 4);
 
-	if (ctl->mixer_right) {
-		ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
-			       MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+	if (ctl->intf_num == MDSS_MDP_NO_INTF) {
+		ctl->dst_format = pdata->panel_info.out_format;
+	} else {
+		struct mdp_dither_cfg_data dither = {
+			.block = mfd->index + MDP_LOGICAL_BLOCK_DISP_0,
+			.flags = MDP_PP_OPS_DISABLE,
+		};
+
+		switch (pdata->panel_info.bpp) {
+		case 18:
+			ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB666;
+			dither.flags = MDP_PP_OPS_ENABLE | MDP_PP_OPS_WRITE;
+			dither.g_y_depth = 2;
+			dither.r_cr_depth = 2;
+			dither.b_cb_depth = 2;
+			break;
+		case 24:
+		default:
+			ctl->dst_format = MDSS_MDP_PANEL_FORMAT_RGB888;
+			break;
+		}
+		mdss_mdp_dither_config(ctl, &dither, NULL);
 	}
 
-	mfd->ctl = ctl;
+	return ctl;
+ctl_init_fail:
+	mdss_mdp_ctl_free(ctl);
 
-	return 0;
+	return ERR_PTR(ret);
 }
 
-static int mdss_mdp_ctl_destroy(struct msm_fb_data_type *mfd)
+int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
+		struct mdss_panel_data *pdata)
 {
-	struct mdss_mdp_ctl *ctl;
-	if (!mfd || !mfd->ctl)
+	struct mdss_mdp_ctl *sctl;
+	struct mdss_mdp_mixer *mixer;
+
+	if (!ctl || !pdata)
 		return -ENODEV;
 
-	ctl = mfd->ctl;
-	mfd->ctl = NULL;
+	if (pdata->panel_info.xres > MAX_MIXER_WIDTH) {
+		pr_err("Unsupported second panel resolution: %dx%d\n",
+				pdata->panel_info.xres, pdata->panel_info.yres);
+		return -ENOTSUPP;
+	}
 
-	if (ctl->mixer_left)
-		mdss_mdp_mixer_free(ctl->mixer_left);
-	if (ctl->mixer_right)
+	if (ctl->mixer_right) {
+		pr_err("right mixer already setup for ctl=%d\n", ctl->num);
+		return -EPERM;
+	}
+
+	sctl = mdss_mdp_ctl_init(pdata, ctl->mfd);
+	if (IS_ERR_OR_NULL(sctl)) {
+		pr_err("unable to setup split display\n");
+		return -ENODEV;
+	}
+
+	sctl->width = pdata->panel_info.xres;
+	sctl->height = pdata->panel_info.yres;
+
+	ctl->mixer_left = mdss_mdp_mixer_alloc(ctl, MDSS_MDP_MIXER_TYPE_INTF,
+			false);
+	if (!ctl->mixer_left) {
+		pr_err("unable to allocate layer mixer\n");
+		mdss_mdp_ctl_destroy(sctl);
+		return -ENOMEM;
+	}
+
+	mixer = mdss_mdp_mixer_alloc(sctl, MDSS_MDP_MIXER_TYPE_INTF, false);
+	if (!mixer) {
+		pr_err("unable to allocate layer mixer\n");
+		mdss_mdp_ctl_destroy(sctl);
+		return -ENOMEM;
+	}
+
+	mixer->width = sctl->width;
+	mixer->height = sctl->height;
+	sctl->mixer_left = mixer;
+
+	return mdss_mdp_set_split_ctl(ctl, sctl);
+}
+
+static void mdss_mdp_ctl_split_display_enable(int enable,
+	struct mdss_mdp_ctl *main_ctl, struct mdss_mdp_ctl *slave_ctl)
+{
+	u32 upper = 0, lower = 0;
+
+	pr_debug("split main ctl=%d intf=%d slave ctl=%d intf=%d\n",
+			main_ctl->num, main_ctl->intf_num,
+			slave_ctl->num, slave_ctl->intf_num);
+	if (enable) {
+		if (main_ctl->opmode & MDSS_MDP_CTL_OP_CMD_MODE) {
+			upper |= BIT(1);
+			lower |= BIT(1);
+
+			/* interface controlling sw trigger */
+			if (main_ctl->intf_num == MDSS_MDP_INTF2)
+				upper |= BIT(4);
+			else
+				upper |= BIT(8);
+		} else { /* video mode */
+			if (main_ctl->intf_num == MDSS_MDP_INTF2)
+				lower |= BIT(4);
+			else
+				lower |= BIT(8);
+		}
+	}
+	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper);
+	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower);
+	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SPLIT_DISPLAY_EN, enable);
+}
+
+int mdss_mdp_ctl_destroy(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *sctl;
+	int rc;
+
+	rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CLOSE, NULL);
+	WARN(rc, "unable to close panel for intf=%d\n", ctl->intf_num);
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+	if (sctl) {
+		pr_debug("destroying split display ctl=%d\n", sctl->num);
+		if (sctl->mixer_left)
+			mdss_mdp_mixer_free(sctl->mixer_left);
+		mdss_mdp_ctl_free(sctl);
+	} else if (ctl->mixer_right) {
 		mdss_mdp_mixer_free(ctl->mixer_right);
+		ctl->mixer_right = NULL;
+	}
+
+	if (ctl->mixer_left) {
+		mdss_mdp_mixer_free(ctl->mixer_left);
+		ctl->mixer_left = NULL;
+	}
 	mdss_mdp_ctl_free(ctl);
 
 	return 0;
 }
 
-int mdss_mdp_ctl_on(struct msm_fb_data_type *mfd)
+int mdss_mdp_ctl_intf_event(struct mdss_mdp_ctl *ctl, int event, void *arg)
 {
 	struct mdss_panel_data *pdata;
-	struct mdss_mdp_ctl *ctl;
+	int rc = 0;
+
+	if (!ctl || !ctl->panel_data)
+		return -ENODEV;
+
+	pdata = ctl->panel_data;
+
+	pr_debug("sending ctl=%d event=%d\n", ctl->num, event);
+
+	do {
+		if (pdata->event_handler)
+			rc = pdata->event_handler(pdata, event, arg);
+		pdata = pdata->next;
+	} while (rc == 0 && pdata);
+
+	return rc;
+}
+
+static int mdss_mdp_ctl_start_sub(struct mdss_mdp_ctl *ctl)
+{
 	struct mdss_mdp_mixer *mixer;
-	u32 outsize, temp, off;
+	u32 outsize, temp;
 	int ret = 0;
+	int i, nmixers;
 
-	if (!mfd)
-		return -ENODEV;
-
-	if (mfd->key != MFD_KEY)
-		return -EINVAL;
-
-	pdata = dev_get_platdata(&mfd->pdev->dev);
-	if (!pdata) {
-		pr_err("no panel connected\n");
-		return -ENODEV;
-	}
-
-	if (!mfd->ctl) {
-		if (mdss_mdp_ctl_init(mfd)) {
-			pr_err("unable to initialize ctl\n");
-			return -ENODEV;
-		}
-	}
-	ctl = mfd->ctl;
-
-	mutex_lock(&ctl->lock);
-
-	ctl->power_on = true;
-
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 	if (ctl->start_fnc)
 		ret = ctl->start_fnc(ctl);
 	else
 		pr_warn("no start function for ctl=%d type=%d\n", ctl->num,
-				mfd->panel_info.type);
+				ctl->panel_data->panel_info.type);
 
 	if (ret) {
 		pr_err("unable to start intf\n");
-		goto start_fail;
+		return ret;
 	}
 
 	pr_debug("ctl_num=%d\n", ctl->num);
 
+	nmixers = MDSS_MDP_INTF_MAX_LAYERMIXER + MDSS_MDP_WB_MAX_LAYERMIXER;
+	for (i = 0; i < nmixers; i++)
+		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(i), 0);
+
 	mixer = ctl->mixer_left;
+	mdss_mdp_pp_resume(mixer->num);
 	mixer->params_changed++;
 
 	temp = MDSS_MDP_REG_READ(MDSS_MDP_REG_DISP_INTF_SEL);
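
mdss_mdp_ctl_intf_event() above replaces the old direct pdata->on()/off()
calls with a walk over the chained panel_data list that stops at the first
handler returning non-zero, so chained (e.g. split) panels each see the
event. A stand-alone model of the walk with two dummy panels:

	#include <stdio.h>

	struct panel_data {
		int (*event_handler)(struct panel_data *pdata, int event, void *arg);
		struct panel_data *next;
	};

	static int dummy_event(struct panel_data *pdata, int event, void *arg)
	{
		printf("panel %p got event %d\n", (void *)pdata, event);
		return 0;		/* non-zero would stop the walk */
	}

	/* same loop as mdss_mdp_ctl_intf_event() */
	static int send_event(struct panel_data *pdata, int event, void *arg)
	{
		int rc = 0;

		do {
			if (pdata->event_handler)
				rc = pdata->event_handler(pdata, event, arg);
			pdata = pdata->next;
		} while (rc == 0 && pdata);

		return rc;
	}

	int main(void)
	{
		struct panel_data slave = { dummy_event, NULL };
		struct panel_data master = { dummy_event, &slave };

		return send_event(&master, 1 /* e.g. MDSS_EVENT_RESET */, NULL);
	}
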
@@ -521,127 +887,159 @@
 	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_DISP_INTF_SEL, temp);
 
 	outsize = (mixer->height << 16) | mixer->width;
-	off = MDSS_MDP_REG_LM_OFFSET(mixer->num);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, outsize);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OUT_SIZE, outsize);
 
-	if (ctl->mixer_right) {
-		mixer = ctl->mixer_right;
-		mixer->params_changed++;
-		outsize = (mixer->height << 16) | mixer->width;
-		off = MDSS_MDP_REG_LM_OFFSET(mixer->num);
-		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, outsize);
-		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
+	if (ctl->panel_data->panel_info.fbc.enabled) {
+		ret = mdss_mdp_ctl_fbc_enable(1, ctl->mixer_left,
+				&ctl->panel_data->panel_info);
 	}
 
-	ret = pdata->on(pdata);
+	return ret;
+}
 
-start_fail:
+int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_ctl *sctl;
+	int ret = 0;
+
+	if (ctl->power_on) {
+		pr_debug("%d: panel already on!\n", __LINE__);
+		return 0;
+	}
+
+	ret = mdss_mdp_ctl_setup(ctl);
+	if (ret)
+		return ret;
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
+
+	mutex_lock(&ctl->lock);
+
+	ctl->power_on = true;
+	ctl->bus_ab_quota = 0;
+	ctl->bus_ib_quota = 0;
+	ctl->clk_rate = 0;
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_RESET, NULL);
+	if (ret) {
+		pr_err("panel power on failed ctl=%d\n", ctl->num);
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+		mutex_unlock(&ctl->lock);
+		return ret;
+	}
+
+	ret = mdss_mdp_ctl_start_sub(ctl);
+	if (ret == 0) {
+		if (sctl) { /* split display is available */
+			ret = mdss_mdp_ctl_start_sub(sctl);
+			if (!ret)
+				mdss_mdp_ctl_split_display_enable(1, ctl, sctl);
+		} else if (ctl->mixer_right) {
+			struct mdss_mdp_mixer *mixer = ctl->mixer_right;
+			u32 out, off;
+
+			mdss_mdp_pp_resume(mixer->num);
+			mixer->params_changed++;
+			out = (mixer->height << 16) | mixer->width;
+			off = MDSS_MDP_REG_LM_OFFSET(mixer->num);
+			MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OUT_SIZE, out);
+			mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_PACK_3D, 0);
+		}
+	}
+
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 	mutex_unlock(&ctl->lock);
 
 	return ret;
 }
 
-int mdss_mdp_ctl_off(struct msm_fb_data_type *mfd)
+int mdss_mdp_ctl_stop(struct mdss_mdp_ctl *ctl)
 {
-	struct mdss_panel_data *pdata;
-	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_ctl *sctl;
 	int ret = 0;
 
-	if (!mfd)
-		return -ENODEV;
-
-	if (mfd->key != MFD_KEY)
-		return -EINVAL;
-
-	if (!mfd->ctl) {
-		pr_err("ctl not initialized\n");
-		return -ENODEV;
+	if (!ctl->power_on) {
+		pr_debug("%s %d already off!\n", __func__, __LINE__);
+		return 0;
 	}
 
-	pdata = dev_get_platdata(&mfd->pdev->dev);
-	if (!pdata) {
-		pr_err("no panel connected\n");
-		return -ENODEV;
-	}
+	sctl = mdss_mdp_get_split_ctl(ctl);
 
-	ctl = mfd->ctl;
-
-	pr_debug("ctl_num=%d\n", mfd->ctl->num);
+	pr_debug("ctl_num=%d\n", ctl->num);
 
 	mutex_lock(&ctl->lock);
-	ctl->power_on = false;
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
 	if (ctl->stop_fnc)
 		ret = ctl->stop_fnc(ctl);
 	else
 		pr_warn("no stop func for ctl=%d\n", ctl->num);
 
-	if (ret)
-		pr_warn("error powering off intf ctl=%d\n", ctl->num);
+	if (sctl && sctl->stop_fnc) {
+		ret = sctl->stop_fnc(sctl);
 
-	ret = pdata->off(pdata);
+		mdss_mdp_ctl_split_display_enable(0, ctl, sctl);
+	}
+
+	if (ret) {
+		pr_warn("error powering off intf ctl=%d\n", ctl->num);
+	} else {
+		ctl->power_on = false;
+		ctl->play_cnt = 0;
+		ctl->clk_rate = 0;
+		mdss_mdp_ctl_perf_commit(ctl->mdata, MDSS_MDP_PERF_UPDATE_ALL);
+	}
+
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
-	ctl->play_cnt = 0;
-
-	mdss_mdp_ctl_perf_commit(MDSS_MDP_PERF_UPDATE_ALL);
-
 	mutex_unlock(&ctl->lock);
 
-	mdss_mdp_pipe_release_all(mfd);
-
-	if (!mfd->ref_cnt)
-		mdss_mdp_ctl_destroy(mfd);
-
 	return ret;
 }
 
 static int mdss_mdp_mixer_setup(struct mdss_mdp_ctl *ctl,
 				struct mdss_mdp_mixer *mixer)
 {
-	struct mdss_mdp_pipe *pipe, *bgpipe = NULL;
+	struct mdss_mdp_pipe *pipe;
 	u32 off, blend_op, blend_stage;
 	u32 mixercfg = 0, blend_color_out = 0, bgalpha = 0;
-	int stage;
+	int stage, secure = 0;
 
 	if (!mixer)
 		return -ENODEV;
 
 	pr_debug("setup mixer=%d\n", mixer->num);
 
-	for (stage = MDSS_MDP_STAGE_BASE; stage < MDSS_MDP_MAX_STAGE; stage++) {
+	pipe = mixer->stage_pipe[MDSS_MDP_STAGE_BASE];
+	if (pipe == NULL) {
+		mixercfg = MDSS_MDP_LM_BORDER_COLOR;
+	} else {
+		mixercfg = 1 << (3 * pipe->num);
+		if (pipe->src_fmt->alpha_enable)
+			bgalpha = 1;
+		secure = pipe->flags & MDP_SECURE_OVERLAY_SESSION;
+	}
+
+	for (stage = MDSS_MDP_STAGE_0; stage < MDSS_MDP_MAX_STAGE; stage++) {
 		pipe = mixer->stage_pipe[stage];
-		if (pipe == NULL) {
-			if (stage == MDSS_MDP_STAGE_BASE)
-				mixercfg |= MDSS_MDP_LM_BORDER_COLOR;
+		if (pipe == NULL)
 			continue;
-		}
 
 		if (stage != pipe->mixer_stage) {
 			mixer->stage_pipe[stage] = NULL;
 			continue;
 		}
-		mixercfg |= stage << (3 * pipe->num);
-
-		if (stage == MDSS_MDP_STAGE_BASE) {
-			bgpipe = pipe;
-			if (pipe->src_fmt->alpha_enable)
-				bgalpha = 1;
-			continue;
-		}
 
 		blend_stage = stage - MDSS_MDP_STAGE_0;
-		off = MDSS_MDP_REG_LM_OFFSET(mixer->num) +
-		      MDSS_MDP_REG_LM_BLEND_OFFSET(blend_stage);
+		off = MDSS_MDP_REG_LM_BLEND_OFFSET(blend_stage);
 
 		if (pipe->is_fg) {
 			bgalpha = 0;
-			if (bgpipe) {
-				mixercfg &= ~(0x7 << (3 * bgpipe->num));
-				mixercfg |= MDSS_MDP_LM_BORDER_COLOR;
-			}
+			if (!secure)
+				mixercfg = MDSS_MDP_LM_BORDER_COLOR;
+
 			blend_op = (MDSS_MDP_BLEND_FG_ALPHA_FG_CONST |
 				    MDSS_MDP_BLEND_BG_ALPHA_BG_CONST);
 			/* keep fg alpha */
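
The CTL_LAYER word built by mdss_mdp_mixer_setup() gives every pipe a 3-bit
field at 3 * pipe->num: value 1 marks the base layer, larger values select a
blend stage, and bit 24 (MDSS_MDP_LM_BORDER_COLOR) asks for border-color fill
when no base pipe is staged. A small stand-alone encoding, assuming that
stage numbering:

	#include <stdio.h>
	#include <stdint.h>

	#define LM_BORDER_COLOR (1u << 24)

	int main(void)
	{
		uint32_t mixercfg = 0;

		mixercfg |= 1u << (3 * 0);	/* pipe 0 staged as the base layer */
		mixercfg |= 2u << (3 * 3);	/* pipe 3 at the first blend stage */
		printf("CTL_LAYER = 0x%08x\n", mixercfg);

		/* nothing at the base stage: fall back to border color fill */
		printf("border fill = 0x%08x\n", LM_BORDER_COLOR);
		return 0;
	}
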
@@ -672,10 +1070,20 @@
 					stage);
 		}
 
-		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
-		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA,
+		if (mixercfg == MDSS_MDP_LM_BORDER_COLOR &&
+				pipe->src_fmt->alpha_enable &&
+				pipe->dst.w == mixer->width &&
+				pipe->dst.h == mixer->height) {
+			pr_debug("setting pipe=%d as BG_PIPE\n", pipe->num);
+			bgalpha = 1;
+		}
+
+		mixercfg |= stage << (3 * pipe->num);
+
+		mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_OP_MODE, blend_op);
+		mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_FG_ALPHA,
 				   pipe->alpha);
-		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_BLEND_BG_ALPHA,
+		mdp_mixer_write(mixer, off + MDSS_MDP_REG_LM_BLEND_BG_ALPHA,
 				   0xFF - pipe->alpha);
 	}
 
@@ -686,9 +1094,87 @@
 
 	ctl->flush_bits |= BIT(6) << mixer->num;	/* LAYER_MIXER */
 
-	off = MDSS_MDP_REG_LM_OFFSET(mixer->num);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_OP_MODE, blend_color_out);
-	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_LAYER(mixer->num), mixercfg);
+	mdp_mixer_write(mixer, MDSS_MDP_REG_LM_OP_MODE, blend_color_out);
+	if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF)
+		off = MDSS_MDP_REG_CTL_LAYER(mixer->num);
+	else
+		off = MDSS_MDP_REG_CTL_LAYER(mixer->num +
+				MDSS_MDP_INTF_MAX_LAYERMIXER);
+	mdss_mdp_ctl_write(ctl, off, mixercfg);
+
+	return 0;
+}
+
+int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata,
+	 u32 *mixer_offsets, u32 *dspp_offsets, u32 *pingpong_offsets,
+	 u32 type, u32 len)
+{
+	struct mdss_mdp_mixer *head;
+	u32 i;
+	int rc = 0;
+
+	head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_mixer) *
+			len, GFP_KERNEL);
+
+	if (!head) {
+		pr_err("unable to setup mixer type=%d :kzalloc fail\n",
+			type);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		head[i].type = type;
+		head[i].base = mdata->mdp_base + mixer_offsets[i];
+		head[i].ref_cnt = 0;
+		head[i].num = i;
+		if (type == MDSS_MDP_MIXER_TYPE_INTF) {
+			head[i].dspp_base = mdata->mdp_base + dspp_offsets[i];
+			head[i].pingpong_base = mdata->mdp_base +
+				pingpong_offsets[i];
+		}
+	}
+
+	switch (type) {
+
+	case MDSS_MDP_MIXER_TYPE_INTF:
+		mdata->mixer_intf = head;
+		break;
+
+	case MDSS_MDP_MIXER_TYPE_WRITEBACK:
+		mdata->mixer_wb = head;
+		break;
+
+	default:
+		pr_err("Invalid mixer type=%d\n", type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata,
+	u32 *ctl_offsets, u32 *wb_offsets, u32 len)
+{
+	struct mdss_mdp_ctl *head;
+	u32 i;
+
+	head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_ctl) *
+			len, GFP_KERNEL);
+
+	if (!head) {
+		pr_err("unable to setup ctl and wb: kzalloc fail\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		head[i].num = i;
+		head[i].base = (mdata->mdp_base) + ctl_offsets[i];
+		head[i].wb_base = (mdata->mdp_base) + wb_offsets[i];
+		head[i].ref_cnt = 0;
+	}
+
+	mdata->ctl_off = head;
 
 	return 0;
 }
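
Both addr_setup helpers follow the same pattern: devm-allocate an array of
context structs and point each instance's register base at mdp_base plus a
per-instance offset supplied by the platform data. The pattern in miniature
(the offsets below are invented, not real MDSS register addresses):

	#include <stdio.h>
	#include <stdint.h>

	struct ctl_slot { int num; uintptr_t base, wb_base; };

	int main(void)
	{
		uintptr_t mdp_base = 0x10000000;	/* pretend ioremap() result */
		uint32_t ctl_off[] = { 0x600, 0x700, 0x800 };
		uint32_t wb_off[] = { 0x11100, 0x13100, 0x15100 };
		struct ctl_slot ctl[3];

		/* same loop shape as mdss_mdp_ctl_addr_setup() */
		for (int i = 0; i < 3; i++) {
			ctl[i].num = i;
			ctl[i].base = mdp_base + ctl_off[i];
			ctl[i].wb_base = mdp_base + wb_off[i];
			printf("ctl%d base=0x%lx wb=0x%lx\n", i,
			       (unsigned long)ctl[i].base,
			       (unsigned long)ctl[i].wb_base);
		}
		return 0;
	}
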
@@ -803,9 +1289,6 @@
 {
 	mixer->params_changed = 0;
 
-	if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF)
-		mdss_mdp_dspp_setup(mixer->ctl, mixer);
-
 	/* skip mixer setup for rotator */
 	if (!mixer->rotator_mode)
 		mdss_mdp_mixer_setup(mixer->ctl, mixer);
@@ -813,32 +1296,63 @@
 	return 0;
 }
 
+int mdss_mdp_display_wait4comp(struct mdss_mdp_ctl *ctl)
+{
+	int ret;
+
+	ret = mutex_lock_interruptible(&ctl->lock);
+	if (ret)
+		return ret;
+
+	if (!ctl->power_on) {
+		mutex_unlock(&ctl->lock);
+		return 0;
+	}
+
+	if (ctl->wait_fnc)
+		ret = ctl->wait_fnc(ctl, NULL);
+
+	if (ctl->perf_changed) {
+		mdss_mdp_ctl_perf_commit(ctl->mdata, ctl->perf_changed);
+		ctl->perf_changed = 0;
+	}
+
+	mutex_unlock(&ctl->lock);
+
+	return ret;
+}
+
 int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg)
 {
+	struct mdss_mdp_ctl *sctl = NULL;
 	int mixer1_changed, mixer2_changed;
 	int ret = 0;
 	int perf_update = MDSS_MDP_PERF_UPDATE_SKIP;
-	u32 update_flags = 0;
 
 	if (!ctl) {
 		pr_err("display function not set\n");
 		return -ENODEV;
 	}
 
-	if (!ctl->power_on)
-		return 0;
-
 	pr_debug("commit ctl=%d play_cnt=%d\n", ctl->num, ctl->play_cnt);
 
-	if (mutex_lock_interruptible(&ctl->lock))
-		return -EINTR;
+	ret = mutex_lock_interruptible(&ctl->lock);
+	if (ret)
+		return ret;
+
+	if (!ctl->power_on) {
+		mutex_unlock(&ctl->lock);
+		return 0;
+	}
+
+	sctl = mdss_mdp_get_split_ctl(ctl);
 
 	mixer1_changed = (ctl->mixer_left && ctl->mixer_left->params_changed);
 	mixer2_changed = (ctl->mixer_right && ctl->mixer_right->params_changed);
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 	if (mixer1_changed || mixer2_changed) {
-		perf_update = mdss_mdp_ctl_perf_update(ctl, &update_flags);
+		perf_update = mdss_mdp_ctl_perf_update(ctl);
 
 		if (ctl->prepare_fnc)
 			ret = ctl->prepare_fnc(ctl, arg);
@@ -847,8 +1361,10 @@
 			goto done;
 		}
 
-		if (perf_update == MDSS_MDP_PERF_UPDATE_EARLY)
-			mdss_mdp_ctl_perf_commit(update_flags);
+		if (perf_update == MDSS_MDP_PERF_UPDATE_EARLY) {
+			mdss_mdp_ctl_perf_commit(ctl->mdata, ctl->perf_changed);
+			ctl->perf_changed = 0;
+		}
 
 		if (mixer1_changed)
 			mdss_mdp_mixer_update(ctl->mixer_left);
@@ -857,9 +1373,22 @@
 
 		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, ctl->opmode);
 		ctl->flush_bits |= BIT(17);	/* CTL */
+
+		if (sctl) {
+			mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP,
+					sctl->opmode);
+			sctl->flush_bits |= BIT(17);
+		}
 	}
 
+	/* postprocessing setup, including dspp */
+	mdss_mdp_pp_setup_locked(ctl);
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, ctl->flush_bits);
+	if (sctl) {
+		mdss_mdp_pp_setup_locked(sctl);
+		mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_FLUSH,
+			sctl->flush_bits);
+	}
 	wmb();
 	ctl->flush_bits = 0;
 
@@ -870,9 +1399,6 @@
 
 	ctl->play_cnt++;
 
-	if (perf_update == MDSS_MDP_PERF_UPDATE_LATE)
-		mdss_mdp_ctl_perf_commit(update_flags);
-
 done:
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
@@ -880,3 +1406,32 @@
 
 	return ret;
 }
+
+int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id)
+{
+	int i;
+	struct mdss_mdp_ctl *ctl;
+	struct mdss_data_type *mdata;
+	u32 mixer_cnt = 0;
+	mutex_lock(&mdss_mdp_ctl_lock);
+	mdata = mdss_mdp_get_mdata();
+	for (i = 0; i < mdata->nctl; i++) {
+		ctl = mdata->ctl_off + i;
+		if ((ctl->power_on) && (ctl->mfd) &&
+			(ctl->mfd->index == fb_num)) {
+			if (ctl->mixer_left) {
+				mixer_id[mixer_cnt] = ctl->mixer_left->num;
+				mixer_cnt++;
+			}
+			if (mixer_cnt && ctl->mixer_right) {
+				mixer_id[mixer_cnt] = ctl->mixer_right->num;
+				mixer_cnt++;
+			}
+			if (mixer_cnt)
+				break;
+		}
+	}
+	mutex_unlock(&mdss_mdp_ctl_lock);
+	return mixer_cnt;
+}
diff --git a/drivers/video/msm/mdss/mdss_mdp_formats.h b/drivers/video/msm/mdss/mdss_mdp_formats.h
index b73f1e9..c6d5fb9 100644
--- a/drivers/video/msm/mdss/mdss_mdp_formats.h
+++ b/drivers/video/msm/mdss/mdss_mdp_formats.h
@@ -18,311 +18,149 @@
 
 #include "mdss_mdp.h"
 
-#define C3_ALPHA	3	/* alpha */
-#define C2_R_Cr		2	/* R/Cr */
-#define C1_B_Cb		1	/* B/Cb */
-#define C0_G_Y		0	/* G/luma */
+/*
+ * Number of bits per source color component:
+ * 0 = 4 bits, 1 = 5 bits, 2 = 6 bits, 3 = 8 bits
+ */
+enum {
+	COLOR_4BIT,
+	COLOR_5BIT,
+	COLOR_6BIT,
+	COLOR_8BIT,
+};
 
-static struct mdss_mdp_format_params mdss_mdp_format_map[MDP_IMGTYPE_LIMIT] = {
-	[MDP_RGB_565] = {
-		.format = MDP_RGB_565,
+#define FMT_RGB_565(fmt, e0, e1, e2)				\
+	{							\
+		.format = (fmt),				\
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,	\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0,				\
+		.alpha_enable = 0,				\
+		.unpack_count = 3,				\
+		.bpp = 2,					\
+		.element = { (e0), (e1), (e2) },		\
+		.bits = {					\
+			[C2_R_Cr] = COLOR_5BIT,			\
+			[C0_G_Y] = COLOR_6BIT,			\
+			[C1_B_Cb] = COLOR_5BIT,			\
+		},						\
+	}
+
+#define FMT_RGB_888(fmt, e0, e1, e2)				\
+	{							\
+		.format = (fmt),				\
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,	\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0,				\
+		.alpha_enable = 0,				\
+		.unpack_count = 3,				\
+		.bpp = 3,					\
+		.element = { (e0), (e1), (e2) },		\
+		.bits = {					\
+			[C2_R_Cr] = COLOR_8BIT,			\
+			[C0_G_Y] = COLOR_8BIT,			\
+			[C1_B_Cb] = COLOR_8BIT,			\
+		},						\
+	}
+
+#define FMT_RGB_8888(fmt, alpha_en, e0, e1, e2, e3)		\
+	{							\
+		.format = (fmt),				\
+		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,	\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0,				\
+		.alpha_enable = (alpha_en),			\
+		.unpack_count = 4,				\
+		.bpp = 4,					\
+		.element = { (e0), (e1), (e2), (e3) },		\
+		.bits = {					\
+			[C3_ALPHA] = COLOR_8BIT,		\
+			[C2_R_Cr] = COLOR_8BIT,			\
+			[C0_G_Y] = COLOR_8BIT,			\
+			[C1_B_Cb] = COLOR_8BIT,			\
+		},						\
+	}
+
+#define FMT_YUV_COMMON(fmt)					\
+		.format = (fmt),				\
+		.is_yuv = 1,					\
+		.bits = {					\
+			[C2_R_Cr] = COLOR_8BIT,			\
+			[C0_G_Y] = COLOR_8BIT,			\
+			[C1_B_Cb] = COLOR_8BIT,			\
+		},						\
+		.alpha_enable = 0,				\
+		.unpack_tight = 1,				\
+		.unpack_align_msb = 0
+
+#define FMT_YUV_PSEUDO(fmt, samp, e0, e1)			\
+	{							\
+		FMT_YUV_COMMON(fmt),				\
+		.fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR,	\
+		.chroma_sample = samp,				\
+		.unpack_count = 2,				\
+		.bpp = 2,					\
+		.element = { (e0), (e1) },			\
+	}
+
+#define FMT_YUV_PLANR(fmt, samp, e0, e1)			\
+	{							\
+		FMT_YUV_COMMON(fmt),				\
+		.fetch_planes = MDSS_MDP_PLANE_PLANAR,		\
+		.chroma_sample = samp,				\
+		.element = { (e0), (e1) }			\
+	}
+
+static struct mdss_mdp_format_params mdss_mdp_format_map[] = {
+	FMT_RGB_565(MDP_RGB_565, C1_B_Cb, C0_G_Y, C2_R_Cr),
+	FMT_RGB_565(MDP_BGR_565, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_888(MDP_RGB_888, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_888(MDP_BGR_888, C1_B_Cb, C0_G_Y, C2_R_Cr),
+
+	FMT_RGB_8888(MDP_XRGB_8888, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_8888(MDP_ARGB_8888, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb),
+	FMT_RGB_8888(MDP_RGBA_8888, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_8888(MDP_RGBX_8888, 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA),
+	FMT_RGB_8888(MDP_BGRA_8888, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+
+	FMT_YUV_PSEUDO(MDP_Y_CRCB_H1V1, MDSS_MDP_CHROMA_RGB, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H1V1, MDSS_MDP_CHROMA_RGB, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PSEUDO(MDP_Y_CRCB_H2V1, MDSS_MDP_CHROMA_H2V1, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V1, MDSS_MDP_CHROMA_H2V1, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PSEUDO(MDP_Y_CRCB_H1V2, MDSS_MDP_CHROMA_H1V2, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H1V2, MDSS_MDP_CHROMA_H1V2, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PSEUDO(MDP_Y_CRCB_H2V2, MDSS_MDP_CHROMA_420, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2, MDSS_MDP_CHROMA_420, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PSEUDO(MDP_Y_CBCR_H2V2_VENUS, MDSS_MDP_CHROMA_420,
+		       C1_B_Cb, C2_R_Cr),
+
+	FMT_YUV_PLANR(MDP_Y_CR_CB_H2V2, MDSS_MDP_CHROMA_420, C2_R_Cr, C1_B_Cb),
+	FMT_YUV_PLANR(MDP_Y_CB_CR_H2V2, MDSS_MDP_CHROMA_420, C1_B_Cb, C2_R_Cr),
+	FMT_YUV_PLANR(MDP_Y_CR_CB_GH2V2, MDSS_MDP_CHROMA_420, C2_R_Cr, C1_B_Cb),
+
+	{
+		FMT_YUV_COMMON(MDP_YCBCR_H1V1),
 		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
-		.a_bit = 0,
-		.r_bit = 1,	/* R, 5 bits */
-		.b_bit = 1,	/* B, 5 bits */
-		.g_bit = 2,	/* G, 6 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 2,
-		.element2 = C2_R_Cr,
-		.element1 = C0_G_Y,
-		.element0 = C1_B_Cb,
-		.bpp = 1,	/* 2 bpp */
-	},
-	[MDP_BGR_565] = {
-		.format = MDP_BGR_565,
-		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
-		.a_bit = 0,
-		.r_bit = 1,	/* R, 5 bits */
-		.b_bit = 1,	/* B, 5 bits */
-		.g_bit = 2,	/* G, 6 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 2,
-		.element2 = C1_B_Cb,
-		.element1 = C0_G_Y,
-		.element0 = C2_R_Cr,
-		.bpp = 1,	/* 2 bpp */
-	},
-	[MDP_RGB_888] = {
-		.format = MDP_RGB_888,
-		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 2,
-		.element2 = C1_B_Cb,
-		.element1 = C0_G_Y,
-		.element0 = C2_R_Cr,
-		.bpp = 2,	/* 3 bpp */
-	},
-	[MDP_XRGB_8888] = {
-		.format = MDP_XRGB_8888,
-		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
-		.a_bit = 3,	/* alpha, 4 bits */
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
+		.chroma_sample = MDSS_MDP_CHROMA_RGB,
 		.unpack_count = 3,
-		.element3 = C1_B_Cb,
-		.element2 = C0_G_Y,
-		.element1 = C2_R_Cr,
-		.element0 = C3_ALPHA,
-		.bpp = 3,	/* 4 bpp */
+		.bpp = 3,
+		.element = { C2_R_Cr, C1_B_Cb, C0_G_Y },
 	},
-	[MDP_ARGB_8888] = {
-		.format = MDP_ARGB_8888,
+	{
+		FMT_YUV_COMMON(MDP_YCRCB_H1V1),
 		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
-		.a_bit = 3,	/* alpha, 4 bits */
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 1,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
+		.chroma_sample = MDSS_MDP_CHROMA_RGB,
 		.unpack_count = 3,
-		.element3 = C1_B_Cb,
-		.element2 = C0_G_Y,
-		.element1 = C2_R_Cr,
-		.element0 = C3_ALPHA,
-		.bpp = 3,	/* 4 bpp */
+		.bpp = 3,
+		.element = { C1_B_Cb, C2_R_Cr, C0_G_Y },
 	},
-	[MDP_RGBA_8888] = {
-		.format = MDP_RGBA_8888,
-		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
-		.a_bit = 3,	/* alpha, 4 bits */
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 1,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 3,
-		.element3 = C3_ALPHA,
-		.element2 = C1_B_Cb,
-		.element1 = C0_G_Y,
-		.element0 = C2_R_Cr,
-		.bpp = 3,	/* 4 bpp */
-	},
-	[MDP_RGBX_8888] = {
-		.format = MDP_RGBX_8888,
-		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
-		.a_bit = 3,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 3,
-		.element3 = C3_ALPHA,
-		.element2 = C1_B_Cb,
-		.element1 = C0_G_Y,
-		.element0 = C2_R_Cr,
-		.bpp = 3,	/* 4 bpp */
-	},
-	[MDP_BGRA_8888] = {
-		.format = MDP_BGRA_8888,
-		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
-		.a_bit = 3,	/* alpha, 4 bits */
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 1,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 3,
-		.element3 = C3_ALPHA,
-		.element2 = C2_R_Cr,
-		.element1 = C0_G_Y,
-		.element0 = C1_B_Cb,
-		.bpp = 3,	/* 4 bpp */
-	},
-	[MDP_YCRYCB_H2V1] = {
-		.format = MDP_YCRYCB_H2V1,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
+	{
+		FMT_YUV_COMMON(MDP_YCRYCB_H2V1),
 		.fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
 		.chroma_sample = MDSS_MDP_CHROMA_H2V1,
-		.unpack_count = 3,
-		.element3 = C0_G_Y,
-		.element2 = C2_R_Cr,
-		.element1 = C0_G_Y,
-		.element0 = C1_B_Cb,
-	},
-	[MDP_Y_CRCB_H2V1] = {
-		.format = MDP_Y_CRCB_H2V1,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
-		.fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR,
-		.chroma_sample = MDSS_MDP_CHROMA_H2V1,
-		.element1 = C1_B_Cb,
-		.element0 = C2_R_Cr,
-	},
-	[MDP_Y_CBCR_H2V1] = {
-		.format = MDP_Y_CBCR_H2V1,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
-		.fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR,
-		.chroma_sample = MDSS_MDP_CHROMA_H2V1,
-		.element1 = C2_R_Cr,
-		.element0 = C1_B_Cb,
-	},
-	[MDP_Y_CRCB_H1V2] = {
-		.format = MDP_Y_CRCB_H1V2,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
-		.fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR,
-		.chroma_sample = MDSS_MDP_CHROMA_H1V2,
-		.element1 = C1_B_Cb,
-		.element0 = C2_R_Cr,
-	},
-	[MDP_Y_CBCR_H1V2] = {
-		.format = MDP_Y_CBCR_H1V2,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
-		.fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR,
-		.chroma_sample = MDSS_MDP_CHROMA_H1V2,
-		.element1 = C2_R_Cr,
-		.element0 = C1_B_Cb,
-	},
-	[MDP_Y_CRCB_H2V2] = {
-		.format = MDP_Y_CRCB_H2V2,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
-		.fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR,
-		.chroma_sample = MDSS_MDP_CHROMA_420,
-		.element1 = C1_B_Cb,
-		.element0 = C2_R_Cr,
-	},
-	[MDP_Y_CBCR_H2V2] = {
-		.format = MDP_Y_CBCR_H2V2,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
-		.fetch_planes = MDSS_MDP_PLANE_PSEUDO_PLANAR,
-		.chroma_sample = MDSS_MDP_CHROMA_420,
-		.element1 = C2_R_Cr,
-		.element0 = C1_B_Cb,
-	},
-	[MDP_Y_CR_CB_H2V2] = {
-		.format = MDP_Y_CR_CB_H2V2,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
-		.fetch_planes = MDSS_MDP_PLANE_PLANAR,
-		.chroma_sample = MDSS_MDP_CHROMA_420,
-	},
-	[MDP_Y_CB_CR_H2V2] = {
-		.format = MDP_Y_CB_CR_H2V2,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
-		.fetch_planes = MDSS_MDP_PLANE_PLANAR,
-		.chroma_sample = MDSS_MDP_CHROMA_420,
-	},
-	[MDP_Y_CR_CB_GH2V2] = {
-		.format = MDP_Y_CR_CB_GH2V2,
-		.is_yuv = 1,
-		.a_bit = 0,
-		.r_bit = 3,	/* R, 8 bits */
-		.b_bit = 3,	/* B, 8 bits */
-		.g_bit = 3,	/* G, 8 bits */
-		.alpha_enable = 0,
-		.unpack_tight = 1,
-		.unpack_align_msb = 0,
-		.unpack_count = 1,	/* 2 */
-		.bpp = 1,	/* 2 bpp */
-		.fetch_planes = MDSS_MDP_PLANE_PLANAR,
-		.chroma_sample = MDSS_MDP_CHROMA_420,
+		.unpack_count = 4,
+		.bpp = 2,
+		.element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y },
 	},
 };
 #endif
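
Worth noting while reading the new table: the old entries stored the
hardware's value-minus-one encodings directly (.bpp = 1 meaning 2 bytes,
.unpack_count = 2 meaning 3 components), whereas the macros store the real
counts plus an element[] array in unpack order. A stand-alone read-back of
the MDP_RGB_565 element order, using the C0..C3 indices that moved out of
this header:

	#include <stdio.h>

	enum { C0_G_Y, C1_B_Cb, C2_R_Cr, C3_ALPHA };

	struct fmt { const char *name; int unpack_count; int element[4]; };

	/* mirrors FMT_RGB_565(MDP_RGB_565, C1_B_Cb, C0_G_Y, C2_R_Cr) */
	static const struct fmt rgb565 = {
		"MDP_RGB_565", 3, { C1_B_Cb, C0_G_Y, C2_R_Cr }
	};

	int main(void)
	{
		static const char *comp[] = { "G/Y", "B/Cb", "R/Cr", "A" };

		for (int i = 0; i < rgb565.unpack_count; i++)
			printf("%s element%d = %s\n", rgb565.name, i,
			       comp[rgb565.element[i]]);
		return 0;
	}
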
diff --git a/drivers/video/msm/mdss/mdss_mdp_hwio.h b/drivers/video/msm/mdss/mdss_mdp_hwio.h
index d013a4f..d50f47e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_hwio.h
+++ b/drivers/video/msm/mdss/mdss_mdp_hwio.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,14 @@
 
 #include <linux/bitops.h>
 
+#define IGC_LUT_ENTRIES	256
+#define GC_LUT_SEGMENTS	16
+#define ENHIST_LUT_ENTRIES 256
+#define HIST_V_SIZE	256
+
+#define MDSS_MDP_HW_REV_100		0x10000000
+#define MDSS_MDP_HW_REV_102		0x10020000
+
 #define MDSS_REG_HW_VERSION				0x0
 #define MDSS_REG_HW_INTR_STATUS				0x10
 
@@ -34,6 +42,10 @@
 #define MDSS_MDP_REG_HIST_INTR_STATUS			0x00120
 #define MDSS_MDP_REG_HIST_INTR_CLEAR			0x00124
 
+#define MDSS_MDP_REG_SPLIT_DISPLAY_EN			0x003F4
+#define MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL	0x003F8
+#define MDSS_MDP_REG_SPLIT_DISPLAY_LOWER_PIPE_CTRL	0x004F0
+
 #define MDSS_INTF_DSI	0x1
 #define MDSS_INTF_HDMI	0x3
 #define MDSS_INTF_LCDC	0x5
@@ -74,6 +86,11 @@
 	MDSS_MDP_IRQ_INTF_VSYNC = 25,
 };
 
+#define MDSS_MDP_REG_IGC_VIG_BASE			0x300
+#define MDSS_MDP_REG_IGC_RGB_BASE			0x310
+#define MDSS_MDP_REG_IGC_DMA_BASE			0x320
+#define MDSS_MDP_REG_IGC_DSPP_BASE			0x400
+
 enum mdss_mdp_ctl_index {
 	MDSS_MDP_CTL0,
 	MDSS_MDP_CTL1,
@@ -147,10 +164,16 @@
 #define MDSS_MDP_REG_SSPP_STILE_FRAME_SIZE		0x02C
 #define MDSS_MDP_REG_SSPP_SRC_FORMAT			0x030
 #define MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN		0x034
+#define MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_0		0x050
+#define MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_1		0x054
+#define MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_2		0x058
 
 #define MDSS_MDP_REG_SSPP_SRC_OP_MODE			0x038
 #define MDSS_MDP_OP_DEINTERLACE			BIT(22)
 #define MDSS_MDP_OP_DEINTERLACE_ODD		BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1			BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0			BIT(17)
+#define MDSS_MDP_OP_IGC_EN			BIT(16)
 #define MDSS_MDP_OP_FLIP_UD			BIT(14)
 #define MDSS_MDP_OP_FLIP_LR			BIT(13)
 #define MDSS_MDP_OP_BWC_EN			BIT(0)
@@ -161,6 +184,7 @@
 #define MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR		0x03C
 #define MDSS_MDP_REG_SSPP_FETCH_CONFIG			0x048
 #define MDSS_MDP_REG_SSPP_VC1_RANGE			0x04C
+#define MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS		0x070
 
 #define MDSS_MDP_REG_SSPP_CURRENT_SRC0_ADDR		0x0A4
 #define MDSS_MDP_REG_SSPP_CURRENT_SRC1_ADDR		0x0A8
@@ -179,6 +203,8 @@
 #define MDSS_MDP_REG_VIG_QSEED2_C03_INIT_PHASEY		0x224
 #define MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEX		0x228
 #define MDSS_MDP_REG_VIG_QSEED2_C12_INIT_PHASEY		0x22C
+#define MDSS_MDP_REG_VIG_QSEED2_SHARP			0x230
+#define MDSS_MDP_REG_VIG_PA_BASE			0x310
 
 #define MDSS_MDP_REG_SCALE_CONFIG			0x204
 #define MDSS_MDP_REG_SCALE_PHASE_STEP_X			0x210
@@ -189,23 +215,31 @@
 #define MDSS_MDP_REG_VIG_CSC_0_BASE			0x280
 #define MDSS_MDP_REG_VIG_CSC_1_BASE			0x320
 
+#define MDSS_MDP_REG_VIG_HIST_CTL_BASE			0x2C4
+#define MDSS_MDP_REG_VIG_HIST_LUT_BASE			0x2F0
+
 #define MDSS_MDP_SCALE_FILTER_NEAREST		0x0
 #define MDSS_MDP_SCALE_FILTER_BIL		0x1
 #define MDSS_MDP_SCALE_FILTER_PCMN		0x2
 #define MDSS_MDP_SCALE_FILTER_CA		0x3
 #define MDSS_MDP_SCALEY_EN			BIT(1)
 #define MDSS_MDP_SCALEX_EN			BIT(0)
+#define MDSS_MDP_FMT_SOLID_FILL			0x4037FF
 
 #define MDSS_MDP_NUM_REG_MIXERS 3
 #define MDSS_MDP_NUM_WB_MIXERS 2
 
-enum mdss_mdp_mixer_index {
-	MDSS_MDP_LAYERMIXER0,
-	MDSS_MDP_LAYERMIXER1,
-	MDSS_MDP_LAYERMIXER2,
-	MDSS_MDP_LAYERMIXER3,
-	MDSS_MDP_LAYERMIXER4,
-	MDSS_MDP_MAX_LAYERMIXER
+enum mdss_mdp_mixer_intf_index {
+	MDSS_MDP_INTF_LAYERMIXER0,
+	MDSS_MDP_INTF_LAYERMIXER1,
+	MDSS_MDP_INTF_LAYERMIXER2,
+	MDSS_MDP_INTF_MAX_LAYERMIXER,
+};
+
+enum mdss_mdp_mixer_wb_index {
+	MDSS_MDP_WB_LAYERMIXER0,
+	MDSS_MDP_WB_LAYERMIXER1,
+	MDSS_MDP_WB_MAX_LAYERMIXER,
 };
 
 enum mdss_mdp_stage_index {
@@ -260,6 +294,8 @@
 #define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0	0x108
 #define MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1	0x10C
 
+#define MDSS_MDP_REG_LM_GC_LUT_BASE	0x110
+
 #define MDSS_MDP_LM_BORDER_COLOR		(1 << 24)
 #define MDSS_MDP_LM_CURSOR_OUT			(1 << 25)
 #define MDSS_MDP_BLEND_FG_ALPHA_FG_CONST	(0 << 0)
@@ -321,6 +357,16 @@
 	MDSS_MDP_MAX_DSPP
 };
 
+#define MDSS_MDP_REG_DSPP_OFFSET(pipe)	(0x4600 + ((pipe) * 0x400))
+#define MDSS_MDP_REG_DSPP_OP_MODE			0x000
+#define MDSS_MDP_REG_DSPP_PCC_BASE			0x030
+#define MDSS_MDP_REG_DSPP_DITHER_DEPTH			0x150
+#define MDSS_MDP_REG_DSPP_HIST_CTL_BASE			0x210
+#define MDSS_MDP_REG_DSPP_HIST_LUT_BASE			0x230
+#define MDSS_MDP_REG_DSPP_PA_BASE			0x238
+#define MDSS_MDP_REG_DSPP_GAMUT_BASE			0x2DC
+#define MDSS_MDP_REG_DSPP_GC_BASE			0x2B0
+
 enum mdss_mpd_intf_index {
 	MDSS_MDP_NO_INTF,
 	MDSS_MDP_INTF0,
@@ -376,6 +422,7 @@
 #define MDSS_MDP_REG_INTF_FRAME_COUNT			0x0AC
 #define MDSS_MDP_REG_INTF_LINE_COUNT			0x0B0
 #define MDSS_MDP_PANEL_FORMAT_RGB888			0x213F
+#define MDSS_MDP_PANEL_FORMAT_RGB666			0x212A
 
 enum mdss_mdp_pingpong_index {
 	MDSS_MDP_PINGPONG0,
@@ -384,8 +431,6 @@
 	MDSS_MDP_MAX_PINGPONG
 };
 
-#define MDSS_MDP_REG_PP_OFFSET(pp)	(0x21B00 + ((pp) * 0x100))
-
 #define MDSS_MDP_REG_PP_TEAR_CHECK_EN			0x000
 #define MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC		0x004
 #define MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT		0x008
@@ -400,11 +445,14 @@
 #define MDSS_MDP_REG_PP_LINE_COUNT			0x02C
 #define MDSS_MDP_REG_PP_AUTOREFRESH_CONFIG		0x030
 
+#define MDSS_MDP_REG_PP_FBC_MODE			0x034
+#define MDSS_MDP_REG_PP_FBC_BUDGET_CTL			0x038
+#define MDSS_MDP_REG_PP_FBC_LOSSY_MODE			0x03C
+
 #define MDSS_MDP_REG_SMP_ALLOC_W0			0x00180
 #define MDSS_MDP_REG_SMP_ALLOC_R0			0x00230
 
-#define MDSS_MDP_SMP_MMB_SIZE		4096
-#define MDSS_MDP_SMP_MMB_BLOCKS		22
+#define MDSS_MDP_SMP_MMB_BLOCKS			22
 
 enum mdss_mdp_smp_client_index {
 	MDSS_MDP_SMP_CLIENT_UNUSED,
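A quick check of the DSPP register math introduced above (values are from the hunk itself; the helper below is illustrative and not part of the patch): each DSPP instance occupies a 0x400-byte window starting at 0x4600, so the PCC block of DSPP 2, for example, sits at 0x4600 + 2 * 0x400 + 0x030 = 0x4E30.

	/* illustrative helper mirroring MDSS_MDP_REG_DSPP_OFFSET();
	 * assumed name, not present in the patch */
	static inline u32 dspp_reg(int dspp, u32 sub_off)
	{
		return MDSS_MDP_REG_DSPP_OFFSET(dspp) + sub_off;
	}
	/* dspp_reg(2, MDSS_MDP_REG_DSPP_PCC_BASE) == 0x4E30 */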
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
new file mode 100644
index 0000000..d6b0fb2
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_cmd.c
@@ -0,0 +1,333 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include "mdss_panel.h"
+#include "mdss_mdp.h"
+
+#define START_THRESHOLD 4
+#define CONTINUE_THRESHOLD 4
+
+#define MAX_SESSIONS 2
+
+struct mdss_mdp_cmd_ctx {
+	u32 pp_num;
+	u8 ref_cnt;
+
+	struct completion pp_comp;
+	atomic_t vsync_ref;
+	spinlock_t vsync_lock;
+	mdp_vsync_handler_t vsync_handler;
+	int panel_on;
+
+	/* te config */
+	u8 tear_check;
+	u16 total_lcd_lines;
+	u16 v_porch;	/* vertical porches */
+	u32 vsync_cnt;
+};
+
+static struct mdss_mdp_cmd_ctx mdss_mdp_cmd_ctx_list[MAX_SESSIONS];
+
+static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_mixer *mixer,
+			struct mdss_mdp_cmd_ctx *ctx, int enable)
+{
+	u32 cfg;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
+	if (ctx->tear_check)
+		cfg |= BIT(20);	/* VSYNC_IN_EN */
+	cfg |= ctx->vsync_cnt;
+
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_CONFIG_VSYNC, cfg);
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT,
+				0xfff0); /* set to a very large height */
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_VSYNC_INIT_VAL, 0);
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_RD_PTR_IRQ, 0);
+
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_START_POS, ctx->v_porch);
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_SYNC_THRESH,
+			   (CONTINUE_THRESHOLD << 16) | (START_THRESHOLD));
+
+	mdss_mdp_pingpong_write(mixer, MDSS_MDP_REG_PP_TEAR_CHECK_EN, enable);
+	return 0;
+}
+
+static int mdss_mdp_cmd_tearcheck_setup(struct mdss_mdp_ctl *ctl, int enable)
+{
+	struct mdss_mdp_cmd_ctx *ctx = ctl->priv_data;
+	struct mdss_panel_info *pinfo;
+	struct mdss_mdp_mixer *mixer;
+
+	pinfo = &ctl->panel_data->panel_info;
+
+	if (pinfo->mipi.vsync_enable && enable) {
+		u32 mdp_vsync_clk_speed_hz, total_lines;
+		u32 vsync_cnt_cfg_dem;
+
+		mdss_mdp_vsync_clk_enable(1);
+
+		mdp_vsync_clk_speed_hz =
+			mdss_mdp_get_clk_rate(MDSS_CLK_MDP_VSYNC);
+		pr_debug("%s: vsync_clk_rate=%d\n", __func__,
+					mdp_vsync_clk_speed_hz);
+
+		if (mdp_vsync_clk_speed_hz == 0) {
+			pr_err("can't get clk speed\n");
+			return -EINVAL;
+		}
+
+		ctx->tear_check = pinfo->mipi.hw_vsync_mode;
+
+		total_lines = pinfo->lcdc.v_back_porch +
+				    pinfo->lcdc.v_front_porch +
+				    pinfo->lcdc.v_pulse_width + pinfo->yres;
+
+		vsync_cnt_cfg_dem =
+			mult_frac(pinfo->mipi.frame_rate * total_lines,
+						1, 100);
+
+		ctx->vsync_cnt = mdp_vsync_clk_speed_hz / vsync_cnt_cfg_dem;
+
+		ctx->v_porch = pinfo->lcdc.v_back_porch +
+				    pinfo->lcdc.v_front_porch +
+				    pinfo->lcdc.v_pulse_width;
+		ctx->total_lcd_lines = total_lines;
+	} else {
+		enable = 0;
+	}
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (mixer)
+		mdss_mdp_cmd_tearcheck_cfg(mixer, ctx, enable);
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
+	if (mixer)
+		mdss_mdp_cmd_tearcheck_cfg(mixer, ctx, enable);
+
+	return 0;
+}
+
+static inline void cmd_readptr_irq_enable(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = ctl->priv_data;
+
+	if (atomic_inc_return(&ctx->vsync_ref) == 1) {
+		pr_debug("%s:\n", __func__);
+		mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num);
+	}
+}
+
+static inline void cmd_readptr_irq_disable(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx = ctl->priv_data;
+
+	if (atomic_dec_return(&ctx->vsync_ref) == 0) {
+		pr_debug("%s:\n", __func__);
+		mdss_mdp_irq_disable(MDSS_MDP_IRQ_PING_PONG_RD_PTR,
+							ctx->pp_num);
+	}
+}
+
+int mdss_mdp_cmd_set_vsync_handler(struct mdss_mdp_ctl *ctl,
+		mdp_vsync_handler_t vsync_handler)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	unsigned long flags;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&ctx->vsync_lock, flags);
+
+	if (!ctx->vsync_handler && vsync_handler) {
+		ctx->vsync_handler = vsync_handler;
+		cmd_readptr_irq_enable(ctl);
+	} else if (ctx->vsync_handler && !vsync_handler) {
+		cmd_readptr_irq_disable(ctl);
+		ctx->vsync_handler = vsync_handler;
+	}
+
+	spin_unlock_irqrestore(&ctx->vsync_lock, flags);
+
+	return 0;
+}
+
+static void mdss_mdp_cmd_readptr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->priv_data;
+	ktime_t vsync_time;
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return;
+	}
+
+	pr_debug("%s: ctl=%d intf_num=%d\n", __func__, ctl->num, ctl->intf_num);
+
+	vsync_time = ktime_get();
+
+	spin_lock(&ctx->vsync_lock);
+	if (ctx->vsync_handler)
+		ctx->vsync_handler(ctl, vsync_time);
+	spin_unlock(&ctx->vsync_lock);
+}
+
+static void mdss_mdp_cmd_pingpong_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_cmd_ctx *ctx = ctl->priv_data;
+
+	pr_debug("%s: intf_num=%d ctx=%p\n", __func__, ctl->intf_num, ctx);
+
+	if (ctx) {
+		mdss_mdp_irq_disable_nosync(MDSS_MDP_IRQ_PING_PONG_COMP,
+					    ctx->pp_num);
+		complete(&ctx->pp_comp);
+	}
+}
+
+int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	int rc;
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
+	pr_debug("%s: kickoff intf_num=%d ctx=%p\n", __func__,
+					ctl->intf_num, ctx);
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	if (ctx->panel_on == 0) {
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
+		WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
+
+		ctx->panel_on++;
+
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_ON, NULL);
+		WARN(rc, "intf %d panel on error (%d)\n", ctl->intf_num, rc);
+	}
+
+	INIT_COMPLETION(ctx->pp_comp);
+	mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num);
+
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
+
+	wait_for_completion_interruptible(&ctx->pp_comp);
+
+	return 0;
+}
+
+int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	int ret;
+
+	pr_debug("%s: +\n", __func__);
+
+	ctx = (struct mdss_mdp_cmd_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	ctx->panel_on = 0;
+
+	mdss_mdp_cmd_set_vsync_handler(ctl, NULL);
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num,
+				   NULL, NULL);
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num,
+				   NULL, NULL);
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctl->priv_data = NULL;
+
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL);
+	WARN(ret, "intf %d blank error (%d)\n", ctl->intf_num, ret);
+
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF, NULL);
+	WARN(ret, "intf %d panel off error (%d)\n", ctl->intf_num, ret);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+	pr_debug("%s:-\n", __func__);
+
+	return 0;
+}
+
+int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_cmd_ctx *ctx;
+	struct mdss_mdp_mixer *mixer;
+	int i, ret;
+
+	pr_debug("%s:+\n", __func__);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+	if (!mixer) {
+		pr_err("mixer not setup correctly\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < MAX_SESSIONS; i++) {
+		ctx = &mdss_mdp_cmd_ctx_list[i];
+		if (ctx->ref_cnt == 0) {
+			ctx->ref_cnt++;
+			break;
+		}
+	}
+	if (i == MAX_SESSIONS) {
+		pr_err("too many sessions\n");
+		return -ENOMEM;
+	}
+
+	if (!ctx) {
+		pr_err("invalid ctx\n");
+		return -ENODEV;
+	}
+	ctl->priv_data = ctx;
+
+	ctx->pp_num = mixer->num;
+	init_completion(&ctx->pp_comp);
+	spin_lock_init(&ctx->vsync_lock);
+	atomic_set(&ctx->vsync_ref, 0);
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR, ctx->pp_num,
+				   mdss_mdp_cmd_readptr_done, ctl);
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_COMP, ctx->pp_num,
+				   mdss_mdp_cmd_pingpong_done, ctl);
+
+	ret = mdss_mdp_cmd_tearcheck_setup(ctl, 1);
+	if (ret) {
+		pr_err("tearcheck setup failed\n");
+		return ret;
+	}
+
+	ctl->stop_fnc = mdss_mdp_cmd_stop;
+	ctl->display_fnc = mdss_mdp_cmd_kickoff;
+	ctl->set_vsync_handler = mdss_mdp_cmd_set_vsync_handler;
+
+	pr_debug("%s:-\n", __func__);
+
+	return 0;
+}
+
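To make the tear-check counter math in mdss_mdp_cmd_tearcheck_setup() concrete (the panel numbers below are assumed for illustration, not taken from the patch): with a 19.2 MHz vsync clock, a 60 fps panel and total_lines = 2000, the code computes

	u32 vsync_cnt_cfg_dem = mult_frac(60 * 2000, 1, 100); /* = 1200 */
	u32 vsync_cnt = 19200000 / vsync_cnt_cfg_dem;         /* = 16000 */

which appears to be the per-line tick count (19200000 / (60 * 2000) = 160) scaled by 100; that value is OR-ed into the low bits of PP_SYNC_CONFIG_VSYNC along with the counter-enable bits.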
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_video.c b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
index 3f06bc3..0426784 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_video.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_video.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,9 @@
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 
+/* wait for at most two vsyncs at the lowest refresh rate (24 Hz) */
+#define VSYNC_TIMEOUT msecs_to_jiffies(84)
+
 /* intf timing settings */
 struct intf_timing_params {
 	u32 width;
@@ -35,29 +38,59 @@
 	u32 hsync_skew;
 };
 
-#define MAX_SESSIONS 3
 struct mdss_mdp_video_ctx {
-	u32 ctl_num;
-	u32 pp_num;
+	u32 intf_num;
+	char __iomem *base;
+	u32 intf_type;
 	u8 ref_cnt;
 
 	u8 timegen_en;
-	struct completion pp_comp;
 	struct completion vsync_comp;
+	int wait_pending;
+
+	atomic_t vsync_ref;
+	spinlock_t vsync_lock;
+	mdp_vsync_handler_t vsync_handler;
 };
 
-struct mdss_mdp_video_ctx mdss_mdp_video_ctx_list[MAX_SESSIONS];
+static inline void mdp_video_write(struct mdss_mdp_video_ctx *ctx,
+				   u32 reg, u32 val)
+{
+	writel_relaxed(val, ctx->base + reg);
+}
 
-static int mdss_mdp_video_timegen_setup(struct mdss_mdp_ctl *ctl,
+int mdss_mdp_video_addr_setup(struct mdss_data_type *mdata,
+				u32 *offsets,  u32 count)
+{
+	struct mdss_mdp_video_ctx *head;
+	u32 i;
+
+	head = devm_kzalloc(&mdata->pdev->dev,
+			sizeof(struct mdss_mdp_video_ctx) * count, GFP_KERNEL);
+	if (!head)
+		return -ENOMEM;
+
+	for (i = 0; i < count; i++) {
+		head[i].base = mdata->mdp_base + offsets[i];
+		pr_debug("adding Video Intf #%d offset=0x%x virt=%p\n", i,
+				offsets[i], head[i].base);
+		head[i].ref_cnt = 0;
+		head[i].intf_num = i + MDSS_MDP_INTF0;
+	}
+
+	mdata->video_intf = head;
+	mdata->nintf = count;
+	return 0;
+}
+
+static int mdss_mdp_video_timegen_setup(struct mdss_mdp_video_ctx *ctx,
 					struct intf_timing_params *p)
 {
 	u32 hsync_period, vsync_period;
 	u32 hsync_start_x, hsync_end_x, display_v_start, display_v_end;
 	u32 active_h_start, active_h_end, active_v_start, active_v_end;
+	u32 den_polarity, hsync_polarity, vsync_polarity;
 	u32 display_hctl, active_hctl, hsync_ctl, polarity_ctl;
-	int off;
-
-	off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
 
 	hsync_period = p->hsync_pulse_width + p->h_back_porch +
 			p->width + p->h_front_porch;
@@ -69,7 +102,7 @@
 	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
 			p->hsync_skew - 1;
 
-	if (ctl->intf_type == MDSS_INTF_EDP) {
+	if (ctx->intf_type == MDSS_INTF_EDP) {
 		display_v_start += p->hsync_pulse_width + p->h_back_porch;
 		display_v_end -= p->h_front_porch;
 	}
@@ -106,37 +139,87 @@
 
 	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
 	display_hctl = (hsync_end_x << 16) | hsync_start_x;
-	polarity_ctl = (0 << 2) |	/* DEN Polarity */
-		       (0 << 1) |      /* VSYNC Polarity */
-		       (0);	       /* HSYNC Polarity */
 
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_HSYNC_CTL, hsync_ctl);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
+	den_polarity = 0;
+	if (MDSS_INTF_HDMI == ctx->intf_type) {
+		hsync_polarity = p->yres >= 720 ? 0 : 1;
+		vsync_polarity = p->yres >= 720 ? 0 : 1;
+	} else {
+		hsync_polarity = 0;
+		vsync_polarity = 0;
+	}
+	polarity_ctl = (den_polarity << 2)   | /*  DEN Polarity  */
+		       (vsync_polarity << 1) | /* VSYNC Polarity */
+		       (hsync_polarity << 0);  /* HSYNC Polarity */
+
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_CTL, hsync_ctl);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
 			   vsync_period * hsync_period);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0,
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PULSE_WIDTH_F0,
 			   p->vsync_pulse_width * hsync_period);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_DISPLAY_HCTL,
-			   display_hctl);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_DISPLAY_V_START_F0,
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_HCTL, display_hctl);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_START_F0,
 			   display_v_start);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_DISPLAY_V_END_F0,
-			   display_v_end);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_ACTIVE_HCTL, active_hctl);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_ACTIVE_V_START_F0,
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_DISPLAY_V_END_F0, display_v_end);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_HCTL, active_hctl);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_V_START_F0,
 			   active_v_start);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_ACTIVE_V_END_F0,
-			   active_v_end);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_ACTIVE_V_END_F0, active_v_end);
 
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_BORDER_COLOR,
-			   p->border_clr);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_UNDERFLOW_COLOR,
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_BORDER_COLOR, p->border_clr);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_UNDERFLOW_COLOR,
 			   p->underflow_clr);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_HSYNC_SKEW,
-			   p->hsync_skew);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_POLARITY_CTL,
-			   polarity_ctl);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_PANEL_FORMAT,
-			   MDSS_MDP_PANEL_FORMAT_RGB888);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_HSYNC_SKEW, p->hsync_skew);
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_POLARITY_CTL, polarity_ctl);
+
+	return 0;
+}
+
+
+static inline void video_vsync_irq_enable(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_video_ctx *ctx = ctl->priv_data;
+
+	if (atomic_inc_return(&ctx->vsync_ref) == 1)
+		mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num);
+	else
+		mdss_mdp_irq_clear(ctl->mdata, MDSS_MDP_IRQ_INTF_VSYNC,
+				ctl->intf_num);
+}
+
+static inline void video_vsync_irq_disable(struct mdss_mdp_ctl *ctl)
+{
+	struct mdss_mdp_video_ctx *ctx = ctl->priv_data;
+
+	if (atomic_dec_return(&ctx->vsync_ref) == 0)
+		mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num);
+}
+
+static int mdss_mdp_video_set_vsync_handler(struct mdss_mdp_ctl *ctl,
+		mdp_vsync_handler_t vsync_handler)
+{
+	struct mdss_mdp_video_ctx *ctx;
+	unsigned long flags;
+	int need_update;
+
+	ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data;
+	if (!ctx) {
+		pr_err("invalid ctx for ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+
+	spin_lock_irqsave(&ctx->vsync_lock, flags);
+	need_update = (!ctx->vsync_handler && vsync_handler) ||
+			(ctx->vsync_handler && !vsync_handler);
+	ctx->vsync_handler = vsync_handler;
+	spin_unlock_irqrestore(&ctx->vsync_lock, flags);
+
+	if (need_update) {
+		if (vsync_handler)
+			video_vsync_irq_enable(ctl);
+		else
+			video_vsync_irq_disable(ctl);
+	}
 
 	return 0;
 }
@@ -144,7 +227,7 @@
 static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl)
 {
 	struct mdss_mdp_video_ctx *ctx;
-	int off;
+	int rc;
 
 	pr_debug("stop ctl=%d\n", ctl->num);
 
@@ -155,50 +238,64 @@
 	}
 
 	if (ctx->timegen_en) {
-		off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
-		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_BLANK, NULL);
+		if (rc == -EBUSY) {
+			pr_debug("intf #%d busy don't turn off\n",
+				 ctl->intf_num);
+			return rc;
+		}
+		WARN(rc, "intf %d blank error (%d)\n", ctl->intf_num, rc);
+
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 0);
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 		ctx->timegen_en = false;
+
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_OFF, NULL);
+		WARN(rc, "intf %d timegen off error (%d)\n", ctl->intf_num, rc);
+
+		mdss_mdp_irq_disable(MDSS_MDP_IRQ_INTF_UNDER_RUN,
+			ctl->intf_num);
 	}
 
-	memset(ctx, 0, sizeof(*ctx));
+	mdss_mdp_video_set_vsync_handler(ctl, NULL);
+
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num,
+				   NULL, NULL);
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num,
+				   NULL, NULL);
+
+	ctx->ref_cnt--;
+	ctl->priv_data = NULL;
 
 	return 0;
 }
 
-static void mdss_mdp_video_pp_intr_done(void *arg)
-{
-	struct mdss_mdp_video_ctx *ctx;
-
-	ctx = (struct mdss_mdp_video_ctx *) arg;
-	if (!ctx) {
-		pr_err("invalid ctx\n");
-		return;
-	}
-
-	pr_debug("intr mixer=%d\n", ctx->pp_num);
-
-	complete(&ctx->pp_comp);
-}
-
 static void mdss_mdp_video_vsync_intr_done(void *arg)
 {
-	struct mdss_mdp_video_ctx *ctx;
+	struct mdss_mdp_ctl *ctl = arg;
+	struct mdss_mdp_video_ctx *ctx = ctl->priv_data;
+	ktime_t vsync_time;
 
-	ctx = (struct mdss_mdp_video_ctx *) arg;
 	if (!ctx) {
 		pr_err("invalid ctx\n");
 		return;
 	}
 
-	pr_debug("intr ctl=%d\n", ctx->ctl_num);
+	vsync_time = ktime_get();
 
-	complete(&ctx->vsync_comp);
+	pr_debug("intr ctl=%d\n", ctl->num);
+
+	complete_all(&ctx->vsync_comp);
+	spin_lock(&ctx->vsync_lock);
+	if (ctx->vsync_handler)
+		ctx->vsync_handler(ctl, vsync_time);
+	spin_unlock(&ctx->vsync_lock);
 }
 
-static int mdss_mdp_video_prepare(struct mdss_mdp_ctl *ctl, void *arg)
+static int mdss_mdp_video_wait4comp(struct mdss_mdp_ctl *ctl, void *arg)
 {
 	struct mdss_mdp_video_ctx *ctx;
+	int rc;
 
 	ctx = (struct mdss_mdp_video_ctx *) ctl->priv_data;
 	if (!ctx) {
@@ -206,25 +303,35 @@
 		return -ENODEV;
 	}
 
-	if (ctx->timegen_en) {
-		u32 intr_type = MDSS_MDP_IRQ_PING_PONG_COMP;
+	WARN(!ctx->wait_pending, "waiting without commit! ctl=%d\n", ctl->num);
 
-		pr_debug("waiting for ping pong %d done\n", ctx->pp_num);
-		mdss_mdp_set_intr_callback(intr_type, ctx->pp_num,
-					   mdss_mdp_video_pp_intr_done, ctx);
-		mdss_mdp_irq_enable(intr_type, ctx->pp_num);
+	rc = wait_for_completion_interruptible_timeout(&ctx->vsync_comp,
+			VSYNC_TIMEOUT);
+	WARN(rc <= 0, "vsync timed out (%d) ctl=%d\n", rc, ctl->num);
 
-		wait_for_completion_interruptible(&ctx->pp_comp);
-		mdss_mdp_irq_disable(intr_type, ctx->pp_num);
+	if (ctx->wait_pending) {
+		ctx->wait_pending = 0;
+		video_vsync_irq_disable(ctl);
 	}
 
-	return 0;
+	return rc;
+}
+
+static void mdss_mdp_video_underrun_intr_done(void *arg)
+{
+	struct mdss_mdp_ctl *ctl = arg;
+	if (unlikely(!ctl))
+		return;
+
+	ctl->underrun_cnt++;
+	pr_warn("display underrun detected for ctl=%d count=%d\n", ctl->num,
+			ctl->underrun_cnt);
 }
 
 static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg)
 {
 	struct mdss_mdp_video_ctx *ctx;
-	u32 intr_type = MDSS_MDP_IRQ_INTF_VSYNC;
+	int rc;
 
 	pr_debug("kickoff ctl=%d\n", ctl->num);
 
@@ -233,40 +340,52 @@
 		pr_err("invalid ctx\n");
 		return -ENODEV;
 	}
-	mdss_mdp_set_intr_callback(intr_type, ctl->intf_num,
-				   mdss_mdp_video_vsync_intr_done, ctx);
-	mdss_mdp_irq_enable(intr_type, ctl->intf_num);
+
+	if (!ctx->wait_pending) {
+		ctx->wait_pending++;
+		video_vsync_irq_enable(ctl);
+		INIT_COMPLETION(ctx->vsync_comp);
+	} else {
+		WARN(1, "commit without wait! ctl=%d\n", ctl->num);
+	}
 
 	if (!ctx->timegen_en) {
-		int off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_UNBLANK, NULL);
+		WARN(rc, "intf %d unblank error (%d)\n", ctl->intf_num, rc);
 
 		pr_debug("enabling timing gen for intf=%d\n", ctl->intf_num);
 
 		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
-		ctx->timegen_en = true;
-		wmb();
-	}
 
-	wait_for_completion_interruptible(&ctx->vsync_comp);
-	mdss_mdp_irq_disable(intr_type, ctl->intf_num);
+		mdss_mdp_irq_enable(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num);
+		mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1);
+		wmb();
+
+		rc = wait_for_completion_interruptible_timeout(&ctx->vsync_comp,
+				VSYNC_TIMEOUT);
+		WARN(rc <= 0, "timeout (%d) enabling timegen on ctl=%d\n",
+				rc, ctl->num);
+
+		ctx->timegen_en = true;
+		rc = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_PANEL_ON, NULL);
+		WARN(rc, "intf %d panel on error (%d)\n", ctl->intf_num, rc);
+	}
 
 	return 0;
 }
 
 int mdss_mdp_video_start(struct mdss_mdp_ctl *ctl)
 {
-	struct msm_fb_data_type *mfd;
+	struct mdss_data_type *mdata;
 	struct mdss_panel_info *pinfo;
 	struct mdss_mdp_video_ctx *ctx;
 	struct mdss_mdp_mixer *mixer;
 	struct intf_timing_params itp = {0};
-	struct fb_info *fbi;
+	u32 dst_bpp;
 	int i;
 
-	mfd = ctl->mfd;
-	fbi = mfd->fbi;
-	pinfo = &mfd->panel_info;
+	mdata = ctl->mdata;
+	pinfo = &ctl->panel_data->panel_info;
 	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
 
 	if (!mixer) {
@@ -274,48 +393,66 @@
 		return -ENODEV;
 	}
 
+	i = ctl->intf_num - MDSS_MDP_INTF0;
+	if (i < mdata->nintf) {
+		ctx = ((struct mdss_mdp_video_ctx *) mdata->video_intf) + i;
+		if (ctx->ref_cnt) {
+			pr_err("Intf %d already in use\n", ctl->intf_num);
+			return -EBUSY;
+		}
+		pr_debug("video Intf #%d base=%p\n", ctx->intf_num, ctx->base);
+		ctx->ref_cnt++;
+	} else {
+		pr_err("Invalid intf number: %d\n", ctl->intf_num);
+		return -EINVAL;
+	}
+
 	pr_debug("start ctl=%u\n", ctl->num);
 
-	for (i = 0; i < MAX_SESSIONS; i++) {
-		ctx = &mdss_mdp_video_ctx_list[i];
-		if (ctx->ref_cnt == 0) {
-			ctx->ref_cnt++;
-			break;
-		}
-	}
-	if (i == MAX_SESSIONS) {
-		pr_err("too many sessions\n");
-		return -ENOMEM;
-	}
 	ctl->priv_data = ctx;
-	ctx->ctl_num = ctl->num;
-	ctx->pp_num = mixer->num;
-	init_completion(&ctx->pp_comp);
+	ctx->intf_type = ctl->intf_type;
 	init_completion(&ctx->vsync_comp);
+	spin_lock_init(&ctx->vsync_lock);
+	atomic_set(&ctx->vsync_ref, 0);
 
-	itp.width = pinfo->xres + pinfo->lcdc.xres_pad;
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_VSYNC, ctl->intf_num,
+				   mdss_mdp_video_vsync_intr_done, ctl);
+	mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_INTF_UNDER_RUN, ctl->intf_num,
+				   mdss_mdp_video_underrun_intr_done, ctl);
+
+	dst_bpp = pinfo->fbc.enabled ? (pinfo->fbc.target_bpp) : (pinfo->bpp);
+
+	itp.width = mult_frac((pinfo->xres + pinfo->lcdc.xres_pad),
+				dst_bpp, pinfo->bpp);
 	itp.height = pinfo->yres + pinfo->lcdc.yres_pad;
 	itp.border_clr = pinfo->lcdc.border_clr;
 	itp.underflow_clr = pinfo->lcdc.underflow_clr;
 	itp.hsync_skew = pinfo->lcdc.hsync_skew;
 
-	itp.xres =  pinfo->xres;
+	itp.xres =  mult_frac(pinfo->xres, dst_bpp, pinfo->bpp);
 	itp.yres = pinfo->yres;
-	itp.h_back_porch =  pinfo->lcdc.h_back_porch;
-	itp.h_front_porch =  pinfo->lcdc.h_front_porch;
-	itp.v_back_porch =  pinfo->lcdc.v_back_porch;
-	itp.v_front_porch = pinfo->lcdc.v_front_porch;
-	itp.hsync_pulse_width = pinfo->lcdc.h_pulse_width;
+	itp.h_back_porch =  mult_frac(pinfo->lcdc.h_back_porch, dst_bpp,
+			pinfo->bpp);
+	itp.h_front_porch = mult_frac(pinfo->lcdc.h_front_porch, dst_bpp,
+			pinfo->bpp);
+	itp.v_back_porch =  mult_frac(pinfo->lcdc.v_back_porch, dst_bpp,
+			pinfo->bpp);
+	itp.v_front_porch = mult_frac(pinfo->lcdc.v_front_porch, dst_bpp,
+			pinfo->bpp);
+	itp.hsync_pulse_width = mult_frac(pinfo->lcdc.h_pulse_width, dst_bpp,
+			pinfo->bpp);
 	itp.vsync_pulse_width = pinfo->lcdc.v_pulse_width;
 
-	if (mdss_mdp_video_timegen_setup(ctl, &itp)) {
+	if (mdss_mdp_video_timegen_setup(ctx, &itp)) {
 		pr_err("unable to get timing parameters\n");
 		return -EINVAL;
 	}
+	mdp_video_write(ctx, MDSS_MDP_REG_INTF_PANEL_FORMAT, ctl->dst_format);
 
 	ctl->stop_fnc = mdss_mdp_video_stop;
-	ctl->prepare_fnc = mdss_mdp_video_prepare;
 	ctl->display_fnc = mdss_mdp_video_display;
+	ctl->wait_fnc = mdss_mdp_video_wait4comp;
+	ctl->set_vsync_handler = mdss_mdp_video_set_vsync_handler;
 
 	return 0;
 }
diff --git a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
index cd6bd14..7fbb031 100644
--- a/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
+++ b/drivers/video/msm/mdss/mdss_mdp_intf_writeback.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,8 +17,6 @@
 #include "mdss_mdp.h"
 #include "mdss_mdp_rotator.h"
 
-#define ROT_BLK_SIZE	128
-
 enum mdss_mdp_writeback_type {
 	MDSS_MDP_WRITEBACK_TYPE_ROTATOR,
 	MDSS_MDP_WRITEBACK_TYPE_LINE,
@@ -27,6 +25,7 @@
 
 struct mdss_mdp_writeback_ctx {
 	u32 wb_num;
+	char __iomem *base;
 	u8 ref_cnt;
 	u8 type;
 
@@ -38,7 +37,7 @@
 	u16 width;
 	u16 height;
 	u8 rot90;
-
+	u32 bwc_mode;
 	int initialized;
 
 	struct mdss_mdp_plane_sizes dst_planes;
@@ -75,25 +74,33 @@
 	},
 };
 
+static inline void mdp_wb_write(struct mdss_mdp_writeback_ctx *ctx,
+				u32 reg, u32 val)
+{
+	writel_relaxed(val, ctx->base + reg);
+}
+
 static int mdss_mdp_writeback_addr_setup(struct mdss_mdp_writeback_ctx *ctx,
 					 struct mdss_mdp_data *data)
 {
-	int off, ret;
+	int ret;
 
 	if (!data)
 		return -EINVAL;
 
 	pr_debug("wb_num=%d addr=0x%x\n", ctx->wb_num, data->p[0].addr);
 
+	if (ctx->bwc_mode)
+		data->bwc_enabled = 1;
+
 	ret = mdss_mdp_data_check(data, &ctx->dst_planes);
 	if (ret)
 		return ret;
 
-	off = MDSS_MDP_REG_WB_OFFSET(ctx->wb_num);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST0_ADDR, data->p[0].addr);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST1_ADDR, data->p[1].addr);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST2_ADDR, data->p[2].addr);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST3_ADDR, data->p[3].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST0_ADDR, data->p[0].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST1_ADDR, data->p[1].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST2_ADDR, data->p[2].addr);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST3_ADDR, data->p[3].addr);
 
 	return 0;
 }
@@ -102,13 +109,14 @@
 {
 	struct mdss_mdp_format_params *fmt;
 	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
-	int off;
 	u32 opmode = ctx->opmode;
+	struct mdss_data_type *mdata;
 
 	pr_debug("wb_num=%d format=%d\n", ctx->wb_num, ctx->format);
 
 	mdss_mdp_get_plane_sizes(ctx->format, ctx->width, ctx->height,
-				 &ctx->dst_planes);
+				 &ctx->dst_planes,
+				 ctx->opmode & MDSS_MDP_OP_BWC_EN);
 
 	fmt = mdss_mdp_get_format_params(ctx->format);
 	if (!fmt) {
@@ -145,20 +153,38 @@
 
 	dst_format = (chroma_samp << 23) |
 		     (fmt->fetch_planes << 19) |
-		     (fmt->unpack_align_msb << 18) |
-		     (fmt->unpack_tight << 17) |
-		     (fmt->unpack_count << 12) |
-		     (fmt->bpp << 9) |
-		     (fmt->alpha_enable << 8) |
-		     (fmt->a_bit << 6) |
-		     (fmt->r_bit << 4) |
-		     (fmt->b_bit << 2) |
-		     (fmt->g_bit << 0);
+		     (fmt->bits[C3_ALPHA] << 6) |
+		     (fmt->bits[C2_R_Cr] << 4) |
+		     (fmt->bits[C1_B_Cb] << 2) |
+		     (fmt->bits[C0_G_Y] << 0);
 
-	pattern = (fmt->element3 << 24) |
-		  (fmt->element2 << 15) |
-		  (fmt->element1 << 8) |
-		  (fmt->element0 << 0);
+	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+		dst_format |= BIT(8); /* DSTC3_EN */
+		if (!fmt->alpha_enable)
+			dst_format |= BIT(14); /* DST_ALPHA_X */
+	}
+
+	if (fmt->fetch_planes != MDSS_MDP_PLANE_PLANAR) {
+		mdata = mdss_mdp_get_mdata();
+		if (mdata && mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
+			pattern = (fmt->element[3] << 24) |
+				  (fmt->element[2] << 16) |
+				  (fmt->element[1] << 8)  |
+				  (fmt->element[0] << 0);
+		} else {
+			pattern = (fmt->element[3] << 24) |
+				  (fmt->element[2] << 15) |
+				  (fmt->element[1] << 8)  |
+				  (fmt->element[0] << 0);
+		}
+
+		dst_format |= (fmt->unpack_align_msb << 18) |
+			      (fmt->unpack_tight << 17) |
+			      ((fmt->unpack_count - 1) << 12) |
+			      ((fmt->bpp - 1) << 9);
+	} else {
+		pattern = 0;
+	}
 
 	ystride0 = (ctx->dst_planes.ystride[0]) |
 		   (ctx->dst_planes.ystride[1] << 16);
@@ -166,13 +192,12 @@
 		   (ctx->dst_planes.ystride[3] << 16);
 	outsize = (ctx->height << 16) | ctx->width;
 
-	off = MDSS_MDP_REG_WB_OFFSET(ctx->wb_num);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_FORMAT, dst_format);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_OP_MODE, opmode);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_PACK_PATTERN, pattern);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_YSTRIDE0, ystride0);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_DST_YSTRIDE1, ystride1);
-	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_WB_OUT_SIZE, outsize);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_FORMAT, dst_format);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_OP_MODE, opmode);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_PACK_PATTERN, pattern);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_YSTRIDE0, ystride0);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_DST_YSTRIDE1, ystride1);
+	mdp_wb_write(ctx, MDSS_MDP_REG_WB_OUT_SIZE, outsize);
 
 	return 0;
 }
@@ -228,26 +253,30 @@
 	pr_debug("rot setup wb_num=%d\n", ctx->wb_num);
 
 	ctx->opmode = BIT(6); /* ROT EN */
-	if (ROT_BLK_SIZE == 128)
+	if (ctl->mdata->rot_block_size == 128)
 		ctx->opmode |= BIT(4); /* block size 128 */
 
-	ctx->opmode |= rot->bwc_mode;
+	ctx->bwc_mode = rot->bwc_mode;
+	ctx->opmode |= ctx->bwc_mode;
 
 	ctx->width = rot->src_rect.w;
 	ctx->height = rot->src_rect.h;
 
 	ctx->format = rot->format;
 
-	ctx->rot90 = !!(rot->rotations & MDP_ROT_90);
+	ctx->rot90 = !!(rot->flags & MDP_ROT_90);
+
+	if (ctx->bwc_mode || ctx->rot90)
+		ctx->format = mdss_mdp_get_rotator_dst_format(rot->format);
+	else
+		ctx->format = rot->format;
+
 	if (ctx->rot90) {
 		ctx->opmode |= BIT(5); /* ROT 90 */
 		swap(ctx->width, ctx->height);
 	}
 
-	if (mdss_mdp_writeback_format_setup(ctx))
-		return -EINVAL;
-
-	return 0;
+	return mdss_mdp_writeback_format_setup(ctx);
 }
 
 static int mdss_mdp_writeback_stop(struct mdss_mdp_ctl *ctl)
@@ -258,6 +287,9 @@
 
 	ctx = (struct mdss_mdp_writeback_ctx *) ctl->priv_data;
 	if (ctx) {
+		mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+				   NULL, NULL);
+
 		ctl->priv_data = NULL;
 		ctx->ref_cnt--;
 	}
@@ -311,8 +343,6 @@
 	flush_bits = BIT(16); /* WB */
 	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, flush_bits);
 
-	mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
-				   mdss_mdp_writeback_intr_done, ctx);
 	mdss_mdp_irq_enable(ctx->intr_type, ctx->intf_num);
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
@@ -344,8 +374,12 @@
 	}
 	ctl->priv_data = ctx;
 	ctx->wb_num = ctl->num;	/* wb num should match ctl num */
+	ctx->base = ctl->wb_base;
 	ctx->initialized = false;
 
+	mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+				   mdss_mdp_writeback_intr_done, ctx);
+
 	if (ctx->type == MDSS_MDP_WRITEBACK_TYPE_ROTATOR)
 		ctl->prepare_fnc = mdss_mdp_writeback_prepare_rot;
 	else /* wfd or line mode */
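A note on the revision-dependent pack pattern above: the two encodings differ only in where element[2] lands (bit 16 on MDSS_MDP_HW_REV_102 and later, the legacy bit 15 before that). With assumed element values e0..e3 the two forms are

	pattern = (e3 << 24) | (e2 << 16) | (e1 << 8) | (e0 << 0); /* rev >= 102 */
	pattern = (e3 << 24) | (e2 << 15) | (e1 << 8) | (e0 << 0); /* older revs */

Note also that DST_FORMAT now encodes unpack_count and bpp as N-1 in the bit-12 and bit-9 fields, where the code it replaces wrote the raw values.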
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 9950790..dae3e05 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,32 +13,45 @@
 
 #define pr_fmt(fmt)	"%s: " fmt, __func__
 
+#include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/major.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/msm_mdp.h>
 
+#include <mach/iommu_domains.h>
+
+#include "mdss.h"
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 #include "mdss_mdp_rotator.h"
 
+#define VSYNC_PERIOD 16
+#define BORDERFILL_NDX	0x0BF000BF
 #define CHECK_BOUNDS(offset, size, max_size) \
 	(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
 
+static atomic_t ov_active_panels = ATOMIC_INIT(0);
+static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
+
 static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
 				struct mdp_overlay *req)
 {
 	struct mdss_mdp_pipe *pipe;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
 
-	pipe = mdss_mdp_pipe_get_locked(req->id);
-	if (pipe == NULL) {
+	pipe = mdss_mdp_pipe_get(mdata, req->id);
+	if (IS_ERR_OR_NULL(pipe)) {
 		pr_err("invalid pipe ndx=%x\n", req->id);
-		return -ENODEV;
+		return pipe ? PTR_ERR(pipe) : -ENODEV;
 	}
 
 	*req = pipe->req_data;
-	mdss_mdp_pipe_unlock(pipe);
+	mdss_mdp_pipe_unmap(pipe);
 
 	return 0;
 }
@@ -48,11 +61,20 @@
 				      struct mdss_mdp_format_params *fmt)
 {
 	u32 xres, yres;
-	u32 dst_w, dst_h;
+	u32 min_src_size, min_dst_size;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
 
 	xres = mfd->fbi->var.xres;
 	yres = mfd->fbi->var.yres;
 
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
+		min_src_size = fmt->is_yuv ? 2 : 1;
+		min_dst_size = 1;
+	} else {
+		min_src_size = fmt->is_yuv ? 10 : 5;
+		min_dst_size = 2;
+	}
+
 	if (req->z_order >= MDSS_MDP_MAX_STAGE) {
 		pr_err("zorder %d out of range\n", req->z_order);
 		return -ERANGE;
@@ -60,33 +82,42 @@
 
 	if (req->src.width > MAX_IMG_WIDTH ||
 	    req->src.height > MAX_IMG_HEIGHT ||
-	    req->src_rect.w == 0 || req->src_rect.h == 0 ||
-	    req->dst_rect.w < MIN_DST_W || req->dst_rect.h < MIN_DST_H ||
-	    req->dst_rect.w > MAX_DST_W || req->dst_rect.h > MAX_DST_H ||
+	    req->src_rect.w < min_src_size || req->src_rect.h < min_src_size ||
 	    CHECK_BOUNDS(req->src_rect.x, req->src_rect.w, req->src.width) ||
-	    CHECK_BOUNDS(req->src_rect.y, req->src_rect.h, req->src.height) ||
-	    CHECK_BOUNDS(req->dst_rect.x, req->dst_rect.w, xres) ||
-	    CHECK_BOUNDS(req->dst_rect.y, req->dst_rect.h, yres)) {
-		pr_err("invalid image img_w=%d img_h=%d\n",
-				req->src.width, req->src.height);
-
-		pr_err("\tsrc_rect=%d,%d,%d,%d dst_rect=%d,%d,%d,%d\n",
+	    CHECK_BOUNDS(req->src_rect.y, req->src_rect.h, req->src.height)) {
+		pr_err("invalid source image img wh=%dx%d rect=%d,%d,%d,%d\n",
+		       req->src.width, req->src.height,
 		       req->src_rect.x, req->src_rect.y,
-		       req->src_rect.w, req->src_rect.h,
-		       req->dst_rect.x, req->dst_rect.y,
-		       req->dst_rect.w, req->dst_rect.h);
-		return -EINVAL;
+		       req->src_rect.w, req->src_rect.h);
+		return -EOVERFLOW;
 	}
 
-	if (req->flags & MDP_ROT_90) {
-		dst_h = req->dst_rect.w;
-		dst_w = req->dst_rect.h;
-	} else {
-		dst_w = req->dst_rect.w;
-		dst_h = req->dst_rect.h;
+	if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size ||
+	    req->dst_rect.w > MAX_DST_W || req->dst_rect.h > MAX_DST_H) {
+		pr_err("invalid destination resolution (%dx%d)\n",
+		       req->dst_rect.w, req->dst_rect.h);
+		return -EOVERFLOW;
 	}
 
 	if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
+		u32 dst_w, dst_h;
+
+		if ((CHECK_BOUNDS(req->dst_rect.x, req->dst_rect.w, xres) ||
+		     CHECK_BOUNDS(req->dst_rect.y, req->dst_rect.h, yres))) {
+			pr_err("invalid destination rect=%d,%d,%d,%d\n",
+			       req->dst_rect.x, req->dst_rect.y,
+			       req->dst_rect.w, req->dst_rect.h);
+			return -EOVERFLOW;
+		}
+
+		if (req->flags & MDP_ROT_90) {
+			dst_h = req->dst_rect.w;
+			dst_w = req->dst_rect.h;
+		} else {
+			dst_w = req->dst_rect.w;
+			dst_h = req->dst_rect.h;
+		}
+
 		if ((req->src_rect.w * MAX_UPSCALE_RATIO) < dst_w) {
 			pr_err("too much upscaling Width %d->%d\n",
 			       req->src_rect.w, req->dst_rect.w);
@@ -110,6 +141,22 @@
 			       req->src_rect.h, req->dst_rect.h);
 			return -EINVAL;
 		}
+
+		if ((fmt->chroma_sample == MDSS_MDP_CHROMA_420 ||
+		     fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1) &&
+		    ((req->src_rect.w * (MAX_UPSCALE_RATIO / 2)) < dst_w)) {
+			pr_err("too much YUV upscaling Width %d->%d\n",
+			       req->src_rect.w, req->dst_rect.w);
+			return -EINVAL;
+		}
+
+		if ((fmt->chroma_sample == MDSS_MDP_CHROMA_420 ||
+		     fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2) &&
+		    (req->src_rect.h * (MAX_UPSCALE_RATIO / 2)) < dst_h) {
+			pr_err("too much YUV upscaling Height %d->%d\n",
+			       req->src_rect.h, req->dst_rect.h);
+			return -EINVAL;
+		}
 	}
 
 	if (fmt->is_yuv) {
@@ -118,26 +165,6 @@
 			pr_err("invalid odd src resolution or coordinates\n");
 			return -EINVAL;
 		}
-		if ((req->dst_rect.w & 0x1) || (req->dst_rect.h & 0x1)) {
-			pr_err("invalid odd dst resolution\n");
-			return -EINVAL;
-		}
-
-		if (((req->src_rect.w * (MAX_UPSCALE_RATIO / 2)) < dst_w) &&
-		    (fmt->chroma_sample == MDSS_MDP_CHROMA_420 ||
-		     fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1)) {
-			pr_err("too much YUV upscaling Width %d->%d\n",
-			       req->src_rect.w, req->dst_rect.w);
-			return -EINVAL;
-		}
-
-		if (((req->src_rect.h * (MAX_UPSCALE_RATIO / 2)) < dst_h) &&
-		    (fmt->chroma_sample == MDSS_MDP_CHROMA_420 ||
-		     fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)) {
-			pr_err("too much YUV upscaling Height %d->%d\n",
-			       req->src_rect.h, req->dst_rect.h);
-			return -EINVAL;
-		}
 	}
 
 	return 0;
@@ -146,11 +173,12 @@
 static int mdss_mdp_overlay_rotator_setup(struct msm_fb_data_type *mfd,
 					  struct mdp_overlay *req)
 {
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct mdss_mdp_rotator_session *rot;
 	struct mdss_mdp_format_params *fmt;
 	int ret = 0;
 
-	pr_debug("rot ctl=%u req id=%x\n", mfd->ctl->num, req->id);
+	pr_debug("rot ctl=%u req id=%x\n", mdp5_data->ctl->num, req->id);
 
 	fmt = mdss_mdp_get_format_params(req->src.format);
 	if (!fmt) {
@@ -181,8 +209,11 @@
 		return -EINVAL;
 	}
 
-	rot->rotations = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD);
+	/* keep only flags of interest to rotator */
+	rot->flags = req->flags & (MDP_ROT_90 | MDP_FLIP_LR | MDP_FLIP_UD |
+				   MDP_SECURE_OVERLAY_SESSION);
 
+	rot->bwc_mode = (req->flags & MDP_BWC_EN) ? 1 : 0;
 	rot->format = fmt->format;
 	rot->img_width = req->src.width;
 	rot->img_height = req->src.height;
@@ -191,6 +222,11 @@
 	rot->src_rect.w = req->src_rect.w;
 	rot->src_rect.h = req->src_rect.h;
 
+	if (req->flags & MDP_DEINTERLACE) {
+		rot->flags |= MDP_DEINTERLACE;
+		rot->src_rect.h /= 2;
+	}
+
 	rot->params_changed++;
 
 	req->id = rot->session_id;
@@ -205,10 +241,11 @@
 	struct mdss_mdp_format_params *fmt;
 	struct mdss_mdp_pipe *pipe;
 	struct mdss_mdp_mixer *mixer = NULL;
-	u32 pipe_type, mixer_mux;
+	u32 pipe_type, mixer_mux, len, src_format;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	int ret;
 
-	if (mfd == NULL || mfd->ctl == NULL)
+	if (mdp5_data->ctl == NULL)
 		return -ENODEV;
 
 	if (req->flags & MDSS_MDP_RIGHT_MIXER)
@@ -216,7 +253,7 @@
 	else
 		mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;
 
-	pr_debug("pipe ctl=%u req id=%x mux=%d\n", mfd->ctl->num, req->id,
+	pr_debug("pipe ctl=%u req id=%x mux=%d\n", mdp5_data->ctl->num, req->id,
 			mixer_mux);
 
 	if (req->flags & MDP_ROT_90) {
@@ -224,9 +261,13 @@
 		return -ENOTSUPP;
 	}
 
-	fmt = mdss_mdp_get_format_params(req->src.format);
+	src_format = req->src.format;
+	if (req->flags & (MDP_SOURCE_ROTATED_90 | MDP_BWC_EN))
+		src_format = mdss_mdp_get_rotator_dst_format(src_format);
+
+	fmt = mdss_mdp_get_format_params(src_format);
 	if (!fmt) {
-		pr_err("invalid pipe format %d\n", req->src.format);
+		pr_err("invalid pipe format %d\n", src_format);
 		return -EINVAL;
 	}
 
@@ -234,31 +275,34 @@
 	if (ret)
 		return ret;
 
-	pipe = mdss_mdp_mixer_stage_pipe(mfd->ctl, mixer_mux, req->z_order);
+	pipe = mdss_mdp_mixer_stage_pipe(mdp5_data->ctl, mixer_mux,
+					req->z_order);
 	if (pipe && pipe->ndx != req->id) {
-		pr_err("stage %d taken by pnum=%d\n", req->z_order, pipe->num);
-		return -EBUSY;
+		pr_debug("replacing pnum=%d at stage=%d mux=%d\n",
+				pipe->num, req->z_order, mixer_mux);
+		pipe->params_changed = true;
 	}
 
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
+	if (!mixer) {
+		pr_err("unable to get mixer\n");
+		return -ENODEV;
+	}
 
 	if (req->id == MSMFB_NEW_REQUEST) {
-		mixer = mdss_mdp_mixer_get(mfd->ctl, mixer_mux);
-		if (!mixer) {
-			pr_err("unable to get mixer\n");
-			return -ENODEV;
-		}
-
-		if (fmt->is_yuv || (req->flags & MDP_OV_PIPE_SHARE))
+		if (req->flags & MDP_OV_PIPE_FORCE_DMA)
+			pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
+		else if (fmt->is_yuv || (req->flags & MDP_OV_PIPE_SHARE))
 			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
 		else
 			pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
 
-		pipe = mdss_mdp_pipe_alloc_locked(pipe_type);
+		pipe = mdss_mdp_pipe_alloc(mixer, pipe_type);
 
 		/* VIG pipes can also support RGB format */
 		if (!pipe && pipe_type == MDSS_MDP_PIPE_TYPE_RGB) {
 			pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
-			pipe = mdss_mdp_pipe_alloc_locked(pipe_type);
+			pipe = mdss_mdp_pipe_alloc(mixer, pipe_type);
 		}
 
 		if (pipe == NULL) {
@@ -266,18 +310,44 @@
 			return -ENOMEM;
 		}
 
+		ret = mdss_mdp_pipe_map(pipe);
+		if (ret) {
+			pr_err("unable to map pipe=%d\n", pipe->num);
+			return ret;
+		}
+
+		mutex_lock(&mfd->lock);
+		list_add(&pipe->used_list, &mdp5_data->pipes_used);
+		mutex_unlock(&mfd->lock);
 		pipe->mixer = mixer;
 		pipe->mfd = mfd;
+		pipe->play_cnt = 0;
 	} else {
-		pipe = mdss_mdp_pipe_get_locked(req->id);
-		if (pipe == NULL) {
+		pipe = mdss_mdp_pipe_get(mdp5_data->mdata, req->id);
+		if (IS_ERR_OR_NULL(pipe)) {
 			pr_err("invalid pipe ndx=%x\n", req->id);
-			return -ENODEV;
+			return pipe ? PTR_ERR(pipe) : -ENODEV;
+		}
+
+		if (pipe->mixer != mixer) {
+			if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
+				pr_err("Can't switch mixer %d->%d pnum %d!\n",
+						pipe->mixer->num, mixer->num,
+						pipe->num);
+				mdss_mdp_pipe_unmap(pipe);
+				return -EINVAL;
+			}
+			pr_debug("switching pipe mixer %d->%d pnum %d\n",
+					pipe->mixer->num, mixer->num,
+					pipe->num);
+			mdss_mdp_mixer_pipe_unstage(pipe);
+			pipe->mixer = mixer;
 		}
 	}
 
 	pipe->flags = req->flags;
-
+	pipe->bwc_mode = pipe->mixer->rotator_mode ?
+		0 : (req->flags & MDP_BWC_EN ? 1 : 0);
 	pipe->img_width = req->src.width & 0x3fff;
 	pipe->img_height = req->src.height & 0x3fff;
 	pipe->src.x = req->src_rect.x;
@@ -295,16 +365,48 @@
 	pipe->is_fg = req->is_fg;
 	pipe->alpha = req->alpha;
 	pipe->transp = req->transp_mask;
+	pipe->overfetch_disable = fmt->is_yuv;
 
 	pipe->req_data = *req;
 
+	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+		memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
+					sizeof(struct mdp_overlay_pp_params));
+		len = pipe->pp_cfg.igc_cfg.len;
+		if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG) &&
+						(len == IGC_LUT_ENTRIES)) {
+			ret = copy_from_user(pipe->pp_res.igc_c0_c1,
+					pipe->pp_cfg.igc_cfg.c0_c1_data,
+					sizeof(uint32_t) * len);
+			if (ret)
+				return -ENOMEM;
+			ret = copy_from_user(pipe->pp_res.igc_c2,
+					pipe->pp_cfg.igc_cfg.c2_data,
+					sizeof(uint32_t) * len);
+			if (ret)
+				return -ENOMEM;
+			pipe->pp_cfg.igc_cfg.c0_c1_data =
+							pipe->pp_res.igc_c0_c1;
+			pipe->pp_cfg.igc_cfg.c2_data = pipe->pp_res.igc_c2;
+		}
+	}
+
+	if (pipe->flags & MDP_DEINTERLACE) {
+		if (pipe->flags & MDP_SOURCE_ROTATED_90) {
+			pipe->src.w /= 2;
+			pipe->img_width /= 2;
+		} else {
+			pipe->src.h /= 2;
+		}
+	}
+
 	pipe->params_changed++;
 
 	req->id = pipe->ndx;
 
 	*ppipe = pipe;
 
-	mdss_mdp_pipe_unlock(pipe);
+	mdss_mdp_pipe_unmap(pipe);
 
 	return ret;
 }
@@ -312,10 +414,22 @@
 static int mdss_mdp_overlay_set(struct msm_fb_data_type *mfd,
 				struct mdp_overlay *req)
 {
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	int ret;
 
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (!mfd->panel_power_on) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		return -EPERM;
+	}
+
 	if (req->flags & MDSS_MDP_ROT_ONLY) {
 		ret = mdss_mdp_overlay_rotator_setup(mfd, req);
+	} else if (req->src.format == MDP_RGB_BORDERFILL) {
+		req->id = BORDERFILL_NDX;
 	} else {
 		struct mdss_mdp_pipe *pipe;
 
@@ -327,117 +441,446 @@
 		req->z_order -= MDSS_MDP_STAGE_0;
 	}
 
+	mutex_unlock(&mdp5_data->ov_lock);
+
 	return ret;
 }
 
-static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+static inline int mdss_mdp_overlay_get_buf(struct msm_fb_data_type *mfd,
+					   struct mdss_mdp_data *data,
+					   struct msmfb_data *planes,
+					   int num_planes,
+					   u32 flags)
 {
+	int i, rc = 0;
+
+	if ((num_planes <= 0) || (num_planes > MAX_PLANES))
+		return -EINVAL;
+
+	memset(data, 0, sizeof(*data));
+	for (i = 0; i < num_planes; i++) {
+		data->p[i].flags = flags;
+		rc = mdss_mdp_get_img(&planes[i], &data->p[i]);
+		if (rc) {
+			pr_err("failed to map buf p=%d flags=%x\n", i, flags);
+			while (i > 0) {
+				i--;
+				mdss_mdp_put_img(&data->p[i]);
+			}
+			break;
+		}
+	}
+
+	data->num_planes = i;
+
+	return rc;
+}
+
+static inline int mdss_mdp_overlay_free_buf(struct mdss_mdp_data *data)
+{
+	int i;
+	for (i = 0; i < data->num_planes && data->p[i].len; i++)
+		mdss_mdp_put_img(&data->p[i]);
+
+	data->num_planes = 0;
+
+	return 0;
+}
+
+static int mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_pipe *pipe, *tmp;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	LIST_HEAD(destroy_pipes);
+
+	mutex_lock(&mfd->lock);
+	list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup,
+				cleanup_list) {
+		list_move(&pipe->cleanup_list, &destroy_pipes);
+		mdss_mdp_overlay_free_buf(&pipe->back_buf);
+		mdss_mdp_overlay_free_buf(&pipe->front_buf);
+	}
+
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, used_list) {
+		if (pipe->back_buf.num_planes) {
+			/* make back buffer active */
+			mdss_mdp_overlay_free_buf(&pipe->front_buf);
+			swap(pipe->back_buf, pipe->front_buf);
+		}
+	}
+	mutex_unlock(&mfd->lock);
+	list_for_each_entry_safe(pipe, tmp, &destroy_pipes, cleanup_list)
+		mdss_mdp_pipe_destroy(pipe);
+
+	return 0;
+}
+
+int mdss_mdp_copy_splash_screen(struct mdss_panel_data *pdata)
+{
+	void *virt = NULL;
+	unsigned long bl_fb_addr = 0;
+	unsigned long *bl_fb_addr_va;
+	unsigned long  pipe_addr, pipe_src_size;
+	u32 height, width, rgb_size, bpp;
+	size_t size;
+	static struct ion_handle *ihdl;
+	struct ion_client *iclient = mdss_get_ionclient();
+	static ion_phys_addr_t phys;
+
+	pipe_addr = MDSS_MDP_REG_SSPP_OFFSET(3) +
+		MDSS_MDP_REG_SSPP_SRC0_ADDR;
+	pipe_src_size =
+		MDSS_MDP_REG_SSPP_OFFSET(3) + MDSS_MDP_REG_SSPP_SRC_SIZE;
+
+	bpp        = 3;
+	rgb_size   = MDSS_MDP_REG_READ(pipe_src_size);
+	bl_fb_addr = MDSS_MDP_REG_READ(pipe_addr);
+
+	height = (rgb_size >> 16) & 0xffff;
+	width  = rgb_size & 0xffff;
+	size = PAGE_ALIGN(height * width * bpp);
+	pr_debug("%s:%d splash_height=%d splash_width=%d Buffer size=%zu\n",
+			__func__, __LINE__, height, width, size);
+
+	ihdl = ion_alloc(iclient, size, SZ_1M,
+			ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(ihdl)) {
+		pr_err("unable to alloc fbmem from ion (%p)\n", ihdl);
+		return -ENOMEM;
+	}
+
+	pdata->panel_info.splash_ihdl = ihdl;
+
+	virt = ion_map_kernel(iclient, ihdl);
+	ion_phys(iclient, ihdl, &phys, &size);
+
+	pr_debug("%s %d Allocating %zu bytes at 0x%lx (%pa phys)\n",
+			__func__, __LINE__, size,
+			(unsigned long int)virt, &phys);
+
+	bl_fb_addr_va = (unsigned long *)ioremap(bl_fb_addr, size);
+
+	memcpy(virt, bl_fb_addr_va, size);
+
+	MDSS_MDP_REG_WRITE(pipe_addr, phys);
+	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_CTL_FLUSH + MDSS_MDP_REG_CTL_OFFSET(0),
+			0x48);
+
+	return 0;
+}
+
+int mdss_mdp_reconfigure_splash_done(struct mdss_mdp_ctl *ctl)
+{
+	struct ion_client *iclient = mdss_get_ionclient();
+	struct mdss_panel_data *pdata;
+	int ret = 0, off;
+	int mdss_mdp_rev = MDSS_MDP_REG_READ(MDSS_MDP_REG_HW_VERSION);
+	int mdss_v2_intf_off = 0;
+
+	off = 0;
+
+	pdata = ctl->panel_data;
+
+	pdata->panel_info.cont_splash_enabled = 0;
+
+	ion_free(iclient, pdata->panel_info.splash_ihdl);
+
+	mdss_mdp_ctl_write(ctl, 0, MDSS_MDP_LM_BORDER_COLOR);
+	off = MDSS_MDP_REG_INTF_OFFSET(ctl->intf_num);
+
+	if (mdss_mdp_rev == MDSS_MDP_HW_REV_102)
+		mdss_v2_intf_off =  0xEC00;
+
+	/* wait for 1 VSYNC for the pipe to be unstaged */
+	msleep(20);
+	MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_INTF_TIMING_ENGINE_EN -
+			mdss_v2_intf_off, 0);
+	ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_FINISH,
+			NULL);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	mdss_mdp_footswitch_ctrl_splash(0);
+	return ret;
+}
+
+static int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (mdp5_data->ctl->power_on)
+		return 0;
+
+	pr_debug("starting fb%d overlay\n", mfd->index);
+
+	rc = pm_runtime_get_sync(&mfd->pdev->dev);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("unable to resume with pm_runtime_get_sync rc=%d\n", rc);
+		return rc;
+	}
+
+	if (mfd->panel_info->cont_splash_enabled)
+		mdss_mdp_reconfigure_splash_done(mdp5_data->ctl);
+
+	if (!is_mdss_iommu_attached()) {
+		mdss_iommu_attach(mdss_res);
+		mdss_hw_init(mdss_res);
+	}
+
+	rc = mdss_mdp_ctl_start(mdp5_data->ctl);
+	if (rc == 0) {
+		atomic_inc(&ov_active_panels);
+	} else {
+		pr_err("overlay start failed.\n");
+		mdss_mdp_ctl_destroy(mdp5_data->ctl);
+		mdp5_data->ctl = NULL;
+
+		pm_runtime_put(&mfd->pdev->dev);
+	}
+
+	return rc;
+}
+
+int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct mdss_mdp_pipe *pipe;
-	struct mdss_mdp_pipe *cleanup_pipes[MDSS_MDP_MAX_SSPP];
-	int i, ret = 0, clean_cnt = 0;
-	u32 pipe_ndx, unset_ndx = 0;
+	int ret;
 
-	if (!mfd || !mfd->ctl)
-		return -ENODEV;
-
-	pr_debug("unset ndx=%x\n", ndx);
-
-	if (ndx & MDSS_MDP_ROT_SESSION_MASK) {
-		struct mdss_mdp_rotator_session *rot;
-		rot = mdss_mdp_rotator_session_get(ndx);
-		if (rot) {
-			mdss_mdp_rotator_finish(rot);
+	mutex_lock(&mdp5_data->ov_lock);
+	mutex_lock(&mfd->lock);
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, used_list) {
+		struct mdss_mdp_data *buf;
+		if (pipe->back_buf.num_planes) {
+			buf = &pipe->back_buf;
+		} else if (!pipe->params_changed) {
+			continue;
+		} else if (pipe->front_buf.num_planes) {
+			buf = &pipe->front_buf;
 		} else {
-			pr_warn("unknown session id=%x\n", ndx);
-			ret = -ENODEV;
+			pr_warn("pipe queue without buffer\n");
+			buf = NULL;
 		}
 
-		return ret;
+		ret = mdss_mdp_pipe_queue_data(pipe, buf);
+		if (IS_ERR_VALUE(ret)) {
+			pr_warn("Unable to queue data for pnum=%d\n",
+					pipe->num);
+			mdss_mdp_overlay_free_buf(buf);
+		}
 	}
 
+	if (mfd->panel.type == WRITEBACK_PANEL)
+		ret = mdss_mdp_wb_kickoff(mfd);
+	else
+		ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL);
+
+	mutex_unlock(&mfd->lock);
+
+	if (IS_ERR_VALUE(ret))
+		goto commit_fail;
+
+	ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
+
+	complete(&mfd->update.comp);
+	mutex_lock(&mfd->no_update.lock);
+	if (mfd->no_update.timer.function)
+		del_timer(&(mfd->no_update.timer));
+
+	mfd->no_update.timer.expires = jiffies + (2 * HZ);
+	add_timer(&mfd->no_update.timer);
+	mutex_unlock(&mfd->no_update.lock);
+
+commit_fail:
+	ret = mdss_mdp_overlay_cleanup(mfd);
+
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	return ret;
+}
+
+static int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 pipe_ndx, unset_ndx = 0;
+	int i;
+
 	for (i = 0; unset_ndx != ndx && i < MDSS_MDP_MAX_SSPP; i++) {
 		pipe_ndx = BIT(i);
 		if (pipe_ndx & ndx) {
 			unset_ndx |= pipe_ndx;
-			pipe = mdss_mdp_pipe_get_locked(pipe_ndx);
-			if (pipe) {
-				mdss_mdp_mixer_pipe_unstage(pipe);
-				cleanup_pipes[clean_cnt++] = pipe;
-			} else {
+			pipe = mdss_mdp_pipe_get(mdp5_data->mdata, pipe_ndx);
+			if (IS_ERR_OR_NULL(pipe)) {
 				pr_warn("unknown pipe ndx=%x\n", pipe_ndx);
+				continue;
 			}
+			mutex_lock(&mfd->lock);
+			list_del(&pipe->used_list);
+			list_add(&pipe->cleanup_list,
+				&mdp5_data->pipes_cleanup);
+			mutex_unlock(&mfd->lock);
+			mdss_mdp_mixer_pipe_unstage(pipe);
+			mdss_mdp_pipe_unmap(pipe);
 		}
 	}
+	return 0;
+}
 
-	if (clean_cnt) {
-		ret = mfd->kickoff_fnc(mfd->ctl);
+static int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
+{
+	int ret = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 
-		for (i = 0; i < clean_cnt; i++)
-			mdss_mdp_pipe_destroy(cleanup_pipes[i]);
+	if (!mfd || !mdp5_data->ctl)
+		return -ENODEV;
+
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (ndx == BORDERFILL_NDX) {
+		pr_debug("borderfill disable\n");
+		mdp5_data->borderfill_enable = false;
+		mutex_unlock(&mdp5_data->ov_lock);
+		return 0;
+	}
 
+	if (!mfd->panel_power_on) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		return -EPERM;
+	}
+
+	pr_debug("unset ndx=%x\n", ndx);
+
+	if (ndx & MDSS_MDP_ROT_SESSION_MASK)
+		ret = mdss_mdp_rotator_release(ndx);
+	else
+		ret = mdss_mdp_overlay_release(mfd, ndx);
+
+	mutex_unlock(&mdp5_data->ov_lock);
+
 	return ret;
 }
 
+static int mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	u32 unset_ndx = 0;
+	int cnt = 0;
+
+	mutex_lock(&mdp5_data->ov_lock);
+	mutex_lock(&mfd->lock);
+	list_for_each_entry(pipe, &mdp5_data->pipes_used, used_list) {
+		unset_ndx |= pipe->ndx;
+		cnt++;
+	}
+
+	if (cnt == 0 && !list_empty(&mdp5_data->pipes_cleanup)) {
+		pr_debug("overlay release on fb%d called without commit!",
+			mfd->index);
+		cnt++;
+	}
+
+	mutex_unlock(&mfd->lock);
+
+	if (unset_ndx) {
+		pr_debug("%d pipes need cleanup (%x)\n", cnt, unset_ndx);
+		mdss_mdp_overlay_release(mfd, unset_ndx);
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	if (cnt)
+		mfd->mdp.kickoff_fnc(mfd);
+
+	return 0;
+}
+
 static int mdss_mdp_overlay_play_wait(struct msm_fb_data_type *mfd,
 				      struct msmfb_overlay_data *req)
 {
-	int ret;
+	int ret = 0;
 
-	if (!mfd || !mfd->ctl)
+	if (!mfd)
 		return -ENODEV;
 
-	ret = mfd->kickoff_fnc(mfd->ctl);
+	ret = mfd->mdp.kickoff_fnc(mfd);
 	if (!ret)
 		pr_err("error displaying\n");
 
 	return ret;
 }
 
-static int mdss_mdp_overlay_rotate(struct msmfb_overlay_data *req,
-				   struct mdss_mdp_data *src_data,
-				   struct mdss_mdp_data *dst_data)
+static int mdss_mdp_overlay_rotate(struct msm_fb_data_type *mfd,
+				   struct msmfb_overlay_data *req)
 {
 	struct mdss_mdp_rotator_session *rot;
+	struct mdss_mdp_data src_data, dst_data;
 	int ret;
+	u32 flgs;
 
 	rot = mdss_mdp_rotator_session_get(req->id);
 	if (!rot) {
 		pr_err("invalid session id=%x\n", req->id);
-		return -ENODEV;
+		return -ENOENT;
 	}
 
-	ret = mdss_mdp_rotator_queue(rot, src_data, dst_data);
+	flgs = rot->flags & MDP_SECURE_OVERLAY_SESSION;
+
+	ret = mdss_mdp_overlay_get_buf(mfd, &src_data, &req->data, 1, flgs);
 	if (ret) {
-		pr_err("rotator queue error session id=%x\n", req->id);
+		pr_err("src_data pmem error\n");
 		return ret;
 	}
 
-	return 0;
+	ret = mdss_mdp_overlay_get_buf(mfd, &dst_data, &req->dst_data, 1, flgs);
+	if (ret) {
+		pr_err("dst_data pmem error\n");
+		goto dst_buf_fail;
+	}
+
+	ret = mdss_mdp_rotator_queue(rot, &src_data, &dst_data);
+	if (ret)
+		pr_err("rotator queue error session id=%x\n", req->id);
+
+	mdss_mdp_overlay_free_buf(&dst_data);
+dst_buf_fail:
+	mdss_mdp_overlay_free_buf(&src_data);
+
+	return ret;
 }
 
-static int mdss_mdp_overlay_queue(struct msmfb_overlay_data *req,
-				  struct mdss_mdp_data *src_data)
+static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
+				  struct msmfb_overlay_data *req)
 {
 	struct mdss_mdp_pipe *pipe;
-	struct mdss_mdp_ctl *ctl;
+	struct mdss_mdp_data *src_data;
 	int ret;
+	u32 flags;
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
 
-	pipe = mdss_mdp_pipe_get_locked(req->id);
-	if (pipe == NULL) {
+	pipe = mdss_mdp_pipe_get(mdata, req->id);
+	if (IS_ERR_OR_NULL(pipe)) {
 		pr_err("pipe ndx=%x doesn't exist\n", req->id);
-		return -ENODEV;
+		return pipe ? PTR_ERR(pipe) : -ENODEV;
 	}
 
 	pr_debug("ov queue pnum=%d\n", pipe->num);
 
-	ret = mdss_mdp_pipe_queue_data(pipe, src_data);
-	ctl = pipe->mixer->ctl;
-	mdss_mdp_pipe_unlock(pipe);
+	flags = (pipe->flags & MDP_SECURE_OVERLAY_SESSION);
 
-	if (ret == 0 && !(pipe->flags & MDP_OV_PLAY_NOWAIT))
-		ret = ctl->mfd->kickoff_fnc(ctl);
+	src_data = &pipe->back_buf;
+	if (src_data->num_planes) {
+		pr_warn("dropped buffer pnum=%d play=%d addr=0x%x\n",
+			pipe->num, pipe->play_cnt, src_data->p[0].addr);
+		mdss_mdp_overlay_free_buf(src_data);
+	}
 
+	ret = mdss_mdp_overlay_get_buf(mfd, src_data, &req->data, 1, flags);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("src_data pmem error\n");
+	}
+	mdss_mdp_pipe_unmap(pipe);
 
 	return ret;
 }
@@ -445,78 +888,112 @@
 static int mdss_mdp_overlay_play(struct msm_fb_data_type *mfd,
 				 struct msmfb_overlay_data *req)
 {
-	struct mdss_mdp_data src_data;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	int ret = 0;
 
-	if (mfd == NULL)
-		return -ENODEV;
-
 	pr_debug("play req id=%x\n", req->id);
 
-	memset(&src_data, 0, sizeof(src_data));
-	mdss_mdp_get_img(mfd->iclient, &req->data, &src_data.p[0]);
-	if (src_data.p[0].len == 0) {
-		pr_err("src data pmem error\n");
-		return -ENOMEM;
+	ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
+	if (ret)
+		return ret;
+
+	if (!mfd->panel_power_on) {
+		mutex_unlock(&mdp5_data->ov_lock);
+		return -EPERM;
 	}
-	src_data.num_planes = 1;
+
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		mutex_unlock(&mdp5_data->ov_lock);
+		return ret;
+	}
 
 	if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
-		struct mdss_mdp_data dst_data;
-		memset(&dst_data, 0, sizeof(dst_data));
-
-		mdss_mdp_get_img(mfd->iclient, &req->dst_data, &dst_data.p[0]);
-		if (dst_data.p[0].len == 0) {
-			pr_err("dst data pmem error\n");
-			return -ENOMEM;
-		}
-		dst_data.num_planes = 1;
-
-		ret = mdss_mdp_overlay_rotate(req, &src_data, &dst_data);
-
-		mdss_mdp_put_img(&dst_data.p[0]);
+		ret = mdss_mdp_overlay_rotate(mfd, req);
+	} else if (req->id == BORDERFILL_NDX) {
+		pr_debug("borderfill enable\n");
+		mdp5_data->borderfill_enable = true;
+		ret = mdss_mdp_overlay_free_fb_pipe(mfd);
 	} else {
-		ret = mdss_mdp_overlay_queue(req, &src_data);
+		ret = mdss_mdp_overlay_queue(mfd, req);
 	}
 
-	mdss_mdp_put_img(&src_data.p[0]);
+	mutex_unlock(&mdp5_data->ov_lock);
 
 	return ret;
 }
 
+static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd)
+{
+	struct mdss_mdp_pipe *pipe;
+	u32 fb_ndx = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	pipe = mdss_mdp_mixer_stage_pipe(mdp5_data->ctl,
+					MDSS_MDP_MIXER_MUX_LEFT,
+					 MDSS_MDP_STAGE_BASE);
+	if (pipe)
+		fb_ndx |= pipe->ndx;
+
+	pipe = mdss_mdp_mixer_stage_pipe(mdp5_data->ctl,
+					MDSS_MDP_MIXER_MUX_RIGHT,
+					 MDSS_MDP_STAGE_BASE);
+	if (pipe)
+		fb_ndx |= pipe->ndx;
+
+	if (fb_ndx) {
+		pr_debug("unstaging framebuffer pipes %x\n", fb_ndx);
+		mdss_mdp_overlay_release(mfd, fb_ndx);
+	}
+	return 0;
+}
+
 static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
 					struct mdss_mdp_pipe **ppipe,
 					int mixer_mux)
 {
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct mdss_mdp_pipe *pipe;
 
-	pipe = mdss_mdp_mixer_stage_pipe(mfd->ctl, mixer_mux,
+	pipe = mdss_mdp_mixer_stage_pipe(mdp5_data->ctl, mixer_mux,
 					 MDSS_MDP_STAGE_BASE);
 	if (pipe == NULL) {
 		struct mdp_overlay req;
-		int ret;
+		struct fb_info *fbi = mfd->fbi;
+		struct mdss_mdp_mixer *mixer;
+		int ret, bpp;
+
+		mixer = mdss_mdp_mixer_get(mdp5_data->ctl,
+					MDSS_MDP_MIXER_MUX_LEFT);
+		if (!mixer) {
+			pr_err("unable to retrieve mixer\n");
+			return -ENODEV;
+		}
 
 		memset(&req, 0, sizeof(req));
 
+		bpp = fbi->var.bits_per_pixel / 8;
 		req.id = MSMFB_NEW_REQUEST;
 		req.src.format = mfd->fb_imgType;
-		req.src.height = mfd->fbi->var.yres;
-		req.src.width = mfd->fbi->var.xres;
+		req.src.height = fbi->var.yres;
+		req.src.width = fbi->fix.line_length / bpp;
 		if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
-			if (req.src.width <= MAX_MIXER_WIDTH)
-				return -ENODEV;
+			if (req.src.width <= mixer->width) {
+				pr_warn("right fb pipe not needed\n");
+				return -EINVAL;
+			}
 
 			req.flags |= MDSS_MDP_RIGHT_MIXER;
-			req.src_rect.x = MAX_MIXER_WIDTH;
-			req.src_rect.w = req.src.width - MAX_MIXER_WIDTH;
+			req.src_rect.x = mixer->width;
+			req.src_rect.w = fbi->var.xres - mixer->width;
 		} else {
 			req.src_rect.x = 0;
-			req.src_rect.w = MIN(req.src.width, MAX_MIXER_WIDTH);
+			req.src_rect.w = MIN(fbi->var.xres, mixer->width);
 		}
 
 		req.src_rect.y = 0;
 		req.src_rect.h = req.src.height;
-		req.dst_rect.x = req.src_rect.x;
+		req.dst_rect.x = 0;
 		req.dst_rect.y = 0;
 		req.dst_rect.w = req.src_rect.w;
 		req.dst_rect.h = req.src_rect.h;
@@ -528,7 +1005,7 @@
 		if (ret)
 			return ret;
 
-		pr_debug("ctl=%d pnum=%d\n", mfd->ctl->num, pipe->num);
+		pr_debug("ctl=%d pnum=%d\n", mdp5_data->ctl->num, pipe->num);
 	}
 
 	*ppipe = pipe;
@@ -540,19 +1017,25 @@
 	struct mdss_mdp_data data;
 	struct mdss_mdp_pipe *pipe;
 	struct fb_info *fbi;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	u32 offset;
 	int bpp, ret;
 
-	if (!mfd)
-		return;
-
-	if (!mfd->ctl || !mfd->panel_power_on)
+	if (!mfd || !mdp5_data->ctl)
 		return;
 
 	fbi = mfd->fbi;
 
-	if (fbi->fix.smem_len == 0) {
-		pr_warn("fb memory not allocated\n");
+	if (fbi->fix.smem_len == 0 || mdp5_data->borderfill_enable) {
+		mfd->mdp.kickoff_fnc(mfd);
+		return;
+	}
+
+	if (mutex_lock_interruptible(&mdp5_data->ov_lock))
+		return;
+
+	if (!mfd->panel_power_on) {
+		mutex_unlock(&mdp5_data->ov_lock);
 		return;
 	}
 
@@ -562,59 +1045,219 @@
 	offset = fbi->var.xoffset * bpp +
 		 fbi->var.yoffset * fbi->fix.line_length;
 
-	data.p[0].addr = fbi->fix.smem_start + offset;
+	if (offset > fbi->fix.smem_len) {
+		pr_err("invalid fb offset=%u total length=%u\n",
+		       offset, fbi->fix.smem_len);
+		goto pan_display_error;
+	}
+
+	ret = mdss_mdp_overlay_start(mfd);
+	if (ret) {
+		pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
+		goto pan_display_error;
+	}
+
+	if (is_mdss_iommu_attached())
+		data.p[0].addr = mfd->iova;
+	else
+		data.p[0].addr = fbi->fix.smem_start;
+
+	data.p[0].addr += offset;
 	data.p[0].len = fbi->fix.smem_len - offset;
 	data.num_planes = 1;
 
 	ret = mdss_mdp_overlay_get_fb_pipe(mfd, &pipe, MDSS_MDP_MIXER_MUX_LEFT);
 	if (ret) {
 		pr_err("unable to allocate base pipe\n");
-		return;
+		goto pan_display_error;
 	}
 
-	mdss_mdp_pipe_lock(pipe);
+	if (mdss_mdp_pipe_map(pipe)) {
+		pr_err("unable to map base pipe\n");
+		goto pan_display_error;
+	}
 	ret = mdss_mdp_pipe_queue_data(pipe, &data);
-	mdss_mdp_pipe_unlock(pipe);
+	mdss_mdp_pipe_unmap(pipe);
 	if (ret) {
 		pr_err("unable to queue data\n");
-		return;
+		goto pan_display_error;
 	}
 
-	if (fbi->var.xres > MAX_MIXER_WIDTH) {
+	if (fbi->var.xres > MAX_MIXER_WIDTH || mfd->split_display) {
 		ret = mdss_mdp_overlay_get_fb_pipe(mfd, &pipe,
 						   MDSS_MDP_MIXER_MUX_RIGHT);
 		if (ret) {
 			pr_err("unable to allocate right base pipe\n");
-			return;
+			goto pan_display_error;
 		}
-		mdss_mdp_pipe_lock(pipe);
+		if (mdss_mdp_pipe_map(pipe)) {
+			pr_err("unable to map right base pipe\n");
+			goto pan_display_error;
+		}
 		ret = mdss_mdp_pipe_queue_data(pipe, &data);
-		mdss_mdp_pipe_unlock(pipe);
+		mdss_mdp_pipe_unmap(pipe);
 		if (ret) {
 			pr_err("unable to queue right data\n");
-			return;
+			goto pan_display_error;
+		}
+	}
+	mutex_unlock(&mdp5_data->ov_lock);
+
+	if ((fbi->var.activate & FB_ACTIVATE_VBL) ||
+	    (fbi->var.activate & FB_ACTIVATE_FORCE))
+		mfd->mdp.kickoff_fnc(mfd);
+
+	return;
+
+pan_display_error:
+	mutex_unlock(&mdp5_data->ov_lock);
+}
+
+/* called in IRQ context; keep processing to a minimum */
+static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
+						ktime_t t)
+{
+	struct msm_fb_data_type *mfd = ctl->mfd;
+	struct mdss_overlay_private *mdp5_data;
+
+	if (!mfd || !mfd->mdp.private1) {
+		pr_warn("Invalid handle for vsync\n");
+		return;
+	}
+
+	mdp5_data = mfd_to_mdp5_data(mfd);
+	pr_debug("vsync on fb%d play_cnt=%d\n", mfd->index, ctl->play_cnt);
+
+	spin_lock(&mdp5_data->vsync_lock);
+	mdp5_data->vsync_time = t;
+	complete(&mdp5_data->vsync_comp);
+	spin_unlock(&mdp5_data->vsync_lock);
+}
+
+int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	unsigned long flags;
+	int rc;
+
+	if (!ctl)
+		return -ENODEV;
+	if (!ctl->set_vsync_handler)
+		return -ENOTSUPP;
+
+	rc = mutex_lock_interruptible(&ctl->lock);
+	if (rc)
+		return rc;
+
+	if (!ctl->power_on) {
+		pr_debug("fb%d vsync pending first update en=%d\n",
+				mfd->index, en);
+		mdp5_data->vsync_pending = en;
+		mutex_unlock(&ctl->lock);
+		return 0;
+	}
+
+	pr_debug("fb%d vsync en=%d\n", mfd->index, en);
+
+	spin_lock_irqsave(&mdp5_data->vsync_lock, flags);
+	INIT_COMPLETION(mdp5_data->vsync_comp);
+	spin_unlock_irqrestore(&mdp5_data->vsync_lock, flags);
+
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+	if (en)
+		rc = ctl->set_vsync_handler(ctl, mdss_mdp_overlay_handle_vsync);
+	else
+		rc = ctl->set_vsync_handler(ctl, NULL);
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+	mutex_unlock(&ctl->lock);
+
+	return rc;
+}
+
+static ssize_t mdss_mdp_vsync_show_event(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct fb_info *fbi = dev_get_drvdata(dev);
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	unsigned long flags;
+	u64 vsync_ticks;
+	unsigned long timeout;
+	int ret;
+
+	if (!mdp5_data->ctl || !mdp5_data->ctl->power_on)
+		return 0;
+
+	timeout = msecs_to_jiffies(VSYNC_PERIOD * 5);
+	ret = wait_for_completion_interruptible_timeout(&mdp5_data->vsync_comp,
+			timeout);
+	if (ret <= 0) {
+		pr_debug("Sending current time as vsync timestamp for fb%d\n",
+				mfd->index);
+		mdp5_data->vsync_time = ktime_get();
+	}
+
+	spin_lock_irqsave(&mdp5_data->vsync_lock, flags);
+	vsync_ticks = ktime_to_ns(mdp5_data->vsync_time);
+	spin_unlock_irqrestore(&mdp5_data->vsync_lock, flags);
+
+	pr_debug("fb%d vsync=%llu", mfd->index, vsync_ticks);
+	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_ticks);
+
+	return ret;
+}
+
+static DEVICE_ATTR(vsync_event, S_IRUGO, mdss_mdp_vsync_show_event, NULL);
+
+static struct attribute *vsync_fs_attrs[] = {
+	&dev_attr_vsync_event.attr,
+	NULL,
+};
+
+static struct attribute_group vsync_fs_attr_group = {
+	.attrs = vsync_fs_attrs,
+};
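
The vsync_event node gives userspace a blocking read that returns the latest
vsync timestamp as "VSYNC=<ns>"; per the handler above, a read blocks for at
most five vsync periods before falling back to the current time. A minimal
reader sketch -- the fb0 path is an assumption and depends on which
framebuffer registered the attribute:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd = open("/sys/class/graphics/fb0/vsync_event", O_RDONLY);

		if (fd < 0) {
			perror("open vsync_event");
			return 1;
		}
		n = read(fd, buf, sizeof(buf) - 1);	/* blocks up to 5 vsync periods */
		if (n > 0) {
			buf[n] = '\0';
			printf("%s\n", buf);	/* e.g. "VSYNC=123456789" */
		}
		close(fd);
		return 0;
	}
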
+
+static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
+				     struct fb_cursor *cursor)
+{
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_mixer *mixer;
+	struct fb_image *img = &cursor->image;
+	u32 blendcfg;
+	int off, ret = 0;
+
+	if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
+		mfd->cursor_buf = dma_alloc_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
+					(dma_addr_t *) &mfd->cursor_buf_phys,
+					GFP_KERNEL);
+		if (!mfd->cursor_buf) {
+			pr_err("can't allocate cursor buffer\n");
+			return -ENOMEM;
+		}
+
+		ret = msm_iommu_map_contig_buffer(mfd->cursor_buf_phys,
+			mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE),
+			0, MDSS_MDP_CURSOR_SIZE, SZ_4K, 0,
+			&(mfd->cursor_buf_iova));
+		if (IS_ERR_VALUE(ret)) {
+			dma_free_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
+					  mfd->cursor_buf,
+					  (dma_addr_t) mfd->cursor_buf_phys);
+			pr_err("unable to map cursor buffer to iommu(%d)\n",
+			       ret);
+			return -ENOMEM;
 		}
 	}
 
-	if (fbi->var.activate & FB_ACTIVATE_VBL)
-		mfd->kickoff_fnc(mfd->ctl);
-}
-
-static int mdss_mdp_hw_cursor_update(struct fb_info *info,
-				     struct fb_cursor *cursor)
-{
-	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-	struct mdss_mdp_mixer *mixer;
-	struct fb_image *img = &cursor->image;
-	int calpha_en, transp_en, blendcfg, alpha;
-	int off, ret = 0;
-
-	mixer = mdss_mdp_mixer_get(mfd->ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+	mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
 	off = MDSS_MDP_REG_LM_OFFSET(mixer->num);
 
 	if ((img->width > MDSS_MDP_CURSOR_WIDTH) ||
-	    (img->height > MDSS_MDP_CURSOR_HEIGHT) ||
-	    (img->depth != 32))
+		(img->height > MDSS_MDP_CURSOR_HEIGHT) ||
+		(img->depth != 32))
 		return -EINVAL;
 
 	pr_debug("mixer=%d enable=%x set=%x\n", mixer->num, cursor->enable,
@@ -628,11 +1271,17 @@
 				   (img->dy << 16) | img->dx);
 
 	if (cursor->set & FB_CUR_SETIMAGE) {
+		int calpha_en, transp_en, alpha, size, cursor_addr;
 		ret = copy_from_user(mfd->cursor_buf, img->data,
 				     img->width * img->height * 4);
 		if (ret)
 			return ret;
 
+		if (is_mdss_iommu_attached())
+			cursor_addr = mfd->cursor_buf_iova;
+		else
+			cursor_addr = mfd->cursor_buf_phys;
+
 		if (img->bg_color == 0xffffffff)
 			transp_en = 0;
 		else
@@ -645,12 +1294,13 @@
 		else
 			calpha_en = 0x2; /* argb */
 
-		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_CURSOR_SIZE,
-				   (img->height << 16) | img->width);
+		size = (img->height << 16) | img->width;
+		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_CURSOR_IMG_SIZE, size);
+		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_CURSOR_SIZE, size);
 		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_CURSOR_STRIDE,
 				   img->width * 4);
 		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_CURSOR_BASE_ADDR,
-				   mfd->cursor_buf_phys);
+				   cursor_addr);
 
 		wmb();
 
@@ -682,10 +1332,13 @@
 	}
 
 	if (!cursor->enable != !(blendcfg & 0x1)) {
-		if (cursor->enable)
+		if (cursor->enable) {
+			pr_debug("enable hw cursor on mixer=%d\n", mixer->num);
 			blendcfg |= 0x1;
-		else
+		} else {
+			pr_debug("disable hw cursor on mixer=%d\n", mixer->num);
 			blendcfg &= ~0x1;
+		}
 
 		MDSS_MDP_REG_WRITE(off + MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
 				   blendcfg);
@@ -700,18 +1353,227 @@
 	return 0;
 }
 
-static int mdss_mdp_overlay_kickoff(struct mdss_mdp_ctl *ctl)
+static int mdss_bl_scale_config(struct msm_fb_data_type *mfd,
+						struct mdp_bl_scale_data *data)
 {
-	return mdss_mdp_display_commit(ctl, NULL);
+	int ret = 0;
+	int curr_bl;
+	mutex_lock(&mfd->lock);
+	curr_bl = mfd->bl_level;
+	mfd->bl_scale = data->scale;
+	mfd->bl_min_lvl = data->min_lvl;
+	pr_debug("update scale = %d, min_lvl = %d\n", mfd->bl_scale,
+							mfd->bl_min_lvl);
+
+	/* update current backlight to use new scaling*/
+	mdss_fb_set_backlight(mfd, curr_bl);
+	mutex_unlock(&mfd->lock);
+	return ret;
+}
+
+static int mdss_mdp_pp_ioctl(struct msm_fb_data_type *mfd,
+				void __user *argp)
+{
+	int ret;
+	struct msmfb_mdp_pp mdp_pp;
+	u32 copyback = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	ret = copy_from_user(&mdp_pp, argp, sizeof(mdp_pp));
+	if (ret)
+		return ret;
+
+	switch (mdp_pp.op) {
+	case mdp_op_pa_cfg:
+		ret = mdss_mdp_pa_config(mdp5_data->ctl,
+					&mdp_pp.data.pa_cfg_data,
+					&copyback);
+		break;
+
+	case mdp_op_pcc_cfg:
+		ret = mdss_mdp_pcc_config(mdp5_data->ctl,
+					&mdp_pp.data.pcc_cfg_data,
+					&copyback);
+		break;
+
+	case mdp_op_lut_cfg:
+		switch (mdp_pp.data.lut_cfg_data.lut_type) {
+		case mdp_lut_igc:
+			ret = mdss_mdp_igc_lut_config(
+					mdp5_data->ctl,
+					(struct mdp_igc_lut_data *)
+					&mdp_pp.data.lut_cfg_data.data,
+					&copyback);
+			break;
+
+		case mdp_lut_pgc:
+			ret = mdss_mdp_argc_config(
+				mdp5_data->ctl,
+				&mdp_pp.data.lut_cfg_data.data.pgc_lut_data,
+				&copyback);
+			break;
+
+		case mdp_lut_hist:
+			ret = mdss_mdp_hist_lut_config(
+				mdp5_data->ctl,
+				(struct mdp_hist_lut_data *)
+				&mdp_pp.data.lut_cfg_data.data, &copyback);
+			break;
+
+		default:
+			ret = -ENOTSUPP;
+			break;
+		}
+		break;
+	case mdp_op_dither_cfg:
+		ret = mdss_mdp_dither_config(
+				mdp5_data->ctl,
+				&mdp_pp.data.dither_cfg_data,
+				&copyback);
+		break;
+	case mdp_op_gamut_cfg:
+		ret = mdss_mdp_gamut_config(
+				mdp5_data->ctl,
+				&mdp_pp.data.gamut_cfg_data,
+				&copyback);
+		break;
+	case mdp_bl_scale_cfg:
+		ret = mdss_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
+						&mdp_pp.data.bl_scale_data);
+		break;
+	default:
+		pr_err("Unsupported request to MDP_PP IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	if ((ret == 0) && copyback)
+		ret = copy_to_user(argp, &mdp_pp, sizeof(struct msmfb_mdp_pp));
+	return ret;
+}
+
+static int mdss_mdp_histo_ioctl(struct msm_fb_data_type *mfd, u32 cmd,
+				void __user *argp)
+{
+	int ret = -ENOSYS;
+	struct mdp_histogram_data hist;
+	struct mdp_histogram_start_req hist_req;
+	u32 block, hist_data_addr = 0;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	switch (cmd) {
+	case MSMFB_HISTOGRAM_START:
+		if (!mfd->panel_power_on)
+			return -EPERM;
+
+		ret = copy_from_user(&hist_req, argp, sizeof(hist_req));
+		if (ret)
+			return ret;
+
+		ret = mdss_mdp_histogram_start(mdp5_data->ctl, &hist_req);
+		break;
+
+	case MSMFB_HISTOGRAM_STOP:
+		ret = copy_from_user(&block, argp, sizeof(int));
+		if (ret)
+			return ret;
+
+		ret = mdss_mdp_histogram_stop(mdp5_data->ctl, block);
+		break;
+
+	case MSMFB_HISTOGRAM:
+		if (!mfd->panel_power_on)
+			return -EPERM;
+
+		ret = copy_from_user(&hist, argp, sizeof(hist));
+		if (ret)
+			return ret;
+
+		ret = mdss_mdp_hist_collect(mdp5_data->ctl, &hist,
+					&hist_data_addr);
+		if ((ret == 0) && hist_data_addr) {
+			ret = copy_to_user(hist.c0, (u32 *)hist_data_addr,
+				sizeof(u32) * hist.bin_cnt);
+			if (ret == 0)
+				ret = copy_to_user(argp, &hist,
+						   sizeof(hist));
+		}
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int mdss_fb_set_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata)
+{
+	int ret = 0;
+	switch (metadata->op) {
+	case metadata_op_vic:
+		if (mfd->panel_info)
+			mfd->panel_info->vic =
+				metadata->data.video_info_code;
+		else
+			ret = -EINVAL;
+		break;
+	default:
+		pr_warn("unsupported request to MDP META IOCTL\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int mdss_fb_get_hw_caps(struct msm_fb_data_type *mfd,
+		struct mdss_hw_caps *caps)
+{
+	struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+	caps->mdp_rev = mdata->mdp_rev;
+	caps->vig_pipes = mdata->nvig_pipes;
+	caps->rgb_pipes = mdata->nrgb_pipes;
+	caps->dma_pipes = mdata->ndma_pipes;
+	return 0;
+}
+
+static int mdss_fb_get_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata)
+{
+	int ret = 0;
+	switch (metadata->op) {
+	case metadata_op_frame_rate:
+		metadata->data.panel_frame_rate =
+			mdss_get_panel_framerate(mfd);
+		break;
+	case metadata_op_get_caps:
+		ret = mdss_fb_get_hw_caps(mfd, &metadata->data.caps);
+		break;
+	default:
+		pr_warn("Unsupported request to MDP META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
 }
 
 static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
 					  u32 cmd, void __user *argp)
 {
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
 	struct mdp_overlay req;
 	int val, ret = -ENOSYS;
+	struct msmfb_metadata metadata;
 
 	switch (cmd) {
+	case MSMFB_MDP_PP:
+		ret = mdss_mdp_pp_ioctl(mfd, argp);
+		break;
+
+	case MSMFB_HISTOGRAM_START:
+	case MSMFB_HISTOGRAM_STOP:
+	case MSMFB_HISTOGRAM:
+		ret = mdss_mdp_histo_ioctl(mfd, cmd, argp);
+		break;
+
 	case MSMFB_OVERLAY_GET:
 		ret = copy_from_user(&req, argp, sizeof(req));
 		if (!ret) {
@@ -722,7 +1584,7 @@
 		}
 
 		if (ret) {
-			pr_err("OVERLAY_GET failed (%d)\n", ret);
+			pr_debug("OVERLAY_GET failed (%d)\n", ret);
 			ret = -EFAULT;
 		}
 		break;
@@ -736,7 +1598,7 @@
 				ret = copy_to_user(argp, &req, sizeof(req));
 		}
 		if (ret) {
-			pr_err("OVERLAY_SET failed (%d)\n", ret);
+			pr_debug("OVERLAY_SET failed (%d)\n", ret);
 			ret = -EFAULT;
 		}
 		break;
@@ -749,7 +1611,7 @@
 
 	case MSMFB_OVERLAY_PLAY_ENABLE:
 		if (!copy_from_user(&val, argp, sizeof(val))) {
-			mfd->overlay_play_enable = val;
+			mdp5_data->overlay_play_enable = val;
 		} else {
 			pr_err("OVERLAY_PLAY_ENABLE failed (%d)\n", ret);
 			ret = -EFAULT;
@@ -757,7 +1619,7 @@
 		break;
 
 	case MSMFB_OVERLAY_PLAY:
-		if (mfd->overlay_play_enable) {
+		if (mdp5_data->overlay_play_enable) {
 			struct msmfb_overlay_data data;
 
 			ret = copy_from_user(&data, argp, sizeof(data));
@@ -768,7 +1630,7 @@
 			}
 
 			if (ret) {
-				pr_err("OVERLAY_PLAY failed (%d)\n", ret);
+				pr_debug("OVERLAY_PLAY failed (%d)\n", ret);
 				ret = -EFAULT;
 			}
 		} else {
@@ -777,7 +1639,7 @@
 		break;
 
 	case MSMFB_OVERLAY_PLAY_WAIT:
-		if (mfd->overlay_play_enable) {
+		if (mdp5_data->overlay_play_enable) {
 			struct msmfb_overlay_data data;
 
 			ret = copy_from_user(&data, argp, sizeof(data));
@@ -793,8 +1655,36 @@
 		}
 		break;
 
+	case MSMFB_VSYNC_CTRL:
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+		if (!copy_from_user(&val, argp, sizeof(val))) {
+			ret = mdss_mdp_overlay_vsync_ctrl(mfd, val);
+		} else {
+			pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed (%d)\n", ret);
+			ret = -EFAULT;
+		}
+		break;
+	case MSMFB_OVERLAY_COMMIT:
+		mdss_fb_wait_for_fence(mfd);
+		ret = mfd->mdp.kickoff_fnc(mfd);
+		mdss_fb_signal_timeline(mfd);
+		break;
+	case MSMFB_METADATA_SET:
+		ret = copy_from_user(&metadata, argp, sizeof(metadata));
+		if (ret)
+			return ret;
+		ret = mdss_fb_set_metadata(mfd, &metadata);
+		break;
+	case MSMFB_METADATA_GET:
+		ret = copy_from_user(&metadata, argp, sizeof(metadata));
+		if (ret)
+			return ret;
+		ret = mdss_fb_get_metadata(mfd, &metadata);
+		if (!ret)
+			ret = copy_to_user(argp, &metadata, sizeof(metadata));
+		break;
 	default:
-		if (mfd->panel_info.type == WRITEBACK_PANEL)
+		if (mfd->panel.type == WRITEBACK_PANEL)
 			ret = mdss_mdp_wb_ioctl_handler(mfd, cmd, argp);
 		break;
 	}
@@ -802,22 +1692,170 @@
 	return ret;
 }
 
+static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (!mdp5_data->ctl) {
+		struct mdss_mdp_ctl *ctl;
+		struct mdss_panel_data *pdata;
+
+		pdata = dev_get_platdata(&mfd->pdev->dev);
+		if (!pdata) {
+			pr_err("no panel connected for fb%d\n", mfd->index);
+			return -ENODEV;
+		}
+
+		ctl = mdss_mdp_ctl_init(pdata, mfd);
+		if (IS_ERR_OR_NULL(ctl)) {
+			pr_err("Unable to initialize ctl for fb%d\n",
+				mfd->index);
+			return ctl ? PTR_ERR(ctl) : -ENODEV;
+		}
+
+		if (mfd->split_display && pdata->next) {
+			/* enable split display */
+			rc = mdss_mdp_ctl_split_display_setup(ctl, pdata->next);
+			if (rc) {
+				mdss_mdp_ctl_destroy(ctl);
+				return rc;
+			}
+		}
+		mdp5_data->ctl = ctl;
+	}
+
+	if (!mfd->panel_info->cont_splash_enabled) {
+		rc = mdss_mdp_overlay_start(mfd);
+		if (!IS_ERR_VALUE(rc) && (mfd->panel_info->type != DTV_PANEL))
+			rc = mdss_mdp_overlay_kickoff(mfd);
+	} else {
+		rc = mdss_mdp_ctl_setup(mdp5_data->ctl);
+		if (rc)
+			return rc;
+	}
+
+	if (!IS_ERR_VALUE(rc) && mdp5_data->vsync_pending) {
+		mdp5_data->vsync_pending = 0;
+		mdss_mdp_overlay_vsync_ctrl(mfd, mdp5_data->vsync_pending);
+	}
+
+	return rc;
+}
+
+static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
+{
+	int rc;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+	if (!mfd)
+		return -ENODEV;
+
+	if (mfd->key != MFD_KEY)
+		return -EINVAL;
+
+	if (!mdp5_data->ctl) {
+		pr_err("ctl not initialized\n");
+		return -ENODEV;
+	}
+
+	if (!mdp5_data->ctl->power_on)
+		return 0;
+
+	mdss_mdp_overlay_release_all(mfd);
+
+	rc = mdss_mdp_ctl_stop(mdp5_data->ctl);
+	if (rc == 0) {
+		if (!mfd->ref_cnt) {
+			mdp5_data->borderfill_enable = false;
+			mdss_mdp_ctl_destroy(mdp5_data->ctl);
+			mdp5_data->ctl = NULL;
+		}
+
+		if (atomic_dec_return(&ov_active_panels) == 0)
+			mdss_mdp_rotator_release_all();
+
+		rc = pm_runtime_put(&mfd->pdev->dev);
+		if (rc)
+			pr_err("unable to suspend w/pm_runtime_put (%d)\n", rc);
+	}
+
+	return rc;
+}
+
+int mdss_panel_register_done(struct mdss_panel_data *pdata)
+{
+	/*
+	 * The clocks are already on if continuous splash is enabled,
+	 * so take an extra reference here to keep the clock counts
+	 * balanced once the splash handoff is done.
+	 */
+	if (pdata->panel_info.cont_splash_enabled) {
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+		mdss_mdp_footswitch_ctrl_splash(1);
+		mdss_mdp_copy_splash_screen(pdata);
+	}
+	return 0;
+}
+
 int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
 {
-	mfd->on_fnc = mdss_mdp_ctl_on;
-	mfd->off_fnc = mdss_mdp_ctl_off;
-	mfd->hw_refresh = true;
-	mfd->lut_update = NULL;
-	mfd->do_histogram = NULL;
-	mfd->overlay_play_enable = true;
-	mfd->cursor_update = mdss_mdp_hw_cursor_update;
-	mfd->dma_fnc = mdss_mdp_overlay_pan_display;
-	mfd->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
+	struct device *dev = mfd->fbi->dev;
+	struct msm_mdp_interface *mdp5_interface = &mfd->mdp;
+	struct mdss_overlay_private *mdp5_data = NULL;
+	int rc;
 
-	if (mfd->panel_info.type == WRITEBACK_PANEL)
-		mfd->kickoff_fnc = mdss_mdp_wb_kickoff;
-	else
-		mfd->kickoff_fnc = mdss_mdp_overlay_kickoff;
+	mdp5_interface->on_fnc = mdss_mdp_overlay_on;
+	mdp5_interface->off_fnc = mdss_mdp_overlay_off;
+	mdp5_interface->do_histogram = NULL;
+	mdp5_interface->cursor_update = mdss_mdp_hw_cursor_update;
+	mdp5_interface->dma_fnc = mdss_mdp_overlay_pan_display;
+	mdp5_interface->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
+	mdp5_interface->panel_register_done = mdss_panel_register_done;
+	mdp5_interface->kickoff_fnc = mdss_mdp_overlay_kickoff;
 
-	return 0;
+	mdp5_data = kmalloc(sizeof(struct mdss_overlay_private), GFP_KERNEL);
+	if (!mdp5_data) {
+		pr_err("fail to allocate mdp5 private data structure");
+		return -ENOMEM;
+	}
+	memset(mdp5_data, 0, sizeof(struct mdss_overlay_private));
+
+	INIT_LIST_HEAD(&mdp5_data->pipes_used);
+	INIT_LIST_HEAD(&mdp5_data->pipes_cleanup);
+	init_completion(&mdp5_data->vsync_comp);
+	spin_lock_init(&mdp5_data->vsync_lock);
+	mutex_init(&mdp5_data->ov_lock);
+	mdp5_data->hw_refresh = true;
+	mdp5_data->overlay_play_enable = true;
+
+	mdp5_data->mdata = dev_get_drvdata(mfd->pdev->dev.parent);
+	if (!mdp5_data->mdata) {
+		pr_err("unable to initialize overlay for fb%d\n", mfd->index);
+		rc = -ENODEV;
+		goto init_fail;
+	}
+	mfd->mdp.private1 = mdp5_data;
+
+	rc = sysfs_create_group(&dev->kobj, &vsync_fs_attr_group);
+	if (rc) {
+		pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
+		goto init_fail;
+	}
+
+	pm_runtime_set_suspended(&mfd->pdev->dev);
+	pm_runtime_enable(&mfd->pdev->dev);
+
+	kobject_uevent(&dev->kobj, KOBJ_ADD);
+	pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
+
+	return rc;
+init_fail:
+	kfree(mdp5_data);
+	return rc;
 }
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 3a30ca1..b169c43 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -19,12 +19,28 @@
 
 #include "mdss_mdp.h"
 
-#define SMP_MB_CNT (mdss_res->smp_mb_cnt)
+#define SMP_MB_SIZE		(mdss_res->smp_mb_size)
+#define SMP_MB_CNT		(mdss_res->smp_mb_cnt)
+#define SMP_ENTRIES_PER_MB	(SMP_MB_SIZE / 16)
 
 static DEFINE_MUTEX(mdss_mdp_sspp_lock);
+static DEFINE_MUTEX(mdss_mdp_smp_lock);
 static DECLARE_BITMAP(mdss_mdp_smp_mmb_pool, MDSS_MDP_SMP_MMB_BLOCKS);
 
-static struct mdss_mdp_pipe mdss_mdp_pipe_list[MDSS_MDP_MAX_SSPP];
+static struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+						  u32 ndx);
+static int mdss_mdp_pipe_free(struct mdss_mdp_pipe *pipe);
+
+static inline void mdss_mdp_pipe_write(struct mdss_mdp_pipe *pipe,
+				       u32 reg, u32 val)
+{
+	writel_relaxed(val, pipe->base + reg);
+}
+
+static inline u32 mdss_mdp_pipe_read(struct mdss_mdp_pipe *pipe, u32 reg)
+{
+	return readl_relaxed(pipe->base + reg);
+}
 
 static u32 mdss_mdp_smp_mmb_reserve(unsigned long *smp, size_t n)
 {
@@ -42,9 +58,10 @@
 	return i;
 }
 
-static void mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp)
+static int mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp)
 {
 	u32 mmb, off, data, s;
+	int cnt = 0;
 
 	for_each_set_bit(mmb, smp, SMP_MB_CNT) {
 		off = (mmb / 3) * 4;
@@ -54,7 +71,9 @@
 		data |= client_id << s;
 		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SMP_ALLOC_W0 + off, data);
 		MDSS_MDP_REG_WRITE(MDSS_MDP_REG_SMP_ALLOC_R0 + off, data);
+		cnt++;
 	}
+	return cnt;
 }
 
 static void mdss_mdp_smp_mmb_free(unsigned long *smp)
@@ -67,26 +86,68 @@
 	}
 }
 
+static void mdss_mdp_smp_set_wm_levels(struct mdss_mdp_pipe *pipe, int mb_cnt)
+{
+	u32 entries, val, wm[3];
+
+	entries = mb_cnt * SMP_ENTRIES_PER_MB;
+	val = entries >> 2;
+
+	wm[0] = val;
+	wm[1] = wm[0] + val;
+	wm[2] = wm[1] + val;
+
+	pr_debug("pnum=%d watermarks %u,%u,%u\n", pipe->num,
+			wm[0], wm[1], wm[2]);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_0, wm[0]);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_1, wm[1]);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_2, wm[2]);
+}
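
To make the watermark arithmetic concrete: each SMP entry is 16 bytes, so
with a hypothetical smp_mb_size of 4096 bytes SMP_ENTRIES_PER_MB is 256.
Reserving mb_cnt = 2 macroblocks gives entries = 512 and val = 512 >> 2 =
128, so the three REQPRIO FIFO watermarks are programmed at 128, 256 and
384, i.e. at 1/4, 2/4 and 3/4 of the reserved fill level.
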
+
 static void mdss_mdp_smp_free(struct mdss_mdp_pipe *pipe)
 {
+	mutex_lock(&mdss_mdp_smp_lock);
 	mdss_mdp_smp_mmb_free(&pipe->smp[0]);
 	mdss_mdp_smp_mmb_free(&pipe->smp[1]);
 	mdss_mdp_smp_mmb_free(&pipe->smp[2]);
+	mutex_unlock(&mdss_mdp_smp_lock);
 }
 
 static int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe)
 {
+	struct mdss_data_type *mdata = mdss_mdp_get_mdata();
 	u32 num_blks = 0, reserved = 0;
-	int i;
+	struct mdss_mdp_plane_sizes ps;
+	int i, rc;
+	u32 nlines;
 
-	if ((pipe->src_planes.num_planes > 1) &&
-	    (pipe->type == MDSS_MDP_PIPE_TYPE_RGB))
-		return -EINVAL;
+	if (pipe->bwc_mode) {
+		rc = mdss_mdp_get_rau_strides(pipe->src.w, pipe->src.h,
+			pipe->src_fmt, &ps);
+		if (rc)
+			return rc;
+		pr_debug("BWC SMP strides ystride0=%x ystride1=%x\n",
+			ps.ystride[0], ps.ystride[1]);
+	} else if ((mdata->mdp_rev >= MDSS_MDP_HW_REV_102) &&
+			pipe->src_fmt->is_yuv) {
+		ps.num_planes = 2;
+		ps.ystride[0] = pipe->src.w;
+		ps.ystride[1] = pipe->src.w;
+	} else {
+		rc = mdss_mdp_get_plane_sizes(pipe->src_fmt->format,
+			pipe->src.w, pipe->src.h, &ps, 0);
+		if (rc)
+			return rc;
+	}
 
-	mutex_lock(&mdss_mdp_sspp_lock);
-	for (i = 0; i < pipe->src_planes.num_planes; i++) {
-		num_blks = DIV_ROUND_UP(2 * pipe->src_planes.ystride[i],
-					mdss_res->smp_mb_size);
+	mutex_lock(&mdss_mdp_smp_lock);
+	for (i = 0; i < ps.num_planes; i++) {
+		nlines = pipe->bwc_mode ? ps.rau_h[i] : 2;
+		num_blks = DIV_ROUND_UP(nlines * ps.ystride[i],
+			mdss_res->smp_mb_size);
+
+		if (mdata->mdp_rev == MDSS_MDP_HW_REV_100)
+			num_blks = roundup_pow_of_two(num_blks);
 
 		pr_debug("reserving %d mmb for pnum=%d plane=%d\n",
 				num_blks, pipe->num, i);
@@ -98,408 +159,244 @@
 
 	if (reserved < num_blks) {
 		pr_err("insufficient MMB blocks\n");
-		mdss_mdp_smp_free(pipe);
+		for (; i >= 0; i--)
+			mdss_mdp_smp_mmb_free(&pipe->smp[i]);
+		mutex_unlock(&mdss_mdp_smp_lock);
 		return -ENOMEM;
 	}
-	mutex_unlock(&mdss_mdp_sspp_lock);
+	mutex_unlock(&mdss_mdp_smp_lock);
 
 	return 0;
 }
 
 static int mdss_mdp_smp_alloc(struct mdss_mdp_pipe *pipe)
 {
-	u32 client_id;
 	int i;
+	int cnt = 0;
 
-	switch (pipe->num) {
-	case MDSS_MDP_SSPP_VIG0:
-		client_id = MDSS_MDP_SMP_CLIENT_VIG0_FETCH_Y;
-		break;
-	case MDSS_MDP_SSPP_VIG1:
-		client_id = MDSS_MDP_SMP_CLIENT_VIG1_FETCH_Y;
-		break;
-	case MDSS_MDP_SSPP_VIG2:
-		client_id = MDSS_MDP_SMP_CLIENT_VIG2_FETCH_Y;
-		break;
-	case MDSS_MDP_SSPP_RGB0:
-		client_id = MDSS_MDP_SMP_CLIENT_RGB0_FETCH;
-		break;
-	case MDSS_MDP_SSPP_RGB1:
-		client_id = MDSS_MDP_SMP_CLIENT_RGB1_FETCH;
-		break;
-	case MDSS_MDP_SSPP_RGB2:
-		client_id = MDSS_MDP_SMP_CLIENT_RGB2_FETCH;
-		break;
-	case MDSS_MDP_SSPP_DMA0:
-		client_id = MDSS_MDP_SMP_CLIENT_DMA0_FETCH_Y;
-		break;
-	case MDSS_MDP_SSPP_DMA1:
-		client_id = MDSS_MDP_SMP_CLIENT_DMA1_FETCH_Y;
-		break;
-	default:
-		pr_err("no valid smp client for pnum=%d\n", pipe->num);
-		return -EINVAL;
-	}
-
-	mutex_lock(&mdss_mdp_sspp_lock);
-	for (i = 0; i < pipe->src_planes.num_planes; i++)
-		mdss_mdp_smp_mmb_set(client_id + i, &pipe->smp[i]);
-	mutex_unlock(&mdss_mdp_sspp_lock);
+	mutex_lock(&mdss_mdp_smp_lock);
+	for (i = 0; i < MAX_PLANES; i++)
+		cnt += mdss_mdp_smp_mmb_set(pipe->ftch_id + i, &pipe->smp[i]);
+	mdss_mdp_smp_set_wm_levels(pipe, cnt);
+	mutex_unlock(&mdss_mdp_smp_lock);
 	return 0;
 }
 
-void mdss_mdp_pipe_unlock(struct mdss_mdp_pipe *pipe)
+int mdss_mdp_smp_setup(struct mdss_data_type *mdata, u32 cnt, u32 size)
 {
-	atomic_dec(&pipe->ref_cnt);
-	mutex_unlock(&pipe->lock);
+	if (!mdata)
+		return -EINVAL;
+
+	mdata->smp_mb_cnt = cnt;
+	mdata->smp_mb_size = size;
+
+	return 0;
 }
 
-int mdss_mdp_pipe_lock(struct mdss_mdp_pipe *pipe)
+void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe)
 {
-	if (atomic_inc_not_zero(&pipe->ref_cnt)) {
-		if (mutex_lock_interruptible(&pipe->lock)) {
-			atomic_dec(&pipe->ref_cnt);
-			return -EINTR;
-		}
-		return 0;
+	int tmp;
+
+	tmp = atomic_dec_return(&pipe->ref_cnt);
+
+	WARN(tmp < 0, "Invalid unmap with ref_cnt=%d", tmp);
+	if (tmp == 0)
+		mdss_mdp_pipe_free(pipe);
+}
+
+int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe)
+{
+	if (!atomic_inc_not_zero(&pipe->ref_cnt)) {
+		pr_err("attempting to map unallocated pipe (%d)", pipe->num);
+		return -EINVAL;
 	}
-	return -EINVAL;
+	return 0;
 }
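
With pipe locking replaced by reference counting, callers bracket pipe use
with map/unmap instead of lock/unlock. A sketch of the intended pairing
against this patch's API (use_pipe is hypothetical; error handling trimmed):

	static int use_pipe(struct mdss_data_type *mdata, u32 ndx,
			    struct mdss_mdp_data *buf)
	{
		struct mdss_mdp_pipe *pipe;
		int ret;

		/* takes a reference on success */
		pipe = mdss_mdp_pipe_get(mdata, ndx);
		if (IS_ERR_OR_NULL(pipe))
			return pipe ? PTR_ERR(pipe) : -ENODEV;

		ret = mdss_mdp_pipe_queue_data(pipe, buf);

		/* dropping the last reference frees the pipe */
		mdss_mdp_pipe_unmap(pipe);
		return ret;
	}
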
 
-static struct mdss_mdp_pipe *mdss_mdp_pipe_init(u32 pnum)
+static struct mdss_mdp_pipe *mdss_mdp_pipe_init(struct mdss_mdp_mixer *mixer,
+						u32 type)
 {
-	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_data_type *mdata;
+	struct mdss_mdp_pipe *pipe_pool = NULL;
+	u32 npipes;
+	u32 i;
 
-	if (atomic_read(&mdss_mdp_pipe_list[pnum].ref_cnt) == 0) {
-		pipe = &mdss_mdp_pipe_list[pnum];
-		memset(pipe, 0, sizeof(*pipe));
-
-		mutex_init(&pipe->lock);
-		atomic_set(&pipe->ref_cnt, 1);
-
-		if (mdss_mdp_pipe_lock(pipe) == 0) {
-			pipe->num = pnum;
-			pipe->type = mdss_res->pipe_type_map[pnum];
-			pipe->ndx = BIT(pnum);
-
-			pr_debug("ndx=%x pnum=%d\n", pipe->ndx, pipe->num);
-		} else {
-			atomic_set(&pipe->ref_cnt, 0);
-			pipe = NULL;
-		}
-	}
-	return pipe;
-}
-
-struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_pnum(u32 pnum)
-{
-	struct mdss_mdp_pipe *pipe = NULL;
-	mutex_lock(&mdss_mdp_sspp_lock);
-	if (mdss_res->pipe_type_map[pnum] != MDSS_MDP_PIPE_TYPE_UNUSED)
-		pipe = mdss_mdp_pipe_init(pnum);
-	mutex_unlock(&mdss_mdp_sspp_lock);
-	return pipe;
-}
-
-struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_locked(u32 type)
-{
-	struct mdss_mdp_pipe *pipe = NULL;
-	int pnum;
-
-	mutex_lock(&mdss_mdp_sspp_lock);
-	for (pnum = 0; pnum < MDSS_MDP_MAX_SSPP; pnum++) {
-		if (type == mdss_res->pipe_type_map[pnum]) {
-			pipe = mdss_mdp_pipe_init(pnum);
-			if (pipe)
-				break;
-		}
-	}
-	mutex_unlock(&mdss_mdp_sspp_lock);
-
-	return pipe;
-}
-
-struct mdss_mdp_pipe *mdss_mdp_pipe_get_locked(u32 ndx)
-{
-	struct mdss_mdp_pipe *pipe = NULL;
-	int i;
-
-	if (!ndx)
+	if (!mixer || !mixer->ctl || !mixer->ctl->mdata)
 		return NULL;
 
-	mutex_lock(&mdss_mdp_sspp_lock);
-	for (i = 0; i < MDSS_MDP_MAX_SSPP; i++) {
-		pipe = &mdss_mdp_pipe_list[i];
-		if (ndx == pipe->ndx)
+	mdata = mixer->ctl->mdata;
+
+	switch (type) {
+	case MDSS_MDP_PIPE_TYPE_VIG:
+		pipe_pool = mdata->vig_pipes;
+		npipes = mdata->nvig_pipes;
+		break;
+
+	case MDSS_MDP_PIPE_TYPE_RGB:
+		pipe_pool = mdata->rgb_pipes;
+		npipes = mdata->nrgb_pipes;
+		break;
+
+	case MDSS_MDP_PIPE_TYPE_DMA:
+		pipe_pool = mdata->dma_pipes;
+		npipes = mdata->ndma_pipes;
+		break;
+
+	default:
+		npipes = 0;
+		pr_err("invalid pipe type %d\n", type);
+		break;
+	}
+
+	for (i = 0; i < npipes; i++) {
+		pipe = pipe_pool + i;
+		if (atomic_cmpxchg(&pipe->ref_cnt, 0, 1) == 0) {
+			pipe->mixer = mixer;
 			break;
-	}
-	mutex_unlock(&mdss_mdp_sspp_lock);
-
-	if (i == MDSS_MDP_MAX_SSPP)
-		return NULL;
-
-	if (mdss_mdp_pipe_lock(pipe))
-		return NULL;
-
-	if (pipe->ndx != ndx) {
-		mdss_mdp_pipe_unlock(pipe);
+		}
 		pipe = NULL;
 	}
 
+	if (pipe)
+		pr_debug("type=%x   pnum=%d\n", pipe->type, pipe->num);
+	else
+		pr_err("no %d type pipes available\n", type);
+
 	return pipe;
 }
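
Allocation claims a pipe from the per-type static pools by atomically
flipping ref_cnt from 0 to 1; whoever wins the cmpxchg owns the pipe until
the matching unmap drops the count back to 0. The idiom in isolation
(claim_from_pool is a hypothetical sketch against the driver's own types):

	/* first caller to flip ref_cnt 0 -> 1 owns the slot */
	static struct mdss_mdp_pipe *claim_from_pool(struct mdss_mdp_pipe *pool,
						     u32 npipes)
	{
		u32 i;

		for (i = 0; i < npipes; i++) {
			/* atomic_cmpxchg() returns the old value */
			if (atomic_cmpxchg(&pool[i].ref_cnt, 0, 1) == 0)
				return &pool[i];
		}
		return NULL;	/* pool exhausted */
	}
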
 
-
-static void mdss_mdp_pipe_free(struct mdss_mdp_pipe *pipe)
+struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_dma(struct mdss_mdp_mixer *mixer)
 {
-	mdss_mdp_smp_free(pipe);
-	pipe->ndx = 0;
-	atomic_dec(&pipe->ref_cnt);
-	mdss_mdp_pipe_unlock(pipe);
+	struct mdss_mdp_pipe *pipe = NULL;
+	struct mdss_data_type *mdata;
+	u32 pnum;
+
+	mutex_lock(&mdss_mdp_sspp_lock);
+	mdata = mixer->ctl->mdata;
+	pnum = mixer->num;
+
+	if (atomic_cmpxchg(&((mdata->dma_pipes[pnum]).ref_cnt), 0, 1) == 0) {
+		pipe = &mdata->dma_pipes[pnum];
+		pipe->mixer = mixer;
+
+	} else {
+		pr_err("DMA pnum%d\t not available\n", pnum);
+	}
+
+	mutex_unlock(&mdss_mdp_sspp_lock);
+	return pipe;
 }
 
-int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe)
+struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
+						 u32 type)
+{
+	struct mdss_mdp_pipe *pipe;
+	mutex_lock(&mdss_mdp_sspp_lock);
+	pipe = mdss_mdp_pipe_init(mixer, type);
+	mutex_unlock(&mdss_mdp_sspp_lock);
+	return pipe;
+}
+
+struct mdss_mdp_pipe *mdss_mdp_pipe_get(struct mdss_data_type *mdata, u32 ndx)
+{
+	struct mdss_mdp_pipe *pipe = NULL;
+
+	if (!ndx)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&mdss_mdp_sspp_lock);
+
+	pipe = mdss_mdp_pipe_search(mdata, ndx);
+	if (!pipe)
+		return ERR_PTR(-EINVAL);
+
+	if (mdss_mdp_pipe_map(pipe))
+		return ERR_PTR(-EACCES);
+
+	mutex_unlock(&mdss_mdp_sspp_lock);
+
+	return pipe;
+}
+
+static struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
+						  u32 ndx)
+{
+	u32 i;
+	for (i = 0; i < mdata->nvig_pipes; i++) {
+		if (mdata->vig_pipes[i].ndx == ndx)
+			return &mdata->vig_pipes[i];
+	}
+
+	for (i = 0; i < mdata->nrgb_pipes; i++) {
+		if (mdata->rgb_pipes[i].ndx == ndx)
+			return &mdata->rgb_pipes[i];
+	}
+
+	for (i = 0; i < mdata->ndma_pipes; i++) {
+		if (mdata->dma_pipes[i].ndx == ndx)
+			return &mdata->dma_pipes[i];
+	}
+
+	return NULL;
+}
+
+static int mdss_mdp_pipe_free(struct mdss_mdp_pipe *pipe)
 {
 	pr_debug("ndx=%x pnum=%d ref_cnt=%d\n", pipe->ndx, pipe->num,
 			atomic_read(&pipe->ref_cnt));
 
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-	mutex_lock(&mdss_mdp_sspp_lock);
+	mdss_mdp_pipe_sspp_term(pipe);
+	mdss_mdp_smp_free(pipe);
+	pipe->flags = 0;
+	pipe->bwc_mode = 0;
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+
+	return 0;
+}
+
+int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe)
+{
+	int tmp;
+
+	tmp = atomic_dec_return(&pipe->ref_cnt);
+
+	if (tmp != 0) {
+		pr_err("unable to free pipe %d while still in use (%d)\n",
+				pipe->num, tmp);
+		return -EBUSY;
+	}
 	mdss_mdp_pipe_free(pipe);
-	mutex_unlock(&mdss_mdp_sspp_lock);
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
 
 	return 0;
-}
 
-int mdss_mdp_pipe_release_all(struct msm_fb_data_type *mfd)
-{
-	struct mdss_mdp_pipe *pipe;
-	int i;
-
-	if (!mfd)
-		return -ENODEV;
-
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
-	mutex_lock(&mdss_mdp_sspp_lock);
-	for (i = 0; i < MDSS_MDP_MAX_SSPP; i++) {
-		pipe = &mdss_mdp_pipe_list[i];
-		if (atomic_read(&pipe->ref_cnt) && pipe->mfd == mfd) {
-			pr_debug("release pnum=%d\n", pipe->num);
-			if (mdss_mdp_pipe_lock(pipe) == 0) {
-				mdss_mdp_mixer_pipe_unstage(pipe);
-				mdss_mdp_pipe_free(pipe);
-			} else {
-				pr_err("unable to lock pipe=%d for release",
-				       pipe->num);
-			}
-		}
-	}
-	mutex_unlock(&mdss_mdp_sspp_lock);
-	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
-
-	return 0;
-}
-
-static inline void mdss_mdp_pipe_write(struct mdss_mdp_pipe *pipe,
-				       u32 reg, u32 val)
-{
-	int offset = MDSS_MDP_REG_SSPP_OFFSET(pipe->num);
-	MDSS_MDP_REG_WRITE(offset + reg, val);
-}
-
-static inline u32 mdss_mdp_pipe_read(struct mdss_mdp_pipe *pipe, u32 reg)
-{
-	int offset = MDSS_MDP_REG_SSPP_OFFSET(pipe->num);
-	return MDSS_MDP_REG_READ(offset + reg);
-}
-
-static int mdss_mdp_leading_zero(u32 num)
-{
-	u32 bit = 0x80000000;
-	int i;
-
-	for (i = 0; i < 32; i++) {
-		if (bit & num)
-			return i;
-		bit >>= 1;
-	}
-
-	return i;
-}
-
-static u32 mdss_mdp_scale_phase_step(int f_num, u32 src, u32 dst)
-{
-	u32 val, s;
-	int n;
-
-	n = mdss_mdp_leading_zero(src);
-	if (n > f_num)
-		n = f_num;
-	s = src << n;	/* maximum to reduce lose of resolution */
-	val = s / dst;
-	if (n < f_num) {
-		n = f_num - n;
-		val <<= n;
-		val |= ((s % dst) << n) / dst;
-	}
-
-	return val;
-}
-
-static int mdss_mdp_scale_setup(struct mdss_mdp_pipe *pipe)
-{
-	u32 scale_config = 0;
-	u32 phasex_step = 0, phasey_step = 0;
-	u32 chroma_sample;
-
-	if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA) {
-		if (pipe->dst.h != pipe->src.h || pipe->dst.w != pipe->src.w) {
-			pr_err("no scaling supported on dma pipe\n");
-			return -EINVAL;
-		} else {
-			return 0;
-		}
-	}
-
-	chroma_sample = pipe->src_fmt->chroma_sample;
-
-	if ((pipe->src.h != pipe->dst.h) ||
-	    (chroma_sample == MDSS_MDP_CHROMA_420) ||
-	    (chroma_sample == MDSS_MDP_CHROMA_H1V2)) {
-		pr_debug("scale y - src_h=%d dst_h=%d\n",
-				pipe->src.h, pipe->dst.h);
-
-		if ((pipe->src.h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
-			pr_err("too much downscaling height=%d->%d",
-			       pipe->src.h, pipe->dst.h);
-			return -EINVAL;
-		}
-
-		scale_config |= MDSS_MDP_SCALEY_EN;
-
-		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
-			u32 chr_dst_h = pipe->dst.h;
-			if ((chroma_sample == MDSS_MDP_CHROMA_420) ||
-			    (chroma_sample == MDSS_MDP_CHROMA_H1V2))
-				chr_dst_h *= 2;	/* 2x upsample chroma */
-
-			if (pipe->src.h <= pipe->dst.h)
-				scale_config |= /* G/Y, A */
-					(MDSS_MDP_SCALE_FILTER_BIL << 10) |
-					(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
-			else
-				scale_config |= /* G/Y, A */
-					(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
-					(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
-
-			if (pipe->src.h <= chr_dst_h)
-				scale_config |= /* CrCb */
-					(MDSS_MDP_SCALE_FILTER_BIL << 14);
-			else
-				scale_config |= /* CrCb */
-					(MDSS_MDP_SCALE_FILTER_PCMN << 14);
-
-			phasey_step = mdss_mdp_scale_phase_step(
-				PHASE_STEP_SHIFT, pipe->src.h, chr_dst_h);
-
-			mdss_mdp_pipe_write(pipe,
-					MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY,
-					phasey_step);
-		} else {
-			if (pipe->src.h <= pipe->dst.h)
-				scale_config |= /* RGB, A */
-					(MDSS_MDP_SCALE_FILTER_BIL << 10) |
-					(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
-			else
-				scale_config |= /* RGB, A */
-					(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
-					(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
-		}
-
-		phasey_step = mdss_mdp_scale_phase_step(
-			PHASE_STEP_SHIFT, pipe->src.h, pipe->dst.h);
-	}
-
-	if ((pipe->src.w != pipe->dst.w) ||
-	    (chroma_sample == MDSS_MDP_CHROMA_420) ||
-	    (chroma_sample == MDSS_MDP_CHROMA_H2V1)) {
-		pr_debug("scale x - src_w=%d dst_w=%d\n",
-				pipe->src.w, pipe->dst.w);
-
-		if ((pipe->src.w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
-			pr_err("too much downscaling width=%d->%d",
-			       pipe->src.w, pipe->dst.w);
-			return -EINVAL;
-		}
-
-		scale_config |= MDSS_MDP_SCALEX_EN;
-
-		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
-			u32 chr_dst_w = pipe->dst.w;
-
-			if ((chroma_sample == MDSS_MDP_CHROMA_420) ||
-			    (chroma_sample == MDSS_MDP_CHROMA_H2V1))
-				chr_dst_w *= 2;	/* 2x upsample chroma */
-
-			if (pipe->src.w <= pipe->dst.w)
-				scale_config |= /* G/Y, A */
-					(MDSS_MDP_SCALE_FILTER_BIL << 8) |
-					(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
-			else
-				scale_config |= /* G/Y, A */
-					(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
-					(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
-
-			if (pipe->src.w <= chr_dst_w)
-				scale_config |= /* CrCb */
-					(MDSS_MDP_SCALE_FILTER_BIL << 12);
-			else
-				scale_config |= /* CrCb */
-					(MDSS_MDP_SCALE_FILTER_PCMN << 12);
-
-			phasex_step = mdss_mdp_scale_phase_step(
-				PHASE_STEP_SHIFT, pipe->src.w, chr_dst_w);
-			mdss_mdp_pipe_write(pipe,
-					MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX,
-					phasex_step);
-		} else {
-			if (pipe->src.w <= pipe->dst.w)
-				scale_config |= /* RGB, A */
-					(MDSS_MDP_SCALE_FILTER_BIL << 8) |
-					(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
-			else
-				scale_config |= /* RGB, A */
-					(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
-					(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
-		}
-
-		phasex_step = mdss_mdp_scale_phase_step(
-			PHASE_STEP_SHIFT, pipe->src.w, pipe->dst.w);
-	}
-
-	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SCALE_CONFIG, scale_config);
-	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SCALE_PHASE_STEP_X, phasex_step);
-	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SCALE_PHASE_STEP_Y, phasey_step);
-	return 0;
 }
 
 static int mdss_mdp_image_setup(struct mdss_mdp_pipe *pipe)
 {
 	u32 img_size, src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+	u32 width, height;
 
 	pr_debug("pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n",
 		   pipe->num, pipe->img_width, pipe->img_height,
 		   pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
 		   pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
 
-	if (mdss_mdp_scale_setup(pipe))
-		return -EINVAL;
+	width = pipe->img_width;
+	height = pipe->img_height;
+	mdss_mdp_get_plane_sizes(pipe->src_fmt->format, width, height,
+			&pipe->src_planes, pipe->bwc_mode);
 
-	mdss_mdp_get_plane_sizes(pipe->src_fmt->format, pipe->img_width,
-				 pipe->img_height, &pipe->src_planes);
+	if ((pipe->flags & MDP_DEINTERLACE) &&
+			!(pipe->flags & MDP_SOURCE_ROTATED_90)) {
+		int i;
+		for (i = 0; i < pipe->src_planes.num_planes; i++)
+			pipe->src_planes.ystride[i] *= 2;
+		width *= 2;
+		height /= 2;
+	}
 
-	img_size = (pipe->img_height << 16) | pipe->img_width;
+	img_size = (height << 16) | width;
 	src_size = (pipe->src.h << 16) | pipe->src.w;
 	src_xy = (pipe->src.y << 16) | pipe->src.x;
 	dst_size = (pipe->dst.h << 16) | pipe->dst.w;
@@ -509,6 +406,11 @@
 	ystride1 =  (pipe->src_planes.ystride[2]) |
 		    (pipe->src_planes.ystride[3] << 16);
 
+	if (pipe->overfetch_disable) {
+		img_size = src_size;
+		src_xy = 0;
+	}
+
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_IMG_SIZE, img_size);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE, src_size);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_XY, src_xy);
@@ -523,10 +425,15 @@
 static int mdss_mdp_format_setup(struct mdss_mdp_pipe *pipe)
 {
 	struct mdss_mdp_format_params *fmt;
-	u32 rot90, opmode, chroma_samp;
+	u32 chroma_samp, unpack, src_format;
+	u32 secure = 0;
+	u32 opmode;
 
 	fmt = pipe->src_fmt;
 
+	if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+		secure = 0xF;
+
 	opmode = pipe->bwc_mode;
 	if (pipe->flags & MDP_FLIP_LR)
 		opmode |= MDSS_MDP_OP_FLIP_LR;
@@ -536,8 +443,6 @@
 	pr_debug("pnum=%d format=%d opmode=%x\n", pipe->num, fmt->format,
 			opmode);
 
-	rot90 = !!(pipe->flags & MDP_ROT_90);
-
 	chroma_samp = fmt->chroma_sample;
 	if (pipe->flags & MDP_SOURCE_ROTATED_90) {
 		if (chroma_samp == MDSS_MDP_CHROMA_H2V1)
@@ -546,66 +451,133 @@
 			chroma_samp = MDSS_MDP_CHROMA_H2V1;
 	}
 
-	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT,
-			    (chroma_samp << 23) |
-			    (fmt->fetch_planes << 19) |
-			    (fmt->unpack_align_msb << 18) |
-			    (fmt->unpack_tight << 17) |
-			    (fmt->unpack_count << 12) |
-			    (rot90 << 11) |
-			    (fmt->bpp << 9) |
-			    (fmt->alpha_enable << 8) |
-			    (fmt->a_bit << 6) |
-			    (fmt->r_bit << 4) |
-			    (fmt->b_bit << 2) |
-			    (fmt->g_bit << 0));
+	src_format = (chroma_samp << 23) |
+		     (fmt->fetch_planes << 19) |
+		     (fmt->bits[C3_ALPHA] << 6) |
+		     (fmt->bits[C2_R_Cr] << 4) |
+		     (fmt->bits[C1_B_Cb] << 2) |
+		     (fmt->bits[C0_G_Y] << 0);
 
-	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN,
-			    (fmt->element3 << 24) |
-			    (fmt->element2 << 16) |
-			    (fmt->element1 << 8) |
-			    (fmt->element0 << 0));
+	if (pipe->flags & MDP_ROT_90)
+		src_format |= BIT(11); /* ROT90 */
 
+	if (fmt->alpha_enable &&
+			fmt->fetch_planes != MDSS_MDP_PLANE_INTERLEAVED)
+		src_format |= BIT(8); /* SRCC3_EN */
+
+	if (fmt->fetch_planes != MDSS_MDP_PLANE_PLANAR) {
+		unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+			(fmt->element[1] << 8) | (fmt->element[0] << 0);
+
+		src_format |= ((fmt->unpack_count - 1) << 12) |
+			  (fmt->unpack_tight << 17) |
+			  (fmt->unpack_align_msb << 18) |
+			  ((fmt->bpp - 1) << 9);
+	} else {
+		unpack = 0;
+	}
+
+	mdss_mdp_pipe_sspp_setup(pipe, &opmode);
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, src_format);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_OP_MODE, opmode);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
 
 	return 0;
 }
 
-static int mdss_mdp_vig_setup(struct mdss_mdp_pipe *pipe)
+static void mdss_mdp_addr_add_offset(struct mdss_mdp_pipe *pipe,
+				    struct mdss_mdp_data *data)
 {
-	u32 opmode = 0;
+	data->p[0].addr += pipe->src.x +
+		(pipe->src.y * pipe->src_planes.ystride[0]);
+	if (data->num_planes > 1) {
+		u8 hmap[] = { 1, 2, 1, 2 };
+		u8 vmap[] = { 1, 1, 2, 2 };
+		u16 xoff = pipe->src.x / hmap[pipe->src_fmt->chroma_sample];
+		u16 yoff = pipe->src.y / vmap[pipe->src_fmt->chroma_sample];
 
-	pr_debug("pnum=%x\n", pipe->num);
+		if (data->num_planes == 2) /* pseudo planar */
+			xoff *= 2;
+		data->p[1].addr += xoff + (yoff * pipe->src_planes.ystride[1]);
 
-	if (pipe->src_fmt->is_yuv)
-		opmode |= (0 << 19) |	/* DST_DATA=RGB */
-			  (1 << 18) |	/* SRC_DATA=YCBCR */
-			  (1 << 17);	/* CSC_1_EN */
-
-	/* only need to program once */
-	if (pipe->play_cnt == 0) {
-		mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num, 1,
-				   MDSS_MDP_CSC_YUV2RGB);
+		if (data->num_planes > 2) { /* planar */
+			data->p[2].addr += xoff +
+				(yoff * pipe->src_planes.ystride[2]);
+		}
 	}
-	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE, opmode);
+}
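
A worked example of the offset math above, assuming MDSS_MDP_CHROMA_420
indexes the (2, 2) entries of hmap/vmap and an NV12-style pseudo-planar
buffer (num_planes == 2) with src.x = 64 and src.y = 32: the luma plane
advances by 64 + 32 * ystride[0]; for chroma, xoff = 64 / 2 = 32 is doubled
to 64 because Cb and Cr are interleaved in one plane, yoff = 32 / 2 = 16,
and p[1].addr advances by 64 + 16 * ystride[1].
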
 
-	return 0;
+int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata, u32 *offsets,
+				u32 *ftch_id, u32 type, u32 num_base, u32 len)
+{
+	struct mdss_mdp_pipe *head;
+	u32 i;
+	int rc = 0;
+
+	head = devm_kzalloc(&mdata->pdev->dev, sizeof(struct mdss_mdp_pipe) *
+			len, GFP_KERNEL);
+
+	if (!head) {
+		pr_err("unable to setup pipe type=%d :devm_kzalloc fail\n",
+			type);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < len; i++) {
+		head[i].type = type;
+		head[i].ftch_id  = ftch_id[i];
+		head[i].num = i + num_base;
+		head[i].ndx = BIT(i + num_base);
+		head[i].base = mdata->mdp_base + offsets[i];
+	}
+
+	switch (type) {
+
+	case MDSS_MDP_PIPE_TYPE_VIG:
+		mdata->vig_pipes = head;
+		break;
+
+	case MDSS_MDP_PIPE_TYPE_RGB:
+		mdata->rgb_pipes = head;
+		break;
+
+	case MDSS_MDP_PIPE_TYPE_DMA:
+		mdata->dma_pipes = head;
+		break;
+
+	default:
+		pr_err("Invalid pipe type=%d\n", type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
 }
 
 static int mdss_mdp_src_addr_setup(struct mdss_mdp_pipe *pipe,
 				   struct mdss_mdp_data *data)
 {
-	int ret;
+	int is_rot = pipe->mixer->rotator_mode;
+	int ret = 0;
 
 	pr_debug("pnum=%d\n", pipe->num);
 
-	if (pipe->type != MDSS_MDP_PIPE_TYPE_DMA)
-		data->bwc_enabled = pipe->bwc_mode;
+	data->bwc_enabled = pipe->bwc_mode;
 
 	ret = mdss_mdp_data_check(data, &pipe->src_planes);
 	if (ret)
 		return ret;
 
+	if (pipe->overfetch_disable)
+		mdss_mdp_addr_add_offset(pipe, data);
+
+	/* planar format expects YCbCr, swap chroma planes if YCrCb */
+	if (!is_rot && (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR) &&
+	    (pipe->src_fmt->element[0] == C2_R_Cr))
+		swap(data->p[1].addr, data->p[2].addr);
+
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC0_ADDR, data->p[0].addr);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC1_ADDR, data->p[1].addr);
 	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC2_ADDR, data->p[2].addr);
@@ -614,11 +586,33 @@
 	return 0;
 }
 
+static int mdss_mdp_pipe_solidfill_setup(struct mdss_mdp_pipe *pipe)
+{
+	int ret;
+	u32 secure, format;
+
+	pr_debug("solid fill setup on pnum=%d\n", pipe->num);
+
+	ret = mdss_mdp_image_setup(pipe);
+	if (ret) {
+		pr_err("image setup error for pnum=%d\n", pipe->num);
+		return ret;
+	}
+
+	format = MDSS_MDP_FMT_SOLID_FILL;
+	secure = (pipe->flags & MDP_SECURE_OVERLAY_SESSION ? 0xF : 0x0);
+
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, format);
+	mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
+
+	return 0;
+}
+
 int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe,
 			     struct mdss_mdp_data *src_data)
 {
 	int ret = 0;
-	u32 params_changed;
+	u32 params_changed, opmode;
 
 	if (!pipe) {
 		pr_err("pipe not setup properly for queue\n");
@@ -636,9 +630,20 @@
 	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
 
 	params_changed = pipe->params_changed;
+	if (src_data == NULL) {
+		mdss_mdp_pipe_solidfill_setup(pipe);
+		goto update_nobuf;
+	}
+
 	if (params_changed) {
 		pipe->params_changed = 0;
 
+		ret = mdss_mdp_pipe_pp_setup(pipe, &opmode);
+		if (ret) {
+			pr_err("pipe pp setup error for pnum=%d\n", pipe->num);
+			goto done;
+		}
+
 		ret = mdss_mdp_image_setup(pipe);
 		if (ret) {
 			pr_err("image setup error for pnum=%d\n", pipe->num);
@@ -653,7 +658,8 @@
 		}
 
 		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)
-			mdss_mdp_vig_setup(pipe);
+			mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE,
+			opmode);
 
 		ret = mdss_mdp_smp_reserve(pipe);
 		if (ret) {
@@ -671,6 +677,7 @@
 		goto done;
 	}
 
+update_nobuf:
 	mdss_mdp_mixer_pipe_update(pipe, params_changed);
 
 	pipe->play_cnt++;
diff --git a/drivers/video/msm/mdss/mdss_mdp_pp.c b/drivers/video/msm/mdss/mdss_mdp_pp.c
index b84a075..2c0d5e0 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pp.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pp.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,7 +14,11 @@
 
 #define pr_fmt(fmt)	"%s: " fmt, __func__
 
+#include "mdss_fb.h"
 #include "mdss_mdp.h"
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
 
 struct mdp_csc_cfg mdp_csc_convert[MDSS_MDP_MAX_CSC] = {
 	[MDSS_MDP_CSC_RGB2RGB] = {
@@ -71,21 +76,150 @@
 #define CSC_LV_OFF	0x14
 #define CSC_POST_OFF	0xC
 
-static int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, u32 tbl_idx,
+#define MDSS_BLOCK_DISP_NUM	(MDP_BLOCK_MAX - MDP_LOGICAL_BLOCK_DISP_0)
+
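+/* histogram wait budget: 60ms worth of jiffies per requested frame */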
+#define HIST_WAIT_TIMEOUT(frame) ((60 * HZ * (frame)) / 1000)
+/* hist collect state */
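+/*
+ * RESET: engine re-armed; START: collection kicked off for a frame;
+ * READY: bins latched and waiting to be read; IDLE: data read out.
+ */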
+enum {
+	HIST_UNKNOWN,
+	HIST_IDLE,
+	HIST_RESET,
+	HIST_START,
+	HIST_READY,
+};
+
+struct pp_hist_col_info {
+	u32 col_state;
+	u32 col_en;
+	u32 read_request;
+	u32 hist_cnt_read;
+	u32 hist_cnt_sent;
+	u32 frame_cnt;
+	u32 is_kick_ready;
+	struct completion comp;
+	u32 data[HIST_V_SIZE];
+};
+
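+/*
+ * 4x4 ordered (Bayer) dither threshold matrix, stored row-major, and a
+ * map from component bit depth to the field programmed into the
+ * DITHER_DEPTH register.
+ */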
+static u32 dither_matrix[16] = {
+	15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10};
+static u32 dither_depth_map[9] = {
+	0, 0, 0, 0, 0, 1, 2, 3, 3};
+
+#define GAMUT_T0_SIZE	125
+#define GAMUT_T1_SIZE	100
+#define GAMUT_T2_SIZE	80
+#define GAMUT_T3_SIZE	100
+#define GAMUT_T4_SIZE	100
+#define GAMUT_T5_SIZE	80
+#define GAMUT_T6_SIZE	64
+#define GAMUT_T7_SIZE	80
+#define GAMUT_TOTAL_TABLE_SIZE (GAMUT_T0_SIZE + GAMUT_T1_SIZE + \
+	GAMUT_T2_SIZE + GAMUT_T3_SIZE + GAMUT_T4_SIZE + \
+	GAMUT_T5_SIZE + GAMUT_T6_SIZE + GAMUT_T7_SIZE)
+
+#define PP_FLAGS_DIRTY_PA	0x1
+#define PP_FLAGS_DIRTY_PCC	0x2
+#define PP_FLAGS_DIRTY_IGC	0x4
+#define PP_FLAGS_DIRTY_ARGC	0x8
+#define PP_FLAGS_DIRTY_ENHIST	0x10
+#define PP_FLAGS_DIRTY_DITHER	0x20
+#define PP_FLAGS_DIRTY_GAMUT	0x40
+#define PP_FLAGS_DIRTY_HIST_COL	0x80
+#define PP_FLAGS_DIRTY_PGC	0x100
+#define PP_FLAGS_DIRTY_SHARP	0x200
+
+#define PP_STS_ENABLE	0x1
+#define PP_STS_GAMUT_FIRST	0x2
+
+#define SHARP_STRENGTH_DEFAULT	32
+#define SHARP_EDGE_THR_DEFAULT	112
+#define SHARP_SMOOTH_THR_DEFAULT	8
+#define SHARP_NOISE_THR_DEFAULT	2
+
+struct mdss_pp_res_type {
+	/* logical info */
+	u32 pp_disp_flags[MDSS_BLOCK_DISP_NUM];
+	u32 igc_lut_c0c1[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	u32 igc_lut_c2[MDSS_BLOCK_DISP_NUM][IGC_LUT_ENTRIES];
+	struct mdp_ar_gc_lut_data
+		gc_lut_r[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
+	struct mdp_ar_gc_lut_data
+		gc_lut_g[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
+	struct mdp_ar_gc_lut_data
+		gc_lut_b[MDSS_BLOCK_DISP_NUM][GC_LUT_SEGMENTS];
+	u32 enhist_lut[MDSS_BLOCK_DISP_NUM][ENHIST_LUT_ENTRIES];
+	struct mdp_pa_cfg pa_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pcc_cfg_data pcc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_igc_lut_data igc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pgc_lut_data argc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_pgc_lut_data pgc_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_hist_lut_data enhist_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_dither_cfg_data dither_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	struct mdp_gamut_cfg_data gamut_disp_cfg[MDSS_BLOCK_DISP_NUM];
+	uint16_t gamut_tbl[MDSS_BLOCK_DISP_NUM][GAMUT_TOTAL_TABLE_SIZE];
+	struct pp_hist_col_info
+		*hist_col[MDSS_BLOCK_DISP_NUM][MDSS_MDP_MAX_DSPP];
+	u32 hist_data[MDSS_BLOCK_DISP_NUM][HIST_V_SIZE];
+	/* physical info */
+	struct pp_sts_type pp_dspp_sts[MDSS_MDP_MAX_DSPP];
+	struct pp_hist_col_info dspp_hist[MDSS_MDP_MAX_DSPP];
+};
+
+static DEFINE_MUTEX(mdss_pp_mutex);
+static DEFINE_SPINLOCK(mdss_hist_lock);
+static DEFINE_MUTEX(mdss_mdp_hist_mutex);
+static struct mdss_pp_res_type *mdss_pp_res;
+
+static void pp_hist_read(u32 v_base, struct pp_hist_col_info *hist_info);
+static void pp_update_pcc_regs(u32 offset,
+				struct mdp_pcc_cfg_data *cfg_ptr);
+static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
+				u32 offset, u32 blk_idx);
+static void pp_update_gc_one_lut(u32 offset,
+				struct mdp_ar_gc_lut_data *lut_data);
+static void pp_update_argc_lut(u32 offset,
+				struct mdp_pgc_lut_data *config);
+static void pp_update_hist_lut(u32 offset, struct mdp_hist_lut_data *cfg);
+static void pp_pa_config(unsigned long flags, u32 base,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pa_cfg *pa_config);
+static void pp_pcc_config(unsigned long flags, u32 base,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pcc_cfg_data *pcc_config);
+static void pp_igc_config(unsigned long flags, u32 base,
+				struct pp_sts_type *pp_sts,
+				struct mdp_igc_lut_data *igc_config,
+				u32 pipe_num);
+static void pp_enhist_config(unsigned long flags, u32 base,
+				struct pp_sts_type *pp_sts,
+				struct mdp_hist_lut_data *enhist_cfg);
+static void pp_sharp_config(char __iomem *offset,
+				struct pp_sts_type *pp_sts,
+				struct mdp_sharp_cfg *sharp_config);
+
+int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, u32 tbl_idx,
 				   struct mdp_csc_cfg *data)
 {
 	int i, ret = 0;
-	u32 *off, base, val = 0;
+	char __iomem *base, *off;
+	u32 val = 0;
+	struct mdss_data_type *mdata;
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_ctl *ctl;
 
 	if (data == NULL) {
 		pr_err("no csc matrix specified\n");
 		return -EINVAL;
 	}
 
+	mdata = mdss_mdp_get_mdata();
 	switch (block) {
 	case MDSS_MDP_BLOCK_SSPP:
-		if (blk_idx < MDSS_MDP_SSPP_RGB0) {
-			base = MDSS_MDP_REG_SSPP_OFFSET(blk_idx);
+		if (blk_idx < mdata->nvig_pipes) {
+			pipe = mdata->vig_pipes + blk_idx;
+			base = pipe->base;
 			if (tbl_idx == 1)
 				base += MDSS_MDP_REG_VIG_CSC_1_BASE;
 			else
@@ -95,9 +229,9 @@
 		}
 		break;
 	case MDSS_MDP_BLOCK_WB:
-		if (blk_idx < MDSS_MDP_MAX_WRITEBACK) {
-			base = MDSS_MDP_REG_WB_OFFSET(blk_idx) +
-			       MDSS_MDP_REG_WB_CSC_BASE;
+		if (blk_idx < mdata->nctl) {
+			ctl = mdata->ctl_off + blk_idx;
+			base = ctl->wb_base + MDSS_MDP_REG_WB_CSC_BASE;
 		} else {
 			ret = -EINVAL;
 		}
@@ -111,34 +245,33 @@
 		return ret;
 	}
 
-	off = (u32 *) (base + CSC_MV_OFF);
+	off = base + CSC_MV_OFF;
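+	/* nine matrix coefficients, packed two per 32-bit register;
+	 * COEFF_33 occupies the low half of the final write */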
 	for (i = 0; i < 9; i++) {
 		if (i & 0x1) {
 			val |= data->csc_mv[i] << 16;
-			MDSS_MDP_REG_WRITE(off, val);
-			off++;
+			writel_relaxed(val, off);
+			off += sizeof(u32);
 		} else {
 			val = data->csc_mv[i];
 		}
 	}
-	MDSS_MDP_REG_WRITE(off, val); /* COEFF_33 */
+	writel_relaxed(val, off); /* COEFF_33 */
 
-	off = (u32 *) (base + CSC_BV_OFF);
+	off = base + CSC_BV_OFF;
 	for (i = 0; i < 3; i++) {
-		MDSS_MDP_REG_WRITE(off, data->csc_pre_bv[i]);
-		MDSS_MDP_REG_WRITE((u32 *)(((u32)off) + CSC_POST_OFF),
-				   data->csc_post_bv[i]);
-		off++;
+		writel_relaxed(data->csc_pre_bv[i], off);
+		writel_relaxed(data->csc_post_bv[i], off + CSC_POST_OFF);
+		off += sizeof(u32);
 	}
 
-	off = (u32 *) (base + CSC_LV_OFF);
+	off = base + CSC_LV_OFF;
 	for (i = 0; i < 6; i += 2) {
 		val = (data->csc_pre_lv[i] << 8) | data->csc_pre_lv[i+1];
-		MDSS_MDP_REG_WRITE(off, val);
+		writel_relaxed(val, off);
 
 		val = (data->csc_post_lv[i] << 8) | data->csc_post_lv[i+1];
-		MDSS_MDP_REG_WRITE((u32 *)(((u32)off) + CSC_POST_OFF), val);
-		off++;
+		writel_relaxed(val, off + CSC_POST_OFF);
+		off += sizeof(u32);
 	}
 
 	return ret;
@@ -160,16 +293,1884 @@
 	return mdss_mdp_csc_setup_data(block, blk_idx, tbl_idx, data);
 }
 
-int mdss_mdp_dspp_setup(struct mdss_mdp_ctl *ctl, struct mdss_mdp_mixer *mixer)
+static void pp_gamut_config(struct mdp_gamut_cfg_data *gamut_cfg,
+				u32 base, struct pp_sts_type *pp_sts)
 {
-	int dspp_num;
+	u32 offset;
+	int i, j;
+	if (gamut_cfg->flags & MDP_PP_OPS_WRITE) {
+		offset = base + MDSS_MDP_REG_DSPP_GAMUT_BASE;
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
+				MDSS_MDP_REG_WRITE(offset,
+					(u32)gamut_cfg->r_tbl[i][j]);
+			offset += 4;
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
+				MDSS_MDP_REG_WRITE(offset,
+					(u32)gamut_cfg->g_tbl[i][j]);
+			offset += 4;
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			for (j = 0; j < gamut_cfg->tbl_size[i]; j++)
+				MDSS_MDP_REG_WRITE(offset,
+					(u32)gamut_cfg->b_tbl[i][j]);
+			offset += 4;
+		}
+		if (gamut_cfg->gamut_first)
+			pp_sts->gamut_sts |= PP_STS_GAMUT_FIRST;
+	}
 
-	if (!ctl || !mixer)
+	if (gamut_cfg->flags & MDP_PP_OPS_DISABLE)
+		pp_sts->gamut_sts &= ~PP_STS_ENABLE;
+	else if (gamut_cfg->flags & MDP_PP_OPS_ENABLE)
+		pp_sts->gamut_sts |= PP_STS_ENABLE;
+}
+
+static void pp_pa_config(unsigned long flags, u32 base,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pa_cfg *pa_config)
+{
+	if (flags & PP_FLAGS_DIRTY_PA) {
+		if (pa_config->flags & MDP_PP_OPS_WRITE) {
+			MDSS_MDP_REG_WRITE(base, pa_config->hue_adj);
+			base += 4;
+			MDSS_MDP_REG_WRITE(base, pa_config->sat_adj);
+			base += 4;
+			MDSS_MDP_REG_WRITE(base, pa_config->val_adj);
+			base += 4;
+			MDSS_MDP_REG_WRITE(base, pa_config->cont_adj);
+		}
+		if (pa_config->flags & MDP_PP_OPS_DISABLE)
+			pp_sts->pa_sts &= ~PP_STS_ENABLE;
+		else if (pa_config->flags & MDP_PP_OPS_ENABLE)
+			pp_sts->pa_sts |= PP_STS_ENABLE;
+	}
+}
+
+static void pp_pcc_config(unsigned long flags, u32 base,
+				struct pp_sts_type *pp_sts,
+				struct mdp_pcc_cfg_data *pcc_config)
+{
+	if (flags & PP_FLAGS_DIRTY_PCC) {
+		if (pcc_config->ops & MDP_PP_OPS_WRITE)
+			pp_update_pcc_regs(base, pcc_config);
+
+		if (pcc_config->ops & MDP_PP_OPS_DISABLE)
+			pp_sts->pcc_sts &= ~PP_STS_ENABLE;
+		else if (pcc_config->ops & MDP_PP_OPS_ENABLE)
+			pp_sts->pcc_sts |= PP_STS_ENABLE;
+	}
+}
+
+static void pp_igc_config(unsigned long flags, u32 base,
+				struct pp_sts_type *pp_sts,
+				struct mdp_igc_lut_data *igc_config,
+				u32 pipe_num)
+{
+	u32 tbl_idx;
+	if (flags & PP_FLAGS_DIRTY_IGC) {
+		if (igc_config->ops & MDP_PP_OPS_WRITE)
+			pp_update_igc_lut(igc_config, base, pipe_num);
+
+		if (igc_config->ops & MDP_PP_IGC_FLAG_ROM0) {
+			pp_sts->igc_sts |= PP_STS_ENABLE;
+			tbl_idx = 1;
+		} else if (igc_config->ops & MDP_PP_IGC_FLAG_ROM1) {
+			pp_sts->igc_sts |= PP_STS_ENABLE;
+			tbl_idx = 2;
+		} else {
+			tbl_idx = 0;
+		}
+		pp_sts->igc_tbl_idx = tbl_idx;
+		if (igc_config->ops & MDP_PP_OPS_DISABLE)
+			pp_sts->igc_sts &= ~PP_STS_ENABLE;
+		else if (igc_config->ops & MDP_PP_OPS_ENABLE)
+			pp_sts->igc_sts |= PP_STS_ENABLE;
+	}
+}
+
+static void pp_enhist_config(unsigned long flags, u32 base,
+				struct pp_sts_type *pp_sts,
+				struct mdp_hist_lut_data *enhist_cfg)
+{
+	if (flags & PP_FLAGS_DIRTY_ENHIST) {
+		if (enhist_cfg->ops & MDP_PP_OPS_WRITE)
+			pp_update_hist_lut(base, enhist_cfg);
+
+		if (enhist_cfg->ops & MDP_PP_OPS_DISABLE)
+			pp_sts->enhist_sts &= ~PP_STS_ENABLE;
+		else if (enhist_cfg->ops & MDP_PP_OPS_ENABLE)
+			pp_sts->enhist_sts |= PP_STS_ENABLE;
+	}
+}
+
+/* Note: this function does not validate its input parameters */
+static void pp_sharp_config(char __iomem *base,
+				struct pp_sts_type *pp_sts,
+				struct mdp_sharp_cfg *sharp_config)
+{
+	if (sharp_config->flags & MDP_PP_OPS_WRITE) {
+		writel_relaxed(sharp_config->strength, base);
+		base += 4;
+		writel_relaxed(sharp_config->edge_thr, base);
+		base += 4;
+		writel_relaxed(sharp_config->smooth_thr, base);
+		base += 4;
+		writel_relaxed(sharp_config->noise_thr, base);
+	}
+	if (sharp_config->flags & MDP_PP_OPS_DISABLE)
+		pp_sts->sharp_sts &= ~PP_STS_ENABLE;
+	else if (sharp_config->flags & MDP_PP_OPS_ENABLE)
+		pp_sts->sharp_sts |= PP_STS_ENABLE;
+
+}
+
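+/*
+ * Build the VIG pipe OP_MODE word: CSC enable/in/out occupy bits
+ * 17..19 and picture adjust (PA) bit 4; the CSC matrix itself is only
+ * (re)programmed on the pipe's first play.
+ */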
+static int pp_vig_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+	u32 opmode = 0, base = 0;
+	unsigned long flags = 0;
+
+	pr_debug("pnum=%d\n", pipe->num);
+
+	if ((pipe->flags & MDP_OVERLAY_PP_CFG_EN) &&
+	    (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG)) {
+		opmode |= !!(pipe->pp_cfg.csc_cfg.flags &
+					MDP_CSC_FLAG_ENABLE) << 17;
+		opmode |= !!(pipe->pp_cfg.csc_cfg.flags &
+					MDP_CSC_FLAG_YUV_IN) << 18;
+		opmode |= !!(pipe->pp_cfg.csc_cfg.flags &
+					MDP_CSC_FLAG_YUV_OUT) << 19;
+		/*
+		 * TODO: Allow pipe to be programmed whenever new CSC is
+		 * applied (i.e. dirty bit)
+		 */
+		if (pipe->play_cnt == 0)
+			mdss_mdp_csc_setup_data(MDSS_MDP_BLOCK_SSPP,
+				pipe->num, 1, &pipe->pp_cfg.csc_cfg);
+	} else {
+		if (pipe->src_fmt->is_yuv)
+			opmode |= (0 << 19) |	/* DST_DATA=RGB */
+				  (1 << 18) |	/* SRC_DATA=YCBCR */
+				  (1 << 17);	/* CSC_1_EN */
+		/*
+		 * TODO: Needs to be part of dirty bit logic: if there is a
+		 * previously configured pipe need to re-configure CSC matrix
+		 */
+		if (pipe->play_cnt == 0) {
+			mdss_mdp_csc_setup(MDSS_MDP_BLOCK_SSPP, pipe->num, 1,
+					   MDSS_MDP_CSC_YUV2RGB);
+		}
+	}
+
+	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+		if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_PA_CFG) {
+			flags = PP_FLAGS_DIRTY_PA;
+			base = MDSS_MDP_REG_SSPP_OFFSET(pipe->num) +
+				MDSS_MDP_REG_VIG_PA_BASE;
+			pp_pa_config(flags, base, &pipe->pp_res.pp_sts,
+					&pipe->pp_cfg.pa_cfg);
+
+			if (pipe->pp_res.pp_sts.pa_sts & PP_STS_ENABLE)
+				opmode |= (1 << 4); /* PA_EN */
+		}
+	}
+
+	*op = opmode;
+
+	return 0;
+}
+
+static int mdss_mdp_leading_zero(u32 num)
+{
+	u32 bit = 0x80000000;
+	int i;
+
+	for (i = 0; i < 32; i++) {
+		if (bit & num)
+			return i;
+		bit >>= 1;
+	}
+
+	return i;
+}
+
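+/*
+ * Return src/dst as a fixed-point ratio with f_num fractional bits;
+ * src is pre-shifted by its leading-zero count to keep precision,
+ * e.g. f_num=21, src=1080, dst=720 -> (1080 << 21) / 720 = 0x300000,
+ * i.e. 1.5 in Q21.
+ */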
+static u32 mdss_mdp_scale_phase_step(int f_num, u32 src, u32 dst)
+{
+	u32 val, s;
+	int n;
+
+	n = mdss_mdp_leading_zero(src);
+	if (n > f_num)
+		n = f_num;
+	s = src << n;	/* maximize precision to reduce loss of resolution */
+	val = s / dst;
+	if (n < f_num) {
+		n = f_num - n;
+		val <<= n;
+		val |= ((s % dst) << n) / dst;
+	}
+
+	return val;
+}
+
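+/*
+ * Select scale filters and phase steps per component; for 4:2:0 and
+ * H1V2/H2V1 sources the chroma planes are scaled toward twice the
+ * luma destination size, via the separate C12 phase-step registers.
+ */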
+static int mdss_mdp_scale_setup(struct mdss_mdp_pipe *pipe)
+{
+	u32 scale_config = 0;
+	u32 phasex_step = 0, phasey_step = 0;
+	u32 chroma_sample;
+	u32 filter_mode;
+	struct mdss_data_type *mdata;
+
+	mdata = mdss_mdp_get_mdata();
+	if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102 && pipe->src_fmt->is_yuv)
+		filter_mode = MDSS_MDP_SCALE_FILTER_CA;
+	else
+		filter_mode = MDSS_MDP_SCALE_FILTER_BIL;
+
+	if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA) {
+		if (pipe->dst.h != pipe->src.h || pipe->dst.w != pipe->src.w) {
+			pr_err("no scaling supported on dma pipe\n");
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	}
+
+	chroma_sample = pipe->src_fmt->chroma_sample;
+	if (pipe->flags & MDP_SOURCE_ROTATED_90) {
+		if (chroma_sample == MDSS_MDP_CHROMA_H1V2)
+			chroma_sample = MDSS_MDP_CHROMA_H2V1;
+		else if (chroma_sample == MDSS_MDP_CHROMA_H2V1)
+			chroma_sample = MDSS_MDP_CHROMA_H1V2;
+	}
+
+	if (!(pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_SHARP_CFG)) {
+		pipe->pp_cfg.sharp_cfg.flags = MDP_PP_OPS_ENABLE |
+			MDP_PP_OPS_WRITE;
+		pipe->pp_cfg.sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
+		pipe->pp_cfg.sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
+		pipe->pp_cfg.sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
+		pipe->pp_cfg.sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
+	}
+
+	if ((pipe->src_fmt->is_yuv) &&
+		!((pipe->dst.w < pipe->src.w) || (pipe->dst.h < pipe->src.h))) {
+		pp_sharp_config(pipe->base +
+		   MDSS_MDP_REG_VIG_QSEED2_SHARP,
+		   &pipe->pp_res.pp_sts,
+		   &pipe->pp_cfg.sharp_cfg);
+	}
+
+	if ((pipe->src.h != pipe->dst.h) ||
+	    (pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
+	    (chroma_sample == MDSS_MDP_CHROMA_420) ||
+	    (chroma_sample == MDSS_MDP_CHROMA_H1V2)) {
+		pr_debug("scale y - src_h=%d dst_h=%d\n",
+				pipe->src.h, pipe->dst.h);
+
+		if ((pipe->src.h / MAX_DOWNSCALE_RATIO) > pipe->dst.h) {
+			pr_err("too much downscaling height=%d->%d\n",
+			       pipe->src.h, pipe->dst.h);
+			return -EINVAL;
+		}
+
+		scale_config |= MDSS_MDP_SCALEY_EN;
+
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+			u32 chr_dst_h = pipe->dst.h;
+			if ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+			    (chroma_sample == MDSS_MDP_CHROMA_H1V2))
+				chr_dst_h *= 2;	/* 2x upsample chroma */
+
+			if (pipe->src.h <= pipe->dst.h) {
+				scale_config |= /* G/Y, A */
+					(filter_mode << 10) |
+					(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
+			} else
+				scale_config |= /* G/Y, A */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
+					(MDSS_MDP_SCALE_FILTER_PCMN << 18);
+
+			if (pipe->src.h <= chr_dst_h)
+				scale_config |= /* CrCb */
+					(MDSS_MDP_SCALE_FILTER_BIL << 14);
+			else
+				scale_config |= /* CrCb */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 14);
+
+			phasey_step = mdss_mdp_scale_phase_step(
+				PHASE_STEP_SHIFT, pipe->src.h, chr_dst_h);
+
+			writel_relaxed(phasey_step, pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPY);
+		} else {
+			if (pipe->src.h <= pipe->dst.h)
+				scale_config |= /* RGB, A */
+					(MDSS_MDP_SCALE_FILTER_BIL << 10) |
+					(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
+			else
+				scale_config |= /* RGB, A */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 10) |
+					(MDSS_MDP_SCALE_FILTER_NEAREST << 18);
+		}
+
+		phasey_step = mdss_mdp_scale_phase_step(
+			PHASE_STEP_SHIFT, pipe->src.h, pipe->dst.h);
+	}
+
+	if ((pipe->src.w != pipe->dst.w) ||
+	    (pipe->pp_res.pp_sts.sharp_sts & PP_STS_ENABLE) ||
+	    (chroma_sample == MDSS_MDP_CHROMA_420) ||
+	    (chroma_sample == MDSS_MDP_CHROMA_H2V1)) {
+		pr_debug("scale x - src_w=%d dst_w=%d\n",
+				pipe->src.w, pipe->dst.w);
+
+		if ((pipe->src.w / MAX_DOWNSCALE_RATIO) > pipe->dst.w) {
+			pr_err("too much downscaling width=%d->%d\n",
+			       pipe->src.w, pipe->dst.w);
+			return -EINVAL;
+		}
+
+		scale_config |= MDSS_MDP_SCALEX_EN;
+
+		if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) {
+			u32 chr_dst_w = pipe->dst.w;
+
+			if ((chroma_sample == MDSS_MDP_CHROMA_420) ||
+			    (chroma_sample == MDSS_MDP_CHROMA_H2V1))
+				chr_dst_w *= 2;	/* 2x upsample chroma */
+
+			if (pipe->src.w <= pipe->dst.w) {
+				scale_config |= /* G/Y, A */
+					(filter_mode << 8) |
+					(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
+			} else
+				scale_config |= /* G/Y, A */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
+					(MDSS_MDP_SCALE_FILTER_PCMN << 16);
+
+			if (pipe->src.w <= chr_dst_w)
+				scale_config |= /* CrCb */
+					(MDSS_MDP_SCALE_FILTER_BIL << 12);
+			else
+				scale_config |= /* CrCb */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 12);
+
+			phasex_step = mdss_mdp_scale_phase_step(
+				PHASE_STEP_SHIFT, pipe->src.w, chr_dst_w);
+			writel_relaxed(phasex_step, pipe->base +
+				MDSS_MDP_REG_VIG_QSEED2_C12_PHASESTEPX);
+		} else {
+			if (pipe->src.w <= pipe->dst.w)
+				scale_config |= /* RGB, A */
+					(MDSS_MDP_SCALE_FILTER_BIL << 8) |
+					(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
+			else
+				scale_config |= /* RGB, A */
+					(MDSS_MDP_SCALE_FILTER_PCMN << 8) |
+					(MDSS_MDP_SCALE_FILTER_NEAREST << 16);
+		}
+
+		phasex_step = mdss_mdp_scale_phase_step(
+			PHASE_STEP_SHIFT, pipe->src.w, pipe->dst.w);
+	}
+
+	writel_relaxed(scale_config, pipe->base +
+	   MDSS_MDP_REG_SCALE_CONFIG);
+	writel_relaxed(phasex_step, pipe->base +
+	   MDSS_MDP_REG_SCALE_PHASE_STEP_X);
+	writel_relaxed(phasey_step, pipe->base +
+	   MDSS_MDP_REG_SCALE_PHASE_STEP_Y);
+	return 0;
+}
+
+int mdss_mdp_pipe_pp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+	int ret = 0;
+	if (!pipe)
+		return -ENODEV;
+
+	ret = mdss_mdp_scale_setup(pipe);
+	if (ret)
+		return -EINVAL;
+
+	if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)
+		ret = pp_vig_pipe_setup(pipe, op);
+
+	return ret;
+}
+
+void mdss_mdp_pipe_sspp_term(struct mdss_mdp_pipe *pipe)
+{
+	memset(&pipe->pp_cfg, 0, sizeof(struct mdp_overlay_pp_params));
+	memset(&pipe->pp_res, 0, sizeof(struct mdss_pipe_pp_res));
+}
+
+int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
+{
+	int ret = 0;
+	unsigned long flags = 0;
+	u32 pipe_base;
+	u32 pipe_num;
+
+	if (pipe == NULL)
+		return -EINVAL;
+
+	/*
+	 * TODO: should this function be responsible for masking multiple
+	 * pipes to be written in dual pipe case?
+	 * if so, requires rework of update_igc_lut
+	 */
+	switch (pipe->type) {
+	case MDSS_MDP_PIPE_TYPE_VIG:
+		pipe_base = MDSS_MDP_REG_IGC_VIG_BASE;
+		pipe_num = pipe->num - MDSS_MDP_SSPP_VIG0;
+		break;
+	case MDSS_MDP_PIPE_TYPE_RGB:
+		pipe_base = MDSS_MDP_REG_IGC_RGB_BASE;
+		pipe_num = pipe->num - MDSS_MDP_SSPP_RGB0;
+		break;
+	case MDSS_MDP_PIPE_TYPE_DMA:
+		pipe_base = MDSS_MDP_REG_IGC_DMA_BASE;
+		pipe_num = pipe->num - MDSS_MDP_SSPP_DMA0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG) {
+		flags |= PP_FLAGS_DIRTY_IGC;
+		pp_igc_config(flags, pipe_base, &pipe->pp_res.pp_sts,
+					&pipe->pp_cfg.igc_cfg, pipe_num);
+	}
+
+	if (pipe->pp_res.pp_sts.igc_sts & PP_STS_ENABLE)
+		*op |= (1 << 16); /* IGC_LUT_EN */
+
+	return ret;
+}
+
+static int pp_mixer_setup(u32 disp_num, struct mdss_mdp_ctl *ctl,
+		struct mdss_mdp_mixer *mixer)
+{
+	u32 flags, offset, dspp_num, opmode = 0;
+	struct mdp_pgc_lut_data *pgc_config;
+	struct pp_sts_type *pp_sts;
+
+	dspp_num = mixer->num;
+
+	/* no corresponding dspp */
+	if ((mixer->type != MDSS_MDP_MIXER_TYPE_INTF) ||
+		(dspp_num >= MDSS_MDP_MAX_DSPP))
+		return 0;
+	if (disp_num < MDSS_BLOCK_DISP_NUM)
+		flags = mdss_pp_res->pp_disp_flags[disp_num];
+	else
+		flags = 0;
+
+	pp_sts = &mdss_pp_res->pp_dspp_sts[dspp_num];
+	/* GC_LUT is in layer mixer */
+	if (flags & PP_FLAGS_DIRTY_ARGC) {
+		pgc_config = &mdss_pp_res->argc_disp_cfg[disp_num];
+		if (pgc_config->flags & MDP_PP_OPS_WRITE) {
+			offset = MDSS_MDP_REG_LM_OFFSET(dspp_num) +
+				MDSS_MDP_REG_LM_GC_LUT_BASE;
+			pp_update_argc_lut(offset, pgc_config);
+		}
+		if (pgc_config->flags & MDP_PP_OPS_DISABLE)
+			pp_sts->argc_sts &= ~PP_STS_ENABLE;
+		else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
+			pp_sts->argc_sts |= PP_STS_ENABLE;
+		ctl->flush_bits |= BIT(6) << dspp_num; /* LAYER_MIXER */
+	}
+	/* update LM opmode if LM needs flush */
+	if ((pp_sts->argc_sts & PP_STS_ENABLE) &&
+		(ctl->flush_bits & (BIT(6) << dspp_num))) {
+		offset = MDSS_MDP_REG_LM_OFFSET(dspp_num) +
+			MDSS_MDP_REG_LM_OP_MODE;
+		opmode = MDSS_MDP_REG_READ(offset);
+		opmode |= (1 << 0); /* GC_LUT_EN */
+		MDSS_MDP_REG_WRITE(offset, opmode);
+	}
+	return 0;
+}
+
+static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_ctl *ctl,
+				struct mdss_mdp_mixer *mixer)
+{
+	u32 flags, base, offset, dspp_num, opmode = 0;
+	struct mdp_dither_cfg_data *dither_cfg;
+	struct pp_hist_col_info *hist_info;
+	struct mdp_pgc_lut_data *pgc_config;
+	struct pp_sts_type *pp_sts;
+	u32 data, col_state;
+	unsigned long flag;
+	int i, ret = 0;
+
+	if (!mixer || !ctl)
 		return -EINVAL;
 
 	dspp_num = mixer->num;
+	/* no corresponding dspp */
+	if ((mixer->type != MDSS_MDP_MIXER_TYPE_INTF) ||
+		(dspp_num >= MDSS_MDP_MAX_DSPP))
+		return -EINVAL;
+	base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num);
+	hist_info = &mdss_pp_res->dspp_hist[dspp_num];
 
-	ctl->flush_bits |= BIT(13 + dspp_num);	/* DSPP */
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+	if (hist_info->col_en) {
+		/* HIST_EN & AUTO_CLEAR */
+		opmode |= (1 << 16) | (1 << 17);
+		mutex_lock(&mdss_mdp_hist_mutex);
+		spin_lock_irqsave(&mdss_hist_lock, flag);
+		col_state = hist_info->col_state;
+		if (hist_info->is_kick_ready &&
+				((col_state == HIST_IDLE) ||
+				((false == hist_info->read_request) &&
+						col_state == HIST_READY))) {
+			/* Kick off collection */
+			MDSS_MDP_REG_WRITE(base +
+				MDSS_MDP_REG_DSPP_HIST_CTL_BASE, 1);
+			hist_info->col_state = HIST_START;
+		}
+		spin_unlock_irqrestore(&mdss_hist_lock, flag);
+		mutex_unlock(&mdss_mdp_hist_mutex);
+	}
+
+	if (disp_num < MDSS_BLOCK_DISP_NUM)
+		flags = mdss_pp_res->pp_disp_flags[disp_num];
+	else
+		flags = 0;
+
+	/* nothing to update */
+	if ((!flags) && (!(hist_info->col_en)))
+		goto dspp_exit;
+
+	pp_sts = &mdss_pp_res->pp_dspp_sts[dspp_num];
+
+	pp_pa_config(flags, base + MDSS_MDP_REG_DSPP_PA_BASE, pp_sts,
+					&mdss_pp_res->pa_disp_cfg[disp_num]);
+
+	pp_pcc_config(flags, base + MDSS_MDP_REG_DSPP_PCC_BASE, pp_sts,
+					&mdss_pp_res->pcc_disp_cfg[disp_num]);
+
+	pp_igc_config(flags, MDSS_MDP_REG_IGC_DSPP_BASE, pp_sts,
+				&mdss_pp_res->igc_disp_cfg[disp_num], dspp_num);
+
+	pp_enhist_config(flags, base + MDSS_MDP_REG_DSPP_HIST_LUT_BASE,
+			pp_sts, &mdss_pp_res->enhist_disp_cfg[disp_num]);
+
+	if (pp_sts->pa_sts & PP_STS_ENABLE)
+		opmode |= (1 << 20); /* PA_EN */
+
+	if (pp_sts->pcc_sts & PP_STS_ENABLE)
+		opmode |= (1 << 4); /* PCC_EN */
+
+	if (pp_sts->igc_sts & PP_STS_ENABLE) {
+		opmode |= (1 << 0) | /* IGC_LUT_EN */
+			      (pp_sts->igc_tbl_idx << 1);
+	}
+
+	if (pp_sts->enhist_sts & PP_STS_ENABLE) {
+		opmode |= (1 << 19) | /* HIST_LUT_EN */
+				  (1 << 20); /* PA_EN */
+		if (!(pp_sts->pa_sts & PP_STS_ENABLE)) {
+			/* Program default value */
+			offset = base + MDSS_MDP_REG_DSPP_PA_BASE;
+			MDSS_MDP_REG_WRITE(offset, 0);
+			MDSS_MDP_REG_WRITE(offset + 4, 0);
+			MDSS_MDP_REG_WRITE(offset + 8, 0);
+			MDSS_MDP_REG_WRITE(offset + 12, 0);
+		}
+	}
+	if (flags & PP_FLAGS_DIRTY_DITHER) {
+		dither_cfg = &mdss_pp_res->dither_disp_cfg[disp_num];
+		if (dither_cfg->flags & MDP_PP_OPS_WRITE) {
+			offset = base + MDSS_MDP_REG_DSPP_DITHER_DEPTH;
+			MDSS_MDP_REG_WRITE(offset,
+			  dither_depth_map[dither_cfg->g_y_depth] |
+			  (dither_depth_map[dither_cfg->b_cb_depth] << 2) |
+			  (dither_depth_map[dither_cfg->r_cr_depth] << 4));
+			offset += 0x14;
+			for (i = 0; i < 16; i += 4) {
+				data = dither_matrix[i] |
+					(dither_matrix[i + 1] << 4) |
+					(dither_matrix[i + 2] << 8) |
+					(dither_matrix[i + 3] << 12);
+				MDSS_MDP_REG_WRITE(offset, data);
+				offset += 4;
+			}
+		}
+		if (dither_cfg->flags & MDP_PP_OPS_DISABLE)
+			pp_sts->dither_sts &= ~PP_STS_ENABLE;
+		else if (dither_cfg->flags & MDP_PP_OPS_ENABLE)
+			pp_sts->dither_sts |= PP_STS_ENABLE;
+	}
+	if (pp_sts->dither_sts & PP_STS_ENABLE)
+		opmode |= (1 << 8); /* DITHER_EN */
+	if (flags & PP_FLAGS_DIRTY_GAMUT)
+		pp_gamut_config(&mdss_pp_res->gamut_disp_cfg[disp_num], base,
+				pp_sts);
+	if (pp_sts->gamut_sts & PP_STS_ENABLE) {
+		opmode |= (1 << 23); /* GAMUT_EN */
+		if (pp_sts->gamut_sts & PP_STS_GAMUT_FIRST)
+			opmode |= (1 << 24); /* GAMUT_ORDER */
+	}
+
+	if (flags & PP_FLAGS_DIRTY_PGC) {
+		pgc_config = &mdss_pp_res->pgc_disp_cfg[disp_num];
+		if (pgc_config->flags & MDP_PP_OPS_WRITE) {
+			offset = base + MDSS_MDP_REG_DSPP_GC_BASE;
+			pp_update_argc_lut(offset, pgc_config);
+		}
+		if (pgc_config->flags & MDP_PP_OPS_DISABLE)
+			pp_sts->pgc_sts &= ~PP_STS_ENABLE;
+		else if (pgc_config->flags & MDP_PP_OPS_ENABLE)
+			pp_sts->pgc_sts |= PP_STS_ENABLE;
+	}
+	if (pp_sts->pgc_sts & PP_STS_ENABLE)
+		opmode |= (1 << 22);
+
+	MDSS_MDP_REG_WRITE(base + MDSS_MDP_REG_DSPP_OP_MODE, opmode);
+	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_FLUSH, BIT(13 + dspp_num));
+	wmb();
+dspp_exit:
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	return ret;
+}
+
+int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl)
+{
+	int ret = 0;
+
+	if ((!ctl->mfd) || (!mdss_pp_res))
+		return -EINVAL;
+
+	/*
+	 * TODO: have some sort of reader/writer lock to prevent unclocked
+	 * access while display power is toggled
+	 */
+	if (!ctl->mfd->panel_power_on) {
+		ret = -EPERM;
+		goto error;
+	}
+	mutex_lock(&ctl->mfd->lock);
+	ret = mdss_mdp_pp_setup_locked(ctl);
+	mutex_unlock(&ctl->mfd->lock);
+error:
+	return ret;
+}
+
+/* call only while holding mfd->lock */
+int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl)
+{
+	u32 disp_num;
+	if ((!ctl->mfd) || (!mdss_pp_res))
+		return -EINVAL;
+
+	/* treat fb_num the same as block logical id */
+	disp_num = ctl->mfd->index;
+
+	mutex_lock(&mdss_pp_mutex);
+	if (ctl->mixer_left) {
+		pp_mixer_setup(disp_num, ctl, ctl->mixer_left);
+		pp_dspp_setup(disp_num, ctl, ctl->mixer_left);
+	}
+	if (ctl->mixer_right) {
+		pp_mixer_setup(disp_num, ctl, ctl->mixer_right);
+		pp_dspp_setup(disp_num, ctl, ctl->mixer_right);
+	}
+	/* clear dirty flag */
+	if (disp_num < MDSS_BLOCK_DISP_NUM)
+		mdss_pp_res->pp_disp_flags[disp_num] = 0;
+	mutex_unlock(&mdss_pp_mutex);
 
 	return 0;
 }
+
+/*
+ * Set dirty and write bits on features that were enabled so they will be
+ * reconfigured
+ */
+int mdss_mdp_pp_resume(u32 mixer_num)
+{
+	u32 flags = 0;
+	struct pp_sts_type pp_sts;
+
+	if (mixer_num >= MDSS_MDP_MAX_DSPP) {
+		pr_warn("invalid mixer_num\n");
+		return -EINVAL;
+	}
+
+	pp_sts = mdss_pp_res->pp_dspp_sts[mixer_num];
+
+	if (pp_sts.pa_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_PA;
+		if (!(mdss_pp_res->pa_disp_cfg[mixer_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->pa_disp_cfg[mixer_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.pcc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_PCC;
+		if (!(mdss_pp_res->pcc_disp_cfg[mixer_num].ops
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->pcc_disp_cfg[mixer_num].ops |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.igc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_IGC;
+		if (!(mdss_pp_res->igc_disp_cfg[mixer_num].ops
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->igc_disp_cfg[mixer_num].ops |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.argc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_ARGC;
+		if (!(mdss_pp_res->argc_disp_cfg[mixer_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->argc_disp_cfg[mixer_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.enhist_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_ENHIST;
+		if (!(mdss_pp_res->enhist_disp_cfg[mixer_num].ops
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->enhist_disp_cfg[mixer_num].ops |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.dither_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_DITHER;
+		if (!(mdss_pp_res->dither_disp_cfg[mixer_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->dither_disp_cfg[mixer_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.gamut_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_GAMUT;
+		if (!(mdss_pp_res->gamut_disp_cfg[mixer_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->gamut_disp_cfg[mixer_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+	if (pp_sts.pgc_sts & PP_STS_ENABLE) {
+		flags |= PP_FLAGS_DIRTY_PGC;
+		if (!(mdss_pp_res->pgc_disp_cfg[mixer_num].flags
+					& MDP_PP_OPS_DISABLE))
+			mdss_pp_res->pgc_disp_cfg[mixer_num].flags |=
+				MDP_PP_OPS_WRITE;
+	}
+
+	mdss_pp_res->pp_disp_flags[mixer_num] = flags;
+	return 0;
+}
+
+int mdss_mdp_pp_init(struct device *dev)
+{
+	int ret = 0;
+
+	mutex_lock(&mdss_pp_mutex);
+	if (!mdss_pp_res) {
+		mdss_pp_res = devm_kzalloc(dev, sizeof(*mdss_pp_res),
+				GFP_KERNEL);
+		if (mdss_pp_res == NULL) {
+			pr_err("%s: mdss_pp_res allocation failed!\n", __func__);
+			ret = -ENOMEM;
+		}
+	}
+	mutex_unlock(&mdss_pp_mutex);
+	return ret;
+}
+
+void mdss_mdp_pp_term(struct device *dev)
+{
+	if (mdss_pp_res) {
+		mutex_lock(&mdss_pp_mutex);
+		devm_kfree(dev, mdss_pp_res);
+		mdss_pp_res = NULL;
+		mutex_unlock(&mdss_pp_mutex);
+	}
+}
+
+static int pp_get_dspp_num(u32 disp_num, u32 *dspp_num)
+{
+	int i;
+	u32 mixer_cnt;
+	u32 mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+
+	if (!mixer_cnt)
+		return -EPERM;
+
+	/* only read the first mixer */
+	for (i = 0; i < mixer_cnt; i++) {
+		if (mixer_id[i] < MDSS_MDP_MAX_DSPP)
+			break;
+	}
+	if (i >= mixer_cnt)
+		return -EPERM;
+	*dspp_num = mixer_id[i];
+	return 0;
+}
+
+int mdss_mdp_pa_config(struct mdss_mdp_ctl *ctl, struct mdp_pa_cfg_data *config,
+			u32 *copyback)
+{
+	int ret = 0;
+	u32 pa_offset, disp_num, dspp_num = 0;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX))
+		return -EINVAL;
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->pa_data.flags & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto pa_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+		pa_offset = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
+			  MDSS_MDP_REG_DSPP_PA_BASE;
+		config->pa_data.hue_adj = MDSS_MDP_REG_READ(pa_offset);
+		pa_offset += 4;
+		config->pa_data.sat_adj = MDSS_MDP_REG_READ(pa_offset);
+		pa_offset += 4;
+		config->pa_data.val_adj = MDSS_MDP_REG_READ(pa_offset);
+		pa_offset += 4;
+		config->pa_data.cont_adj = MDSS_MDP_REG_READ(pa_offset);
+		*copyback = 1;
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	} else {
+		mdss_pp_res->pa_disp_cfg[disp_num] = config->pa_data;
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PA;
+	}
+
+pa_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	if (!ret)
+		mdss_mdp_pp_setup(ctl);
+	return ret;
+}
+
+static void pp_read_pcc_regs(u32 offset,
+				struct mdp_pcc_cfg_data *cfg_ptr)
+{
+	cfg_ptr->r.c = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.c = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.c = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.r = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.r = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.r = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.g = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.g = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.g = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.b = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.b = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.b = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.rr = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.rr = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.rr = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.rg = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.rg = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.rg = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.rb = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.rb = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.rb = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.gg = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.gg = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.gg = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.gb = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.gb = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.gb = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.bb = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.bb = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.bb = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.rgb_0 = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.rgb_0 = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.rgb_0 = MDSS_MDP_REG_READ(offset + 8);
+	offset += 0x10;
+
+	cfg_ptr->r.rgb_1 = MDSS_MDP_REG_READ(offset);
+	cfg_ptr->g.rgb_1 = MDSS_MDP_REG_READ(offset + 4);
+	cfg_ptr->b.rgb_1 = MDSS_MDP_REG_READ(offset + 8);
+}
+
+static void pp_update_pcc_regs(u32 offset,
+				struct mdp_pcc_cfg_data *cfg_ptr)
+{
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.c);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.c);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.c);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.r);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.r);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.r);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.g);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.g);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.g);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.b);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.b);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.b);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.rr);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.rr);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.rr);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.rg);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.rg);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.rg);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.rb);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.rb);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.rb);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.gg);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.gg);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.gg);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.gb);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.gb);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.gb);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.bb);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.bb);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.bb);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.rgb_0);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.rgb_0);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.rgb_0);
+	offset += 0x10;
+
+	MDSS_MDP_REG_WRITE(offset, cfg_ptr->r.rgb_1);
+	MDSS_MDP_REG_WRITE(offset + 4, cfg_ptr->g.rgb_1);
+	MDSS_MDP_REG_WRITE(offset + 8, cfg_ptr->b.rgb_1);
+}
+
+int mdss_mdp_pcc_config(struct mdss_mdp_ctl *ctl,
+					struct mdp_pcc_cfg_data *config,
+					u32 *copyback)
+{
+	int ret = 0;
+	u32 base, disp_num, dspp_num = 0;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX))
+		return -EINVAL;
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto pcc_config_exit;
+		}
+
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+		base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
+			  MDSS_MDP_REG_DSPP_PCC_BASE;
+		pp_read_pcc_regs(base, config);
+		*copyback = 1;
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	} else {
+		mdss_pp_res->pcc_disp_cfg[disp_num] = *config;
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_PCC;
+	}
+
+pcc_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	if (!ret)
+		mdss_mdp_pp_setup(ctl);
+	return ret;
+}
+
+static void pp_read_igc_lut(struct mdp_igc_lut_data *cfg,
+				u32 offset, u32 blk_idx)
+{
+	int i;
+	u32 data;
+
+	/* INDEX_UPDATE & VALUE_UPDATEN */
+	data = (3 << 24) | (((~(1 << blk_idx)) & 0x7) << 28);
+	MDSS_MDP_REG_WRITE(offset, data);
+
+	for (i = 0; i < cfg->len; i++)
+		cfg->c0_c1_data[i] = MDSS_MDP_REG_READ(offset) & 0xFFF;
+
+	offset += 0x4;
+	MDSS_MDP_REG_WRITE(offset, data);
+	for (i = 0; i < cfg->len; i++)
+		cfg->c0_c1_data[i] |= (MDSS_MDP_REG_READ(offset) & 0xFFF) << 16;
+
+	offset += 0x4;
+	MDSS_MDP_REG_WRITE(offset, data);
+	for (i = 0; i < cfg->len; i++)
+		cfg->c2_data[i] = MDSS_MDP_REG_READ(offset) & 0xFFF;
+}
+
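+/*
+ * The top bits of each IGC register appear to steer the update: the
+ * 3-bit field at [30:28] is an active-low pipe select (only blk_idx's
+ * bit cleared) and bit 25 pulses INDEX_UPDATE on the first write so
+ * the hardware restarts at entry 0 and auto-increments afterwards.
+ */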
+static void pp_update_igc_lut(struct mdp_igc_lut_data *cfg,
+				u32 offset, u32 blk_idx)
+{
+	int i;
+	u32 data;
+	/* INDEX_UPDATE */
+	data = (1 << 25) | (((~(1 << blk_idx)) & 0x7) << 28);
+	MDSS_MDP_REG_WRITE(offset, (cfg->c0_c1_data[0] & 0xFFF) | data);
+
+	/* disable index update */
+	data &= ~(1 << 25);
+	for (i = 1; i < cfg->len; i++)
+		MDSS_MDP_REG_WRITE(offset, (cfg->c0_c1_data[i] & 0xFFF) | data);
+
+	offset += 0x4;
+	data |= (1 << 25);
+	MDSS_MDP_REG_WRITE(offset, ((cfg->c0_c1_data[0] >> 16) & 0xFFF) | data);
+	data &= ~(1 << 25);
+	for (i = 1; i < cfg->len; i++)
+		MDSS_MDP_REG_WRITE(offset,
+		((cfg->c0_c1_data[i] >> 16) & 0xFFF) | data);
+
+	offset += 0x4;
+	data |= (1 << 25);
+	MDSS_MDP_REG_WRITE(offset, (cfg->c2_data[0] & 0xFFF) | data);
+	data &= ~(1 << 25);
+	for (i = 1; i < cfg->len; i++)
+		MDSS_MDP_REG_WRITE(offset, (cfg->c2_data[i] & 0xFFF) | data);
+}
+
+int mdss_mdp_igc_lut_config(struct mdss_mdp_ctl *ctl,
+					struct mdp_igc_lut_data *config,
+					u32 *copyback)
+{
+	int ret = 0;
+	u32 tbl_idx, igc_offset, disp_num, dspp_num = 0;
+	struct mdp_igc_lut_data local_cfg;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX))
+		return -EINVAL;
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto igc_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+		if (config->ops & MDP_PP_IGC_FLAG_ROM0)
+			tbl_idx = 1;
+		else if (config->ops & MDP_PP_IGC_FLAG_ROM1)
+			tbl_idx = 2;
+		else
+			tbl_idx = 0;
+		igc_offset = MDSS_MDP_REG_IGC_DSPP_BASE + (0x10 * tbl_idx);
+		local_cfg = *config;
+		local_cfg.c0_c1_data =
+			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
+		local_cfg.c2_data =
+			&mdss_pp_res->igc_lut_c2[disp_num][0];
+		pp_read_igc_lut(&local_cfg, igc_offset, dspp_num);
+		if (copy_to_user(config->c0_c1_data, local_cfg.c0_c1_data,
+			config->len * sizeof(u32))) {
+			ret = -EFAULT;
+			goto igc_config_exit;
+		}
+		if (copy_to_user(config->c2_data, local_cfg.c2_data,
+			config->len * sizeof(u32))) {
+			ret = -EFAULT;
+			goto igc_config_exit;
+		}
+		*copyback = 1;
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	} else {
+		if (copy_from_user(&mdss_pp_res->igc_lut_c0c1[disp_num][0],
+			config->c0_c1_data, config->len * sizeof(u32))) {
+			ret = -EFAULT;
+			goto igc_config_exit;
+		}
+		if (copy_from_user(&mdss_pp_res->igc_lut_c2[disp_num][0],
+			config->c2_data, config->len * sizeof(u32))) {
+			ret = -EFAULT;
+			goto igc_config_exit;
+		}
+		mdss_pp_res->igc_disp_cfg[disp_num] = *config;
+		mdss_pp_res->igc_disp_cfg[disp_num].c0_c1_data =
+			&mdss_pp_res->igc_lut_c0c1[disp_num][0];
+		mdss_pp_res->igc_disp_cfg[disp_num].c2_data =
+			&mdss_pp_res->igc_lut_c2[disp_num][0];
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_IGC;
+	}
+
+igc_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	if (!ret)
+		mdss_mdp_pp_setup(ctl);
+	return ret;
+}
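+
+/*
+ * The GC LUT index register reports its current position in bits
+ * [19:16]; entries are written starting there and wrapped so each of
+ * the GC_LUT_SEGMENTS entries is programmed exactly once.
+ */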
+static void pp_update_gc_one_lut(u32 offset,
+		struct mdp_ar_gc_lut_data *lut_data)
+{
+	int i, start_idx;
+
+	start_idx = (MDSS_MDP_REG_READ(offset) >> 16) & 0xF;
+	for (i = start_idx; i < GC_LUT_SEGMENTS; i++)
+		MDSS_MDP_REG_WRITE(offset, lut_data[i].x_start);
+	for (i = 0; i < start_idx; i++)
+		MDSS_MDP_REG_WRITE(offset, lut_data[i].x_start);
+	offset += 4;
+	start_idx = (MDSS_MDP_REG_READ(offset) >> 16) & 0xF;
+	for (i = start_idx; i < GC_LUT_SEGMENTS; i++)
+		MDSS_MDP_REG_WRITE(offset, lut_data[i].slope);
+	for (i = 0; i < start_idx; i++)
+		MDSS_MDP_REG_WRITE(offset, lut_data[i].slope);
+	offset += 4;
+	start_idx = (MDSS_MDP_REG_READ(offset) >> 16) & 0xF;
+	for (i = start_idx; i < GC_LUT_SEGMENTS; i++)
+		MDSS_MDP_REG_WRITE(offset, lut_data[i].offset);
+	for (i = 0; i < start_idx; i++)
+		MDSS_MDP_REG_WRITE(offset, lut_data[i].offset);
+}
+
+static void pp_update_argc_lut(u32 offset, struct mdp_pgc_lut_data *config)
+{
+	pp_update_gc_one_lut(offset, config->r_data);
+	offset += 0x10;
+	pp_update_gc_one_lut(offset, config->g_data);
+	offset += 0x10;
+	pp_update_gc_one_lut(offset, config->b_data);
+}
+
+static void pp_read_gc_one_lut(u32 offset,
+		struct mdp_ar_gc_lut_data *gc_data)
+{
+	int i, start_idx, data;
+	data = MDSS_MDP_REG_READ(offset);
+	start_idx = (data >> 16) & 0xF;
+	gc_data[start_idx].x_start = data & 0xFFF;
+
+	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
+		data = MDSS_MDP_REG_READ(offset);
+		gc_data[i].x_start = data & 0xFFF;
+	}
+	for (i = 0; i < start_idx; i++) {
+		data = MDSS_MDP_REG_READ(offset);
+		gc_data[i].x_start = data & 0xFFF;
+	}
+
+	offset += 4;
+	data = MDSS_MDP_REG_READ(offset);
+	start_idx = (data >> 16) & 0xF;
+	gc_data[start_idx].slope = data & 0x7FFF;
+	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
+		data = MDSS_MDP_REG_READ(offset);
+		gc_data[i].slope = data & 0x7FFF;
+	}
+	for (i = 0; i < start_idx; i++) {
+		data = MDSS_MDP_REG_READ(offset);
+		gc_data[i].slope = data & 0x7FFF;
+	}
+	offset += 4;
+	data = MDSS_MDP_REG_READ(offset);
+	start_idx = (data >> 16) & 0xF;
+	gc_data[start_idx].offset = data & 0x7FFF;
+	for (i = start_idx + 1; i < GC_LUT_SEGMENTS; i++) {
+		data = MDSS_MDP_REG_READ(offset);
+		gc_data[i].offset = data & 0x7FFF;
+	}
+	for (i = 0; i < start_idx; i++) {
+		data = MDSS_MDP_REG_READ(offset);
+		gc_data[i].offset = data & 0x7FFF;
+	}
+}
+
+static int pp_read_argc_lut(struct mdp_pgc_lut_data *config, u32 offset)
+{
+	int ret = 0;
+	pp_read_gc_one_lut(offset, config->r_data);
+	offset += 0x10;
+	pp_read_gc_one_lut(offset, config->g_data);
+	offset += 0x10;
+	pp_read_gc_one_lut(offset, config->b_data);
+	return ret;
+}
+
+/* Note: Assumes that its inputs have been checked by calling function */
+static void pp_update_hist_lut(u32 offset, struct mdp_hist_lut_data *cfg)
+{
+	int i;
+	for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
+		MDSS_MDP_REG_WRITE(offset, cfg->data[i]);
+	/* swap */
+	MDSS_MDP_REG_WRITE(offset + 4, 1);
+}
+
+int mdss_mdp_argc_config(struct mdss_mdp_ctl *ctl,
+				struct mdp_pgc_lut_data *config,
+				u32 *copyback)
+{
+	int ret = 0;
+	u32 argc_offset = 0, disp_num, dspp_num = 0;
+	struct mdp_pgc_lut_data local_cfg;
+	struct mdp_pgc_lut_data *pgc_ptr;
+	u32 tbl_size;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if ((PP_BLOCK(config->block) < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(PP_BLOCK(config->block) >= MDP_BLOCK_MAX))
+		return -EINVAL;
+
+	mutex_lock(&mdss_pp_mutex);
+
+	disp_num = PP_BLOCK(config->block) - MDP_LOGICAL_BLOCK_DISP_0;
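+	/* FIXME: dspp_num is still 0 here; pp_get_dspp_num() only resolves
+	 * it in the READ path below, so these offsets assume DSPP/LM 0 */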
+	switch (config->block & MDSS_PP_LOCATION_MASK) {
+	case MDSS_PP_LM_CFG:
+		argc_offset = MDSS_MDP_REG_LM_OFFSET(dspp_num) +
+			MDSS_MDP_REG_LM_GC_LUT_BASE;
+		pgc_ptr = &mdss_pp_res->argc_disp_cfg[disp_num];
+		mdss_pp_res->pp_disp_flags[disp_num] |=
+			PP_FLAGS_DIRTY_ARGC;
+		break;
+	case MDSS_PP_DSPP_CFG:
+		argc_offset = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
+					MDSS_MDP_REG_DSPP_GC_BASE;
+		pgc_ptr = &mdss_pp_res->pgc_disp_cfg[disp_num];
+		mdss_pp_res->pp_disp_flags[disp_num] |=
+			PP_FLAGS_DIRTY_PGC;
+		break;
+	default:
+		goto argc_config_exit;
+		break;
+	}
+
+	tbl_size = GC_LUT_SEGMENTS * sizeof(struct mdp_ar_gc_lut_data);
+
+	if (config->flags & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto argc_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+		local_cfg = *config;
+		local_cfg.r_data =
+			&mdss_pp_res->gc_lut_r[disp_num][0];
+		local_cfg.g_data =
+			&mdss_pp_res->gc_lut_g[disp_num][0];
+		local_cfg.b_data =
+			&mdss_pp_res->gc_lut_b[disp_num][0];
+		pp_read_argc_lut(&local_cfg, argc_offset);
+		if (copy_to_user(config->r_data,
+			&mdss_pp_res->gc_lut_r[disp_num][0], tbl_size)) {
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		if (copy_to_user(config->g_data,
+			&mdss_pp_res->gc_lut_g[disp_num][0], tbl_size)) {
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		if (copy_to_user(config->b_data,
+			&mdss_pp_res->gc_lut_b[disp_num][0], tbl_size)) {
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		*copyback = 1;
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	} else {
+		if (copy_from_user(&mdss_pp_res->gc_lut_r[disp_num][0],
+			config->r_data, tbl_size)) {
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		if (copy_from_user(&mdss_pp_res->gc_lut_g[disp_num][0],
+			config->g_data, tbl_size)) {
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+		if (copy_from_user(&mdss_pp_res->gc_lut_b[disp_num][0],
+			config->b_data, tbl_size)) {
+			ret = -EFAULT;
+			goto argc_config_exit;
+		}
+
+		*pgc_ptr = *config;
+		pgc_ptr->r_data =
+			&mdss_pp_res->gc_lut_r[disp_num][0];
+		pgc_ptr->g_data =
+			&mdss_pp_res->gc_lut_g[disp_num][0];
+		pgc_ptr->b_data =
+			&mdss_pp_res->gc_lut_b[disp_num][0];
+	}
+argc_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	if (!ret)
+		mdss_mdp_pp_setup(ctl);
+	return ret;
+}
+
+int mdss_mdp_hist_lut_config(struct mdss_mdp_ctl *ctl,
+					struct mdp_hist_lut_data *config,
+					u32 *copyback)
+{
+	int i, ret = 0;
+	u32 hist_offset, disp_num, dspp_num = 0;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX))
+		return -EINVAL;
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->ops & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto enhist_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+		hist_offset = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
+			  MDSS_MDP_REG_DSPP_HIST_LUT_BASE;
+		for (i = 0; i < ENHIST_LUT_ENTRIES; i++)
+			mdss_pp_res->enhist_lut[disp_num][i] =
+				MDSS_MDP_REG_READ(hist_offset);
+		if (copy_to_user(config->data,
+			&mdss_pp_res->enhist_lut[disp_num][0],
+			ENHIST_LUT_ENTRIES * sizeof(u32))) {
+			ret = -EFAULT;
+			goto enhist_config_exit;
+		}
+		*copyback = 1;
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	} else {
+		if (copy_from_user(&mdss_pp_res->enhist_lut[disp_num][0],
+			config->data, ENHIST_LUT_ENTRIES * sizeof(u32))) {
+			ret = -EFAULT;
+			goto enhist_config_exit;
+		}
+		mdss_pp_res->enhist_disp_cfg[disp_num] = *config;
+		mdss_pp_res->enhist_disp_cfg[disp_num].data =
+			&mdss_pp_res->enhist_lut[disp_num][0];
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_ENHIST;
+	}
+enhist_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	if (!ret)
+		mdss_mdp_pp_setup(ctl);
+	return ret;
+}
+
+int mdss_mdp_dither_config(struct mdss_mdp_ctl *ctl,
+					struct mdp_dither_cfg_data *config,
+					u32 *copyback)
+{
+	u32 disp_num;
+	if (!ctl)
+		return -EINVAL;
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX))
+		return -EINVAL;
+	if (config->flags & MDP_PP_OPS_READ)
+		return -ENOTSUPP;
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+	mdss_pp_res->dither_disp_cfg[disp_num] = *config;
+	mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_DITHER;
+	mutex_unlock(&mdss_pp_mutex);
+	mdss_mdp_pp_setup(ctl);
+	return 0;
+}
+
+int mdss_mdp_gamut_config(struct mdss_mdp_ctl *ctl,
+					struct mdp_gamut_cfg_data *config,
+					u32 *copyback)
+{
+	int i, j, size_total = 0, ret = 0;
+	u32 offset, disp_num, dspp_num = 0;
+	uint16_t *tbl_off;
+	struct mdp_gamut_cfg_data local_cfg;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if ((config->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(config->block >= MDP_BLOCK_MAX))
+		return -EINVAL;
+	for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++)
+		size_total += config->tbl_size[i];
+	if (size_total != GAMUT_TOTAL_TABLE_SIZE)
+		return -EINVAL;
+
+	mutex_lock(&mdss_pp_mutex);
+	disp_num = config->block - MDP_LOGICAL_BLOCK_DISP_0;
+
+	if (config->flags & MDP_PP_OPS_READ) {
+		ret = pp_get_dspp_num(disp_num, &dspp_num);
+		if (ret) {
+			pr_err("%s, no dspp connects to disp %d\n",
+				__func__, disp_num);
+			goto gamut_config_exit;
+		}
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+
+		offset = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
+			  MDSS_MDP_REG_DSPP_GAMUT_BASE;
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			for (j = 0; j < config->tbl_size[i]; j++)
+				config->r_tbl[i][j] =
+					(u16)MDSS_MDP_REG_READ(offset);
+			offset += 4;
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			for (j = 0; j < config->tbl_size[i]; j++)
+				config->g_tbl[i][j] =
+					(u16)MDSS_MDP_REG_READ(offset);
+			offset += 4;
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			for (j = 0; j < config->tbl_size[i]; j++)
+				config->b_tbl[i][j] =
+					(u16)MDSS_MDP_REG_READ(offset);
+			offset += 4;
+		}
+		*copyback = 1;
+		mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+	} else {
+		local_cfg = *config;
+		tbl_off = mdss_pp_res->gamut_tbl[disp_num];
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			local_cfg.r_tbl[i] = tbl_off;
+			if (copy_from_user(tbl_off, config->r_tbl[i],
+				config->tbl_size[i] * sizeof(uint16_t))) {
+				ret = -EFAULT;
+				goto gamut_config_exit;
+			}
+			tbl_off += local_cfg.tbl_size[i];
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			local_cfg.g_tbl[i] = tbl_off;
+			if (copy_from_user(tbl_off, config->g_tbl[i],
+				config->tbl_size[i] * sizeof(uint16_t))) {
+				ret = -EFAULT;
+				goto gamut_config_exit;
+			}
+			tbl_off += local_cfg.tbl_size[i];
+		}
+		for (i = 0; i < MDP_GAMUT_TABLE_NUM; i++) {
+			local_cfg.b_tbl[i] = tbl_off;
+			if (copy_from_user(tbl_off, config->b_tbl[i],
+				config->tbl_size[i] * sizeof(uint16_t))) {
+				ret = -EFAULT;
+				goto gamut_config_exit;
+			}
+			tbl_off += local_cfg.tbl_size[i];
+		}
+		mdss_pp_res->gamut_disp_cfg[disp_num] = local_cfg;
+		mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_GAMUT;
+	}
+gamut_config_exit:
+	mutex_unlock(&mdss_pp_mutex);
+	if (!ret)
+		mdss_mdp_pp_setup(ctl);
+	return ret;
+}
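+
+/*
+ * The histogram data register auto-increments through the bins as a
+ * ring: the first read reports the starting bin in bits [31:24], and
+ * the remaining HIST_V_SIZE - 1 reads wrap around back to it.
+ */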
+static void pp_hist_read(u32 v_base, struct pp_hist_col_info *hist_info)
+{
+	int i, i_start;
+	u32 data;
+	data = MDSS_MDP_REG_READ(v_base);
+	i_start = data >> 24;
+	hist_info->data[i_start] = data & 0xFFFFFF;
+	for (i = i_start + 1; i < HIST_V_SIZE; i++)
+		hist_info->data[i] = MDSS_MDP_REG_READ(v_base) & 0xFFFFFF;
+	for (i = 0; i < i_start; i++)
+		hist_info->data[i] = MDSS_MDP_REG_READ(v_base) & 0xFFFFFF;
+	hist_info->hist_cnt_read++;
+}
+
+int mdss_mdp_histogram_start(struct mdss_mdp_ctl *ctl,
+					struct mdp_histogram_start_req *req)
+{
+	u32 ctl_base, done_shift_bit;
+	struct pp_hist_col_info *hist_info;
+	int i, ret = 0;
+	u32 disp_num, dspp_num = 0;
+	u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	unsigned long flag;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if ((req->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(req->block >= MDP_BLOCK_MAX))
+		return -EINVAL;
+
+	mutex_lock(&mdss_mdp_hist_mutex);
+	disp_num = req->block - MDP_LOGICAL_BLOCK_DISP_0;
+	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+
+	if (!mixer_cnt) {
+		pr_err("%s, no dspp connects to disp %d\n",
+			__func__, disp_num);
+		ret = -EPERM;
+		goto hist_start_exit;
+	}
+	if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
+		pr_err("%s, too many dspp connects to disp %d\n",
+			__func__, mixer_cnt);
+		ret = -EPERM;
+		goto hist_start_exit;
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+	for (i = 0; i < mixer_cnt; i++) {
+		dspp_num = mixer_id[i];
+		hist_info = &mdss_pp_res->dspp_hist[dspp_num];
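+		/* hist IRQ bits sit in 4-bit per-DSPP fields starting at
+		 * bit 12; '3' enables two of them (presumably hist-done
+		 * and hist-reset-done) for this DSPP */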
+		done_shift_bit = (dspp_num * 4) + 12;
+		/* check if it is idle */
+		if (hist_info->col_en) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+			pr_info("%s: hist collection already enabled on dspp %d\n",
+				__func__, dspp_num);
+			goto hist_start_exit;
+		}
+		spin_lock_irqsave(&mdss_hist_lock, flag);
+		hist_info->frame_cnt = req->frame_cnt;
+		init_completion(&hist_info->comp);
+		hist_info->hist_cnt_read = 0;
+		hist_info->hist_cnt_sent = 0;
+		hist_info->read_request = false;
+		hist_info->col_state = HIST_RESET;
+		hist_info->col_en = true;
+		hist_info->is_kick_ready = false;
+		spin_unlock_irqrestore(&mdss_hist_lock, flag);
+		mdss_pp_res->hist_col[disp_num][i] =
+			&mdss_pp_res->dspp_hist[dspp_num];
+		mdss_mdp_hist_irq_enable(3 << done_shift_bit);
+		ctl_base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
+			  MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+		MDSS_MDP_REG_WRITE(ctl_base + 8, req->frame_cnt);
+		/* Kick out reset start */
+		MDSS_MDP_REG_WRITE(ctl_base + 4, 1);
+	}
+	for (i = mixer_cnt; i < MDSS_MDP_MAX_DSPP; i++)
+		mdss_pp_res->hist_col[disp_num][i] = 0;
+	mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_HIST_COL;
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+hist_start_exit:
+	mutex_unlock(&mdss_mdp_hist_mutex);
+	if (!ret) {
+		mdss_mdp_pp_setup(ctl);
+		/* wait for a frame to let the histogram enable itself */
+		usleep(41666);
+		for (i = 0; i < mixer_cnt; i++) {
+			dspp_num = mixer_id[i];
+			hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+			mutex_lock(&mdss_mdp_hist_mutex);
+			spin_lock_irqsave(&mdss_hist_lock, flag);
+			hist_info->is_kick_ready = true;
+			spin_unlock_irqrestore(&mdss_hist_lock, flag);
+			mutex_unlock(&mdss_mdp_hist_mutex);
+		}
+	}
+	return ret;
+}
+
+int mdss_mdp_histogram_stop(struct mdss_mdp_ctl *ctl, u32 block)
+{
+	int i, ret = 0;
+	u32 dspp_num, disp_num, ctl_base, done_bit;
+	struct pp_hist_col_info *hist_info;
+	u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	unsigned long flag;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if ((block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(block >= MDP_BLOCK_MAX))
+		return -EINVAL;
+
+	mutex_lock(&mdss_mdp_hist_mutex);
+	disp_num = block - MDP_LOGICAL_BLOCK_DISP_0;
+	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+
+	if (!mixer_cnt) {
+		pr_err("%s, no dspp connects to disp %d\n",
+			__func__, disp_num);
+		ret = -EPERM;
+		goto hist_stop_exit;
+	}
+	if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
+		pr_err("%s, too many dspp connects to disp %d\n",
+			__func__, mixer_cnt);
+		ret = -EPERM;
+		goto hist_stop_exit;
+	}
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+	for (i = 0; i < mixer_cnt; i++) {
+		dspp_num = mixer_id[i];
+		hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+		done_bit = 3 << ((dspp_num * 4) + 12);
+		ctl_base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
+			  MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+		if (hist_info->col_en == false) {
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+			goto hist_stop_exit;
+		}
+		complete_all(&hist_info->comp);
+		spin_lock_irqsave(&mdss_hist_lock, flag);
+		hist_info->col_en = false;
+		hist_info->col_state = HIST_UNKNOWN;
+		hist_info->is_kick_ready = false;
+		spin_unlock_irqrestore(&mdss_hist_lock, flag);
+		mdss_mdp_hist_irq_disable(done_bit);
+		MDSS_MDP_REG_WRITE(ctl_base, (1 << 1));/* cancel */
+	}
+	for (i = 0; i < MDSS_MDP_MAX_DSPP; i++)
+		mdss_pp_res->hist_col[disp_num][i] = 0;
+	mdss_pp_res->pp_disp_flags[disp_num] |= PP_FLAGS_DIRTY_HIST_COL;
+	mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+hist_stop_exit:
+	mutex_unlock(&mdss_mdp_hist_mutex);
+	if (!ret)
+		mdss_mdp_pp_setup(ctl);
+	return ret;
+}
+
+int mdss_mdp_hist_collect(struct mdss_mdp_ctl *ctl,
+					struct mdp_histogram_data *hist,
+					u32 *hist_data_addr)
+{
+	int i, j, wait_ret, ret = 0;
+	u32 timeout, v_base;
+	struct pp_hist_col_info *hist_info;
+	u32 dspp_num, disp_num, ctl_base;
+	u32 mixer_cnt, mixer_id[MDSS_MDP_INTF_MAX_LAYERMIXER];
+	unsigned long flag;
+
+	if (!ctl)
+		return -EINVAL;
+
+	if ((hist->block < MDP_LOGICAL_BLOCK_DISP_0) ||
+		(hist->block >= MDP_BLOCK_MAX))
+		return -EINVAL;
+
+	mutex_lock(&mdss_mdp_hist_mutex);
+	disp_num = hist->block - MDP_LOGICAL_BLOCK_DISP_0;
+	mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
+
+	if (!mixer_cnt) {
+		pr_err("%s, no dspp connects to disp %d",
+			__func__, disp_num);
+		ret = -EPERM;
+		goto hist_collect_exit;
+	}
+	if (mixer_cnt >= MDSS_MDP_MAX_DSPP) {
+		pr_err("%s, Too many dspp connects to disp %d",
+			__func__, mixer_cnt);
+		ret = -EPERM;
+		goto hist_collect_exit;
+	}
+	hist_info = &mdss_pp_res->dspp_hist[0];
+	for (i = 0; i < mixer_cnt; i++) {
+		dspp_num = mixer_id[i];
+		hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+		ctl_base = MDSS_MDP_REG_DSPP_OFFSET(dspp_num) +
+			  MDSS_MDP_REG_DSPP_HIST_CTL_BASE;
+		if ((hist_info->col_en == 0) ||
+			(hist_info->col_state == HIST_UNKNOWN)) {
+			ret = -EINVAL;
+			goto hist_collect_exit;
+		}
+		spin_lock_irqsave(&mdss_hist_lock, flag);
+		/* wait for hist done if cache has no data */
+		if (hist_info->col_state != HIST_READY) {
+			hist_info->read_request = true;
+			spin_unlock_irqrestore(&mdss_hist_lock, flag);
+			timeout = HIST_WAIT_TIMEOUT(hist_info->frame_cnt);
+			mutex_unlock(&mdss_mdp_hist_mutex);
+			/* flush updates before wait*/
+			mdss_mdp_pp_setup(ctl);
+			wait_ret = wait_for_completion_killable_timeout(
+					&(hist_info->comp), timeout);
+
+			mutex_lock(&mdss_mdp_hist_mutex);
+			if (wait_ret == 0) {
+				ret = -ETIMEDOUT;
+				spin_lock_irqsave(&mdss_hist_lock, flag);
+				pr_debug("bin collection timedout, state %d",
+							hist_info->col_state);
+				/*
+				 * When the histogram has timed out (usually
+				 * underrun) change the SW state back to idle
+				 * since histogram hardware will have done the
+				 * same. Histogram data also needs to be
+				 * cleared in this case, which is done by the
+				 * histogram being read (triggered by READY
+				 * state, which also moves the histogram SW back
+				 * to IDLE).
+				 */
+				hist_info->col_state = HIST_READY;
+				spin_unlock_irqrestore(&mdss_hist_lock, flag);
+			} else if (wait_ret < 0) {
+				ret = -EINTR;
+				pr_debug("%s: bin collection interrupted",
+						__func__);
+				goto hist_collect_exit;
+			}
+			if (hist_info->col_state != HIST_READY) {
+				ret = -ENODATA;
+				pr_debug("%s: state is not ready: %d",
+					__func__, hist_info->col_state);
+				goto hist_collect_exit;
+			}
+		} else {
+			spin_unlock_irqrestore(&mdss_hist_lock, flag);
+		}
+		spin_lock_irqsave(&mdss_hist_lock, flag);
+		if (hist_info->col_state == HIST_READY) {
+			spin_unlock_irqrestore(&mdss_hist_lock, flag);
+			v_base = ctl_base + 0x1C;
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
+			pp_hist_read(v_base, hist_info);
+			mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
+			spin_lock_irqsave(&mdss_hist_lock, flag);
+			hist_info->read_request = false;
+			hist_info->col_state = HIST_IDLE;
+		}
+		spin_unlock_irqrestore(&mdss_hist_lock, flag);
+	}
+	if (mixer_cnt > 1) {
+		memset(&mdss_pp_res->hist_data[disp_num][0],
+			0, HIST_V_SIZE * sizeof(u32));
+		for (i = 0; i < mixer_cnt; i++) {
+			dspp_num = mixer_id[i];
+			hist_info = &mdss_pp_res->dspp_hist[dspp_num];
+			for (j = 0; j < HIST_V_SIZE; j++)
+				mdss_pp_res->hist_data[disp_num][j] +=
+					hist_info->data[j];
+		}
+		*hist_data_addr = (u32)&mdss_pp_res->hist_data[disp_num][0];
+	} else {
+		*hist_data_addr = (u32)hist_info->data;
+	}
+	hist_info->hist_cnt_sent++;
+hist_collect_exit:
+	mutex_unlock(&mdss_mdp_hist_mutex);
+	return ret;
+}
+void mdss_mdp_hist_intr_done(u32 isr)
+{
+	u32 isr_blk, blk_idx;
+	struct pp_hist_col_info *hist_info;
+	isr &= 0x333333;
+	while (isr != 0) {
+		if (isr & 0xFFF000) {
+			if (isr & 0x3000) {
+				blk_idx = 0;
+				isr_blk = (isr >> 12) & 0x3;
+				isr &= ~0x3000;
+			} else if (isr & 0x30000) {
+				blk_idx = 1;
+				isr_blk = (isr >> 16) & 0x3;
+				isr &= ~0x30000;
+			} else {
+				blk_idx = 2;
+				isr_blk = (isr >> 20) & 0x3;
+				isr &= ~0x300000;
+			}
+			hist_info = &mdss_pp_res->dspp_hist[blk_idx];
+		} else {
+			if (isr & 0x3) {
+				blk_idx = 0;
+				isr_blk = isr & 0x3;
+				isr &= ~0x3;
+			} else if (isr & 0x30) {
+				blk_idx = 1;
+				isr_blk = (isr >> 4) & 0x3;
+				isr &= ~0x30;
+			} else {
+				blk_idx = 2;
+				isr_blk = (isr >> 8) & 0x3;
+				isr &= ~0x300;
+			}
+			/* SSPP block, not supported yet */
+			continue;
+		}
+		/* Histogram Done Interrupt */
+		if ((isr_blk & 0x1) &&
+			(hist_info->col_en)) {
+			spin_lock(&mdss_hist_lock);
+			hist_info->col_state = HIST_READY;
+			spin_unlock(&mdss_hist_lock);
+			if (hist_info->read_request)
+				complete(&hist_info->comp);
+		}
+		/* Histogram Reset Done Interrupt */
+		if ((isr_blk & 0x2) &&
+			(hist_info->col_en)) {
+				spin_lock(&mdss_hist_lock);
+				hist_info->col_state = HIST_IDLE;
+				spin_unlock(&mdss_hist_lock);
+		}
+	}
+}
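
A note on the interrupt layout decoded in mdss_mdp_hist_intr_done() above:
each histogram block owns one nibble of the HIST_INTR status register, with
bit 0 signalling "histogram done" and bit 1 "reset done"; SSPP blocks sit in
nibbles 0-2 and DSPP blocks in nibbles 3-5, which is where the 0x333333 mask
and the (dspp_num * 4) + 12 shift used at enable time come from. A minimal
sketch of the same decode under those assumptions (helper name hypothetical):

/* hypothetical helper mirroring the nibble layout above */
#define HIST_ISR_DONE		0x1	/* histogram collection done */
#define HIST_ISR_RESET_DONE	0x2	/* histogram reset done */

static inline u32 hist_isr_dspp_bits(u32 isr, int dspp_num)
{
	return (isr >> ((dspp_num * 4) + 12)) & 0x3;
}
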
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index a5d072b..5711653 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -70,28 +70,13 @@
 {
 	struct mdss_mdp_mixer *mixer;
 	struct mdss_mdp_pipe *pipe = NULL;
-	int pnum;
 
 	mixer = mdss_mdp_wb_mixer_alloc(1);
 	if (!mixer)
 		return NULL;
 
-	switch (mixer->num) {
-	case MDSS_MDP_LAYERMIXER3:
-		pnum = MDSS_MDP_SSPP_DMA0;
-		break;
-	case MDSS_MDP_LAYERMIXER4:
-		pnum = MDSS_MDP_SSPP_DMA1;
-		break;
-	default:
-		goto done;
-	}
+	pipe = mdss_mdp_pipe_alloc_dma(mixer);
 
-	pipe = mdss_mdp_pipe_alloc_pnum(pnum);
-
-	if (pipe)
-		pipe->mixer = mixer;
-done:
 	if (!pipe)
 		mdss_mdp_wb_mixer_destroy(mixer);
 
@@ -185,14 +170,19 @@
 			   struct mdss_mdp_data *src_data,
 			   struct mdss_mdp_data *dst_data)
 {
-	struct mdss_mdp_pipe *rot_pipe;
+	struct mdss_mdp_pipe *rot_pipe = NULL;
 	struct mdss_mdp_ctl *ctl;
-	int ret;
+	int ret, need_wait = false;
 
-	if (!rot)
+	ret = mutex_lock_interruptible(&rotator_lock);
+	if (ret)
+		return ret;
+
+	if (!rot || !rot->ref_cnt) {
+		mutex_unlock(&rotator_lock);
 		return -ENODEV;
+	}
 
-	mutex_lock(&rotator_lock);
 	ret = mdss_mdp_rotator_pipe_dequeue(rot);
 	if (ret) {
 		pr_err("unable to acquire rotator\n");
@@ -207,13 +197,12 @@
 
 	if (rot->params_changed) {
 		rot->params_changed = 0;
-		rot_pipe->flags = rot->rotations;
+		rot_pipe->flags = rot->flags;
 		rot_pipe->src_fmt = mdss_mdp_get_format_params(rot->format);
 		rot_pipe->img_width = rot->img_width;
 		rot_pipe->img_height = rot->img_height;
 		rot_pipe->src = rot->src_rect;
 		rot_pipe->dst = rot->src_rect;
-		rot_pipe->bwc_mode = rot->bwc_mode;
 		rot_pipe->params_changed++;
 	}
 
@@ -225,16 +214,21 @@
 
 	ret = mdss_mdp_rotator_kickoff(ctl, rot, dst_data);
 
+	if (ret == 0 && !rot->no_wait)
+		need_wait = true;
 done:
 	mutex_unlock(&rotator_lock);
 
-	if (!rot->no_wait)
+	if (need_wait)
 		mdss_mdp_rotator_busy_wait(rot);
 
+	if (rot_pipe)
+		pr_debug("end of rotator pnum=%d enqueue\n", rot_pipe->num);
+
 	return ret;
 }
 
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
+static int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot)
 {
 	struct mdss_mdp_pipe *rot_pipe;
 
@@ -243,7 +237,6 @@
 
 	pr_debug("finish rot id=%x\n", rot->session_id);
 
-	mutex_lock(&rotator_lock);
 	rot_pipe = rot->pipe;
 	if (rot_pipe) {
 		mdss_mdp_rotator_busy_wait(rot);
@@ -255,7 +248,43 @@
 		mdss_mdp_pipe_destroy(rot_pipe);
 		mdss_mdp_wb_mixer_destroy(mixer);
 	}
+
+	return 0;
+}
+
+int mdss_mdp_rotator_release(u32 ndx)
+{
+	struct mdss_mdp_rotator_session *rot;
+	mutex_lock(&rotator_lock);
+	rot = mdss_mdp_rotator_session_get(ndx);
+	if (rot) {
+		mdss_mdp_rotator_finish(rot);
+	} else {
+		pr_warn("unknown session id=%x\n", ndx);
+		return -ENOENT;
+	}
 	mutex_unlock(&rotator_lock);
 
 	return 0;
 }
+
+int mdss_mdp_rotator_release_all(void)
+{
+	struct mdss_mdp_rotator_session *rot;
+	int i, cnt;
+
+	mutex_lock(&rotator_lock);
+	for (i = 0, cnt = 0; i < MAX_ROTATOR_SESSIONS; i++) {
+		rot = &rotator_session[i];
+		if (rot->ref_cnt) {
+			mdss_mdp_rotator_finish(rot);
+			cnt++;
+		}
+	}
+	mutex_unlock(&rotator_lock);
+
+	if (cnt)
+		pr_debug("cleaned up %d rotator sessions\n", cnt);
+
+	return 0;
+}
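
With mdss_mdp_rotator_finish() now static, callers outside this file tear
sessions down through mdss_mdp_rotator_release() and
mdss_mdp_rotator_release_all(), both of which take rotator_lock themselves.
A usage sketch (caller names are hypothetical):

/* sketch: hypothetical overlay-teardown paths using the new API */
static int example_release_session(u32 ndx)
{
	return mdss_mdp_rotator_release(ndx);	/* locks internally */
}

static void example_release_everything(void)
{
	mdss_mdp_rotator_release_all();	/* idempotent when none active */
}
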
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.h b/drivers/video/msm/mdss/mdss_mdp_rotator.h
index 7d39c72..21ee9bb 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.h
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,7 @@
 
 #include "mdss_mdp.h"
 
-#define MDSS_MDP_ROT_SESSION_MASK	0x80000000
+#define MDSS_MDP_ROT_SESSION_MASK	0x40000000
 
 struct mdss_mdp_rotator_session {
 	u32 session_id;
@@ -25,7 +25,7 @@
 	u32 params_changed;
 
 	u32 format;
-	u32 rotations;
+	u32 flags;
 
 	u16 img_width;
 	u16 img_height;
@@ -42,13 +42,30 @@
 	struct list_head head;
 };
 
+static inline u32 mdss_mdp_get_rotator_dst_format(u32 in_format)
+{
+	switch (in_format) {
+	case MDP_RGB_565:
+	case MDP_BGR_565:
+		return MDP_RGB_888;
+	case MDP_Y_CBCR_H2V2_VENUS:
+	case MDP_Y_CB_CR_H2V2:
+	case MDP_Y_CR_CB_GH2V2:
+	case MDP_Y_CR_CB_H2V2:
+		return MDP_Y_CRCB_H2V2;
+	default:
+		return in_format;
+	}
+}
+
 struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_alloc(void);
 struct mdss_mdp_rotator_session *mdss_mdp_rotator_session_get(u32 session_id);
 
 int mdss_mdp_rotator_queue(struct mdss_mdp_rotator_session *rot,
 			   struct mdss_mdp_data *src_data,
 			   struct mdss_mdp_data *dst_data);
-int mdss_mdp_rotator_finish(struct mdss_mdp_rotator_session *rot);
-int mdss_mdp_rotator_ctl_busy_wait(struct mdss_mdp_ctl *ctl);
+
+int mdss_mdp_rotator_release(u32 ndx);
+int mdss_mdp_rotator_release_all(void);
 
 #endif /* MDSS_MDP_ROTATOR_H */
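
The new inline helper above pins the rotator output to what the write-back
block can produce: 16-bpp RGB widens to RGB888 and the planar/Venus YUV
variants all collapse to two-plane MDP_Y_CRCB_H2V2. For example:

/* three-plane YUV in, pseudo-planar NV21-style out */
u32 dst_fmt = mdss_mdp_get_rotator_dst_format(MDP_Y_CR_CB_H2V2);
/* dst_fmt == MDP_Y_CRCB_H2V2 */
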
diff --git a/drivers/video/msm/mdss/mdss_mdp_util.c b/drivers/video/msm/mdss/mdss_mdp_util.c
index 60fe263..5915f61 100644
--- a/drivers/video/msm/mdss/mdss_mdp_util.c
+++ b/drivers/video/msm/mdss/mdss_mdp_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,27 +12,39 @@
  */
 #define pr_fmt(fmt)	"%s: " fmt, __func__
 
-#include <linux/android_pmem.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/file.h>
 #include <linux/msm_ion.h>
+#include <linux/iommu.h>
 #include <linux/msm_kgsl.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
+#include <media/msm_media_info.h>
+
+#include <mach/iommu_domains.h>
 
 #include "mdss_fb.h"
 #include "mdss_mdp.h"
 #include "mdss_mdp_formats.h"
 
+#define DEFAULT_FRAME_RATE	60
+
 enum {
 	MDP_INTR_VSYNC_INTF_0,
 	MDP_INTR_VSYNC_INTF_1,
 	MDP_INTR_VSYNC_INTF_2,
 	MDP_INTR_VSYNC_INTF_3,
+	MDP_INTR_UNDERRUN_INTF_0,
+	MDP_INTR_UNDERRUN_INTF_1,
+	MDP_INTR_UNDERRUN_INTF_2,
+	MDP_INTR_UNDERRUN_INTF_3,
 	MDP_INTR_PING_PONG_0,
 	MDP_INTR_PING_PONG_1,
 	MDP_INTR_PING_PONG_2,
+	MDP_INTR_PING_PONG_0_RD_PTR,
+	MDP_INTR_PING_PONG_1_RD_PTR,
+	MDP_INTR_PING_PONG_2_RD_PTR,
 	MDP_INTR_WB_0,
 	MDP_INTR_WB_1,
 	MDP_INTR_WB_2,
@@ -51,12 +63,18 @@
 {
 	int index = -1;
 	switch (intr_type) {
+	case MDSS_MDP_IRQ_INTF_UNDER_RUN:
+		index = MDP_INTR_UNDERRUN_INTF_0 + (intf_num - MDSS_MDP_INTF0);
+		break;
 	case MDSS_MDP_IRQ_INTF_VSYNC:
 		index = MDP_INTR_VSYNC_INTF_0 + (intf_num - MDSS_MDP_INTF0);
 		break;
 	case MDSS_MDP_IRQ_PING_PONG_COMP:
 		index = MDP_INTR_PING_PONG_0 + intf_num;
 		break;
+	case MDSS_MDP_IRQ_PING_PONG_RD_PTR:
+		index = MDP_INTR_PING_PONG_0_RD_PTR + intf_num;
+		break;
 	case MDSS_MDP_IRQ_WB_ROT_COMP:
 		index = MDP_INTR_WB_0 + intf_num;
 		break;
@@ -72,7 +90,7 @@
 			       void (*fnc_ptr)(void *), void *arg)
 {
 	unsigned long flags;
-	int index, ret;
+	int index;
 
 	index = mdss_mdp_intr2index(intr_type, intf_num);
 	if (index < 0) {
@@ -82,16 +100,13 @@
 	}
 
 	spin_lock_irqsave(&mdss_mdp_intr_lock, flags);
-	if (!mdp_intr_cb[index].func) {
-		mdp_intr_cb[index].func = fnc_ptr;
-		mdp_intr_cb[index].arg = arg;
-		ret = 0;
-	} else {
-		ret = -EBUSY;
-	}
+	WARN(mdp_intr_cb[index].func && fnc_ptr,
+		"replacing current intr callback for ndx=%d\n", index);
+	mdp_intr_cb[index].func = fnc_ptr;
+	mdp_intr_cb[index].arg = arg;
 	spin_unlock_irqrestore(&mdss_mdp_intr_lock, flags);
 
-	return ret;
+	return 0;
 }
 
 static inline void mdss_mdp_intr_done(int index)
@@ -102,8 +117,6 @@
 	spin_lock(&mdss_mdp_intr_lock);
 	fnc = mdp_intr_cb[index].func;
 	arg = mdp_intr_cb[index].arg;
-	if (fnc != NULL)
-		mdp_intr_cb[index].func = NULL;
 	spin_unlock(&mdss_mdp_intr_lock);
 	if (fnc)
 		fnc(arg);
@@ -111,22 +124,35 @@
 
 irqreturn_t mdss_mdp_isr(int irq, void *ptr)
 {
-	u32 isr, mask;
+	u32 isr, mask, hist_isr, hist_mask;
 
 
 	isr = MDSS_MDP_REG_READ(MDSS_MDP_REG_INTR_STATUS);
 
-	pr_debug("isr=%x\n", isr);
-
 	if (isr == 0)
-		goto done;
+		goto mdp_isr_done;
+
 
 	mask = MDSS_MDP_REG_READ(MDSS_MDP_REG_INTR_EN);
 	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_INTR_CLEAR, isr);
 
+	pr_debug("%s: isr=%x mask=%x\n", __func__, isr, mask);
+
 	isr &= mask;
 	if (isr == 0)
-		goto done;
+		goto mdp_isr_done;
+
+	if (isr & MDSS_MDP_INTR_INTF_0_UNDERRUN)
+		mdss_mdp_intr_done(MDP_INTR_UNDERRUN_INTF_0);
+
+	if (isr & MDSS_MDP_INTR_INTF_1_UNDERRUN)
+		mdss_mdp_intr_done(MDP_INTR_UNDERRUN_INTF_1);
+
+	if (isr & MDSS_MDP_INTR_INTF_2_UNDERRUN)
+		mdss_mdp_intr_done(MDP_INTR_UNDERRUN_INTF_2);
+
+	if (isr & MDSS_MDP_INTR_INTF_3_UNDERRUN)
+		mdss_mdp_intr_done(MDP_INTR_UNDERRUN_INTF_3);
 
 	if (isr & MDSS_MDP_INTR_PING_PONG_0_DONE)
 		mdss_mdp_intr_done(MDP_INTR_PING_PONG_0);
@@ -137,6 +163,15 @@
 	if (isr & MDSS_MDP_INTR_PING_PONG_2_DONE)
 		mdss_mdp_intr_done(MDP_INTR_PING_PONG_2);
 
+	if (isr & MDSS_MDP_INTR_PING_PONG_0_RD_PTR)
+		mdss_mdp_intr_done(MDP_INTR_PING_PONG_0_RD_PTR);
+
+	if (isr & MDSS_MDP_INTR_PING_PONG_1_RD_PTR)
+		mdss_mdp_intr_done(MDP_INTR_PING_PONG_1_RD_PTR);
+
+	if (isr & MDSS_MDP_INTR_PING_PONG_2_RD_PTR)
+		mdss_mdp_intr_done(MDP_INTR_PING_PONG_2_RD_PTR);
+
 	if (isr & MDSS_MDP_INTR_INTF_0_VSYNC)
 		mdss_mdp_intr_done(MDP_INTR_VSYNC_INTF_0);
 
@@ -158,27 +193,76 @@
 	if (isr & MDSS_MDP_INTR_WB_2_DONE)
 		mdss_mdp_intr_done(MDP_INTR_WB_2);
 
-done:
+mdp_isr_done:
+	hist_isr = MDSS_MDP_REG_READ(MDSS_MDP_REG_HIST_INTR_STATUS);
+	if (hist_isr == 0)
+		goto hist_isr_done;
+	hist_mask = MDSS_MDP_REG_READ(MDSS_MDP_REG_HIST_INTR_EN);
+	MDSS_MDP_REG_WRITE(MDSS_MDP_REG_HIST_INTR_CLEAR, hist_isr);
+	hist_isr &= hist_mask;
+	if (hist_isr == 0)
+		goto hist_isr_done;
+	mdss_mdp_hist_intr_done(hist_isr);
+hist_isr_done:
 	return IRQ_HANDLED;
 }
 
 struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format)
 {
-	struct mdss_mdp_format_params *fmt = NULL;
 	if (format < MDP_IMGTYPE_LIMIT) {
-		fmt = &mdss_mdp_format_map[format];
-		if (fmt->format != format)
-			fmt = NULL;
+		struct mdss_mdp_format_params *fmt = NULL;
+		int i;
+		for (i = 0; i < ARRAY_SIZE(mdss_mdp_format_map); i++) {
+			fmt = &mdss_mdp_format_map[i];
+			if (format == fmt->format)
+				return fmt;
+		}
+	}
+	return NULL;
+}
+
+int mdss_mdp_get_rau_strides(u32 w, u32 h,
+			       struct mdss_mdp_format_params *fmt,
+			       struct mdss_mdp_plane_sizes *ps)
+{
+	u32 stride_off;
+	if (fmt->is_yuv) {
+		ps->rau_cnt = DIV_ROUND_UP(w, 64);
+		ps->ystride[0] = 64 * 4;
+		ps->rau_h[0] = 4;
+		ps->rau_h[1] = 2;
+		if (fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2)
+			ps->ystride[1] = 64 * 2;
+		else if (fmt->chroma_sample == MDSS_MDP_CHROMA_H2V1) {
+			ps->ystride[1] = 32 * 4;
+			ps->rau_h[1] = 4;
+		} else
+			ps->ystride[1] = 32 * 2;
+	} else if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
+		ps->rau_cnt = DIV_ROUND_UP(w, 32);
+		ps->ystride[0] = 32 * 4;
+		ps->ystride[1] = 0;
+		ps->rau_h[0] = 4;
+		ps->rau_h[1] = 0;
+	} else  {
+		pr_err("Invalid format=%d\n", fmt->format);
+		return -EINVAL;
 	}
 
-	return fmt;
+	stride_off = DIV_ROUND_UP(ps->rau_cnt, 8);
+	ps->ystride[0] = ps->ystride[0] * ps->rau_cnt * fmt->bpp + stride_off;
+	ps->ystride[1] = ps->ystride[1] * ps->rau_cnt * fmt->bpp + stride_off;
+	ps->num_planes = 2;
+
+	return 0;
 }
 
 int mdss_mdp_get_plane_sizes(u32 format, u32 w, u32 h,
-			     struct mdss_mdp_plane_sizes *ps)
+			     struct mdss_mdp_plane_sizes *ps, u32 bwc_mode)
 {
 	struct mdss_mdp_format_params *fmt;
-	int i;
+	int i, rc;
+	u32 bpp, stride_off;
 
 	if (ps == NULL)
 		return -EINVAL;
@@ -190,44 +274,69 @@
 	if (!fmt)
 		return -EINVAL;
 
+	bpp = fmt->bpp;
 	memset(ps, 0, sizeof(struct mdss_mdp_plane_sizes));
 
-	if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
-		u32 bpp = fmt->bpp + 1;
-		ps->num_planes = 1;
-		ps->plane_size[0] = w * h * bpp;
-		ps->ystride[0] = w * bpp;
+	if (bwc_mode) {
+		rc = mdss_mdp_get_rau_strides(w, h, fmt, ps);
+		if (rc)
+			return rc;
+		stride_off = DIV_ROUND_UP(h, ps->rau_h[0]);
+		ps->ystride[0] = ps->ystride[0] + ps->ystride[1];
+		ps->plane_size[0] = ps->ystride[0] * stride_off;
+		ps->ystride[1] = 2;
+		ps->plane_size[1] = ps->rau_cnt * ps->ystride[1] * stride_off;
+
 	} else {
-		u8 hmap[] = { 1, 2, 1, 2 };
-		u8 vmap[] = { 1, 1, 2, 2 };
-		u8 horiz, vert;
-
-		horiz = hmap[fmt->chroma_sample];
-		vert = vmap[fmt->chroma_sample];
-
-		if (format == MDP_Y_CR_CB_GH2V2) {
-			ps->plane_size[0] = ALIGN(w, 16) * h;
-			ps->plane_size[1] = ALIGN(w / horiz, 16) * (h / vert);
-			ps->ystride[0] = ALIGN(w, 16);
-			ps->ystride[1] = ALIGN(w / horiz, 16);
-		} else {
-			ps->plane_size[0] = w * h;
-			ps->plane_size[1] = (w / horiz) * (h / vert);
-			ps->ystride[0] = w;
-			ps->ystride[1] = (w / horiz);
-		}
-
-		if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) {
+		if (fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) {
+			ps->num_planes = 1;
+			ps->plane_size[0] = w * h * bpp;
+			ps->ystride[0] = w * bpp;
+		} else if (format == MDP_Y_CBCR_H2V2_VENUS) {
+			int cf = COLOR_FMT_NV12;
 			ps->num_planes = 2;
-			ps->plane_size[1] *= 2;
-			ps->ystride[1] *= 2;
-		} else { /* planar */
-			ps->num_planes = 3;
-			ps->plane_size[2] = ps->plane_size[1];
-			ps->ystride[2] = ps->ystride[1];
+			ps->ystride[0] = VENUS_Y_STRIDE(cf, w);
+			ps->ystride[1] = VENUS_UV_STRIDE(cf, w);
+			ps->plane_size[0] = VENUS_Y_SCANLINES(cf, h) *
+				ps->ystride[0];
+			ps->plane_size[1] = VENUS_UV_SCANLINES(cf, h) *
+				ps->ystride[1];
+		} else {
+			u8 hmap[] = { 1, 2, 1, 2 };
+			u8 vmap[] = { 1, 1, 2, 2 };
+			u8 horiz, vert, stride_align, height_align;
+
+			horiz = hmap[fmt->chroma_sample];
+			vert = vmap[fmt->chroma_sample];
+
+			switch (format) {
+			case MDP_Y_CR_CB_GH2V2:
+				stride_align = 16;
+				height_align = 1;
+				break;
+			default:
+				stride_align = 1;
+				height_align = 1;
+				break;
+			}
+
+			ps->ystride[0] = ALIGN(w, stride_align);
+			ps->ystride[1] = ALIGN(w / horiz, stride_align);
+			ps->plane_size[0] = ps->ystride[0] *
+				ALIGN(h, height_align);
+			ps->plane_size[1] = ps->ystride[1] * (h / vert);
+
+			if (fmt->fetch_planes == MDSS_MDP_PLANE_PSEUDO_PLANAR) {
+				ps->num_planes = 2;
+				ps->plane_size[1] *= 2;
+				ps->ystride[1] *= 2;
+			} else { /* planar */
+				ps->num_planes = 3;
+				ps->plane_size[2] = ps->plane_size[1];
+				ps->ystride[2] = ps->ystride[1];
+			}
 		}
 	}
-
 	for (i = 0; i < ps->num_planes; i++)
 		ps->total_size += ps->plane_size[i];
 
@@ -244,7 +353,8 @@
 		return -ENOMEM;
 
 	if (data->bwc_enabled) {
-		return -EPERM; /* not supported */
+		data->num_planes = ps->num_planes;
+		data->p[1].addr = data->p[0].addr + ps->plane_size[0];
 	} else {
 		struct mdss_mdp_img_data *prev, *curr;
 		int i;
@@ -279,37 +389,52 @@
 
 int mdss_mdp_put_img(struct mdss_mdp_img_data *data)
 {
-	/* only source may use frame buffer */
+	struct ion_client *iclient = mdss_get_ionclient();
 	if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
+		pr_debug("fb mem buf=0x%x\n", data->addr);
 		fput_light(data->srcp_file, data->p_need);
-		return 0;
-	}
-	if (data->srcp_file) {
-		put_pmem_file(data->srcp_file);
 		data->srcp_file = NULL;
-		return 0;
-	}
-	if (!IS_ERR_OR_NULL(data->srcp_ihdl)) {
-		ion_free(data->iclient, data->srcp_ihdl);
-		data->iclient = NULL;
+	} else if (data->srcp_file) {
+		pr_debug("pmem buf=0x%x\n", data->addr);
+		data->srcp_file = NULL;
+	} else if (!IS_ERR_OR_NULL(data->srcp_ihdl)) {
+		pr_debug("ion hdl=%p buf=0x%x\n", data->srcp_ihdl, data->addr);
+
+		if (is_mdss_iommu_attached()) {
+			int domain;
+			if (data->flags & MDP_SECURE_OVERLAY_SESSION)
+				domain = MDSS_IOMMU_DOMAIN_SECURE;
+			else
+				domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+			ion_unmap_iommu(iclient, data->srcp_ihdl,
+					mdss_get_iommu_domain(domain), 0);
+
+			if (domain == MDSS_IOMMU_DOMAIN_SECURE) {
+				msm_ion_unsecure_buffer(iclient,
+					data->srcp_ihdl);
+			}
+		}
+
+		ion_free(iclient, data->srcp_ihdl);
 		data->srcp_ihdl = NULL;
-		return 0;
+	} else {
+		return -ENOMEM;
 	}
 
-	return -ENOMEM;
+	return 0;
 }
 
-int mdss_mdp_get_img(struct ion_client *iclient, struct msmfb_data *img,
-		     struct mdss_mdp_img_data *data)
+int mdss_mdp_get_img(struct msmfb_data *img, struct mdss_mdp_img_data *data)
 {
 	struct file *file;
 	int ret = -EINVAL;
 	int fb_num;
 	unsigned long *start, *len;
+	struct ion_client *iclient = mdss_get_ionclient();
 
 	start = (unsigned long *) &data->addr;
 	len = (unsigned long *) &data->len;
-	data->flags = img->flags;
+	data->flags |= img->flags;
 	data->p_need = 0;
 
 	if (img->flags & MDP_BLIT_SRC_GEM) {
@@ -318,32 +443,94 @@
 					start, len);
 	} else if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
 		file = fget_light(img->memory_id, &data->p_need);
-		if (file && FB_MAJOR ==
-				MAJOR(file->f_dentry->d_inode->i_rdev)) {
-			data->srcp_file = file;
+		if (file == NULL) {
+			pr_err("invalid framebuffer file (%d)\n",
+					img->memory_id);
+			return -EINVAL;
+		}
+		data->srcp_file = file;
+
+		if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
 			fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
 			ret = mdss_fb_get_phys_info(start, len, fb_num);
+			if (ret)
+				pr_err("mdss_fb_get_phys_info() failed\n");
+		} else {
+			pr_err("invalid FB_MAJOR\n");
+			ret = -EINVAL;
 		}
 	} else if (iclient) {
-		data->iclient = iclient;
 		data->srcp_ihdl = ion_import_dma_buf(iclient, img->memory_id);
-		if (IS_ERR_OR_NULL(data->srcp_ihdl))
-			return PTR_ERR(data->srcp_ihdl);
-		ret = ion_phys(iclient, data->srcp_ihdl,
-			       start, (size_t *) len);
-	} else {
-		unsigned long vstart;
-		ret = get_pmem_file(img->memory_id, start, &vstart, len,
-				    &data->srcp_file);
+		if (IS_ERR_OR_NULL(data->srcp_ihdl)) {
+			pr_err("error on ion_import_fd\n");
+			ret = PTR_ERR(data->srcp_ihdl);
+			data->srcp_ihdl = NULL;
+			return ret;
+		}
+
+		if (is_mdss_iommu_attached()) {
+			int domain;
+			if (data->flags & MDP_SECURE_OVERLAY_SESSION) {
+				domain = MDSS_IOMMU_DOMAIN_SECURE;
+				ret = msm_ion_secure_buffer(iclient,
+					data->srcp_ihdl, 0x2, 0);
+				if (IS_ERR_VALUE(ret)) {
+					ion_free(iclient, data->srcp_ihdl);
+					pr_err("failed to secure handle (%d)\n",
+						ret);
+					return ret;
+				}
+			} else {
+				domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+			}
+
+			ret = ion_map_iommu(iclient, data->srcp_ihdl,
+					    mdss_get_iommu_domain(domain),
+					    0, SZ_4K, 0, start, len, 0, 0);
+		} else {
+			ret = ion_phys(iclient, data->srcp_ihdl, start,
+				       (size_t *) len);
+		}
+
+		if (IS_ERR_VALUE(ret)) {
+			ion_free(iclient, data->srcp_ihdl);
+			pr_err("failed to map ion handle (%d)\n", ret);
+			return ret;
+		}
 	}
 
 	if (!ret && (img->offset < data->len)) {
 		data->addr += img->offset;
 		data->len -= img->offset;
+
+		pr_debug("mem=%d ihdl=%p buf=0x%x len=0x%x\n", img->memory_id,
+			 data->srcp_ihdl, data->addr, data->len);
 	} else {
-		mdss_mdp_put_img(data);
-		ret = -EINVAL;
+		return -EINVAL;
 	}
 
 	return ret;
 }
+
+u32 mdss_get_panel_framerate(struct msm_fb_data_type *mfd)
+{
+	u32 frame_rate = DEFAULT_FRAME_RATE;
+	u32 pixel_total;
+	struct mdss_panel_info *panel_info = mfd->panel_info;
+
+	if (panel_info->type == MIPI_VIDEO_PANEL) {
+		frame_rate = panel_info->mipi.frame_rate;
+	} else {
+		pixel_total = (panel_info->lcdc.h_back_porch +
+			  panel_info->lcdc.h_front_porch +
+			  panel_info->lcdc.h_pulse_width +
+			  panel_info->xres) *
+			 (panel_info->lcdc.v_back_porch +
+			  panel_info->lcdc.v_front_porch +
+			  panel_info->lcdc.v_pulse_width +
+			  panel_info->yres);
+		if (pixel_total)
+			frame_rate = panel_info->clk_rate / pixel_total;
+	}
+	return frame_rate;
+}
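
For panels other than MIPI video, mdss_get_panel_framerate() above derives
the rate as pixel clock over total pixels per frame (active plus blanking).
A worked example with hypothetical timings:

/*
 * hypothetical panel: xres=1920, yres=1080, clk_rate=148500000
 *   h_back_porch=32, h_front_porch=48, h_pulse_width=32
 *   v_back_porch=4,  v_front_porch=3,  v_pulse_width=5
 *
 * pixel_total = (32 + 48 + 32 + 1920) * (4 + 3 + 5 + 1080)
 *             = 2032 * 1092 = 2218944
 * frame_rate  = 148500000 / 2218944 = 66 (integer division)
 */
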
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index d92c18e..88e7605 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,11 +18,14 @@
 #include <linux/major.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
+#include <linux/iommu.h>
+
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
 
 #include "mdss_mdp.h"
 #include "mdss_fb.h"
 
-#define DEBUG_WRITEBACK
 
 enum mdss_mdp_wb_state {
 	WB_OPEN,
@@ -39,6 +42,8 @@
 	struct list_head register_queue;
 	wait_queue_head_t wait_q;
 	u32 state;
+	int is_secure;
+	struct mdss_mdp_pipe *secure_pipe;
 };
 
 enum mdss_mdp_wb_node_state {
@@ -67,33 +72,47 @@
 	static struct ion_handle *ihdl;
 	static void *videomemory;
 	static ion_phys_addr_t mdss_wb_mem;
-	static struct mdss_mdp_data buffer = { .num_planes = 1,	};
-	struct fb_info *fbi;
-	size_t img_size;
+	static struct mdss_mdp_data mdss_wb_buffer = { .num_planes = 1, };
+	int rc;
 
-	fbi = mfd->fbi;
-	img_size = fbi->var.xres * fbi->var.yres * fbi->var.bits_per_pixel / 8;
+	if (IS_ERR_OR_NULL(ihdl)) {
+		struct fb_info *fbi;
+		size_t img_size;
+		struct ion_client *iclient = mdss_get_ionclient();
+		struct mdss_mdp_img_data *img = mdss_wb_buffer.p;
 
-	if (ihdl == NULL) {
-		ihdl = ion_alloc(mfd->iclient, img_size, SZ_4K,
-				 ION_HEAP(ION_SF_HEAP_ID));
-		if (!IS_ERR_OR_NULL(ihdl)) {
-			videomemory = ion_map_kernel(mfd->iclient, ihdl);
-			ion_phys(mfd->iclient, ihdl, &mdss_wb_mem, &img_size);
-		} else {
+		fbi = mfd->fbi;
+		img_size = fbi->var.xres * fbi->var.yres *
+			fbi->var.bits_per_pixel / 8;
+
+		ihdl = ion_alloc(iclient, img_size, SZ_4K,
+				 ION_HEAP(ION_SF_HEAP_ID), 0);
+		if (IS_ERR_OR_NULL(ihdl)) {
 			pr_err("unable to alloc fbmem from ion (%p)\n", ihdl);
-			ihdl = NULL;
+			return NULL;
 		}
+
+		videomemory = ion_map_kernel(iclient, ihdl);
+		ion_phys(iclient, ihdl, &mdss_wb_mem, &img_size);
+
+		if (is_mdss_iommu_attached()) {
+			int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+			rc = ion_map_iommu(iclient, ihdl,
+					   mdss_get_iommu_domain(domain),
+					   0, SZ_4K, 0,
+					   (unsigned long *) &img->addr,
+					   (unsigned long *) &img->len,
+					   0, 0);
+		} else {
+			img->addr = mdss_wb_mem;
+			img->len = img_size;
+		}
+
+		pr_debug("ihdl=%p virt=%p phys=0x%lx iova=0x%x size=%u\n",
+			 ihdl, videomemory, mdss_wb_mem, img->addr, img_size);
 	}
-
-	if (mdss_wb_mem) {
-		buffer.p[0].addr = (u32) mdss_wb_mem;
-		buffer.p[0].len = img_size;
-
-		return &buffer;
-	}
-
-	return NULL;
+	return &mdss_wb_buffer;
 }
 #else
 static inline
@@ -103,16 +122,77 @@
 }
 #endif
 
+int mdss_mdp_wb_set_secure(struct msm_fb_data_type *mfd, int enable)
+{
+	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+	struct mdss_mdp_pipe *pipe;
+	struct mdss_mdp_mixer *mixer;
+
+	pr_debug("setting secure=%d\n", enable);
+
+	wb->is_secure = enable;
+	pipe = wb->secure_pipe;
+
+	if (!enable) {
+		if (pipe) {
+			/* unset pipe */
+			mdss_mdp_mixer_pipe_unstage(pipe);
+			mdss_mdp_pipe_destroy(pipe);
+			wb->secure_pipe = NULL;
+		}
+		return 0;
+	}
+
+	mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
+	if (!mixer) {
+		pr_err("Unable to find mixer for wb\n");
+		return -ENOENT;
+	}
+
+	if (!pipe) {
+		pipe = mdss_mdp_pipe_alloc(mixer, MDSS_MDP_PIPE_TYPE_RGB);
+		if (!pipe)
+			pipe = mdss_mdp_pipe_alloc(mixer,
+					MDSS_MDP_PIPE_TYPE_VIG);
+		if (!pipe) {
+			pr_err("Unable to get pipe to set secure session\n");
+			return -ENOMEM;
+		}
+
+		pipe->src_fmt = mdss_mdp_get_format_params(MDP_RGBA_8888);
+
+		pipe->mfd = mfd;
+		pipe->mixer_stage = MDSS_MDP_STAGE_BASE;
+		wb->secure_pipe = pipe;
+	}
+
+	pipe->img_height = mixer->height;
+	pipe->img_width = mixer->width;
+	pipe->src.x = 0;
+	pipe->src.y = 0;
+	pipe->src.w = pipe->img_width;
+	pipe->src.h = pipe->img_height;
+	pipe->dst = pipe->src;
+
+	pipe->flags = (enable ? MDP_SECURE_OVERLAY_SESSION : 0);
+	pipe->params_changed++;
+
+	pr_debug("setting secure pipe=%d flags=%x\n", pipe->num, pipe->flags);
+
+	return mdss_mdp_pipe_queue_data(pipe, NULL);
+}
+
 static int mdss_mdp_wb_init(struct msm_fb_data_type *mfd)
 {
-	struct mdss_mdp_wb *wb;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
 
 	mutex_lock(&mdss_mdp_wb_buf_lock);
-	wb = mfd->wb;
 	if (wb == NULL) {
 		wb = &mdss_mdp_wb_info;
 		wb->fb_ndx = mfd->index;
-		mfd->wb = wb;
+		mdp5_data->wb = wb;
 	} else if (mfd->index != wb->fb_ndx) {
 		pr_err("only one writeback intf supported at a time\n");
 		return -EMLINK;
@@ -129,14 +209,15 @@
 	wb->state = WB_OPEN;
 	init_waitqueue_head(&wb->wait_q);
 
-	mfd->wb = wb;
+	mdp5_data->wb = wb;
 	mutex_unlock(&mdss_mdp_wb_buf_lock);
 	return 0;
 }
 
 static int mdss_mdp_wb_terminate(struct msm_fb_data_type *mfd)
 {
-	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
 
 	if (!wb) {
 		pr_err("unable to terminate, writeback is not initialized\n");
@@ -155,9 +236,13 @@
 			kfree(node);
 		}
 	}
+
+	wb->is_secure = false;
+	if (wb->secure_pipe)
+		mdss_mdp_pipe_destroy(wb->secure_pipe);
 	mutex_unlock(&wb->lock);
 
-	mfd->wb = NULL;
+	mdp5_data->wb = NULL;
 	mutex_unlock(&mdss_mdp_wb_buf_lock);
 
 	return 0;
@@ -165,7 +250,7 @@
 
 static int mdss_mdp_wb_start(struct msm_fb_data_type *mfd)
 {
-	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
 
 	if (!wb) {
 		pr_err("unable to start, writeback is not initialized\n");
@@ -182,7 +267,7 @@
 
 static int mdss_mdp_wb_stop(struct msm_fb_data_type *mfd)
 {
-	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
 
 	if (!wb) {
 		pr_err("unable to stop, writeback is not initialized\n");
@@ -239,6 +324,8 @@
 	buf = &node->buf_data.p[0];
 	buf->addr = (u32) (data->iova + data->offset);
 	buf->len = UINT_MAX; /* trusted source */
+	if (wb->is_secure)
+		buf->flags |= MDP_SECURE_OVERLAY_SESSION;
 	ret = mdss_mdp_wb_register_node(wb, node);
 	if (IS_ERR_VALUE(ret)) {
 		pr_err("error registering wb node\n");
@@ -252,8 +339,10 @@
 }
 
 static struct mdss_mdp_wb_data *get_user_node(struct msm_fb_data_type *mfd,
-					      struct msmfb_data *data) {
-	struct mdss_mdp_wb *wb = mfd->wb;
+						struct msmfb_data *data)
+{
+	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
 	struct mdss_mdp_wb_data *node;
 	struct mdss_mdp_img_data *buf;
 	int ret;
@@ -266,7 +355,9 @@
 
 	node->buf_data.num_planes = 1;
 	buf = &node->buf_data.p[0];
-	ret = mdss_mdp_get_img(mfd->iclient, data, buf);
+	if (wb->is_secure)
+		buf->flags |= MDP_SECURE_OVERLAY_SESSION;
+	ret = mdss_mdp_get_img(data, buf);
 	if (IS_ERR_VALUE(ret)) {
 		pr_err("error getting buffer info\n");
 		goto register_fail;
@@ -290,9 +381,9 @@
 }
 
 static int mdss_mdp_wb_queue(struct msm_fb_data_type *mfd,
-			     struct msmfb_data *data, int local)
+				struct msmfb_data *data, int local)
 {
-	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
 	struct mdss_mdp_wb_data *node = NULL;
 	int ret = 0;
 
@@ -333,9 +424,9 @@
 }
 
 static int mdss_mdp_wb_dequeue(struct msm_fb_data_type *mfd,
-			       struct msmfb_data *data)
+				struct msmfb_data *data)
 {
-	struct mdss_mdp_wb *wb = mfd->wb;
+	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
 	struct mdss_mdp_wb_data *node = NULL;
 	int ret;
 
@@ -371,7 +462,7 @@
 		ret = -ENOBUFS;
 	}
 	mutex_unlock(&wb->lock);
-	return 0;
+	return ret;
 }
 
 static void mdss_mdp_wb_callback(void *arg)
@@ -380,9 +471,10 @@
 		complete((struct completion *) arg);
 }
 
-int mdss_mdp_wb_kickoff(struct mdss_mdp_ctl *ctl)
+int mdss_mdp_wb_kickoff(struct msm_fb_data_type *mfd)
 {
-	struct mdss_mdp_wb *wb;
+	struct mdss_mdp_wb *wb = mfd_to_wb(mfd);
+	struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
 	struct mdss_mdp_wb_data *node = NULL;
 	int ret = 0;
 	DECLARE_COMPLETION_ONSTACK(comp);
@@ -391,13 +483,15 @@
 		.priv_data = &comp,
 	};
 
-	if (!ctl || !ctl->mfd)
-		return -ENODEV;
+	if (!ctl->power_on)
+		return 0;
 
 	mutex_lock(&mdss_mdp_wb_buf_lock);
-	wb = ctl->mfd->wb;
 	if (wb) {
 		mutex_lock(&wb->lock);
+		/* if the control path was reinitialized, reapply secure state */
+		if (ctl->play_cnt == 0)
+			mdss_mdp_wb_set_secure(ctl->mfd, wb->is_secure);
 		if (!list_empty(&wb->free_queue) && wb->state != WB_STOPING &&
 		    wb->state != WB_STOP) {
 			node = list_first_entry(&wb->free_queue,
@@ -417,7 +511,8 @@
 
 	if (wb_args.data == NULL) {
 		pr_err("unable to get writeback buf ctl=%d\n", ctl->num);
-		ret = -ENOMEM;
+		/* drop buffer but don't return error */
+		ret = 0;
 		goto kickoff_fail;
 	}
 
@@ -440,7 +535,8 @@
 	return ret;
 }
 
-int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg)
+int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd,
+				void *arg)
 {
 	struct msmfb_data data;
 	int ret = -ENOSYS;
@@ -546,3 +642,32 @@
 	return mdss_mdp_wb_terminate(mfd);
 }
 EXPORT_SYMBOL(msm_fb_writeback_terminate);
+
+int msm_fb_get_iommu_domain(struct fb_info *info, int domain)
+{
+	int mdss_domain;
+	switch (domain) {
+	case MDP_IOMMU_DOMAIN_CP:
+		mdss_domain = MDSS_IOMMU_DOMAIN_SECURE;
+		break;
+	case MDP_IOMMU_DOMAIN_NS:
+		mdss_domain = MDSS_IOMMU_DOMAIN_UNSECURE;
+		break;
+	default:
+		pr_err("Invalid mdp iommu domain (%d)\n", domain);
+		return -EINVAL;
+	}
+	return mdss_get_iommu_domain(mdss_domain);
+}
+EXPORT_SYMBOL(msm_fb_get_iommu_domain);
+
+int msm_fb_writeback_set_secure(struct fb_info *info, int enable)
+{
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *) info->par;
+
+	if (!mfd)
+		return -ENODEV;
+
+	return mdss_mdp_wb_set_secure(mfd, enable);
+}
+EXPORT_SYMBOL(msm_fb_writeback_set_secure);
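
The exported msm_fb_writeback_set_secure() is the entry point for writeback
clients such as a WFD stack; it resolves the mfd from the fb_info and drives
mdss_mdp_wb_set_secure(), which stages or tears down the base secure pipe.
A hedged usage sketch:

/* sketch: a client bracketing its secure writeback session;
 * 'fbi' is the writeback fb_info the client already opened */
static int example_secure_session(struct fb_info *fbi)
{
	int rc = msm_fb_writeback_set_secure(fbi, 1);	/* enter secure */
	if (rc)
		return rc;
	/* ... queue and dequeue secure buffers here ... */
	return msm_fb_writeback_set_secure(fbi, 0);	/* leave secure */
}
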
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 7ee3829..d230100 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -55,19 +55,21 @@
 	MAX_PHYS_TARGET_NUM,
 };
 
-/* panel info type */
-struct lcd_panel_info {
-	u32 vsync_enable;
-	u32 refx100;
-	u32 v_back_porch;
-	u32 v_front_porch;
-	u32 v_pulse_width;
-	u32 hw_vsync_mode;
-	u32 vsync_notifier_period;
-	u32 rev;
+enum mdss_intf_events {
+	MDSS_EVENT_RESET,
+	MDSS_EVENT_UNBLANK,
+	MDSS_EVENT_PANEL_ON,
+	MDSS_EVENT_BLANK,
+	MDSS_EVENT_PANEL_OFF,
+	MDSS_EVENT_CLOSE,
+	MDSS_EVENT_SUSPEND,
+	MDSS_EVENT_RESUME,
+	MDSS_EVENT_CHECK_PARAMS,
+	MDSS_EVENT_CONT_SPLASH_FINISH,
+	MDSS_EVENT_FB_REGISTERED,
 };
 
-struct lcdc_panel_info {
+struct lcd_panel_info {
 	u32 h_back_porch;
 	u32 h_front_porch;
 	u32 h_pulse_width;
@@ -83,6 +85,18 @@
 	u32 yres_pad;
 };
 
+
+/* DSI PHY configuration */
+struct mdss_dsi_phy_ctrl {
+	uint32_t regulator[7];
+	uint32_t timing[12];
+	uint32_t ctrl[4];
+	uint32_t strength[2];
+	char bistCtrl[6];
+	uint32_t pll[21];
+	char laneCfg[45];
+};
+
 struct mipi_panel_info {
 	char mode;		/* video/cmd */
 	char interleave_mode;
@@ -103,7 +117,7 @@
 	char t_clk_post; /* 0xc0, DSI_CLKOUT_TIMING_CTRL */
 	char t_clk_pre;  /* 0xc0, DSI_CLKOUT_TIMING_CTRL */
 	char vc;	/* virtual channel */
-	struct mipi_dsi_phy_ctrl *dsi_phy_db;
+	struct mdss_dsi_phy_ctrl *dsi_phy_db;
 	/* video mode */
 	char pulse_mode_hsa_he;
 	char hfp_power_stop;
@@ -127,6 +141,9 @@
 	char no_max_pkt_size;
 	/* Clock required during LP commands */
 	char force_clk_lane_hs;
+
+	char vsync_enable;
+	char hw_vsync_mode;
 };
 
 enum lvds_mode {
@@ -140,6 +157,26 @@
 	char channel_swap;
 };
 
+struct fbc_panel_info {
+	u32 enabled;
+	u32 target_bpp;
+	u32 comp_mode;
+	u32 qerr_enable;
+	u32 cd_bias;
+	u32 pat_enable;
+	u32 vlc_enable;
+	u32 bflc_enable;
+
+	u32 line_x_budget;
+	u32 block_x_budget;
+	u32 block_budget;
+
+	u32 lossless_mode_thd;
+	u32 lossy_mode_thd;
+	u32 lossy_rgb_thd;
+	u32 lossy_mode_idx;
+};
+
 struct mdss_panel_info {
 	u32 xres;
 	u32 yres;
@@ -156,23 +193,33 @@
 	u32 frame_count;
 	u32 is_3d_panel;
 	u32 out_format;
+	u32 vic; /* video identification code */
+	int bklt_ctrl;	/* backlight ctrl */
+	int pwm_gpio;
+	int pwm_lpg_chan;
+	int pwm_period;
 
-	struct lcd_panel_info lcd;
-	struct lcdc_panel_info lcdc;
+	u32 cont_splash_enabled;
+	struct ion_handle *splash_ihdl;
+	u32 panel_power_on;
+
+	struct lcd_panel_info lcdc;
+	struct fbc_panel_info fbc;
 	struct mipi_panel_info mipi;
 	struct lvds_panel_info lvds;
-	struct fb_info *fbi;
 };
 
 struct mdss_panel_data {
 	struct mdss_panel_info panel_info;
-	void (*set_backlight) (u32 bl_level);
-	unsigned char *dsi_base;
+	void (*set_backlight) (struct mdss_panel_data *pdata, u32 bl_level);
+	unsigned char *mmss_cc_base;
 
 	/* function entry chain */
-	int (*on) (struct mdss_panel_data *pdata);
-	int (*off) (struct mdss_panel_data *pdata);
+	int (*event_handler) (struct mdss_panel_data *pdata, int e, void *arg);
+
+	struct mdss_panel_data *next;
 };
 
-int mdss_register_panel(struct mdss_panel_data *pdata);
+int mdss_register_panel(struct platform_device *pdev,
+	struct mdss_panel_data *pdata);
 #endif /* MDSS_PANEL_H */
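
The panel interface now funnels everything through a single event_handler
chain driven by the mdss_intf_events enum, replacing the separate on/off
hooks. A panel driver under the new scheme would look roughly like this
(function names are hypothetical); registration then amounts to setting
pdata->event_handler before calling mdss_register_panel():

/* sketch: a panel driver's single entry point */
static int example_panel_event_handler(struct mdss_panel_data *pdata,
				       int event, void *arg)
{
	switch (event) {
	case MDSS_EVENT_UNBLANK:
		return example_panel_power_on(pdata);	/* hypothetical */
	case MDSS_EVENT_PANEL_OFF:
		return example_panel_power_off(pdata);	/* hypothetical */
	default:
		return 0;	/* unhandled events are not an error */
	}
}
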
diff --git a/drivers/video/msm/mdss/mdss_qpic.c b/drivers/video/msm/mdss/mdss_qpic.c
new file mode 100644
index 0000000..be02113
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_qpic.c
@@ -0,0 +1,614 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/bootmem.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/system.h>
+#include <asm/mach-types.h>
+#include <mach/sps.h>
+#include <mach/clk.h>
+#include <mach/hardware.h>
+
+#include "mdss_fb.h"
+#include "mdss_qpic.h"
+
+static int mdss_qpic_probe(struct platform_device *pdev);
+static int mdss_qpic_remove(struct platform_device *pdev);
+
+struct qpic_data_type *qpic_res;
+
+/* for tuning */
+static u32 use_bam = true;
+static u32 use_irq;
+static u32 use_vsync;
+
+static const struct of_device_id mdss_qpic_dt_match[] = {
+	{ .compatible = "qcom,mdss_qpic",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mdss_qpic_dt_match);
+
+static struct platform_driver mdss_qpic_driver = {
+	.probe = mdss_qpic_probe,
+	.remove = mdss_qpic_remove,
+	.shutdown = NULL,
+	.driver = {
+		/*
+		 * Simulate mdp hw
+		 */
+		.name = "mdp",
+		.of_match_table = mdss_qpic_dt_match,
+	},
+};
+
+int qpic_on(struct msm_fb_data_type *mfd)
+{
+	int ret;
+	ret = mdss_qpic_panel_on(qpic_res->panel_data);
+	return ret;
+}
+
+int qpic_off(struct msm_fb_data_type *mfd)
+{
+	int ret;
+	ret = mdss_qpic_panel_off(qpic_res->panel_data);
+	return ret;
+}
+
+static void mdss_qpic_pan_display(struct msm_fb_data_type *mfd)
+{
+
+	struct fb_info *fbi;
+	u32 offset, fb_offset, size;
+	int bpp;
+
+	if (!mfd) {
+		pr_err("%s: mfd is NULL!", __func__);
+		return;
+	}
+
+	fbi = mfd->fbi;
+
+	bpp = fbi->var.bits_per_pixel / 8;
+	offset = fbi->var.xoffset * bpp +
+		 fbi->var.yoffset * fbi->fix.line_length;
+
+	if (offset > fbi->fix.smem_len) {
+		pr_err("invalid fb offset=%u total length=%u\n",
+		       offset, fbi->fix.smem_len);
+		return;
+	}
+	fb_offset = (u32)fbi->fix.smem_start + offset;
+
+	mdss_qpic_panel_on(qpic_res->panel_data);
+	size = fbi->var.xres * fbi->var.yres * bpp;
+
+	qpic_send_frame(0, 0, fbi->var.xres, fbi->var.yres,
+		(u32 *)fb_offset, size);
+}
+
+int mdss_qpic_alloc_fb_mem(struct msm_fb_data_type *mfd)
+{
+	size_t size;
+	u32 yres = mfd->fbi->var.yres_virtual;
+
+	size = PAGE_ALIGN(mfd->fbi->fix.line_length * yres);
+
+	if (!qpic_res->res_init)
+		return -EINVAL;
+
+	if (mfd->index != 0) {
+		mfd->fbi->fix.smem_start = 0;
+		mfd->fbi->screen_base = NULL;
+		mfd->fbi->fix.smem_len = 0;
+		mfd->iova = 0;
+		return 0;
+	}
+
+	if (!qpic_res->fb_virt) {
+		qpic_res->fb_virt = (void *)dmam_alloc_coherent(
+						&qpic_res->pdev->dev,
+						size + QPIC_MAX_CMD_BUF_SIZE,
+						&qpic_res->fb_phys,
+						GFP_KERNEL);
+		pr_err("%s size=%d vir_addr=%x phys_addr=%x",
+			__func__, size, (int)qpic_res->fb_virt,
+			(int)qpic_res->fb_phys);
+		if (!qpic_res->fb_virt)
+			return -ENOMEM;
+		qpic_res->cmd_buf_virt = qpic_res->fb_virt + size;
+		qpic_res->cmd_buf_phys = qpic_res->fb_phys + size;
+	}
+	mfd->fbi->fix.smem_start = qpic_res->fb_phys;
+	mfd->fbi->screen_base = qpic_res->fb_virt;
+	mfd->fbi->fix.smem_len = size;
+	mfd->iova = 0;
+	return 0;
+}
+
+u32 mdss_qpic_fb_stride(u32 fb_index, u32 xres, int bpp)
+{
+	return xres * bpp;
+}
+
+int mdss_qpic_overlay_init(struct msm_fb_data_type *mfd)
+{
+	struct msm_mdp_interface *qpic_interface = &mfd->mdp;
+	qpic_interface->on_fnc = qpic_on;
+	qpic_interface->off_fnc = qpic_off;
+	qpic_interface->do_histogram = NULL;
+	qpic_interface->cursor_update = NULL;
+	qpic_interface->dma_fnc = mdss_qpic_pan_display;
+	qpic_interface->ioctl_handler = NULL;
+	qpic_interface->kickoff_fnc = NULL;
+	return 0;
+}
+
+int qpic_register_panel(struct mdss_panel_data *pdata)
+{
+	struct platform_device *mdss_fb_dev = NULL;
+	int rc;
+
+	mdss_fb_dev = platform_device_alloc("mdss_fb", pdata->panel_info.pdest);
+	if (!mdss_fb_dev) {
+		pr_err("unable to allocate mdss_fb device\n");
+		return -ENOMEM;
+	}
+
+	mdss_fb_dev->dev.platform_data = pdata;
+
+	rc = platform_device_add(mdss_fb_dev);
+	if (rc) {
+		platform_device_put(mdss_fb_dev);
+		pr_err("unable to probe mdss_fb device (%d)\n", rc);
+		return rc;
+	}
+
+	qpic_res->panel_data = pdata;
+
+	return rc;
+}
+
+int qpic_init_sps(struct platform_device *pdev,
+				struct qpic_sps_endpt *end_point)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct sps_connect *sps_config = &end_point->config;
+	struct sps_register_event *sps_event = &end_point->bam_event;
+	struct sps_bam_props bam = {0};
+	u32 bam_handle = 0;
+
+	if (qpic_res->sps_init)
+		return 0;
+	bam.phys_addr = qpic_res->qpic_phys + 0x4000;
+	bam.virt_addr = qpic_res->qpic_base + 0x4000;
+	bam.irq = qpic_res->irq - 4;
+	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+
+	rc = sps_phy2h(bam.phys_addr, &bam_handle);
+	if (rc)
+		rc = sps_register_bam_device(&bam, &bam_handle);
+	if (rc) {
+		pr_err("%s bam_handle is NULL", __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	pipe_handle = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		pr_err("sps_alloc_endpoint() failed\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = sps_get_config(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_get_config() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	/* WRITE CASE: source - system memory; destination - BAM */
+	sps_config->source = SPS_DEV_HANDLE_MEM;
+	sps_config->destination = bam_handle;
+	sps_config->mode = SPS_MODE_DEST;
+	sps_config->dest_pipe_index = 6;
+
+	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_EOT;
+	sps_config->lock_group = 0;
+	/*
+	 * The descriptor FIFO is cyclic, so it must hold one more entry
+	 * than the maximum number of unacknowledged descriptors: with the
+	 * 64 entries allocated here, up to 63 transfers can be outstanding
+	 * before an ack is required.
+	 */
+	sps_config->desc.size = 64 * sizeof(struct sps_iovec);
+	sps_config->desc.base = dmam_alloc_coherent(&pdev->dev,
+					sps_config->desc.size,
+					&sps_config->desc.phys_base,
+					GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		pr_err("dmam_alloc_coherent() failed for size %x\n",
+				sps_config->desc.size);
+		rc = -ENOMEM;
+		goto free_endpoint;
+	}
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	rc = sps_connect(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_connect() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	init_completion(&end_point->completion);
+	sps_event->mode = SPS_TRIGGER_WAIT;
+	sps_event->options = SPS_O_EOT;
+	sps_event->xfer_done = &end_point->completion;
+	sps_event->user = (void *)qpic_res;
+
+	rc = sps_register_event(pipe_handle, sps_event);
+	if (rc) {
+		pr_err("sps_register_event() failed %d\n", rc);
+		goto sps_disconnect;
+	}
+
+	end_point->handle = pipe_handle;
+	qpic_res->sps_init = true;
+	goto out;
+sps_disconnect:
+	sps_disconnect(pipe_handle);
+free_endpoint:
+	sps_free_endpoint(pipe_handle);
+out:
+	return rc;
+}
+
+void mdss_qpic_reset(void)
+{
+	u32 time_end;
+
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_RESET, 1 << 0);
+	/* wait 100 us after reset as suggested by hw */
+	usleep(100);
+	time_end = (u32)ktime_to_ms(ktime_get()) + QPIC_MAX_VSYNC_WAIT_TIME;
+	while (((QPIC_INP(QPIC_REG_QPIC_LCDC_STTS) & (1 << 8)) == 0)) {
+		if ((u32)ktime_to_ms(ktime_get()) > time_end) {
+			pr_err("%s reset not finished", __func__);
+			break;
+		}
+		/* poll every 100 us; interval chosen by experiment */
+		usleep(100);
+	}
+}
+
+void qpic_interrupt_en(u32 en)
+{
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+	if (en) {
+		if (!qpic_res->irq_ena) {
+			qpic_res->irq_ena = true;
+			enable_irq(qpic_res->irq);
+			QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN,
+				(1 << 0) | (1 << 2));
+		}
+	} else {
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_EN, 0);
+		disable_irq(qpic_res->irq);
+		qpic_res->irq_ena = false;
+	}
+}
+
+static irqreturn_t qpic_irq_handler(int irq, void *ptr)
+{
+	u32 data;
+	data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+	return IRQ_HANDLED;
+}
+
+int qpic_flush_buffer_bam(u32 cmd, u32 len, u32 *param, u32 is_cmd)
+{
+	int  ret = 0;
+	u32 phys_addr, cfg2, block_len, flags;
+	if (is_cmd) {
+		memcpy((u8 *)qpic_res->cmd_buf_virt, param, len);
+		invalidate_caches((unsigned long)qpic_res->cmd_buf_virt,
+				  len,
+				  (unsigned long)qpic_res->cmd_buf_phys);
+		phys_addr = qpic_res->cmd_buf_phys;
+	} else {
+		phys_addr = (u32)param;
+	}
+
+	cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+	cfg2 &= ~0xFF;
+	cfg2 |= cmd;
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+	block_len = 0x7FF0;
+	while (len > 0) {
+		if (len <= 0x7FF0) {
+			flags = SPS_IOVEC_FLAG_EOT;
+			block_len = len;
+		} else {
+			flags = 0;
+		}
+		ret = sps_transfer_one(qpic_res->qpic_endpt.handle,
+				phys_addr, block_len, NULL, flags);
+		if (ret)
+			pr_err("failed to submit command %x ret %d\n",
+				cmd, ret);
+		phys_addr += block_len;
+		len -= block_len;
+	}
+	ret = wait_for_completion_interruptible_timeout(
+		&qpic_res->qpic_endpt.completion,
+		msecs_to_jiffies(100 * 4));
+	if (ret <= 0)
+		pr_err("%s timeout %x", __func__, ret);
+	else
+		ret = 0;
+	return ret;
+}
+
+int qpic_flush_buffer_sw(u32 cmd, u32 len, u32 *param, u32 is_cmd)
+{
+	u32 bytes_left, space, data, cfg2, time_end;
+	int i, ret = 0;
+	if ((len <= (sizeof(u32) * 4)) && (is_cmd)) {
+		len >>= 2; /* len in dwords */
+		data = 0;
+		for (i = 0; i < len; i++)
+			data |= param[i] << (8 * i);
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT, len);
+		QPIC_OUTP(QPIC_REG_LCD_DEVICE_CMD0 + (4 * cmd), data);
+		return 0;
+	}
+	if ((len & 0x1) != 0) {
+		pr_err("%s: number of bytes needs be even", __func__);
+		len = (len + 1) & (~0x1);
+	}
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_IRQ_CLR, 0xff);
+	cfg2 = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+	cfg2 |= (1 << 24); /* transparent mode */
+	cfg2 &= ~0xFF;
+	cfg2 |= cmd;
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_SOF, 0x0);
+	bytes_left = len;
+	while (bytes_left > 0) {
+		time_end = (u32)ktime_to_ms(ktime_get()) +
+			QPIC_MAX_VSYNC_WAIT_TIME;
+		while (1) {
+			data = QPIC_INP(QPIC_REG_QPIC_LCDC_STTS);
+			data &= 0x3F;
+			if (data == 0)
+				break;
+			/* poll every 10 us; interval chosen by experiment */
+			usleep(10);
+			if (ktime_to_ms(ktime_get()) > time_end) {
+				pr_err("%s: timed out\n", __func__);
+				ret = -EBUSY;
+				goto exit_send_cmd_sw;
+			}
+		}
+		space = (16 - data);
+
+		while ((space > 0) && (bytes_left > 0)) {
+			/* write to fifo */
+			if (bytes_left >= 4) {
+				QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
+					param[0]);
+				param++;
+				bytes_left -= 4;
+				space--;
+			} else if (bytes_left == 2) {
+				QPIC_OUTPW(QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0,
+					*(u16 *)param);
+				bytes_left -= 2;
+			}
+		}
+	}
+	/* finished */
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_FIFO_EOF, 0x0);
+
+	time_end = (u32)ktime_to_ms(ktime_get()) + QPIC_MAX_VSYNC_WAIT_TIME;
+	while (1) {
+		data = QPIC_INP(QPIC_REG_QPIC_LCDC_IRQ_STTS);
+		if (data & (1 << 2))
+			break;
+		/* poll every 10 us; interval chosen by experiment */
+		usleep(10);
+		if (ktime_to_ms(ktime_get()) > time_end) {
+			pr_err("%s: wait for EOF timed out\n", __func__);
+			ret = -EBUSY;
+			goto exit_send_cmd_sw;
+		}
+	}
+exit_send_cmd_sw:
+	cfg2 &= ~(1 << 24);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, cfg2);
+	return ret;
+}
+
+int qpic_flush_buffer(u32 cmd, u32 len, u32 *param, u32 is_cmd)
+{
+	if (use_bam) {
+		if (is_cmd)
+			return qpic_flush_buffer_sw(cmd, len, param, is_cmd);
+		else
+			return qpic_flush_buffer_bam(cmd, len, param, is_cmd);
+	} else {
+		return qpic_flush_buffer_sw(cmd, len, param, is_cmd);
+	}
+}
+
+int mdss_qpic_init(void)
+{
+	int ret = 0;
+	u32 data;
+	mdss_qpic_reset();
+
+	pr_info("%s version=%x", __func__, QPIC_INP(QPIC_REG_LCDC_VERSION));
+	data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
+	/* clear vsync wait , bam mode = 0*/
+	data &= ~(3 << 0);
+	data &= ~(0x1f << 3);
+	data |= (1 << 3); /* threshold */
+	data |= (1 << 8); /* lcd_en */
+	data &= ~(0x1f << 9);
+	data |= (1 << 9); /* threshold */
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
+
+	if (use_irq && !qpic_res->irq_requested) {
+		ret = devm_request_irq(&qpic_res->pdev->dev,
+			qpic_res->irq, qpic_irq_handler,
+			IRQF_DISABLED,	"QPIC", qpic_res);
+		if (ret) {
+			pr_err("qpic request_irq() failed!\n");
+			use_irq = false;
+		}
+		qpic_res->irq_requested = true;
+	}
+
+	qpic_interrupt_en(use_irq);
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG0, 0x02108501);
+	data = QPIC_INP(QPIC_REG_QPIC_LCDC_CFG2);
+	data &= ~(0xFFF);
+	data |= 0x200; /* XRGB */
+	data |= 0x2C;
+	QPIC_OUTP(QPIC_REG_QPIC_LCDC_CFG2, data);
+
+	if (use_bam) {
+		qpic_init_sps(qpic_res->pdev, &qpic_res->qpic_endpt);
+		data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
+		data |= (1 << 1);
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
+	}
+	/* TE enable */
+	if (use_vsync) {
+		data = QPIC_INP(QPIC_REG_QPIC_LCDC_CTRL);
+		data |= (1 << 0);
+		QPIC_OUTP(QPIC_REG_QPIC_LCDC_CTRL, data);
+	}
+
+	return ret;
+}
+
+static int mdss_qpic_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int rc = 0;
+	static struct msm_mdp_interface qpic_interface = {
+		.init_fnc = mdss_qpic_overlay_init,
+		.fb_mem_alloc_fnc = mdss_qpic_alloc_fb_mem,
+		.fb_stride = mdss_qpic_fb_stride,
+	};
+
+	if (!pdev->dev.of_node) {
+		pr_err("qpic driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	if (!qpic_res)
+		qpic_res = devm_kzalloc(&pdev->dev,
+			sizeof(*qpic_res), GFP_KERNEL);
+
+	if (!qpic_res)
+		return -ENOMEM;
+
+	if (qpic_res->res_init) {
+		pr_err("qpic already initialized\n");
+		return -EINVAL;
+	}
+
+	pdev->id = 0;
+
+	qpic_res->pdev = pdev;
+	platform_set_drvdata(pdev, qpic_res);
+
+	res = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "qpic_base");
+	if (!res) {
+		pr_err("unable to get QPIC reg base address\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+
+	qpic_res->qpic_reg_size = resource_size(res);
+	qpic_res->qpic_base = devm_ioremap(&pdev->dev, res->start,
+					qpic_res->qpic_reg_size);
+	if (unlikely(!qpic_res->qpic_base)) {
+		pr_err("unable to map MDSS QPIC base\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+	qpic_res->qpic_phys = res->start;
+	pr_info("MDSS QPIC HW Base phy_Address=0x%x virt=0x%x\n",
+		(int) res->start,
+		(int) qpic_res->qpic_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		pr_err("unable to get QPIC irq\n");
+		rc = -ENOMEM;
+		goto probe_done;
+	}
+
+	qpic_res->irq = res->start;
+	qpic_res->res_init = true;
+
+	rc = mdss_fb_register_mdp_instance(&qpic_interface);
+	if (rc)
+		pr_err("unable to register QPIC instance\n");
+
+probe_done:
+	return rc;
+}
+
+static int mdss_qpic_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int __init mdss_qpic_driver_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&mdss_qpic_driver);
+	if (ret)
+		pr_err("mdss_qpic_register_driver() failed!\n");
+	return ret;
+}
+
+module_init(mdss_qpic_driver_init);
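
qpic_flush_buffer_bam() above caps each BAM descriptor at 0x7FF0 (32752)
bytes and raises EOT only on the final one, so a whole frame completes with
a single interrupt. For example:

/*
 * a hypothetical 65000-byte frame is split into:
 *   descriptor 1: 32752 bytes, flags = 0
 *   descriptor 2: 32248 bytes, flags = SPS_IOVEC_FLAG_EOT
 * only the EOT descriptor signals completion, so the
 * wait_for_completion path wakes once per frame
 */
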
diff --git a/drivers/video/msm/mdss/mdss_qpic.h b/drivers/video/msm/mdss/mdss_qpic.h
new file mode 100644
index 0000000..086e8c8
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_qpic.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_QPIC_H
+#define MDSS_QPIC_H
+
+#include <linux/list.h>
+#include <mach/scm-io.h>
+#include <mach/sps.h>
+
+#include "mdss_panel.h"
+
+#define QPIC_REG_QPIC_LCDC_CTRL				0x22000
+#define QPIC_REG_LCDC_VERSION				0x22004
+#define QPIC_REG_QPIC_LCDC_IRQ_EN			0x22008
+#define QPIC_REG_QPIC_LCDC_IRQ_STTS			0x2200C
+#define QPIC_REG_QPIC_LCDC_IRQ_CLR			0x22010
+#define QPIC_REG_QPIC_LCDC_STTS				0x22014
+#define QPIC_REG_QPIC_LCDC_CMD_DATA_CYCLE_CNT	0x22018
+#define QPIC_REG_QPIC_LCDC_CFG0				0x22020
+#define QPIC_REG_QPIC_LCDC_CFG1				0x22024
+#define QPIC_REG_QPIC_LCDC_CFG2				0x22028
+#define QPIC_REG_QPIC_LCDC_RESET			0x2202C
+#define QPIC_REG_QPIC_LCDC_FIFO_SOF			0x22100
+#define QPIC_REG_LCD_DEVICE_CMD0			0x23000
+#define QPIC_REG_QPIC_LCDC_FIFO_DATA_PORT0	0x22140
+#define QPIC_REG_QPIC_LCDC_FIFO_EOF			0x22180
+
+#define QPIC_OUTP(off, data) \
+	writel_relaxed((data), qpic_res->qpic_base + (off))
+#define QPIC_OUTPW(off, data) \
+	writew_relaxed((data), qpic_res->qpic_base + (off))
+#define QPIC_INP(off) \
+	readl_relaxed(qpic_res->qpic_base + (off))
+
+#define QPIC_MAX_VSYNC_WAIT_TIME			500
+#define QPIC_MAX_CMD_BUF_SIZE				512
+
+int mdss_qpic_init(void);
+int qpic_flush_buffer(u32 cmd, u32 len, u32 *param, u32 is_cmd);
+
+u32 msm_qpic_get_bam_hdl(struct sps_bam_props *bam);
+int mdss_qpic_panel_on(struct mdss_panel_data *pdata);
+int mdss_qpic_panel_off(struct mdss_panel_data *pdata);
+int qpic_register_panel(struct mdss_panel_data *pdata);
+int ili9341_on(void);
+
+/* Structure that defines an SPS end point for a BAM pipe. */
+struct qpic_sps_endpt {
+	struct sps_pipe *handle;
+	struct sps_connect config;
+	struct sps_register_event bam_event;
+	struct completion completion;
+};
+
+struct qpic_data_type {
+	u32 rev;
+	struct platform_device *pdev;
+	size_t qpic_reg_size;
+	u32 qpic_phys;
+	char __iomem *qpic_base;
+	u32 irq;
+	u32 irq_ena;
+	u32 res_init;
+	void *fb_virt;
+	u32 fb_phys;
+	void *cmd_buf_virt;
+	u32 cmd_buf_phys;
+	struct qpic_sps_endpt qpic_endpt;
+	u32 sps_init;
+	u32 irq_requested;
+	struct mdss_panel_data *panel_data;
+};
+
+u32 qpic_send_frame(
+		u32 x_start,
+		u32 y_start,
+		u32 x_end,
+		u32 y_end,
+		u32 *data,
+		u32 total_bytes);
+
+u32 qpic_panel_get_framerate(void);
+
+#endif /* MDSS_QPIC_H */
diff --git a/drivers/video/msm/mdss/mdss_qpic_panel.c b/drivers/video/msm/mdss/mdss_qpic_panel.c
new file mode 100644
index 0000000..9825abc
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_qpic_panel.c
@@ -0,0 +1,246 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/qpnp/pin.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/uaccess.h>
+
+#include <mach/sps.h>
+
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+static u32 panel_is_on;
+static u32 panel_refresh_rate;
+static int (*qpic_panel_on)(void);
+static void (*qpic_panel_off)(void);
+static int (*qpic_panel_init)(struct platform_device *pdev,
+			struct device_node *np);
+
+u32 qpic_panel_get_framerate(void)
+{
+	return panel_refresh_rate;
+}
+
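+/*
+ * Send one panel command. @cmd is an OP_SIZE_PAIR() encoded
+ * {opcode, size} tuple; a size of INV_SIZE means the argument
+ * length is variable and @length is used instead.
+ */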
+u32 qpic_send_panel_cmd(u32 cmd, u32 *val, u32 length)
+{
+	u32 ret;
+	u32 cmd_index;
+	u32 size;
+	/* rounded up to a multiple of 4: the unpack loop below writes
+	 * four entries at a time */
+	u32 buffer[ALIGN(LCDC_INTERNAL_BUFFER_SIZE, 4)];
+	u32 i;
+
+	cmd_index = LCDC_EXTRACT_OP_CMD(cmd);
+	size = LCDC_EXTRACT_OP_SIZE(cmd);
+	if (size == INV_SIZE)
+		size = length;
+	/* short or pixel-data commands need no conversion */
+	if ((cmd == OP_WRITE_MEMORY_CONTINUE) ||
+		(cmd == OP_WRITE_MEMORY_START)) {
+		ret = qpic_flush_buffer(cmd_index, size, val, false);
+	} else {
+		if (size > LCDC_INTERNAL_BUFFER_SIZE)
+			size = LCDC_INTERNAL_BUFFER_SIZE;
+		/* unpack each 32-bit word into four words of one byte each */
+		for (i = 0; i < size; i += sizeof(u32)) {
+			buffer[i] = (val[(i>>2)] >> 0) & 0xff;
+			buffer[i+1] = (val[(i>>2)] >> 8) & 0xff;
+			buffer[i+2] = (val[(i>>2)] >> 16) & 0xff;
+			buffer[i+3] = (val[(i>>2)] >> 24) & 0xff;
+		}
+		ret = qpic_flush_buffer(cmd_index,
+				size * sizeof(u32), buffer, true);
+	}
+	return ret;
+}
+
+u32 qpic_panel_set_cmd_only(u32 command)
+{
+	u32 param;
+	return qpic_send_panel_cmd(command, &param, 0);
+}
+
+/* write a frame of pixels to a MIPI screen */
+u32 qpic_send_frame(u32 x_start,
+				u32 y_start,
+				u32 x_end,
+				u32 y_end,
+				u32 *data,
+				u32 total_bytes)
+{
+	u32 param;
+	u32 status;
+	u32 start_0_7;
+	u32 end_0_7;
+	u32 start_8_15;
+	u32 end_8_15;
+
+	/* convert to 16 bit representation */
+	x_start = x_start & 0xffff;
+	y_start = y_start & 0xffff;
+	x_end = x_end & 0xffff;
+	y_end = y_end & 0xffff;
+
+	/* set column address (DCS 2Ah): SC[15:8], SC[7:0], EC[15:8], EC[7:0] */
+	start_0_7 = x_start & 0xff;
+	end_0_7 = x_end & 0xff;
+	start_8_15 = (x_start >> 8) & 0xff;
+	end_8_15 = (x_end >> 8) & 0xff;
+	param = (start_8_15 << 0) | (start_0_7 << 8) |
+		(end_8_15 << 16) | (end_0_7 << 24U);
+	status = qpic_send_panel_cmd(OP_SET_COLUMN_ADDRESS, &param, 0);
+	if (status) {
+		pr_err("Failed to set column address");
+		return status;
+	}
+
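+	/* set page address (DCS 2Bh), same byte order as above */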
+	start_0_7 = y_start & 0xff;
+	end_0_7 = y_end & 0xff;
+	start_8_15 = (y_start >> 8) & 0xff;
+	end_8_15 = (y_end >> 8) & 0xff;
+	param = (start_8_15 << 0) | (start_0_7 << 8) |
+		(end_8_15 << 16) | (end_0_7 << 24U);
+	status = qpic_send_panel_cmd(OP_SET_PAGE_ADDRESS, &param, 0);
+	if (status) {
+		pr_err("Failed to set page address");
+		return status;
+	}
+
+	status = qpic_send_panel_cmd(OP_WRITE_MEMORY_START,
+		&(data[0]), total_bytes);
+	if (status) {
+		pr_err("Failed to start memory write");
+		return status;
+	}
+	return 0;
+}
+
+int mdss_qpic_panel_on(struct mdss_panel_data *pdata)
+{
+	int rc = 0;
+
+	if (panel_is_on)
+		return 0;
+	mdss_qpic_init();
+
+	if (qpic_panel_on)
+		rc = qpic_panel_on();
+	if (rc)
+		return rc;
+	panel_is_on = true;
+	return 0;
+}
+
+int mdss_qpic_panel_off(struct mdss_panel_data *pdata)
+{
+	if (qpic_panel_off)
+		qpic_panel_off();
+	panel_is_on = false;
+	return 0;
+}
+
+static int mdss_panel_parse_dt(struct platform_device *pdev,
+			       struct mdss_panel_data *panel_data)
+{
+	struct device_node *np = pdev->dev.of_node;
+	u32 res[2], tmp;
+	int rc;
+
+	rc = of_property_read_u32_array(np, "qcom,mdss-pan-res", res, 2);
+	if (rc) {
+		pr_err("%s:%d, panel resolution not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	panel_data->panel_info.xres = res[0];
+	panel_data->panel_info.yres = res[1];
+	rc = of_property_read_u32(np, "qcom,mdss-pan-bpp", &tmp);
+	if (rc) {
+		pr_err("%s:%d, panel bpp not specified\n",
+						__func__, __LINE__);
+		return -EINVAL;
+	}
+	panel_data->panel_info.bpp = tmp;
+	of_property_read_u32(np, "qcom,refresh_rate", &panel_refresh_rate);
+
+	panel_data->panel_info.type = EBI2_PANEL;
+	panel_data->panel_info.pdest = DISPLAY_1;
+
+	if (qpic_panel_init)
+		rc = qpic_panel_init(pdev, np);
+
+	return rc;
+}
+
+static int __devinit mdss_qpic_panel_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	static struct mdss_panel_data vendor_pdata;
+	static const char *panel_name;
+
+	pr_debug("%s:%d, debug info id=%d", __func__, __LINE__, pdev->id);
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	panel_name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!panel_name)
+		pr_info("%s:%d, panel name not specified\n",
+						__func__, __LINE__);
+	else
+		pr_info("%s: Panel Name = %s\n", __func__, panel_name);
+
+	/* select panel according to label; only ili9341 is supported */
+	qpic_panel_init = ili9341_init;
+	qpic_panel_on = ili9341_on;
+	qpic_panel_off = ili9341_off;
+
+	rc = mdss_panel_parse_dt(pdev, &vendor_pdata);
+	if (rc)
+		return rc;
+
+	rc = qpic_register_panel(&vendor_pdata);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static const struct of_device_id mdss_qpic_panel_match[] = {
+	{.compatible = "qcom,mdss-qpic-panel"},
+	{}
+};
+
+static struct platform_driver this_driver = {
+	.probe  = mdss_qpic_panel_probe,
+	.driver = {
+		.name = "qpic_panel",
+		.of_match_table = mdss_qpic_panel_match,
+	},
+};
+
+static int __init mdss_qpic_panel_init(void)
+{
+	return platform_driver_register(&this_driver);
+}
+MODULE_DEVICE_TABLE(of, mdss_qpic_panel_match);
+module_init(mdss_qpic_panel_init);
diff --git a/drivers/video/msm/mdss/mdss_qpic_panel.h b/drivers/video/msm/mdss/mdss_qpic_panel.h
new file mode 100644
index 0000000..76f7dc2
--- /dev/null
+++ b/drivers/video/msm/mdss/mdss_qpic_panel.h
@@ -0,0 +1,114 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_QPIC_PANEL_H
+#define MDSS_QPIC_PANEL_H
+
+#include <linux/list.h>
+#include <mach/sps.h>
+
+#include "mdss_panel.h"
+
+#define LCDC_INTERNAL_BUFFER_SIZE   30
+
+/* Macros for encoding MIPI commands */
+
+/* INV_SIZE marks a command whose argument size is variable */
+#define INV_SIZE             0xFFFF
+/* MIPI {command, argument size} tuple */
+#define OP_SIZE_PAIR(op, size)    ((op<<16) | size)
+/* extract size from a command identifier */
+#define LCDC_EXTRACT_OP_SIZE(op_identifier)    ((op_identifier&0xFFFF))
+/* extract command id from a command identifier */
+#define LCDC_EXTRACT_OP_CMD(op_identifier)    (((op_identifier>>16)&0xFFFF))
+
+
+/* MIPI standard definitions */
+#define LCDC_ADDRESS_MODE_ORDER_BOTTOM_TO_TOP                0x80
+#define LCDC_ADDRESS_MODE_ORDER_RIGHT_TO_LEFT                0x40
+#define LCDC_ADDRESS_MODE_ORDER_REVERSE                      0x20
+#define LCDC_ADDRESS_MODE_ORDER_REFRESH_BOTTOM_TO_TOP        0x10
+#define LCDC_ADDRESS_MODE_ORDER_BGER_RGB                     0x08
+#define LCDC_ADDRESS_MODE_ORDER_REFERESH_RIGHT_TO_LEFT       0x04
+#define LCDC_ADDRESS_MODE_FLIP_HORIZONTAL                    0x02
+#define LCDC_ADDRESS_MODE_FLIP_VERTICAL                      0x01
+
+#define LCDC_PIXEL_FORMAT_3_BITS_PER_PIXEL    0x1
+#define LCDC_PIXEL_FORMAT_8_BITS_PER_PIXEL    0x2
+#define LCDC_PIXEL_FORMAT_12_BITS_PER_PIXEL   0x3
+#define LCDC_PIXEL_FORMAT_16_BITS_PER_PIXEL   0x5
+#define LCDC_PIXEL_FORMAT_18_BITS_PER_PIXEL   0x6
+#define LCDC_PIXEL_FORMAT_24_BITS_PER_PIXEL   0x7
+
+#define LCDC_CREATE_PIXEL_FORMAT(dpi_format, dbi_format) \
+	(dpi_format | (dbi_format<<4))
+
+#define POWER_MODE_IDLE_ON       0x40
+#define POWER_MODE_PARTIAL_ON    0x20
+#define POWER_MODE_SLEEP_ON      0x10
+#define POWER_MODE_NORMAL_ON     0x08
+#define POWER_MODE_DISPLAY_ON    0x04
+
+#define LCDC_DISPLAY_MODE_SCROLLING_ON       0x80
+#define LCDC_DISPLAY_MODE_INVERSION_ON       0x20
+#define LCDC_DISPLAY_MODE_GAMMA_MASK         0x07
+
+/**
+ * LCDC MIPI Type B supported commands
+ */
+enum {
+	OP_ENTER_IDLE_MODE        = OP_SIZE_PAIR(0x39, 0),
+	OP_ENTER_INVERT_MODE      = OP_SIZE_PAIR(0x21, 0),
+	OP_ENTER_NORMAL_MODE      = OP_SIZE_PAIR(0x13, 0),
+	OP_ENTER_PARTIAL_MODE     = OP_SIZE_PAIR(0x12, 0),
+	OP_ENTER_SLEEP_MODE       = OP_SIZE_PAIR(0x10, 0),
+	OP_EXIT_INVERT_MODE       = OP_SIZE_PAIR(0x20, 0),
+	OP_EXIT_SLEEP_MODE        = OP_SIZE_PAIR(0x11, 0),
+	OP_EXIT_IDLE_MODE         = OP_SIZE_PAIR(0x38, 0),
+	OP_GET_ADDRESS_MODE       = OP_SIZE_PAIR(0x0B, 1),
+	OP_GET_BLUE_CHANNEL       = OP_SIZE_PAIR(0x08, 1),
+	OP_GET_DIAGNOSTIC_RESULT  = OP_SIZE_PAIR(0x0F, 2),
+	OP_GET_DISPLAY_MODE       = OP_SIZE_PAIR(0x0D, 1),
+	OP_GET_GREEN_CHANNEL      = OP_SIZE_PAIR(0x07, 1),
+	OP_GET_PIXEL_FORMAT       = OP_SIZE_PAIR(0x0C, 1),
+	OP_GET_POWER_MODE         = OP_SIZE_PAIR(0x0A, 1),
+	OP_GET_RED_CHANNEL        = OP_SIZE_PAIR(0x06, 1),
+	OP_GET_SCANLINE           = OP_SIZE_PAIR(0x45, 2),
+	OP_GET_SIGNAL_MODE        = OP_SIZE_PAIR(0x0E, 1),
+	OP_NOP                    = OP_SIZE_PAIR(0x00, 0),
+	OP_READ_DDB_CONTINUE      = OP_SIZE_PAIR(0xA8, INV_SIZE),
+	OP_READ_DDB_START         = OP_SIZE_PAIR(0xA1, INV_SIZE),
+	OP_READ_MEMORY_CONTINUE   = OP_SIZE_PAIR(0x3E, INV_SIZE),
+	OP_READ_MEMORY_START      = OP_SIZE_PAIR(0x2E, INV_SIZE),
+	OP_SET_ADDRESS_MODE       = OP_SIZE_PAIR(0x36, 1),
+	OP_SET_COLUMN_ADDRESS     = OP_SIZE_PAIR(0x2A, 4),
+	OP_SET_DISPLAY_OFF        = OP_SIZE_PAIR(0x28, 0),
+	OP_SET_DISPLAY_ON         = OP_SIZE_PAIR(0x29, 0),
+	OP_SET_GAMMA_CURVE        = OP_SIZE_PAIR(0x26, 1),
+	OP_SET_PAGE_ADDRESS       = OP_SIZE_PAIR(0x2B, 4),
+	OP_SET_PARTIAL_COLUMNS    = OP_SIZE_PAIR(0x31, 4),
+	OP_SET_PARTIAL_ROWS       = OP_SIZE_PAIR(0x30, 4),
+	OP_SET_PIXEL_FORMAT       = OP_SIZE_PAIR(0x3A, 1),
+	OP_SOFT_RESET             = OP_SIZE_PAIR(0x01, 0),
+	OP_WRITE_MEMORY_CONTINUE  = OP_SIZE_PAIR(0x3C, INV_SIZE),
+	OP_WRITE_MEMORY_START     = OP_SIZE_PAIR(0x2C, INV_SIZE),
+};
+
+u32 qpic_panel_set_cmd_only(u32 command);
+u32 qpic_send_panel_cmd(u32 cmd, u32 *val, u32 length);
+int ili9341_on(void);
+void ili9341_off(void);
+int ili9341_init(struct platform_device *pdev,
+			struct device_node *np);
+#endif /* MDSS_QPIC_PANEL_H */
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index a0a4837..1b398d3 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,16 +25,49 @@
 
 #include "mdss_panel.h"
 
-static int mdss_wb_on(struct mdss_panel_data *pdata)
+/**
+ * mdss_wb_check_params - check new panel info params
+ * @pdata: current panel information
+ * @new: updates to panel info
+ *
+ * Checks if there are any changes that require panel reconfiguration
+ * in order to be reflected on writeback buffer.
+ *
+ * Return: negative errno on invalid input, zero if no panel
+ * reconfiguration is needed, positive if reconfiguration is needed.
+ */
+static int mdss_wb_check_params(struct mdss_panel_data *pdata,
+	struct mdss_panel_info *new)
 {
-	pr_debug("%s\n", __func__);
+	struct mdss_panel_info *old;
+
+	if (!pdata || !new) {
+		pr_err("%s: Invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	old = &pdata->panel_info;
+
+	if ((old->xres != new->xres) || (old->yres != new->yres))
+		return 1;
+
 	return 0;
 }
 
-static int mdss_wb_off(struct mdss_panel_data *pdata)
+static int mdss_wb_event_handler(struct mdss_panel_data *pdata,
+				 int event, void *arg)
 {
-	pr_debug("%s\n", __func__);
-	return 0;
+	int rc = 0;
+
+	switch (event) {
+	case MDSS_EVENT_CHECK_PARAMS:
+		rc = mdss_wb_check_params(pdata, (struct mdss_panel_info *)arg);
+		break;
+	default:
+		pr_debug("%s: panel event (%d) not handled\n", __func__, event);
+		break;
+	}
+	return rc;
 }
 
 static int mdss_wb_parse_dt(struct platform_device *pdev,
@@ -73,13 +106,12 @@
 	pdata->panel_info.type = WRITEBACK_PANEL;
 	pdata->panel_info.clk_rate = 74250000;
 	pdata->panel_info.pdest = DISPLAY_3;
-	pdata->panel_info.out_format = MDP_Y_CBCR_H2V2;
+	pdata->panel_info.out_format = MDP_Y_CBCR_H2V2_VENUS;
 
-	pdata->on = mdss_wb_on;
-	pdata->off = mdss_wb_off;
+	pdata->event_handler = mdss_wb_event_handler;
 	pdev->dev.platform_data = pdata;
 
-	rc = mdss_register_panel(pdata);
+	rc = mdss_register_panel(pdev, pdata);
 	if (rc) {
 		dev_err(&pdev->dev, "unable to register writeback panel\n");
 		return rc;
diff --git a/drivers/video/msm/mdss/mhl_msc.c b/drivers/video/msm/mdss/mhl_msc.c
new file mode 100644
index 0000000..96e8b67
--- /dev/null
+++ b/drivers/video/msm/mdss/mhl_msc.c
@@ -0,0 +1,669 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/mhl_8334.h>
+#include <linux/vmalloc.h>
+#include <linux/input.h>
+#include "mhl_msc.h"
+#include "mdss_hdmi_mhl.h"
+
+static struct mhl_tx_ctrl *mhl_ctrl;
+static DEFINE_MUTEX(msc_send_workqueue_mutex);
+
+const char *devcap_reg_name[] = {
+	"DEV_STATE       ",
+	"MHL_VERSION     ",
+	"DEV_CAT         ",
+	"ADOPTER_ID_H    ",
+	"ADOPTER_ID_L    ",
+	"VID_LINK_MODE   ",
+	"AUD_LINK_MODE   ",
+	"VIDEO_TYPE      ",
+	"LOG_DEV_MAP     ",
+	"BANDWIDTH       ",
+	"FEATURE_FLAG    ",
+	"DEVICE_ID_H     ",
+	"DEVICE_ID_L     ",
+	"SCRATCHPAD_SIZE ",
+	"INT_STAT_SIZE   ",
+	"Reserved        ",
+};
+
+static bool mhl_check_tmds_enabled(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	if (mhl_ctrl && mhl_ctrl->hdmi_mhl_ops) {
+		struct msm_hdmi_mhl_ops *ops = mhl_ctrl->hdmi_mhl_ops;
+		struct platform_device *pdev = mhl_ctrl->pdata->hdmi_pdev;
+		return ops->tmds_enabled(pdev);
+	} else {
+		pr_err("%s: invalid input\n", __func__);
+		return false;
+	}
+}
+
+static void mhl_print_devcap(u8 offset, u8 devcap)
+{
+	switch (offset) {
+	case DEVCAP_OFFSET_DEV_CAT:
+		pr_debug("DCAP: %02X %s: %02X DEV_TYPE=%X POW=%s\n",
+			offset, devcap_reg_name[offset], devcap,
+			devcap & 0x0F, (devcap & 0x10) ? "y" : "n");
+		break;
+	case DEVCAP_OFFSET_FEATURE_FLAG:
+		pr_debug("DCAP: %02X %s: %02X RCP=%s RAP=%s SP=%s\n",
+			offset, devcap_reg_name[offset], devcap,
+			(devcap & 0x01) ? "y" : "n",
+			(devcap & 0x02) ? "y" : "n",
+			(devcap & 0x04) ? "y" : "n");
+		break;
+	default:
+		pr_debug("DCAP: %02X %s: %02X\n",
+			offset, devcap_reg_name[offset], devcap);
+		break;
+	}
+}
+
+void mhl_register_msc(struct mhl_tx_ctrl *ctrl)
+{
+	if (ctrl)
+		mhl_ctrl = ctrl;
+}
+
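+/*
+ * Decide whether a queued MSC command must wait: a scratchpad write
+ * request (REQ_WRT) is postponed while the scratchpad is busy;
+ * otherwise REQ_WRT/GRT_WRT mark it busy before the command is sent.
+ */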
+static int mhl_flag_scrpd_burst_req(struct mhl_tx_ctrl *mhl_ctrl,
+		struct msc_command_struct *req)
+{
+	int postpone_send = 0;
+
+	if ((req->command == MHL_SET_INT) &&
+	    (req->offset == MHL_RCHANGE_INT)) {
+		if (mhl_ctrl->scrpd_busy) {
+			/* reduce priority */
+			if (req->payload.data[0] == MHL_INT_REQ_WRT)
+				postpone_send = 1;
+		} else {
+			if (req->payload.data[0] == MHL_INT_REQ_WRT) {
+				mhl_ctrl->scrpd_busy = true;
+				mhl_ctrl->wr_burst_pending = true;
+			} else if (req->payload.data[0] == MHL_INT_GRT_WRT) {
+				mhl_ctrl->scrpd_busy = true;
+			}
+		}
+	}
+	return postpone_send;
+}
+
+void mhl_msc_send_work(struct work_struct *work)
+{
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(work, struct mhl_tx_ctrl, mhl_msc_send_work);
+	struct msc_cmd_envelope *cmd_env;
+	int ret, postpone_send;
+	/*
+	 * Remove item from the queue
+	 * and schedule it
+	 */
+	mutex_lock(&msc_send_workqueue_mutex);
+	while (!list_empty(&mhl_ctrl->list_cmd)) {
+		cmd_env = list_first_entry(&mhl_ctrl->list_cmd,
+					   struct msc_cmd_envelope,
+					   msc_queue_envelope);
+		list_del(&cmd_env->msc_queue_envelope);
+		mutex_unlock(&msc_send_workqueue_mutex);
+
+		postpone_send = mhl_flag_scrpd_burst_req(
+			mhl_ctrl,
+			&cmd_env->msc_cmd_msg);
+		if (postpone_send) {
+			if (cmd_env->msc_cmd_msg.retry-- > 0) {
+				mutex_lock(&msc_send_workqueue_mutex);
+				list_add_tail(
+					&cmd_env->msc_queue_envelope,
+					&mhl_ctrl->list_cmd);
+				mutex_unlock(&msc_send_workqueue_mutex);
+			} else {
+				pr_err("%s: max scrpd retry out\n",
+				       __func__);
+			}
+		} else {
+			ret = mhl_send_msc_command(mhl_ctrl,
+						   &cmd_env->msc_cmd_msg);
+			if (ret == -EAGAIN) {
+				int retry = 2;
+				while (retry--) {
+					ret = mhl_send_msc_command(
+						mhl_ctrl,
+						&cmd_env->msc_cmd_msg);
+					if (ret != -EAGAIN)
+						break;
+				}
+			}
+			if (ret == -EAGAIN)
+				pr_err("%s: send_msc_command retry out!\n",
+				       __func__);
+			vfree(cmd_env);
+		}
+
+		mutex_lock(&msc_send_workqueue_mutex);
+	}
+	mutex_unlock(&msc_send_workqueue_mutex);
+}
+
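+/*
+ * Queue an MSC command for the send worker; priority_send adds it
+ * at the head of the queue so it is sent ahead of pending commands.
+ */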
+int mhl_queue_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			  struct msc_command_struct *req,
+			  int priority_send)
+{
+	struct msc_cmd_envelope *cmd_env;
+
+	mutex_lock(&msc_send_workqueue_mutex);
+	cmd_env = vmalloc(sizeof(struct msc_cmd_envelope));
+	if (!cmd_env) {
+		pr_err("%s: out of memory!\n", __func__);
+		return -ENOMEM;
+	}
+
+	memcpy(&cmd_env->msc_cmd_msg, req,
+	       sizeof(struct msc_command_struct));
+
+	if (priority_send)
+		list_add(&cmd_env->msc_queue_envelope,
+			 &mhl_ctrl->list_cmd);
+	else
+		list_add_tail(&cmd_env->msc_queue_envelope,
+			      &mhl_ctrl->list_cmd);
+	mutex_unlock(&msc_send_workqueue_mutex);
+	queue_work(mhl_ctrl->msc_send_workqueue, &mhl_ctrl->mhl_msc_send_work);
+
+	return 0;
+}
+
+static int mhl_update_devcap(struct mhl_tx_ctrl *mhl_ctrl,
+	int offset, u8 devcap)
+{
+	if (!mhl_ctrl)
+		return -EFAULT;
+	if (offset < 0 || offset > 15)
+		return -EFAULT;
+	mhl_ctrl->devcap[offset] = devcap;
+	mhl_print_devcap(offset, mhl_ctrl->devcap[offset]);
+
+	return 0;
+}
+
+int mhl_msc_command_done(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req)
+{
+	switch (req->command) {
+	case MHL_WRITE_STAT:
+		if (req->offset == MHL_STATUS_REG_LINK_MODE) {
+			if (req->payload.data[0]
+			    & MHL_STATUS_PATH_ENABLED)
+				/* Enable TMDS output */
+				mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+			else
+				/* Disable TMDS output */
+				mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+		}
+		break;
+	case MHL_READ_DEVCAP:
+		mhl_update_devcap(mhl_ctrl,
+			req->offset, req->retval);
+		mhl_ctrl->devcap_state |= BIT(req->offset);
+		switch (req->offset) {
+		case MHL_DEV_CATEGORY_OFFSET:
+			if (req->retval & MHL_DEV_CATEGORY_POW_BIT)
+				pr_debug("%s: devcap pow bit set\n",
+					 __func__);
+			else
+				pr_debug("%s: devcap pow bit unset\n",
+					 __func__);
+			break;
+		case DEVCAP_OFFSET_MHL_VERSION:
+		case DEVCAP_OFFSET_INT_STAT_SIZE:
+			break;
+		}
+		break;
+	case MHL_WRITE_BURST:
+		mhl_msc_send_set_int(
+			mhl_ctrl,
+			MHL_RCHANGE_INT,
+			MHL_INT_DSCR_CHG,
+			MSC_PRIORITY_SEND);
+		break;
+	}
+	return 0;
+}
+
+int mhl_msc_send_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 mask, u8 prior)
+{
+	struct msc_command_struct req;
+	req.command = MHL_SET_INT;
+	req.offset = offset;
+	req.payload.data[0] = mask;
+	return mhl_queue_msc_command(mhl_ctrl, &req, prior);
+}
+
+int mhl_msc_send_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value)
+{
+	struct msc_command_struct req;
+	req.command = MHL_WRITE_STAT;
+	req.offset = offset;
+	req.payload.data[0] = value;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+static int mhl_msc_write_burst(struct mhl_tx_ctrl *mhl_ctrl,
+	u8 offset, u8 *data, u8 length)
+{
+	struct msc_command_struct req;
+	if (!mhl_ctrl)
+		return -EFAULT;
+
+	if (!mhl_ctrl->wr_burst_pending)
+		return -EFAULT;
+
+	req.command = MHL_WRITE_BURST;
+	req.offset = offset;
+	req.length = length;
+	req.payload.burst_data = data;
+	mhl_queue_msc_command(mhl_ctrl, &req, MSC_PRIORITY_SEND);
+	mhl_ctrl->wr_burst_pending = false;
+	return 0;
+}
+
+int mhl_msc_send_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data)
+{
+	struct msc_command_struct req;
+	req.command = MHL_MSC_MSG;
+	req.payload.data[0] = sub_cmd;
+	req.payload.data[1] = cmd_data;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+/*
+ * Certain MSC msgs such as RCPK, RCPE and RAPK
+ * should be transmitted as a high priority
+ * because these msgs should be sent within
+ * 1000ms of a receipt of RCP/RAP. So such msgs can
+ * be added to the head of msc cmd queue.
+ */
+static int mhl_msc_send_prior_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+				      u8 sub_cmd, u8 cmd_data)
+{
+	struct msc_command_struct req;
+	req.command = MHL_MSC_MSG;
+	req.payload.data[0] = sub_cmd;
+	req.payload.data[1] = cmd_data;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_PRIORITY_SEND);
+}
+
+int mhl_msc_read_devcap(struct mhl_tx_ctrl *mhl_ctrl, u8 offset)
+{
+	struct msc_command_struct req;
+	if (offset > 15)
+		return -EFAULT;
+	req.command = MHL_READ_DEVCAP;
+	req.offset = offset;
+	req.payload.data[0] = 0;
+	return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
+}
+
+int mhl_msc_read_devcap_all(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	int offset;
+	int ret;
+
+	for (offset = 0; offset < DEVCAP_SIZE; offset++) {
+		ret = mhl_msc_read_devcap(mhl_ctrl, offset);
+		if (ret == -EBUSY)
+			pr_err("%s: queue busy!\n", __func__);
+	}
+	return ret;
+}
+
+static void mhl_handle_input(struct mhl_tx_ctrl *mhl_ctrl,
+			     u8 key_code, u16 input_key_code)
+{
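+	/* bit 7 of the RCP key code is the key-release flag */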
+	int key_press = (key_code & 0x80) == 0;
+
+	pr_debug("%s: send key events[%x][%x][%d]\n",
+		 __func__, key_code, input_key_code, key_press);
+	input_report_key(mhl_ctrl->input, input_key_code, key_press);
+	input_sync(mhl_ctrl->input);
+}
+
+int mhl_rcp_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 key_code)
+{
+	u8 index = key_code & 0x7f;
+	u16 input_key_code;
+
+	if (!mhl_ctrl->rcp_key_code_tbl) {
+		pr_err("%s: RCP Key Code Table not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	/* bounds-check the index before reading the key table */
+	input_key_code = (index < mhl_ctrl->rcp_key_code_tbl_len) ?
+		mhl_ctrl->rcp_key_code_tbl[index] : 0;
+
+	if (input_key_code > 0) {
+		/* prior send rcpk */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPK,
+			key_code);
+
+		if (mhl_ctrl->input)
+			mhl_handle_input(mhl_ctrl, key_code, input_key_code);
+	} else {
+		/* prior send rcpe */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPE,
+			MHL_RCPE_INEFFECTIVE_KEY_CODE);
+
+		/* send rcpk after rcpe send */
+		mhl_msc_send_prior_msc_msg(
+			mhl_ctrl,
+			MHL_MSC_MSG_RCPK,
+			key_code);
+	}
+	return 0;
+}
+
+static int mhl_rap_action(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
+{
+	switch (action_code) {
+	case MHL_RAP_CONTENT_ON:
+		mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
+		break;
+	case MHL_RAP_CONTENT_OFF:
+		/*
+		 * instead of only disabling tmds
+		 * send power button press - CONTENT_OFF
+		 */
+		input_report_key(mhl_ctrl->input, KEY_VENDOR, 1);
+		input_sync(mhl_ctrl->input);
+		input_report_key(mhl_ctrl->input, KEY_VENDOR, 0);
+		input_sync(mhl_ctrl->input);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int mhl_rap_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
+{
+	u8 error_code;
+	bool tmds_en;
+
+	tmds_en = mhl_check_tmds_enabled(mhl_ctrl);
+	switch (action_code) {
+	case MHL_RAP_POLL:
+		if (tmds_en)
+			error_code = MHL_RAPK_NO_ERROR;
+		else
+			error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE;
+		break;
+	case MHL_RAP_CONTENT_ON:
+	case MHL_RAP_CONTENT_OFF:
+		if (tmds_en) {
+			mhl_rap_action(mhl_ctrl, action_code);
+			error_code = MHL_RAPK_NO_ERROR;
+		} else {
+			error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE;
+		}
+		break;
+	default:
+		error_code = MHL_RAPK_UNRECOGNIZED_ACTION_CODE;
+		break;
+	}
+	/* prior send rapk */
+	return mhl_msc_send_prior_msc_msg(
+		mhl_ctrl,
+		MHL_MSC_MSG_RAPK,
+		error_code);
+}
+
+int mhl_msc_recv_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data)
+{
+	int rc = 0;
+	switch (sub_cmd) {
+	case MHL_MSC_MSG_RCP:
+		pr_debug("MHL: receive RCP(0x%02x)\n", cmd_data);
+		rc = mhl_rcp_recv(mhl_ctrl, cmd_data);
+		break;
+	case MHL_MSC_MSG_RCPK:
+		pr_debug("MHL: receive RCPK(0x%02x)\n", cmd_data);
+		break;
+	case MHL_MSC_MSG_RCPE:
+		pr_debug("MHL: receive RCPE(0x%02x)\n", cmd_data);
+		break;
+	case MHL_MSC_MSG_RAP:
+		pr_debug("MHL: receive RAP(0x%02x)\n", cmd_data);
+		rc = mhl_rap_recv(mhl_ctrl, cmd_data);
+		break;
+	case MHL_MSC_MSG_RAPK:
+		pr_debug("MHL: receive RAPK(0x%02x)\n", cmd_data);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+int mhl_msc_recv_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 set_int)
+{
+	int prior;
+	if (offset >= 2)
+		return -EFAULT;
+
+	switch (offset) {
+	case 0:
+		if (set_int & MHL_INT_DCAP_CHG) {
+			/* peer dcap has changed */
+			mhl_ctrl->devcap_state = 0;
+			mhl_msc_read_devcap_all(mhl_ctrl);
+		}
+		if (set_int & MHL_INT_DSCR_CHG) {
+			/* peer's scratchpad reg changed */
+			pr_debug("%s: dscr chg\n", __func__);
+			mhl_read_scratchpad(mhl_ctrl);
+			mhl_ctrl->scrpd_busy = false;
+		}
+		if (set_int & MHL_INT_REQ_WRT) {
+			/* SET_INT: REQ_WRT */
+			if (mhl_ctrl->scrpd_busy) {
+				prior = MSC_NORMAL_SEND;
+			} else {
+				prior = MSC_PRIORITY_SEND;
+				mhl_ctrl->scrpd_busy = true;
+			}
+			mhl_msc_send_set_int(
+				mhl_ctrl,
+				MHL_RCHANGE_INT,
+				MHL_INT_GRT_WRT,
+				prior);
+		}
+		if (set_int & MHL_INT_GRT_WRT) {
+			/* SET_INT: GRT_WRT */
+			pr_debug("%s: recvd req to permit/grant write",
+				 __func__);
+			complete_all(&mhl_ctrl->req_write_done);
+			mhl_msc_write_burst(
+				mhl_ctrl,
+				MHL_SCRATCHPAD_OFFSET,
+				mhl_ctrl->scrpd.data,
+				mhl_ctrl->scrpd.length);
+		}
+		break;
+	case 1:
+		if (set_int & MHL_INT_EDID_CHG) {
+			/* peer EDID has changed
+			 * toggle HPD to read EDID
+			 */
+			pr_debug("%s: EDID CHG\n", __func__);
+			mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+			msleep(110);
+			mhl_drive_hpd(mhl_ctrl, HPD_UP);
+		}
+	}
+	return 0;
+}
+
+int mhl_msc_recv_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value)
+{
+	bool tmds_en;
+
+	if (offset >= 2)
+		return -EFAULT;
+
+	switch (offset) {
+	case 0:
+		/*
+		 * connected device bits
+		 * changed and DEVCAP READY
+		 */
+		if (((value ^ mhl_ctrl->devcap_state) &
+		     MHL_STATUS_DCAP_RDY)) {
+			if (value & MHL_STATUS_DCAP_RDY) {
+				mhl_ctrl->devcap_state = 0;
+				mhl_msc_read_devcap_all(mhl_ctrl);
+			} else {
+				/*
+				 * peer dcap is no longer ready;
+				 * keep the old devcap state
+				 */
+				pr_debug("%s: DCAP RDY bit cleared\n",
+					 __func__);
+			}
+		}
+		break;
+	case 1:
+		/*
+		 * connected device bits
+		 * changed and PATH ENABLED
+		 * bit set
+		 */
+		tmds_en = mhl_check_tmds_enabled(mhl_ctrl);
+		if ((value ^ mhl_ctrl->path_en_state)
+		    & MHL_STATUS_PATH_ENABLED) {
+			if (value & MHL_STATUS_PATH_ENABLED) {
+				if (tmds_en &&
+				    (mhl_ctrl->devcap[offset] &
+				     MHL_FEATURE_RAP_SUPPORT)) {
+					mhl_msc_send_msc_msg(
+						mhl_ctrl,
+						MHL_MSC_MSG_RAP,
+						MHL_RAP_CONTENT_ON);
+				}
+				mhl_ctrl->path_en_state
+					|= (MHL_STATUS_PATH_ENABLED |
+					    MHL_STATUS_CLK_MODE_NORMAL);
+				mhl_msc_send_write_stat(
+					mhl_ctrl,
+					MHL_STATUS_REG_LINK_MODE,
+					mhl_ctrl->path_en_state);
+			} else {
+				mhl_ctrl->path_en_state
+					&= ~(MHL_STATUS_PATH_ENABLED |
+					     MHL_STATUS_CLK_MODE_NORMAL);
+				mhl_msc_send_write_stat(
+					mhl_ctrl,
+					MHL_STATUS_REG_LINK_MODE,
+					mhl_ctrl->path_en_state);
+			}
+		}
+		break;
+	}
+	mhl_ctrl->path_en_state = value;
+	return 0;
+}
+
+static int mhl_request_write_burst(struct mhl_tx_ctrl *mhl_ctrl,
+				   u8 start_reg,
+				   u8 length, u8 *data)
+{
+	int i, reg;
+	int timeout, retry = 20;
+
+	if (!(mhl_ctrl->devcap[DEVCAP_OFFSET_FEATURE_FLAG] &
+	      MHL_FEATURE_SP_SUPPORT)) {
+		pr_debug("MHL: SCRATCHPAD_NOT_SUPPORTED\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * scratchpad remains busy as long as a peer's permission or
+	 * write bursts are pending; experimentally it was found that
+	 * 50ms is optimal
+	 */
+	while (mhl_ctrl->scrpd_busy && retry--)
+		msleep(50);
+	if (mhl_ctrl->scrpd_busy) {
+		pr_debug("MHL: scratchpad_busy\n");
+		return -EBUSY;
+	}
+
+	for (i = 0, reg = start_reg; (i < length) &&
+		     (reg < MHL_SCRATCHPAD_SIZE); i++, reg++)
+		mhl_ctrl->scrpd.data[reg] = data[i];
+	mhl_ctrl->scrpd.length = length;
+	mhl_ctrl->scrpd.offset = start_reg;
+
+	retry = 5;
+	do {
+		init_completion(&mhl_ctrl->req_write_done);
+		mhl_msc_send_set_int(
+			mhl_ctrl,
+			MHL_RCHANGE_INT,
+			MHL_INT_REQ_WRT,
+			MSC_PRIORITY_SEND);
+		timeout = wait_for_completion_interruptible_timeout(
+			&mhl_ctrl->req_write_done,
+			msecs_to_jiffies(MHL_BURST_WAIT));
+		if (!timeout)
+			mhl_ctrl->scrpd_busy = false;
+	} while (retry-- && timeout == 0);
+	if (!timeout) {
+		pr_err("%s: timed out!\n", __func__);
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+/* write scratchpad entry */
+int mhl_write_scratchpad(struct mhl_tx_ctrl *mhl_ctrl,
+			  u8 offset, u8 length, u8 *data)
+{
+	int rc;
+
+	if ((length < ADOPTER_ID_SIZE) ||
+	    (length > MAX_SCRATCHPAD_TRANSFER_SIZE) ||
+	    (offset > (MAX_SCRATCHPAD_TRANSFER_SIZE - ADOPTER_ID_SIZE)) ||
+	    ((offset + length) > MAX_SCRATCHPAD_TRANSFER_SIZE)) {
+		pr_debug("MHL: write_burst (0x%02x)\n", -EINVAL);
+		return  -EINVAL;
+	}
+
+	rc = mhl_request_write_burst(mhl_ctrl, offset, length, data);
+
+	return rc;
+}
diff --git a/drivers/video/msm/mdss/mhl_msc.h b/drivers/video/msm/mdss/mhl_msc.h
new file mode 100644
index 0000000..8a1fd39
--- /dev/null
+++ b/drivers/video/msm/mdss/mhl_msc.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MHL_MSC_H__
+#define __MHL_MSC_H__
+#include <linux/mhl_8334.h>
+
+#define MAX_RCP_KEYS_SUPPORTED 256
+
+#define MSC_NORMAL_SEND 0
+#define MSC_PRIORITY_SEND 1
+
+#define TMDS_ENABLE 1
+#define TMDS_DISABLE 0
+
+/******************************************************************/
+/* the below APIs are implemented by the MSC functionality */
+int mhl_msc_command_done(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req);
+
+int mhl_msc_send_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 mask, u8 priority);
+
+int mhl_msc_send_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value);
+int mhl_msc_send_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data);
+
+int mhl_msc_recv_set_int(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 offset, u8 set_int);
+
+int mhl_msc_recv_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
+			    u8 offset, u8 value);
+int mhl_msc_recv_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
+			 u8 sub_cmd, u8 cmd_data);
+void mhl_msc_send_work(struct work_struct *work);
+
+/******************************************************************/
+/* Tx should implement these APIs */
+int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req);
+void mhl_read_scratchpad(struct mhl_tx_ctrl *mhl_ctrl);
+void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state);
+void mhl_tmds_ctrl(struct mhl_tx_ctrl *ctrl, uint8_t on);
+/******************************************************************/
+/* MHL driver registers ctrl with MSC */
+void mhl_register_msc(struct mhl_tx_ctrl *ctrl);
+
+#endif /* __MHL_MSC_H__ */
diff --git a/drivers/video/msm/mdss/mhl_sii8334.c b/drivers/video/msm/mdss/mhl_sii8334.c
new file mode 100644
index 0000000..a3a1a4e
--- /dev/null
+++ b/drivers/video/msm/mdss/mhl_sii8334.c
@@ -0,0 +1,1927 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/input.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/mhl_8334.h>
+
+#include "mdss_fb.h"
+#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_edid.h"
+#include "mdss.h"
+#include "mdss_panel.h"
+#include "mdss_io_util.h"
+#include "mhl_msc.h"
+#include "mdss_hdmi_mhl.h"
+
+#define MHL_DRIVER_NAME "sii8334"
+#define COMPATIBLE_NAME "qcom,mhl-sii8334"
+#define MAX_CURRENT 700000
+
+/* interrupt-path debug logging is compiled out */
+#define pr_debug_intr(...)
+
+#define MSC_START_BIT_MSC_CMD        (0x01 << 0)
+#define MSC_START_BIT_VS_CMD        (0x01 << 1)
+#define MSC_START_BIT_READ_REG        (0x01 << 2)
+#define MSC_START_BIT_WRITE_REG        (0x01 << 3)
+#define MSC_START_BIT_WRITE_BURST        (0x01 << 4)
+
+/* supported RCP key code */
+u16 support_rcp_key_code_tbl[] = {
+	KEY_ENTER,		/* 0x00 Select */
+	KEY_UP,			/* 0x01 Up */
+	KEY_DOWN,		/* 0x02 Down */
+	KEY_LEFT,		/* 0x03 Left */
+	KEY_RIGHT,		/* 0x04 Right */
+	KEY_UNKNOWN,		/* 0x05 Right-up */
+	KEY_UNKNOWN,		/* 0x06 Right-down */
+	KEY_UNKNOWN,		/* 0x07 Left-up */
+	KEY_UNKNOWN,		/* 0x08 Left-down */
+	KEY_MENU,		/* 0x09 Root Menu */
+	KEY_OPTION,		/* 0x0A Setup Menu */
+	KEY_UNKNOWN,		/* 0x0B Contents Menu */
+	KEY_UNKNOWN,		/* 0x0C Favorite Menu */
+	KEY_EXIT,		/* 0x0D Exit */
+	KEY_RESERVED,		/* 0x0E */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x1F */
+	KEY_NUMERIC_0,		/* 0x20 NUMERIC_0 */
+	KEY_NUMERIC_1,		/* 0x21 NUMERIC_1 */
+	KEY_NUMERIC_2,		/* 0x22 NUMERIC_2 */
+	KEY_NUMERIC_3,		/* 0x23 NUMERIC_3 */
+	KEY_NUMERIC_4,		/* 0x24 NUMERIC_4 */
+	KEY_NUMERIC_5,		/* 0x25 NUMERIC_5 */
+	KEY_NUMERIC_6,		/* 0x26 NUMERIC_6 */
+	KEY_NUMERIC_7,		/* 0x27 NUMERIC_7 */
+	KEY_NUMERIC_8,		/* 0x28 NUMERIC_8 */
+	KEY_NUMERIC_9,		/* 0x29 NUMERIC_9 */
+	KEY_DOT,		/* 0x2A Dot */
+	KEY_ENTER,		/* 0x2B Enter */
+	KEY_ESC,		/* 0x2C Clear */
+	KEY_RESERVED,		/* 0x2D */
+	KEY_RESERVED,		/* 0x2E */
+	KEY_RESERVED,		/* 0x2F */
+	KEY_UNKNOWN,		/* 0x30 Channel Up */
+	KEY_UNKNOWN,		/* 0x31 Channel Down */
+	KEY_UNKNOWN,		/* 0x32 Previous Channel */
+	KEY_UNKNOWN,		/* 0x33 Sound Select */
+	KEY_UNKNOWN,		/* 0x34 Input Select */
+	KEY_UNKNOWN,		/* 0x35 Show Information */
+	KEY_UNKNOWN,		/* 0x36 Help */
+	KEY_UNKNOWN,		/* 0x37 Page Up */
+	KEY_UNKNOWN,		/* 0x38 Page Down */
+	KEY_RESERVED,		/* 0x39 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x3F */
+	KEY_RESERVED,		/* 0x40 */
+	KEY_VOLUMEUP,		/* 0x41 Volume Up */
+	KEY_VOLUMEDOWN,		/* 0x42 Volume Down */
+	KEY_MUTE,		/* 0x43 Mute */
+	KEY_PLAY,		/* 0x44 Play */
+	KEY_STOP,		/* 0x45 Stop */
+	KEY_PAUSE,		/* 0x46 Pause */
+	KEY_UNKNOWN,		/* 0x47 Record */
+	KEY_REWIND,		/* 0x48 Rewind */
+	KEY_FASTFORWARD,	/* 0x49 Fast Forward */
+	KEY_UNKNOWN,		/* 0x4A Eject */
+	KEY_FORWARD,		/* 0x4B Forward */
+	KEY_BACK,		/* 0x4C Backward */
+	KEY_RESERVED,		/* 0x4D */
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x4F */
+	KEY_UNKNOWN,		/* 0x50 Angle */
+	KEY_UNKNOWN,		/* 0x51 Subtitle */
+	KEY_RESERVED,		/* 0x52 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x5F */
+	KEY_PLAYPAUSE,		/* 0x60 Play Function */
+	KEY_PLAYPAUSE,		/* 0x61 Pause_Play Function */
+	KEY_UNKNOWN,		/* 0x62 Record Function */
+	KEY_PAUSE,		/* 0x63 Pause Record Function */
+	KEY_STOP,		/* 0x64 Stop Function  */
+	KEY_MUTE,		/* 0x65 Mute Function */
+	KEY_UNKNOWN,		/* 0x66 Restore Volume Function */
+	KEY_UNKNOWN,		/* 0x67 Tune Function */
+	KEY_UNKNOWN,		/* 0x68 Select Media Function */
+	KEY_RESERVED,		/* 0x69 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x70 */
+	KEY_BLUE,			/* 0x71 F1 */
+	KEY_RED,			/* 0x72 F2 */
+	KEY_GREEN,			/* 0x73 F3 */
+	KEY_YELLOW,			/* 0x74 F4 */
+	KEY_UNKNOWN,		/* 0x75 F5 */
+	KEY_RESERVED,		/* 0x76 */
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,		/* 0x7D */
+	KEY_VENDOR,		/* Vendor Specific */
+	KEY_RESERVED,		/* 0x7F */
+};
+
+uint8_t slave_addrs[MAX_PAGES] = {
+	DEV_PAGE_TPI_0,
+	DEV_PAGE_TX_L0_0,
+	DEV_PAGE_TX_L1_0,
+	DEV_PAGE_TX_2_0,
+	DEV_PAGE_TX_3_0,
+	DEV_PAGE_CBUS,
+	DEV_PAGE_DDC_EDID,
+	DEV_PAGE_DDC_SEGM,
+};
+
+static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl,
+			enum mhl_st_type to_mode);
+static void mhl_init_reg_settings(struct mhl_tx_ctrl *mhl_ctrl,
+				  bool mhl_disc_en);
+
+int mhl_i2c_reg_read(struct i2c_client *client,
+			    uint8_t slave_addr_index, uint8_t reg_offset)
+{
+	int rc = -1;
+	uint8_t buffer = 0;
+
+	rc = mdss_i2c_byte_read(client, slave_addrs[slave_addr_index],
+				reg_offset, &buffer);
+	if (rc) {
+		pr_err("%s: slave=%x, off=%x\n",
+		       __func__, slave_addrs[slave_addr_index], reg_offset);
+		return rc;
+	}
+	return buffer;
+}
+
+
+int mhl_i2c_reg_write(struct i2c_client *client,
+			     uint8_t slave_addr_index, uint8_t reg_offset,
+			     uint8_t value)
+{
+	return mdss_i2c_byte_write(client, slave_addrs[slave_addr_index],
+				 reg_offset, &value);
+}
+
+void mhl_i2c_reg_modify(struct i2c_client *client,
+			       uint8_t slave_addr_index, uint8_t reg_offset,
+			       uint8_t mask, uint8_t val)
+{
+	uint8_t temp;
+
+	temp = mhl_i2c_reg_read(client, slave_addr_index, reg_offset);
+	temp &= (~mask);
+	temp |= (mask & val);
+	mhl_i2c_reg_write(client, slave_addr_index, reg_offset, temp);
+}
+
+
+static int mhl_tx_get_dt_data(struct device *dev,
+	struct mhl_tx_platform_data *pdata)
+{
+	int i, rc = 0;
+	struct device_node *of_node = NULL;
+	struct dss_gpio *temp_gpio = NULL;
+	struct platform_device *hdmi_pdev = NULL;
+	struct device_node *hdmi_tx_node = NULL;
+	int dt_gpio;
+	i = 0;
+
+	if (!dev || !pdata) {
+		pr_err("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+	if (!of_node) {
+		pr_err("%s: invalid of_node\n", __func__);
+		goto error;
+	}
+
+	pr_debug("%s: id=%d\n", __func__, dev->id);
+
+	/* GPIOs */
+	temp_gpio = NULL;
+	temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+	pr_debug("%s: gpios allocd\n", __func__);
+	if (!(temp_gpio)) {
+		pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+		goto error;
+	}
+	/* RESET */
+	dt_gpio = of_get_named_gpio(of_node, "mhl-rst-gpio", 0);
+	if (dt_gpio < 0) {
+		pr_err("%s: Can't get mhl-rst-gpio\n", __func__);
+		goto error;
+	}
+
+	temp_gpio->gpio = dt_gpio;
+	snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-rst-gpio");
+	pr_debug("%s: rst gpio=[%d]\n", __func__,
+		 temp_gpio->gpio);
+	pdata->gpios[MHL_TX_RESET_GPIO] = temp_gpio;
+
+	/* PWR */
+	temp_gpio = NULL;
+	temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+	pr_debug("%s: gpios allocd\n", __func__);
+	if (!(temp_gpio)) {
+		pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+		goto error;
+	}
+	dt_gpio = of_get_named_gpio(of_node, "mhl-pwr-gpio", 0);
+	if (dt_gpio < 0) {
+		pr_err("%s: Can't get mhl-pwr-gpio\n", __func__);
+		goto error;
+	}
+
+	temp_gpio->gpio = dt_gpio;
+	snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-pwr-gpio");
+	pr_debug("%s: pmic gpio=[%d]\n", __func__,
+		 temp_gpio->gpio);
+	pdata->gpios[MHL_TX_PMIC_PWR_GPIO] = temp_gpio;
+
+	/* INTR */
+	temp_gpio = NULL;
+	temp_gpio = devm_kzalloc(dev, sizeof(struct dss_gpio), GFP_KERNEL);
+	pr_debug("%s: gpios allocd\n", __func__);
+	if (!(temp_gpio)) {
+		pr_err("%s: can't alloc %d gpio mem\n", __func__, i);
+		goto error;
+	}
+	dt_gpio = of_get_named_gpio(of_node, "mhl-intr-gpio", 0);
+	if (dt_gpio < 0) {
+		pr_err("%s: Can't get mhl-intr-gpio\n", __func__);
+		goto error;
+	}
+
+	temp_gpio->gpio = dt_gpio;
+	snprintf(temp_gpio->gpio_name, 32, "%s", "mhl-intr-gpio");
+	pr_debug("%s: intr gpio=[%d]\n", __func__,
+		 temp_gpio->gpio);
+	pdata->gpios[MHL_TX_INTR_GPIO] = temp_gpio;
+
+	/* parse phandle for hdmi tx */
+	hdmi_tx_node = of_parse_phandle(of_node, "qcom,hdmi-tx-map", 0);
+	if (!hdmi_tx_node) {
+		pr_err("%s: can't find hdmi phandle\n", __func__);
+		goto error;
+	}
+
+	hdmi_pdev = of_find_device_by_node(hdmi_tx_node);
+	if (!hdmi_pdev) {
+		pr_err("%s: can't find the device by node\n", __func__);
+		goto error;
+	}
+	pr_debug("%s: hdmi_pdev [0X%x] to pdata->pdev\n",
+	       __func__, (unsigned int)hdmi_pdev);
+
+	pdata->hdmi_pdev = hdmi_pdev;
+
+	return 0;
+error:
+	pr_err("%s: ret due to err\n", __func__);
+	for (i = 0; i < MHL_TX_MAX_GPIO; i++)
+		if (pdata->gpios[i])
+			devm_kfree(dev, pdata->gpios[i]);
+	return -EINVAL;
+} /* mhl_tx_get_dt_data */
+
+static int mhl_sii_reset_pin(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+	if (mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO]) {
+		gpio_set_value(
+			mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO]->gpio,
+			on);
+	}
+	return 0;
+}
+
+
+static int mhl_sii_wait_for_rgnd(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	int timeout;
+	/* let isr handle RGND interrupt */
+	pr_debug("%s:%u\n", __func__, __LINE__);
+	INIT_COMPLETION(mhl_ctrl->rgnd_done);
+	timeout = wait_for_completion_interruptible_timeout
+		(&mhl_ctrl->rgnd_done, HZ/2);
+	if (!timeout) {
+		/* most likely nothing plugged in USB */
+		/* USB HOST connected or already in USB mode */
+		pr_warn("%s:%u timedout\n", __func__, __LINE__);
+		return -ENODEV;
+	}
+	return mhl_ctrl->mhl_mode ? 0 : 1;
+}
+
+/* USB handshaking functions */
+static int mhl_sii_device_discovery(void *data, int id,
+			     void (*usb_notify_cb)(int online))
+{
+	int rc;
+	struct mhl_tx_ctrl *mhl_ctrl = data;
+
+	if (id) {
+		/* When MHL cable is disconnected we get a sii8334
+		 * mhl_disconnect interrupt which is handled separately.
+		 */
+		pr_debug("%s: USB ID pin high\n", __func__);
+		return id;
+	}
+
+	if (!mhl_ctrl || !usb_notify_cb) {
+		pr_warn("%s: cb || ctrl is NULL\n", __func__);
+		/* return "USB" so caller can proceed */
+		return -EINVAL;
+	}
+
+	if (!mhl_ctrl->notify_usb_online)
+		mhl_ctrl->notify_usb_online = usb_notify_cb;
+
+	if (!mhl_ctrl->disc_enabled) {
+		mhl_sii_reset_pin(mhl_ctrl, 0);
+		msleep(50);
+		mhl_sii_reset_pin(mhl_ctrl, 1);
+		/* TX PR-guide requires a 100 ms wait here */
+		msleep(100);
+		mhl_init_reg_settings(mhl_ctrl, true);
+		rc = mhl_sii_wait_for_rgnd(mhl_ctrl);
+	} else {
+		if (mhl_ctrl->cur_state == POWER_STATE_D3) {
+			rc = mhl_sii_wait_for_rgnd(mhl_ctrl);
+		} else {
+			/* in MHL mode */
+			pr_debug("%s:%u\n", __func__, __LINE__);
+			rc = 0;
+		}
+	}
+	pr_debug("%s: ret result: %s\n", __func__, rc ? "usb" : " mhl");
+	return rc;
+}
+
+static int mhl_power_get_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  union power_supply_propval *val)
+{
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(psy, struct mhl_tx_ctrl, mhl_psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		val->intval = mhl_ctrl->current_val;
+		break;
+	case POWER_SUPPLY_PROP_PRESENT:
+		val->intval = mhl_ctrl->vbus_active;
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+		val->intval = mhl_ctrl->vbus_active && mhl_ctrl->mhl_mode;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mhl_power_set_property(struct power_supply *psy,
+				  enum power_supply_property psp,
+				  const union power_supply_propval *val)
+{
+	struct mhl_tx_ctrl *mhl_ctrl =
+		container_of(psy, struct mhl_tx_ctrl, mhl_psy);
+
+	switch (psp) {
+	case POWER_SUPPLY_PROP_PRESENT:
+		mhl_ctrl->vbus_active = val->intval;
+		if (mhl_ctrl->vbus_active)
+			mhl_ctrl->current_val = MAX_CURRENT;
+		else
+			mhl_ctrl->current_val = 0;
+		power_supply_changed(psy);
+		break;
+	case POWER_SUPPLY_PROP_ONLINE:
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static char *mhl_pm_power_supplied_to[] = {
+	"usb",
+};
+
+static enum power_supply_property mhl_pm_power_props[] = {
+	POWER_SUPPLY_PROP_PRESENT,
+	POWER_SUPPLY_PROP_ONLINE,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static void cbus_reset(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t i;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/*
+	 * REG_SRST
+	 */
+	MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, BIT3);
+	msleep(20);
+	MHL_SII_REG_NAME_MOD(REG_SRST, BIT3, 0x00);
+	/*
+	 * REG_INTR1 and REG_INTR4
+	 */
+	MHL_SII_REG_NAME_WR(REG_INTR1_MASK, BIT6);
+	MHL_SII_REG_NAME_WR(REG_INTR4_MASK,
+		BIT0 | BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+	if (mhl_ctrl->chip_rev_id < 1)
+		MHL_SII_REG_NAME_WR(REG_INTR5_MASK, BIT3 | BIT4);
+	else
+		MHL_SII_REG_NAME_WR(REG_INTR5_MASK, 0x00);
+
+	/* Unmask CBUS1 Intrs */
+	MHL_SII_REG_NAME_WR(REG_CBUS_INTR_ENABLE,
+		BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
+
+	/* Unmask CBUS2 Intrs */
+	MHL_SII_REG_NAME_WR(REG_CBUS_MSC_INT2_ENABLE, BIT2 | BIT3);
+
+	for (i = 0; i < 4; i++) {
+		/*
+		 * Enable WRITE_STAT interrupt for writes to
+		 * all 4 MSC Status registers.
+		 */
+		MHL_SII_CBUS_WR((0xE0 + i), 0xFF);
+
+		/*
+		 * Enable SET_INT interrupt for writes to
+		 * all 4 MSC Interrupt registers.
+		 */
+		MHL_SII_CBUS_WR((0xF0 + i), 0xFF);
+	}
+}
+
+static void init_cbus_regs(struct i2c_client *client)
+{
+	uint8_t		regval;
+
+	/* Increase DDC translation layer timer*/
+	MHL_SII_CBUS_WR(0x0007, 0xF2);
+	/* Drive High Time */
+	MHL_SII_CBUS_WR(0x0036, 0x0B);
+	/* Use programmed timing */
+	MHL_SII_CBUS_WR(0x0039, 0x30);
+	/* CBUS Drive Strength */
+	MHL_SII_CBUS_WR(0x0040, 0x03);
+	/*
+	 * Write initial default settings
+	 * to devcap regs: default settings
+	 */
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEV_STATE, DEVCAP_VAL_DEV_STATE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_MHL_VERSION, DEVCAP_VAL_MHL_VERSION);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEV_CAT, DEVCAP_VAL_DEV_CAT);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_ADOPTER_ID_H, DEVCAP_VAL_ADOPTER_ID_H);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_ADOPTER_ID_L, DEVCAP_VAL_ADOPTER_ID_L);
+	MHL_SII_CBUS_WR(0x0080 | DEVCAP_OFFSET_VID_LINK_MODE,
+			  DEVCAP_VAL_VID_LINK_MODE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_AUD_LINK_MODE,
+			  DEVCAP_VAL_AUD_LINK_MODE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_VIDEO_TYPE, DEVCAP_VAL_VIDEO_TYPE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_LOG_DEV_MAP, DEVCAP_VAL_LOG_DEV_MAP);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_BANDWIDTH, DEVCAP_VAL_BANDWIDTH);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_FEATURE_FLAG, DEVCAP_VAL_FEATURE_FLAG);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEVICE_ID_H, DEVCAP_VAL_DEVICE_ID_H);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_DEVICE_ID_L, DEVCAP_VAL_DEVICE_ID_L);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_SCRATCHPAD_SIZE,
+			  DEVCAP_VAL_SCRATCHPAD_SIZE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_INT_STAT_SIZE,
+			  DEVCAP_VAL_INT_STAT_SIZE);
+	MHL_SII_CBUS_WR(0x0080 |
+			  DEVCAP_OFFSET_RESERVED, DEVCAP_VAL_RESERVED);
+
+	/*
+	 * Set bits 2,3 (initiator timeout) to 1,1
+	 * in REG_CBUS_LINK_CONTROL_2
+	 */
+	regval = MHL_SII_CBUS_RD(0x0031);
+	regval = (regval | 0x0C);
+	/* REG_CBUS_LINK_CONTROL_2 */
+	MHL_SII_CBUS_WR(0x0031, regval);
+	 /* REG_MSC_TIMEOUT_LIMIT */
+	MHL_SII_CBUS_WR(0x0022, 0x0F);
+	/* REG_CBUS_LINK_CONTROL_1 */
+	MHL_SII_CBUS_WR(0x0030, 0x01);
+	/* disallow vendor specific commands */
+	MHL_SII_CBUS_MOD(0x002E, BIT4, BIT4);
+}
+
+/*
+ * Configure the initial reg settings
+ */
+static void mhl_init_reg_settings(struct mhl_tx_ctrl *mhl_ctrl,
+	bool mhl_disc_en)
+{
+	uint8_t regval;
+
+	/*
+	 * ============================================
+	 * POWER UP
+	 * ============================================
+	 */
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* Power up 1.2V core */
+	MHL_SII_PAGE1_WR(0x003D, 0x3F);
+	/* Enable Tx PLL Clock */
+	MHL_SII_PAGE2_WR(0x0011, 0x01);
+	/* Enable Tx Clock Path and Equalizer */
+	MHL_SII_PAGE2_WR(0x0012, 0x11);
+	/* Tx Source Termination ON */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+	/* Enable 1X MHL Clock output */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL6, 0xBC);
+	/* Tx Differential Driver Config */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL2, 0x3C);
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL4, 0xC8);
+	/* PLL Bandwidth Control */
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL7, 0x03);
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL8, 0x0A);
+	/*
+	 * ============================================
+	 * Analog PLL Control
+	 * ============================================
+	 */
+	/* Enable Rx PLL clock */
+	MHL_SII_REG_NAME_WR(REG_TMDS_CCTRL,  0x08);
+	MHL_SII_PAGE0_WR(0x00F8, 0x8C);
+	MHL_SII_PAGE0_WR(0x0085, 0x02);
+	MHL_SII_PAGE2_WR(0x0000, 0x00);
+	regval = MHL_SII_PAGE2_RD(0x0005);
+	regval &= ~BIT5;
+	MHL_SII_PAGE2_WR(0x0005, regval);
+	MHL_SII_PAGE2_WR(0x0013, 0x60);
+	/* PLL Cal ref sel */
+	MHL_SII_PAGE2_WR(0x0017, 0x03);
+	/* VCO Cal */
+	MHL_SII_PAGE2_WR(0x001A, 0x20);
+	/* Auto EQ */
+	MHL_SII_PAGE2_WR(0x0022, 0xE0);
+	MHL_SII_PAGE2_WR(0x0023, 0xC0);
+	MHL_SII_PAGE2_WR(0x0024, 0xA0);
+	MHL_SII_PAGE2_WR(0x0025, 0x80);
+	MHL_SII_PAGE2_WR(0x0026, 0x60);
+	MHL_SII_PAGE2_WR(0x0027, 0x40);
+	MHL_SII_PAGE2_WR(0x0028, 0x20);
+	MHL_SII_PAGE2_WR(0x0029, 0x00);
+	/* Rx PLL Bandwidth 4MHz */
+	MHL_SII_PAGE2_WR(0x0031, 0x0A);
+	/* Rx PLL Bandwidth value from I2C */
+	MHL_SII_PAGE2_WR(0x0045, 0x06);
+	MHL_SII_PAGE2_WR(0x004B, 0x06);
+	MHL_SII_PAGE2_WR(0x004C, 0x60);
+	/* Manual zone control */
+	MHL_SII_PAGE2_WR(0x004C, 0xE0);
+	/* PLL Mode value */
+	MHL_SII_PAGE2_WR(0x004D, 0x00);
+	MHL_SII_PAGE0_WR(0x0008, 0x35);
+	/*
+	 * Discovery Control and Status regs
+	 * Setting De-glitch time to 50 ms (default)
+	 * Switch Control Disabled
+	 */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL2, 0xAD);
+	/* 1.8V CBUS VTH */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL5, 0x57);
+	/* RGND and single Discovery attempt */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL6, 0x11);
+	/* Ignore VBUS */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL8, 0x82);
+
+	/* Enable CBUS Discovery */
+	if (mhl_disc_en) {
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL9, 0x24);
+		/* Enable MHL Discovery */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x27);
+		/* Pull-up resistance off for IDLE state */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0x8C);
+	} else {
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL9, 0x26);
+		/* Disable MHL Discovery */
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL1, 0x26);
+		MHL_SII_REG_NAME_WR(REG_DISC_CTRL4, 0x8C);
+	}
+
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL7, 0x20);
+	/* MHL CBUS Discovery - immediate comm.  */
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+
+	MHL_SII_PAGE3_WR(0x3C, 0x80);
+
+	if (mhl_ctrl->cur_state != POWER_STATE_D3)
+		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT6 | BIT5 | BIT4, BIT4);
+
+	/* Enable Auto Soft RESET */
+	MHL_SII_REG_NAME_WR(REG_SRST, 0x084);
+	/* HDMI Transcode mode enable */
+	MHL_SII_PAGE0_WR(0x000D, 0x1C);
+
+	cbus_reset(mhl_ctrl);
+	init_cbus_regs(client);
+}
+
+
+static void switch_mode(struct mhl_tx_ctrl *mhl_ctrl, enum mhl_st_type to_mode)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	switch (to_mode) {
+	case POWER_STATE_D0_NO_MHL:
+		mhl_ctrl->cur_state = to_mode;
+		mhl_init_reg_settings(mhl_ctrl, true);
+		/* REG_DISC_CTRL1 */
+		MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, BIT0);
+
+		/* TPI_DEVICE_POWER_STATE_CTRL_REG */
+		mhl_i2c_reg_modify(client, TX_PAGE_TPI, 0x001E, BIT1 | BIT0,
+			0x00);
+		break;
+	case POWER_STATE_D0_MHL:
+		mhl_ctrl->cur_state = to_mode;
+		break;
+	case POWER_STATE_D3:
+		if (mhl_ctrl->cur_state == POWER_STATE_D3)
+			break;
+
+		/* Force HPD to 0 when not in MHL mode.  */
+		mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+		mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
+		/*
+		 * Change TMDS termination to high impedance
+		 * on disconnection.
+		 */
+		MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0xD0);
+		msleep(50);
+		if (!mhl_ctrl->disc_enabled)
+			MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT1 | BIT0, 0x00);
+		MHL_SII_PAGE3_MOD(0x003D, BIT0, 0x00);
+		mhl_ctrl->cur_state = POWER_STATE_D3;
+		break;
+	default:
+		break;
+	}
+}
+
+
+void mhl_tmds_ctrl(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	if (on) {
+		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, BIT4);
+		mhl_drive_hpd(mhl_ctrl, HPD_UP);
+	} else {
+		MHL_SII_REG_NAME_MOD(REG_TMDS_CCTRL, BIT4, 0x00);
+	}
+}
+
+void mhl_drive_hpd(struct mhl_tx_ctrl *mhl_ctrl, uint8_t to_state)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	pr_debug("%s: To state=[0x%x]\n", __func__, to_state);
+	if (to_state == HPD_UP) {
+		/*
+		 * Drive HPD to the UP state:
+		 * un-force HPD (from low) and let it float by
+		 * clearing HPD_OUT_OVR_EN, so the change
+		 * propagates to the source for EDID reads
+		 */
+		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, 0x00);
+	} else {
+		/* Drive HPD to DOWN state */
+		MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT4, BIT4);
+	}
+}
+
+static void mhl_msm_connection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t val;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	pr_debug("%s: cur st [0x%x]\n", __func__,
+		mhl_ctrl->cur_state);
+
+	if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+		/* Already in D0 - MHL power state */
+		pr_err("%s: cur st not D0\n", __func__);
+		return;
+	}
+	/* spin_lock_irqsave(&mhl_state_lock, flags); */
+	switch_mode(mhl_ctrl, POWER_STATE_D0_MHL);
+	/* spin_unlock_irqrestore(&mhl_state_lock, flags); */
+
+	MHL_SII_REG_NAME_WR(REG_MHLTX_CTL1, 0x10);
+	MHL_SII_CBUS_WR(0x07, 0xF2);
+
+	/*
+	 * Keep discovery enabled; the RGND interrupt is still needed.
+	 * The chip may disable discovery after MHL_EST, so re-enable
+	 * it here.
+	 */
+	val = MHL_SII_PAGE3_RD(0x10);
+	MHL_SII_PAGE3_WR(0x10, val | BIT0);
+
+	/*
+	 * Indicate DCAP_RDY and DCAP_CHG to the peer only after the
+	 * MHL connection has been established.
+	 */
+	mhl_msc_send_write_stat(mhl_ctrl,
+				MHL_STATUS_REG_CONNECTED_RDY,
+				MHL_STATUS_DCAP_RDY);
+
+	mhl_msc_send_set_int(mhl_ctrl,
+			     MHL_RCHANGE_INT,
+			     MHL_INT_DCAP_CHG,
+			     MSC_PRIORITY_SEND);
+}
+
+static void mhl_msm_disconnection(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	/*
+	 * MHL TX CTL1
+	 * Disabling Tx termination
+	 */
+	MHL_SII_PAGE3_WR(0x30, 0xD0);
+
+	switch_mode(mhl_ctrl, POWER_STATE_D3);
+}
+
+static int mhl_msm_read_rgnd_int(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t rgnd_imp;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	/* DISC STATUS REG 2 */
+	rgnd_imp = (mhl_i2c_reg_read(client, TX_PAGE_3, 0x001C) &
+		    (BIT1 | BIT0));
+	pr_debug("imp range read=%02X\n", (int)rgnd_imp);
+
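+	/*
+	 * An RGND impedance code of 0b10 (~1 kohm to ground, per the
+	 * MHL discovery spec) identifies an MHL sink; any other reading
+	 * is treated as a legacy/USB peer.
+	 */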
+	if (rgnd_imp == 0x02) {
+		pr_debug("%s: mhl sink\n", __func__);
+		mhl_ctrl->mhl_mode = 1;
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		if (mhl_ctrl->notify_usb_online)
+			mhl_ctrl->notify_usb_online(1);
+	} else {
+		pr_debug("%s: non-mhl sink\n", __func__);
+		mhl_ctrl->mhl_mode = 0;
+		switch_mode(mhl_ctrl, POWER_STATE_D3);
+	}
+	complete(&mhl_ctrl->rgnd_done);
+	return mhl_ctrl->mhl_mode ?
+		MHL_DISCOVERY_RESULT_MHL : MHL_DISCOVERY_RESULT_USB;
+}
+
+static void force_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* disable discovery */
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, 0);
+	/* force USB ID switch to open */
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, BIT6);
+	MHL_SII_REG_NAME_WR(REG_DISC_CTRL3, 0x86);
+	/* force HPD to 0 when not in mhl mode. */
+	MHL_SII_REG_NAME_MOD(REG_INT_CTRL, BIT5 | BIT4, BIT4);
+}
+
+static void release_usb_switch_open(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	msleep(50);
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL6, BIT6, 0x00);
+	MHL_SII_REG_NAME_MOD(REG_DISC_CTRL1, BIT0, BIT0);
+}
+
+static void scdt_st_chg(struct i2c_client *client)
+{
+	uint8_t tmds_cstat;
+	uint8_t mhl_fifo_status;
+
+	/* tmds cstat */
+	tmds_cstat = MHL_SII_PAGE3_RD(0x0040);
+	pr_debug("%s: tmds cstat: 0x%02x\n", __func__,
+		 tmds_cstat);
+
+	if (!(tmds_cstat & BIT1))
+		return;
+
+	mhl_fifo_status = MHL_SII_REG_NAME_RD(REG_INTR5);
+	pr_debug("%s: mhl fifo st: 0x%02x\n", __func__,
+		 mhl_fifo_status);
+	if (mhl_fifo_status & 0x0C) {
+		MHL_SII_REG_NAME_WR(REG_INTR5, 0x0C);
+		pr_debug("%s: mhl fifo rst\n", __func__);
+		MHL_SII_REG_NAME_WR(REG_SRST, 0x94);
+		MHL_SII_REG_NAME_WR(REG_SRST, 0x84);
+	}
+}
+
+static void dev_detect_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t status, reg;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* INTR_STATUS4 */
+	status = MHL_SII_REG_NAME_RD(REG_INTR4);
+	pr_debug("%s: reg int4 st=%02X\n", __func__, status);
+
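+	/*
+	 * INTR4 bit usage, as inferred from the handlers below:
+	 * BIT0 SCDT change, BIT2 MHL_EST, BIT3 non-MHL (uUSB-A) device,
+	 * BIT4 CBUS lockout, BIT5 disconnect, BIT6 RGND ready.
+	 */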
+	if ((status == 0x00) &&
+	    (mhl_ctrl->cur_state == POWER_STATE_D3)) {
+		pr_err("%s: invalid intr\n", __func__);
+		return;
+	}
+
+	if (status == 0xFF) {
+		pr_debug("%s: invalid intr 0xff\n", __func__);
+		MHL_SII_REG_NAME_WR(REG_INTR4, status);
+		return;
+	}
+
+	if ((status & BIT0) && (mhl_ctrl->chip_rev_id < 1)) {
+		pr_debug("%s: scdt intr\n", __func__);
+		scdt_st_chg(client);
+	}
+
+	if (status & BIT1)
+		pr_debug("mhl: int4 bit1 set\n");
+
+	/* mhl_est interrupt */
+	if (status & BIT2) {
+		pr_debug("%s: mhl_est st=%02X\n", __func__,
+			 (int) status);
+		mhl_msm_connection(mhl_ctrl);
+	} else if (status & BIT3) {
+		pr_debug("%s: uUSB-a type dev detct\n", __func__);
+		/* Short RGND */
+		MHL_SII_REG_NAME_MOD(REG_DISC_STAT2, BIT0 | BIT1, 0x00);
+		mhl_msm_disconnection(mhl_ctrl);
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		if (mhl_ctrl->notify_usb_online)
+			mhl_ctrl->notify_usb_online(0);
+	}
+
+	if (status & BIT5) {
+		/* clr intr - reg int4 */
+		pr_debug("%s: mhl discon: int4 st=%02X\n", __func__,
+			 (int)status);
+		reg = MHL_SII_REG_NAME_RD(REG_INTR4);
+		MHL_SII_REG_NAME_WR(REG_INTR4, reg);
+		mhl_msm_disconnection(mhl_ctrl);
+		power_supply_changed(&mhl_ctrl->mhl_psy);
+		if (mhl_ctrl->notify_usb_online)
+			mhl_ctrl->notify_usb_online(0);
+	}
+
+	if ((mhl_ctrl->cur_state != POWER_STATE_D0_NO_MHL) &&
+	    (status & BIT6)) {
+		/* rgnd rdy Intr */
+		pr_debug("%s: rgnd ready intr\n", __func__);
+		switch_mode(mhl_ctrl, POWER_STATE_D0_NO_MHL);
+		mhl_msm_read_rgnd_int(mhl_ctrl);
+	}
+
+	/* These interrupts cannot be serviced while in D3 */
+	if ((mhl_ctrl->cur_state != POWER_STATE_D3) &&
+	    (status & BIT4)) {
+		/*
+		 * CBUS lockout interrupt: the hardware detection
+		 * mechanism has determined that the CBUS line is
+		 * latched, so force the USB switch open and then
+		 * release it.
+		 */
+		pr_warn("%s: cbus locked out!\n", __func__);
+		force_usb_switch_open(mhl_ctrl);
+		release_usb_switch_open(mhl_ctrl);
+	}
+	MHL_SII_REG_NAME_WR(REG_INTR4, status);
+}
+
+static void mhl_misc_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t intr_5_stat;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/*
+	 * Clear INT 5
+	 * INTR5 is related to FIFO underflow/overflow reset
+	 * which is handled in 8334 by auto FIFO reset
+	 */
+	intr_5_stat = MHL_SII_REG_NAME_RD(REG_INTR5);
+	MHL_SII_REG_NAME_WR(REG_INTR5, intr_5_stat);
+}
+
+static void mhl_hpd_stat_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t intr_1_stat;
+	uint8_t cbus_stat;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* INTR STATUS 1 */
+	intr_1_stat = MHL_SII_PAGE0_RD(0x0071);
+
+	if (!intr_1_stat)
+		return;
+
+	/* Clear interrupts */
+	MHL_SII_PAGE0_WR(0x0071, intr_1_stat);
+	if (BIT6 & intr_1_stat) {
+		/*
+		 * An HPD status change event is pending; read the CBUS
+		 * HPD status bit for the new state.
+		 */
+		cbus_stat = MHL_SII_CBUS_RD(0x0D);
+		if (BIT6 & cbus_stat)
+			mhl_drive_hpd(mhl_ctrl, HPD_UP);
+		else
+			mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
+
+	}
+}
+
+static void mhl_sii_cbus_process_errors(struct i2c_client *client,
+					u8 int_status)
+{
+	u8 abort_reason = 0;
+
+	if (int_status & BIT2) {
+		abort_reason = MHL_SII_REG_NAME_RD(REG_DDC_ABORT_REASON);
+		pr_debug("%s: CBUS DDC Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+	}
+	if (int_status & BIT5) {
+		abort_reason = MHL_SII_REG_NAME_RD(REG_PRI_XFR_ABORT_REASON);
+		pr_debug("%s: CBUS MSC Requestor Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+		MHL_SII_REG_NAME_WR(REG_PRI_XFR_ABORT_REASON, 0xFF);
+	}
+	if (int_status & BIT6) {
+		abort_reason = MHL_SII_REG_NAME_RD(
+			REG_CBUS_PRI_FWR_ABORT_REASON);
+		pr_debug("%s: CBUS MSC Responder Abort Reason(0x%02x)\n",
+			 __func__, abort_reason);
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_FWR_ABORT_REASON, 0xFF);
+	}
+}
+
+int mhl_send_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
+			 struct msc_command_struct *req)
+{
+	int timeout;
+	u8 start_bit = 0x00;
+	u8 *burst_data;
+	int i;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	if (mhl_ctrl->cur_state != POWER_STATE_D0_MHL) {
+		pr_debug("%s: power_state:%02x CBUS(0x0A):%02x\n",
+			 __func__,
+			 mhl_ctrl->cur_state,
+			 MHL_SII_REG_NAME_RD(REG_CBUS_BUS_STATUS));
+		return -EFAULT;
+	}
+
+	if (!req)
+		return -EFAULT;
+
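+	/*
+	 * MSC request flow: stage the command/offset and data bytes in
+	 * the CBUS_PRI_* registers, kick the transaction with the
+	 * matching start bit, then block until the MSC_REQ_DONE
+	 * interrupt completes msc_cmd_done or T_ABORT_NEXT expires.
+	 */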
+	pr_debug("%s: command=0x%02x offset=0x%02x %02x %02x",
+		 __func__,
+		 req->command,
+		 req->offset,
+		 req->payload.data[0],
+		 req->payload.data[1]);
+
+	/* REG_CBUS_PRI_ADDR_CMD = REQ CBUS CMD or OFFSET */
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->offset);
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_1ST,
+			    req->payload.data[0]);
+
+	switch (req->command) {
+	case MHL_SET_INT:
+	case MHL_WRITE_STAT:
+		start_bit = MSC_START_BIT_WRITE_REG;
+		break;
+	case MHL_READ_DEVCAP:
+		start_bit = MSC_START_BIT_READ_REG;
+		break;
+	case MHL_GET_STATE:
+	case MHL_GET_VENDOR_ID:
+	case MHL_SET_HPD:
+	case MHL_CLR_HPD:
+	case MHL_GET_SC1_ERRORCODE:
+	case MHL_GET_DDC_ERRORCODE:
+	case MHL_GET_MSC_ERRORCODE:
+	case MHL_GET_SC3_ERRORCODE:
+		start_bit = MSC_START_BIT_MSC_CMD;
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
+		break;
+	case MHL_MSC_MSG:
+		start_bit = MSC_START_BIT_VS_CMD;
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_WR_DATA_2ND,
+				    req->payload.data[1]);
+		MHL_SII_REG_NAME_WR(REG_CBUS_PRI_ADDR_CMD, req->command);
+		break;
+	case MHL_WRITE_BURST:
+		start_bit = MSC_START_BIT_WRITE_BURST;
+		MHL_SII_REG_NAME_WR(REG_MSC_WRITE_BURST_LEN, req->length - 1);
+		if (!(req->payload.burst_data)) {
+			pr_err("%s: burst data is null!\n", __func__);
+			goto cbus_send_fail;
+		}
+		burst_data = req->payload.burst_data;
+		for (i = 0; i < req->length; i++, burst_data++)
+			MHL_SII_REG_NAME_WR(REG_CBUS_SCRATCHPAD_0 + i,
+				*burst_data);
+		break;
+	default:
+		pr_err("%s: unknown command! (%02x)\n",
+		       __func__, req->command);
+		goto cbus_send_fail;
+	}
+
+	INIT_COMPLETION(mhl_ctrl->msc_cmd_done);
+	MHL_SII_REG_NAME_WR(REG_CBUS_PRI_START, start_bit);
+	timeout = wait_for_completion_interruptible_timeout
+		(&mhl_ctrl->msc_cmd_done, msecs_to_jiffies(T_ABORT_NEXT));
+	if (!timeout) {
+		pr_err("%s: cbus_command_send timed out!\n", __func__);
+		goto cbus_send_fail;
+	}
+
+	switch (req->command) {
+	case MHL_READ_DEVCAP:
+		req->retval = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_RD_DATA_1ST);
+		break;
+	case MHL_MSC_MSG:
+		/* check if MSC_MSG NACKed */
+		if (MHL_SII_REG_NAME_RD(REG_MSC_WRITE_BURST_LEN) & BIT6)
+			return -EAGAIN;
+		/* else fall through to the common retval handling */
+	default:
+		req->retval = 0;
+		break;
+	}
+	mhl_msc_command_done(mhl_ctrl, req);
+	pr_debug("%s: msc cmd done\n", __func__);
+	return 0;
+
+cbus_send_fail:
+	return -EFAULT;
+}
+
+/* read scratchpad */
+void mhl_read_scratchpad(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	int i;
+
+	for (i = 0; i < MHL_SCRATCHPAD_SIZE; i++) {
+		mhl_ctrl->scrpd.data[i] = MHL_SII_REG_NAME_RD(
+			REG_CBUS_SCRATCHPAD_0 + i);
+	}
+}
+
+static void mhl_cbus_isr(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t regval;
+	int req_done = 0;
+	uint8_t sub_cmd = 0x0;
+	uint8_t cmd_data = 0x0;
+	int msc_msg_recved = 0;
+	int rc = -1;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	regval = MHL_SII_REG_NAME_RD(REG_CBUS_INTR_STATUS);
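+	/* an all-ones read usually means the chip is off or the I2C read failed */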
+	if (regval == 0xff)
+		return;
+
+	if (regval)
+		MHL_SII_REG_NAME_WR(REG_CBUS_INTR_STATUS, regval);
+
+	pr_debug("%s: CBUS_INT = %02x\n", __func__, regval);
+
+	/* MSC_MSG (RCP/RAP) */
+	if (regval & BIT3) {
+		sub_cmd = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_VS_CMD);
+		cmd_data = MHL_SII_REG_NAME_RD(REG_CBUS_PRI_VS_DATA);
+		msc_msg_recved = 1;
+	}
+	/* MSC_MT_ABRT/MSC_MR_ABRT/DDC_ABORT */
+	if (regval & (BIT6 | BIT5 | BIT2))
+		mhl_sii_cbus_process_errors(client, regval);
+
+	/* MSC_REQ_DONE */
+	if (regval & BIT4)
+		req_done = 1;
+
+	/* look for interrupts on CBUS_MSC_INT2 */
+	regval  = MHL_SII_REG_NAME_RD(REG_CBUS_MSC_INT2_STATUS);
+
+	/* clear all interrupts */
+	if (regval)
+		MHL_SII_REG_NAME_WR(REG_CBUS_MSC_INT2_STATUS, regval);
+
+	pr_debug("%s: CBUS_MSC_INT2 = %02x\n", __func__, regval);
+
+	/* received SET_INT */
+	if (regval & BIT2) {
+		uint8_t intr;
+		intr = MHL_SII_REG_NAME_RD(REG_CBUS_SET_INT_0);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_0, intr);
+		mhl_msc_recv_set_int(mhl_ctrl, 0, intr);
+
+		pr_debug("%s: MHL_INT_0 = %02x\n", __func__, intr);
+		intr = MHL_SII_REG_NAME_RD(REG_CBUS_SET_INT_1);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_1, intr);
+		mhl_msc_recv_set_int(mhl_ctrl, 1, intr);
+
+		pr_debug("%s: MHL_INT_1 = %02x\n", __func__, intr);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_2, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_SET_INT_3, 0xFF);
+	}
+
+	/* received WRITE_STAT */
+	if (regval & BIT3) {
+		uint8_t stat;
+		stat = MHL_SII_REG_NAME_RD(REG_CBUS_WRITE_STAT_0);
+		mhl_msc_recv_write_stat(mhl_ctrl, 0, stat);
+
+		pr_debug("%s: MHL_STATUS_0 = %02x\n", __func__, stat);
+		stat = MHL_SII_REG_NAME_RD(REG_CBUS_WRITE_STAT_1);
+		mhl_msc_recv_write_stat(mhl_ctrl, 1, stat);
+		pr_debug("%s: MHL_STATUS_1 = %02x\n", __func__, stat);
+
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_0, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_1, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_2, 0xFF);
+		MHL_SII_REG_NAME_WR(REG_CBUS_WRITE_STAT_3, 0xFF);
+	}
+
+	/* received MSC_MSG */
+	if (msc_msg_recved) {
+		/*mhl msc recv msc msg*/
+		rc = mhl_msc_recv_msc_msg(mhl_ctrl, sub_cmd, cmd_data);
+		if (rc)
+			pr_err("MHL: mhl msc recv msc msg failed(%d)!\n", rc);
+	}
+	/* complete last command */
+	if (req_done)
+		complete_all(&mhl_ctrl->msc_cmd_done);
+}
+
+static void clear_all_intrs(struct i2c_client *client)
+{
+	uint8_t regval = 0x00;
+
+	pr_debug_intr("********* exiting isr mask check ?? *************\n");
+	pr_debug_intr("int1 mask = %02X\n",
+		(int) MHL_SII_REG_NAME_RD(REG_INTR1));
+	pr_debug_intr("int3 mask = %02X\n",
+		(int) MHL_SII_PAGE0_RD(0x0077));
+	pr_debug_intr("int4 mask = %02X\n",
+		(int) MHL_SII_REG_NAME_RD(REG_INTR4));
+	pr_debug_intr("int5 mask = %02X\n",
+		(int) MHL_SII_REG_NAME_RD(REG_INTR5));
+	pr_debug_intr("cbus1 mask = %02X\n",
+		(int) MHL_SII_CBUS_RD(0x0009));
+	pr_debug_intr("cbus2 mask = %02X\n",
+		(int) MHL_SII_CBUS_RD(0x001F));
+	pr_debug_intr("********* end of isr mask check *************\n");
+
+	regval = MHL_SII_REG_NAME_RD(REG_INTR1);
+	pr_debug_intr("int1 st = %02X\n", (int)regval);
+	MHL_SII_REG_NAME_WR(REG_INTR1, regval);
+
+	regval =  MHL_SII_REG_NAME_RD(REG_INTR2);
+	pr_debug_intr("int2 st = %02X\n", (int)regval);
+	MHL_SII_REG_NAME_WR(REG_INTR2, regval);
+
+	regval =  MHL_SII_PAGE0_RD(0x0073);
+	pr_debug_intr("int3 st = %02X\n", (int)regval);
+	MHL_SII_PAGE0_WR(0x0073, regval);
+
+	regval =  MHL_SII_REG_NAME_RD(REG_INTR4);
+	pr_debug_intr("int4 st = %02X\n", (int)regval);
+	MHL_SII_REG_NAME_WR(REG_INTR4, regval);
+
+	regval =  MHL_SII_REG_NAME_RD(REG_INTR5);
+	pr_debug_intr("int5 st = %02X\n", (int)regval);
+	MHL_SII_REG_NAME_WR(REG_INTR5, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x0008);
+	pr_debug_intr("cbusInt st = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x0008, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x001E);
+	pr_debug_intr("CBUS intR_2: %d\n", (int)regval);
+	MHL_SII_CBUS_WR(0x001E, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00A0);
+	pr_debug_intr("A0 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00A0, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00A1);
+	pr_debug_intr("A1 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00A1, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00A2);
+	pr_debug_intr("A2 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00A2, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00A3);
+	pr_debug_intr("A3 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00A3, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00B0);
+	pr_debug_intr("B0 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00B0, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00B1);
+	pr_debug_intr("B1 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00B1, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00B2);
+	pr_debug_intr("B2 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00B2, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00B3);
+	pr_debug_intr("B3 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00B3, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00E0);
+	pr_debug_intr("E0 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00E0, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00E1);
+	pr_debug_intr("E1 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00E1, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00E2);
+	pr_debug_intr("E2 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00E2, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00E3);
+	pr_debug_intr("E3 st set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00E3, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00F0);
+	pr_debug_intr("F0 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00F0, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00F1);
+	pr_debug_intr("F1 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00F1, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00F2);
+	pr_debug_intr("F2 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00F2, regval);
+
+	regval =  MHL_SII_CBUS_RD(0x00F3);
+	pr_debug_intr("F3 int set = %02X\n", (int)regval);
+	MHL_SII_CBUS_WR(0x00F3, regval);
+	pr_debug_intr("********* end of exiting in isr *************\n");
+}
+
+static irqreturn_t mhl_tx_isr(int irq, void *data)
+{
+	struct mhl_tx_ctrl *mhl_ctrl = (struct mhl_tx_ctrl *)data;
+	pr_debug("%s: Getting Interrupts\n", __func__);
+
+	/*
+	 * Check RGND, MHL_EST, CBUS_LOCKOUT, SCDT
+	 * interrupts. In D3, we get only RGND
+	 */
+	dev_detect_isr(mhl_ctrl);
+
+	pr_debug("%s: cur pwr state is [0x%x]\n",
+		 __func__, mhl_ctrl->cur_state);
+	if (mhl_ctrl->cur_state == POWER_STATE_D0_MHL) {
+		/*
+		 * If dev_detect_isr() didn't move the tx to D3
+		 * on disconnect, continue to check other
+		 * interrupt sources.
+		 */
+		mhl_misc_isr(mhl_ctrl);
+
+		/*
+		 * Check for any peer messages for DCAP_CHG, MSC etc
+		 * Dispatch to have the CBUS module working only
+		 * once connected.
+		 */
+		mhl_cbus_isr(mhl_ctrl);
+		mhl_hpd_stat_isr(mhl_ctrl);
+	}
+
+	clear_all_intrs(mhl_ctrl->i2c_handle);
+
+	return IRQ_HANDLED;
+}
+
+static int mhl_tx_chip_init(struct mhl_tx_ctrl *mhl_ctrl)
+{
+	uint8_t chip_rev_id = 0x00;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+
+	/* Reset the TX chip */
+	mhl_sii_reset_pin(mhl_ctrl, 0);
+	msleep(20);
+	mhl_sii_reset_pin(mhl_ctrl, 1);
+	/* TX PR-guide requires a 100 ms wait here */
+	msleep(100);
+
+	/* Read the chip rev ID */
+	chip_rev_id = MHL_SII_PAGE0_RD(0x04);
+	pr_debug("MHL: chip rev ID read=[%x]\n", chip_rev_id);
+	mhl_ctrl->chip_rev_id = chip_rev_id;
+
+	/*
+	 * Need to disable MHL discovery if
+	 * MHL-USB handshake is implemented
+	 */
+	mhl_init_reg_settings(mhl_ctrl, true);
+	switch_mode(mhl_ctrl, POWER_STATE_D3);
+	return 0;
+}
+
+static int mhl_sii_reg_config(struct i2c_client *client, bool enable)
+{
+	static struct regulator *reg_8941_l24;
+	static struct regulator *reg_8941_l02;
+	static struct regulator *reg_8941_smps3a;
+	static struct regulator *reg_8941_vdda;
+	int rc = 0;
+
+	pr_debug("%s\n", __func__);
+	if (!reg_8941_l24) {
+		reg_8941_l24 = regulator_get(&client->dev,
+			"avcc_18");
+		if (IS_ERR(reg_8941_l24)) {
+			pr_err("could not get 8941 l24, rc = %ld\n",
+				PTR_ERR(reg_8941_l24));
+			return -ENODEV;
+		}
+		if (enable)
+			rc = regulator_enable(reg_8941_l24);
+		else
+			rc = regulator_disable(reg_8941_l24);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+			       "avcc_1.8V", enable, rc);
+			goto l24_fail;
+		} else {
+			pr_debug("%s: vreg L24 %s\n",
+				 __func__, (enable ? "enabled" : "disabled"));
+		}
+	}
+
+	if (!reg_8941_l02) {
+		reg_8941_l02 = regulator_get(&client->dev,
+			"avcc_12");
+		if (IS_ERR(reg_8941_l02)) {
+			pr_err("could not get reg_8941_l02, rc = %ld\n",
+				PTR_ERR(reg_8941_l02));
+			goto l24_fail;
+		}
+		if (enable)
+			rc = regulator_enable(reg_8941_l02);
+		else
+			rc = regulator_disable(reg_8941_l02);
+		if (rc) {
+			pr_debug("'%s' regulator configure[%u] failed, rc=%d\n",
+				 "avcc_1.2V", enable, rc);
+			goto l02_fail;
+		} else {
+			pr_debug("%s: vreg L02 %s\n",
+				 __func__, (enable ? "enabled" : "disabled"));
+		}
+	}
+
+	if (!reg_8941_smps3a) {
+		reg_8941_smps3a = regulator_get(&client->dev,
+			"smps3a");
+		if (IS_ERR(reg_8941_smps3a)) {
+			pr_err("could not get vreg smps3a, rc = %ld\n",
+				PTR_ERR(reg_8941_smps3a));
+			goto l02_fail;
+		}
+		if (enable)
+			rc = regulator_enable(reg_8941_smps3a);
+		else
+			rc = regulator_disable(reg_8941_smps3a);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+			       "SMPS3A", enable, rc);
+			goto smps3a_fail;
+		} else {
+			pr_debug("%s: vreg SMPS3A %s\n",
+				 __func__, (enable ? "enabled" : "disabled"));
+		}
+	}
+
+	if (!reg_8941_vdda) {
+		reg_8941_vdda = regulator_get(&client->dev,
+			"vdda");
+		if (IS_ERR(reg_8941_vdda)) {
+			pr_err("could not get vreg vdda, rc = %ld\n",
+				PTR_ERR(reg_8941_vdda));
+			goto smps3a_fail;
+		}
+		if (enable)
+			rc = regulator_enable(reg_8941_vdda);
+		else
+			rc = regulator_disable(reg_8941_vdda);
+		if (rc) {
+			pr_err("'%s' regulator config[%u] failed, rc=%d\n",
+			       "VDDA", enable, rc);
+			goto vdda_fail;
+		} else {
+			pr_debug("%s: vreg VDDA %s\n",
+				 __func__, (enable ? "enabled" : "disabled"));
+		}
+	}
+
+	return rc;
+
+vdda_fail:
+	regulator_disable(reg_8941_vdda);
+	regulator_put(reg_8941_vdda);
+smps3a_fail:
+	regulator_disable(reg_8941_smps3a);
+	regulator_put(reg_8941_smps3a);
+l02_fail:
+	regulator_disable(reg_8941_l02);
+	regulator_put(reg_8941_l02);
+l24_fail:
+	regulator_disable(reg_8941_l24);
+	regulator_put(reg_8941_l24);
+
+	return -EINVAL;
+}
+
+static int mhl_vreg_config(struct mhl_tx_ctrl *mhl_ctrl, uint8_t on)
+{
+	int ret;
+	struct i2c_client *client = mhl_ctrl->i2c_handle;
+	int pwr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio;
+
+	pr_debug("%s\n", __func__);
+	if (on) {
+		ret = gpio_request(pwr_gpio,
+		    mhl_ctrl->pdata->gpios[MHL_TX_PMIC_PWR_GPIO]->gpio_name);
+		if (ret < 0) {
+			pr_err("%s: mhl pwr gpio req failed: %d\n",
+			       __func__, ret);
+			return ret;
+		}
+		ret = gpio_direction_output(pwr_gpio, 1);
+		if (ret < 0) {
+			pr_err("%s: set gpio MHL_PWR_EN dircn failed: %d\n",
+			       __func__, ret);
+			goto vreg_config_failed;
+		}
+
+		ret = mhl_sii_reg_config(client, true);
+		if (ret) {
+			pr_err("%s: regulator enable failed\n", __func__);
+			goto vreg_config_failed;
+		}
+		pr_debug("%s: mhl sii power on successful\n", __func__);
+	} else {
+		pr_warn("%s: turning off pwr controls\n", __func__);
+		mhl_sii_reg_config(client, false);
+		gpio_free(pwr_gpio);
+	}
+	pr_debug("%s: successful\n", __func__);
+	return 0;
+vreg_config_failed:
+	gpio_free(pwr_gpio);
+	return -EINVAL;
+}
+
+/*
+ * Request for GPIO allocations
+ * Set appropriate GPIO directions
+ */
+static int mhl_gpio_config(struct mhl_tx_ctrl *mhl_ctrl, int on)
+{
+	int ret;
+	struct dss_gpio *temp_reset_gpio, *temp_intr_gpio;
+
+	/* local aliases keep the lines below within length limits */
+	temp_reset_gpio = mhl_ctrl->pdata->gpios[MHL_TX_RESET_GPIO];
+	temp_intr_gpio = mhl_ctrl->pdata->gpios[MHL_TX_INTR_GPIO];
+
+	if (on) {
+		if (gpio_is_valid(temp_reset_gpio->gpio)) {
+			ret = gpio_request(temp_reset_gpio->gpio,
+					   temp_reset_gpio->gpio_name);
+			if (ret < 0) {
+				pr_err("%s:rst_gpio=[%d] req failed:%d\n",
+				       __func__, temp_reset_gpio->gpio, ret);
+				return -EBUSY;
+			}
+			ret = gpio_direction_output(temp_reset_gpio->gpio, 0);
+			if (ret < 0) {
+				pr_err("%s: set dirn rst failed: %d\n",
+				       __func__, ret);
+				return -EBUSY;
+			}
+		}
+		if (gpio_is_valid(temp_intr_gpio->gpio)) {
+			ret = gpio_request(temp_intr_gpio->gpio,
+					   temp_intr_gpio->gpio_name);
+			if (ret < 0) {
+				pr_err("%s: intr_gpio req failed: %d\n",
+				       __func__, ret);
+				return -EBUSY;
+			}
+			ret = gpio_direction_input(temp_intr_gpio->gpio);
+			if (ret < 0) {
+				pr_err("%s: set dirn intr failed: %d\n",
+				       __func__, ret);
+				return -EBUSY;
+			}
+			mhl_ctrl->i2c_handle->irq = gpio_to_irq(
+				temp_intr_gpio->gpio);
+			pr_debug("%s: gpio_to_irq=%d\n",
+				 __func__, mhl_ctrl->i2c_handle->irq);
+		}
+	} else {
+		pr_warn("%s: freeing gpios\n", __func__);
+		gpio_free(temp_intr_gpio->gpio);
+		gpio_free(temp_reset_gpio->gpio);
+	}
+	pr_debug("%s: successful\n", __func__);
+	return 0;
+}
+
+static int mhl_i2c_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	int rc = 0;
+	struct mhl_tx_platform_data *pdata = NULL;
+	struct mhl_tx_ctrl *mhl_ctrl;
+	struct usb_ext_notification *mhl_info = NULL;
+	struct msm_hdmi_mhl_ops *hdmi_mhl_ops = NULL;
+
+	mhl_ctrl = devm_kzalloc(&client->dev, sizeof(*mhl_ctrl), GFP_KERNEL);
+	if (!mhl_ctrl) {
+		pr_err("%s: FAILED: cannot alloc hdmi tx ctrl\n", __func__);
+		rc = -ENOMEM;
+		goto failed_no_mem;
+	}
+
+	if (client->dev.of_node) {
+		pdata = devm_kzalloc(&client->dev,
+			     sizeof(struct mhl_tx_platform_data), GFP_KERNEL);
+		if (!pdata) {
+			dev_err(&client->dev, "Failed to allocate memory\n");
+			rc = -ENOMEM;
+			goto failed_no_mem;
+		}
+
+		rc = mhl_tx_get_dt_data(&client->dev, pdata);
+		if (rc) {
+			pr_err("%s: FAILED: parsing device tree data; rc=%d\n",
+				__func__, rc);
+			goto failed_dt_data;
+		}
+		mhl_ctrl->i2c_handle = client;
+		mhl_ctrl->pdata = pdata;
+		i2c_set_clientdata(client, mhl_ctrl);
+	} else {
+		pr_err("%s: no device tree node found\n", __func__);
+		rc = -EINVAL;
+		goto failed_no_mem;
+	}
+
+	/*
+	 * Regulator init
+	 */
+	rc = mhl_vreg_config(mhl_ctrl, 1);
+	if (rc) {
+		pr_err("%s: vreg init failed [%d]\n",
+			__func__, rc);
+		goto failed_probe;
+	}
+
+	/*
+	 * GPIO init
+	 */
+	rc = mhl_gpio_config(mhl_ctrl, 1);
+	if (rc) {
+		pr_err("%s: gpio init failed [%d]\n",
+			__func__, rc);
+		goto failed_probe;
+	}
+
+	/*
+	 * Other, TX-specific initializations
+	 */
+	mhl_ctrl->disc_enabled = false;
+	INIT_WORK(&mhl_ctrl->mhl_msc_send_work, mhl_msc_send_work);
+	mhl_ctrl->cur_state = POWER_STATE_D0_MHL;
+	INIT_LIST_HEAD(&mhl_ctrl->list_cmd);
+	init_completion(&mhl_ctrl->msc_cmd_done);
+	mhl_ctrl->msc_send_workqueue = create_singlethread_workqueue
+		("mhl_msc_cmd_queue");
+
+	mhl_ctrl->input = input_allocate_device();
+	if (mhl_ctrl->input) {
+		int i;
+		struct input_dev *input = mhl_ctrl->input;
+
+		mhl_ctrl->rcp_key_code_tbl = vmalloc(
+			sizeof(support_rcp_key_code_tbl));
+		if (!mhl_ctrl->rcp_key_code_tbl) {
+			pr_err("%s: no mem for rcp keycode tbl\n",
+			       __func__);
+			rc = -ENOMEM;
+			goto failed_probe;
+		}
+
+		mhl_ctrl->rcp_key_code_tbl_len = sizeof(
+			support_rcp_key_code_tbl);
+		memcpy(mhl_ctrl->rcp_key_code_tbl,
+		       &support_rcp_key_code_tbl[0],
+		       mhl_ctrl->rcp_key_code_tbl_len);
+
+		input->phys = "cbus/input0";
+		input->id.bustype = BUS_VIRTUAL;
+		input->id.vendor  = 0x1095;
+		input->id.product = 0x8334;
+		input->id.version = 0xA;
+
+		input->name = "mhl-rcp";
+
+		input->keycode = support_rcp_key_code_tbl;
+		input->keycodesize = sizeof(u16);
+		input->keycodemax = ARRAY_SIZE(support_rcp_key_code_tbl);
+
+		input->evbit[0] = EV_KEY;
+		for (i = 0; i < ARRAY_SIZE(support_rcp_key_code_tbl); i++) {
+			if (support_rcp_key_code_tbl[i] > 1)
+				input_set_capability(input, EV_KEY,
+					support_rcp_key_code_tbl[i]);
+		}
+
+		if (input_register_device(input) < 0) {
+			pr_warn("%s: failed to register input device\n",
+				__func__);
+			input_free_device(input);
+			mhl_ctrl->input = NULL;
+		}
+	}
+
+	rc = mhl_tx_chip_init(mhl_ctrl);
+	if (rc) {
+		pr_err("%s: tx chip init failed [%d]\n",
+			__func__, rc);
+		goto failed_probe;
+	}
+
+	init_completion(&mhl_ctrl->rgnd_done);
+
+	pr_debug("%s: IRQ from GPIO INTR = %d\n",
+		__func__, mhl_ctrl->i2c_handle->irq);
+	pr_debug("%s: Driver name = [%s]\n", __func__,
+		client->dev.driver->name);
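+	/*
+	 * The INT line is configured level-low with IRQF_ONESHOT, so it
+	 * stays masked until the threaded handler has cleared the
+	 * chip's interrupt sources over I2C.
+	 */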
+	rc = request_threaded_irq(mhl_ctrl->i2c_handle->irq, NULL,
+				   &mhl_tx_isr,
+				  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				 client->dev.driver->name, mhl_ctrl);
+	if (rc) {
+		pr_err("request_threaded_irq failed, status: %d\n",
+		       rc);
+		goto failed_probe;
+	} else {
+		pr_debug("request_threaded_irq succeeded\n");
+	}
+
+	mhl_ctrl->mhl_psy.name = "ext-vbus";
+	mhl_ctrl->mhl_psy.type = POWER_SUPPLY_TYPE_USB_DCP;
+	mhl_ctrl->mhl_psy.supplied_to = mhl_pm_power_supplied_to;
+	mhl_ctrl->mhl_psy.num_supplicants = ARRAY_SIZE(
+					mhl_pm_power_supplied_to);
+	mhl_ctrl->mhl_psy.properties = mhl_pm_power_props;
+	mhl_ctrl->mhl_psy.num_properties = ARRAY_SIZE(mhl_pm_power_props);
+	mhl_ctrl->mhl_psy.get_property = mhl_power_get_property;
+	mhl_ctrl->mhl_psy.set_property = mhl_power_set_property;
+
+	rc = power_supply_register(&client->dev, &mhl_ctrl->mhl_psy);
+	if (rc < 0) {
+		dev_err(&client->dev, "%s:power_supply_register ext_vbus_psy failed\n",
+			__func__);
+		goto failed_probe;
+	}
+
+	hdmi_mhl_ops = devm_kzalloc(&client->dev,
+				    sizeof(struct msm_hdmi_mhl_ops),
+				    GFP_KERNEL);
+	if (!hdmi_mhl_ops) {
+		pr_err("%s: alloc hdmi mhl ops failed\n", __func__);
+		rc = -ENOMEM;
+		goto failed_probe_pwr;
+	}
+
+	pr_debug("%s: i2c client addr is [%x]\n", __func__, client->addr);
+	if (mhl_ctrl->pdata->hdmi_pdev) {
+		rc = msm_hdmi_register_mhl(mhl_ctrl->pdata->hdmi_pdev,
+					   hdmi_mhl_ops);
+		if (rc) {
+			pr_err("%s: register with hdmi failed\n", __func__);
+			rc = -EPROBE_DEFER;
+			goto failed_probe_pwr;
+		}
+	}
+
+	if (!hdmi_mhl_ops || !hdmi_mhl_ops->tmds_enabled ||
+	    !hdmi_mhl_ops->set_mhl_max_pclk) {
+		pr_err("%s: func ptr is NULL\n", __func__);
+		rc = -EINVAL;
+		goto failed_probe_pwr;
+	}
+	mhl_ctrl->hdmi_mhl_ops = hdmi_mhl_ops;
+
+	rc = hdmi_mhl_ops->set_mhl_max_pclk(
+		mhl_ctrl->pdata->hdmi_pdev, MAX_MHL_PCLK);
+	if (rc) {
+		pr_err("%s: can't set max mhl pclk\n", __func__);
+		goto failed_probe_pwr;
+	}
+
+	mhl_info = devm_kzalloc(&client->dev, sizeof(*mhl_info), GFP_KERNEL);
+	if (!mhl_info) {
+		pr_err("%s: alloc mhl info failed\n", __func__);
+		rc = -ENOMEM;
+		goto failed_probe_pwr;
+	}
+
+	mhl_info->ctxt = mhl_ctrl;
+	mhl_info->notify = mhl_sii_device_discovery;
+	if (msm_register_usb_ext_notification(mhl_info)) {
+		pr_err("%s: register for usb notifcn failed\n", __func__);
+		rc = -EPROBE_DEFER;
+		goto failed_probe_pwr;
+	}
+	mhl_ctrl->mhl_info = mhl_info;
+	mhl_register_msc(mhl_ctrl);
+	return 0;
+
+failed_probe_pwr:
+	power_supply_unregister(&mhl_ctrl->mhl_psy);
+failed_probe:
+	free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
+	mhl_gpio_config(mhl_ctrl, 0);
+	mhl_vreg_config(mhl_ctrl, 0);
+	/* do not deep-free */
+	if (mhl_info)
+		devm_kfree(&client->dev, mhl_info);
+failed_dt_data:
+	if (pdata)
+		devm_kfree(&client->dev, pdata);
+failed_no_mem:
+	if (mhl_ctrl)
+		devm_kfree(&client->dev, mhl_ctrl);
+	mhl_info = NULL;
+	pdata = NULL;
+	mhl_ctrl = NULL;
+	pr_err("%s: PROBE FAILED, rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static int mhl_i2c_remove(struct i2c_client *client)
+{
+	struct mhl_tx_ctrl *mhl_ctrl = i2c_get_clientdata(client);
+
+	if (!mhl_ctrl) {
+		pr_warn("%s: i2c get client data failed\n", __func__);
+		return -EINVAL;
+	}
+
+	free_irq(mhl_ctrl->i2c_handle->irq, mhl_ctrl);
+	mhl_gpio_config(mhl_ctrl, 0);
+	mhl_vreg_config(mhl_ctrl, 0);
+	if (mhl_ctrl->mhl_info)
+		devm_kfree(&client->dev, mhl_ctrl->mhl_info);
+	if (mhl_ctrl->pdata)
+		devm_kfree(&client->dev, mhl_ctrl->pdata);
+	devm_kfree(&client->dev, mhl_ctrl);
+	return 0;
+}
+
+static struct i2c_device_id mhl_sii_i2c_id[] = {
+	{ MHL_DRIVER_NAME, 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, mhl_sii_i2c_id);
+
+static struct of_device_id mhl_match_table[] = {
+	{.compatible = COMPATIBLE_NAME,},
+	{ },
+};
+
+static struct i2c_driver mhl_sii_i2c_driver = {
+	.driver = {
+		.name = MHL_DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = mhl_match_table,
+	},
+	.probe = mhl_i2c_probe,
+	.remove =  mhl_i2c_remove,
+	.id_table = mhl_sii_i2c_id,
+};
+
+module_i2c_driver(mhl_sii_i2c_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHL SII 8334 TX Driver");
diff --git a/drivers/video/msm/mdss/msm_mdss_io_8974.c b/drivers/video/msm/mdss/msm_mdss_io_8974.c
index b92a6ca..2b07e43 100644
--- a/drivers/video/msm/mdss/msm_mdss_io_8974.c
+++ b/drivers/video/msm/mdss/msm_mdss_io_8974.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
  */
 #include <linux/clk.h>
 #include <linux/interrupt.h>
+#include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/io.h>
 
@@ -19,6 +20,7 @@
 #include <mach/msm_iomap.h>
 
 #include "mdss_dsi.h"
+#include "mdss_edp.h"
 
 #define SW_RESET BIT(2)
 #define SW_RESET_PLL BIT(0)
@@ -26,42 +28,53 @@
 
 static struct dsi_clk_desc dsi_pclk;
 
-static struct clk *dsi_byte_div_clk;
-static struct clk *dsi_esc_clk;
-
-int mdss_dsi_clk_on;
-
-int mdss_dsi_clk_init(struct platform_device *pdev)
+int mdss_dsi_clk_init(struct platform_device *pdev,
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
-	struct device *dev = &pdev->dev;
+	struct device *dev = NULL;
 
-	dsi_byte_div_clk = clk_get(dev, "byte_clk");
-	if (IS_ERR(dsi_byte_div_clk)) {
-		pr_err("can't find dsi_byte_div_clk\n");
-		dsi_byte_div_clk = NULL;
+	if (!pdev) {
+		pr_err("%s: Invalid pdev\n", __func__);
 		goto mdss_dsi_clk_err;
 	}
 
-	dsi_esc_clk = clk_get(dev, "core_clk");
-	if (IS_ERR(dsi_esc_clk)) {
-		printk(KERN_ERR "can't find dsi_esc_clk\n");
-		dsi_esc_clk = NULL;
+	dev = &pdev->dev;
+	ctrl_pdata->byte_clk = clk_get(dev, "byte_clk");
+	if (IS_ERR(ctrl_pdata->byte_clk)) {
+		pr_err("can't find dsi_byte_clk\n");
+		ctrl_pdata->byte_clk = NULL;
+		goto mdss_dsi_clk_err;
+	}
+
+	ctrl_pdata->pixel_clk = clk_get(dev, "pixel_clk");
+	if (IS_ERR(ctrl_pdata->pixel_clk)) {
+		pr_err("can't find dsi_pixel_clk\n");
+		ctrl_pdata->pixel_clk = NULL;
+		goto mdss_dsi_clk_err;
+	}
+
+	ctrl_pdata->esc_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(ctrl_pdata->esc_clk)) {
+		pr_err("can't find dsi_esc_clk\n");
+		ctrl_pdata->esc_clk = NULL;
 		goto mdss_dsi_clk_err;
 	}
 
 	return 0;
 
 mdss_dsi_clk_err:
-	mdss_dsi_clk_deinit(dev);
+	mdss_dsi_clk_deinit(ctrl_pdata);
 	return -EPERM;
 }
 
-void mdss_dsi_clk_deinit(struct device *dev)
+void mdss_dsi_clk_deinit(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
-	if (dsi_byte_div_clk)
-		clk_put(dsi_byte_div_clk);
-	if (dsi_esc_clk)
-		clk_put(dsi_esc_clk);
+	if (ctrl_pdata->byte_clk)
+		clk_put(ctrl_pdata->byte_clk);
+	if (ctrl_pdata->esc_clk)
+		clk_put(ctrl_pdata->esc_clk);
+	if (ctrl_pdata->pixel_clk)
+		clk_put(ctrl_pdata->pixel_clk);
 }
 
 #define PREF_DIV_RATIO 27
@@ -143,57 +156,556 @@
 	return 0;
 }
 
-void cont_splash_clk_ctrl(int enable)
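+/*
+ * clk_prepare() may sleep, so it is kept separate from clk_enable(),
+ * which must remain callable from atomic context; callers prepare the
+ * clocks once and then gate them cheaply with enable/disable.
+ */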
+void mdss_dsi_prepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
 {
-	static int cont_splash_clks_enabled;
-	if (enable && !cont_splash_clks_enabled) {
-			clk_prepare_enable(dsi_byte_div_clk);
-			clk_prepare_enable(dsi_esc_clk);
-			cont_splash_clks_enabled = 1;
-	} else if (!enable && cont_splash_clks_enabled) {
-			clk_disable_unprepare(dsi_byte_div_clk);
-			clk_disable_unprepare(dsi_esc_clk);
-			cont_splash_clks_enabled = 0;
+	clk_prepare(ctrl_pdata->byte_clk);
+	clk_prepare(ctrl_pdata->esc_clk);
+	clk_prepare(ctrl_pdata->pixel_clk);
+}
+
+void mdss_dsi_unprepare_clocks(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
+{
+	clk_unprepare(ctrl_pdata->esc_clk);
+	clk_unprepare(ctrl_pdata->pixel_clk);
+	clk_unprepare(ctrl_pdata->byte_clk);
+}
+
+void mdss_dsi_clk_enable(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+	u32 esc_clk_rate = 19200000;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	if (!ctrl_pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
 	}
-}
 
-void mdss_dsi_prepare_clocks(void)
-{
-	clk_prepare(dsi_byte_div_clk);
-	clk_prepare(dsi_esc_clk);
-}
-
-void mdss_dsi_unprepare_clocks(void)
-{
-	clk_unprepare(dsi_esc_clk);
-	clk_unprepare(dsi_byte_div_clk);
-}
-
-void mdss_dsi_clk_enable(void)
-{
-	if (mdss_dsi_clk_on) {
+	if (ctrl_pdata->mdss_dsi_clk_on) {
 		pr_info("%s: mdss_dsi_clks already ON\n", __func__);
 		return;
 	}
 
-	if (clk_set_rate(dsi_byte_div_clk, 1) < 0)	/* divided by 1 */
-		pr_err("%s: dsi_byte_div_clk - clk_set_rate failed\n",
-					__func__);
-	if (clk_set_rate(dsi_esc_clk, 2) < 0) /* divided by 2 */
-		pr_err("%s: dsi_esc_clk - clk_set_rate failed\n",
-					__func__);
-	clk_enable(dsi_byte_div_clk);
-	clk_enable(dsi_esc_clk);
-	mdss_dsi_clk_on = 1;
+	pr_debug("%s: Setting clock rates: pclk=%d, byteclk=%d escclk=%d\n",
+			__func__, ctrl_pdata->pclk_rate,
+			ctrl_pdata->byte_clk_rate, esc_clk_rate);
+	if (clk_set_rate(ctrl_pdata->esc_clk, esc_clk_rate) < 0)
+		pr_err("%s: dsi_esc_clk - clk_set_rate failed\n", __func__);
+
+	if (clk_set_rate(ctrl_pdata->byte_clk, ctrl_pdata->byte_clk_rate) < 0)
+		pr_err("%s: dsi_byte_clk - clk_set_rate failed\n", __func__);
+
+	if (clk_set_rate(ctrl_pdata->pixel_clk, ctrl_pdata->pclk_rate) < 0)
+		pr_err("%s: dsi_pixel_clk - clk_set_rate failed\n", __func__);
+
+	clk_enable(ctrl_pdata->esc_clk);
+	clk_enable(ctrl_pdata->byte_clk);
+	clk_enable(ctrl_pdata->pixel_clk);
+
+	ctrl_pdata->mdss_dsi_clk_on = 1;
 }
 
-void mdss_dsi_clk_disable(void)
+void mdss_dsi_clk_disable(struct mdss_panel_data *pdata)
 {
-	if (mdss_dsi_clk_on == 0) {
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	if (!ctrl_pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	if (ctrl_pdata->mdss_dsi_clk_on == 0) {
 		pr_info("%s: mdss_dsi_clks already OFF\n", __func__);
 		return;
 	}
-	clk_disable(dsi_esc_clk);
-	clk_disable(dsi_byte_div_clk);
-	mdss_dsi_clk_on = 0;
+
+	clk_disable(ctrl_pdata->pixel_clk);
+	clk_disable(ctrl_pdata->byte_clk);
+	clk_disable(ctrl_pdata->esc_clk);
+
+	ctrl_pdata->mdss_dsi_clk_on = 0;
+}
+
+void mdss_dsi_phy_sw_reset(unsigned char *ctrl_base)
+{
+	/* start phy sw reset */
+	MIPI_OUTP(ctrl_base + 0x12c, 0x0001);
+	udelay(1000);
+	wmb();
+	/* end phy sw reset */
+	MIPI_OUTP(ctrl_base + 0x12c, 0x0000);
+	udelay(100);
+	wmb();
+}
+
+void mdss_dsi_phy_enable(unsigned char *ctrl_base, int on)
+{
+	if (on) {
+		MIPI_OUTP(ctrl_base + 0x03cc, 0x03);
+		wmb();
+		usleep(100);
+		MIPI_OUTP(ctrl_base + 0x0220, 0x006);
+		wmb();
+		usleep(100);
+		MIPI_OUTP(ctrl_base + 0x0268, 0x001);
+		wmb();
+		usleep(100);
+		MIPI_OUTP(ctrl_base + 0x0268, 0x000);
+		wmb();
+		usleep(100);
+		MIPI_OUTP(ctrl_base + 0x0220, 0x007);
+		wmb();
+		MIPI_OUTP(ctrl_base + 0x03cc, 0x01);
+		wmb();
+		usleep(100);
+
+		/* MMSS_DSI_0_PHY_DSIPHY_CTRL_0 */
+		MIPI_OUTP(ctrl_base + 0x0470, 0x07e);
+		MIPI_OUTP(ctrl_base + 0x0470, 0x06e);
+		MIPI_OUTP(ctrl_base + 0x0470, 0x06c);
+		MIPI_OUTP(ctrl_base + 0x0470, 0x064);
+		MIPI_OUTP(ctrl_base + 0x0470, 0x065);
+		MIPI_OUTP(ctrl_base + 0x0470, 0x075);
+		MIPI_OUTP(ctrl_base + 0x0470, 0x077);
+		MIPI_OUTP(ctrl_base + 0x0470, 0x07f);
+		wmb();
+
+	} else {
+		MIPI_OUTP(ctrl_base + 0x0220, 0x006);
+		usleep(10);
+		MIPI_OUTP(ctrl_base + 0x0470, 0x000);
+		wmb();
+	}
+}
+
+void mdss_dsi_phy_init(struct mdss_panel_data *pdata)
+{
+	struct mdss_dsi_phy_ctrl *pd;
+	int i, off, ln, offset;
+	struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
+
+	ctrl_pdata = container_of(pdata, struct mdss_dsi_ctrl_pdata,
+				panel_data);
+	if (!ctrl_pdata) {
+		pr_err("%s: Invalid input data\n", __func__);
+		return;
+	}
+
+	pd = ((ctrl_pdata->panel_data).panel_info.mipi).dsi_phy_db;
+
+	/* Strength ctrl 0 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0484, pd->strength[0]);
+
+	/*
+	 * PHY regulator ctrl settings; both DSI controllers share a
+	 * single regulator block.
+	 */
+	if ((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1)
+		off = 0x0580;
+	else
+		off = 0x0580 - 0x600;
+
+	/* Regulator ctrl 0 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 0), 0x0);
+	/* Regulator ctrl - CAL_PWR_CFG */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 6), pd->regulator[6]);
+
+	/* Regulator ctrl - TEST */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 5), pd->regulator[5]);
+	/* Regulator ctrl 3 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 3), pd->regulator[3]);
+	/* Regulator ctrl 2 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 2), pd->regulator[2]);
+	/* Regulator ctrl 1 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 1), pd->regulator[1]);
+	/* Regulator ctrl 0 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 0), pd->regulator[0]);
+	/* Regulator ctrl 4 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + off + (4 * 4), pd->regulator[4]);
+
+	/* LDO ctrl 0: the same value applies to either display */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x4dc, 0x00);
+
+	off = 0x0440;	/* phy timing ctrl 0 - 11 */
+	for (i = 0; i < 12; i++) {
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + off, pd->timing[i]);
+		wmb();
+		off += 4;
+	}
+
+	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_1 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0474, 0x00);
+	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_0 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0470, 0x5f);
+	wmb();
+
+	/* Strength ctrl 1 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0488, pd->strength[1]);
+	wmb();
+
+	/* 4 lanes + clk lane configuration */
+	/* lane config n * (0 - 4) & DataPath setup */
+	for (ln = 0; ln < 5; ln++) {
+		off = 0x0300 + (ln * 0x40);
+		for (i = 0; i < 9; i++) {
+			offset = i + (ln * 9);
+			MIPI_OUTP((ctrl_pdata->ctrl_base) + off,
+							pd->laneCfg[offset]);
+			wmb();
+			off += 4;
+		}
+	}
+
+	/* MMSS_DSI_0_PHY_DSIPHY_CTRL_0 */
+	MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x0470, 0x5f);
+	wmb();
+
+	/* DSI_0_PHY_DSIPHY_GLBL_TEST_CTRL */
+	if ((ctrl_pdata->panel_data).panel_info.pdest == DISPLAY_1)
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x04d4, 0x01);
+	else
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x04d4, 0x00);
+	wmb();
+
+	off = 0x04b4;	/* phy BIST ctrl 0 - 5 */
+	for (i = 0; i < 6; i++) {
+		MIPI_OUTP((ctrl_pdata->ctrl_base) + off, pd->bistCtrl[i]);
+		wmb();
+		off += 4;
+	}
+}
+
+/* EDP phy configuration settings */
+void mdss_edp_phy_sw_reset(unsigned char *edp_base)
+{
+	/* phy sw reset */
+	edp_write(edp_base + 0x74, 0x100); /* EDP_PHY_CTRL */
+	wmb();
+	usleep(1);
+	edp_write(edp_base + 0x74, 0x000); /* EDP_PHY_CTRL */
+	wmb();
+	usleep(1);
+
+	/* phy PLL sw reset */
+	edp_write(edp_base + 0x74, 0x001); /* EDP_PHY_CTRL */
+	wmb();
+	usleep(1);
+	edp_write(edp_base + 0x74, 0x000); /* EDP_PHY_CTRL */
+	wmb();
+	usleep(1);
+}
+
+void mdss_edp_hw_powerup(unsigned char *edp_base, int enable)
+{
+	int ret = 0;
+
+	if (enable) {
+		/* EDP_PHY_EDPPHY_GLB_PD_CTL */
+		edp_write(edp_base + 0x52c, 0x3f);
+		/* EDP_PHY_EDPPHY_GLB_CFG */
+		edp_write(edp_base + 0x528, 0x1);
+		/* EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG */
+		edp_write(edp_base + 0x620, 0xf);
+		/* EDP_AUX_CTRL */
+		ret = edp_read(edp_base + 0x300);
+		edp_write(edp_base + 0x300, ret | 0x1);
+	} else {
+		/* EDP_PHY_EDPPHY_GLB_PD_CTL */
+		edp_write(edp_base + 0x52c, 0xc0);
+	}
+}
+
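+/*
+ * Only two PLL configurations are tabulated: 138.5 MHz matches the
+ * pixel-clock rate requested in mdss_edp_clk_enable() below, while the
+ * 810 MHz table presumably serves a higher-rate link setup.
+ */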
+void mdss_edp_pll_configure(unsigned char *edp_base, int rate)
+{
+	if (rate == 810000000) {
+		edp_write(edp_base + 0x60c, 0x18);
+		edp_write(edp_base + 0x664, 0x5);
+		edp_write(edp_base + 0x600, 0x0);
+		edp_write(edp_base + 0x638, 0x36);
+		edp_write(edp_base + 0x63c, 0x69);
+		edp_write(edp_base + 0x640, 0xff);
+		edp_write(edp_base + 0x644, 0x2f);
+		edp_write(edp_base + 0x648, 0x0);
+		edp_write(edp_base + 0x66c, 0x0a);
+		edp_write(edp_base + 0x674, 0x01);
+		edp_write(edp_base + 0x684, 0x5a);
+		edp_write(edp_base + 0x688, 0x0);
+		edp_write(edp_base + 0x68c, 0x60);
+		edp_write(edp_base + 0x690, 0x0);
+		edp_write(edp_base + 0x694, 0x2a);
+		edp_write(edp_base + 0x698, 0x3);
+		edp_write(edp_base + 0x65c, 0x10);
+		edp_write(edp_base + 0x660, 0x1a);
+		edp_write(edp_base + 0x604, 0x0);
+		edp_write(edp_base + 0x624, 0x0);
+		edp_write(edp_base + 0x628, 0x0);
+
+		edp_write(edp_base + 0x620, 0x1);
+		edp_write(edp_base + 0x620, 0x5);
+		edp_write(edp_base + 0x620, 0x7);
+		edp_write(edp_base + 0x620, 0xf);
+
+	} else if (rate == 138500000) {
+		edp_write(edp_base + 0x664, 0x5); /* UNIPHY_PLL_LKDET_CFG2 */
+		edp_write(edp_base + 0x600, 0x1); /* UNIPHY_PLL_REFCLK_CFG */
+		edp_write(edp_base + 0x638, 0x36); /* UNIPHY_PLL_SDM_CFG0 */
+		edp_write(edp_base + 0x63c, 0x62); /* UNIPHY_PLL_SDM_CFG1 */
+		edp_write(edp_base + 0x640, 0x0); /* UNIPHY_PLL_SDM_CFG2 */
+		edp_write(edp_base + 0x644, 0x28); /* UNIPHY_PLL_SDM_CFG3 */
+		edp_write(edp_base + 0x648, 0x0); /* UNIPHY_PLL_SDM_CFG4 */
+		edp_write(edp_base + 0x64c, 0x80); /* UNIPHY_PLL_SSC_CFG0 */
+		edp_write(edp_base + 0x650, 0x0); /* UNIPHY_PLL_SSC_CFG1 */
+		edp_write(edp_base + 0x654, 0x0); /* UNIPHY_PLL_SSC_CFG2 */
+		edp_write(edp_base + 0x658, 0x0); /* UNIPHY_PLL_SSC_CFG3 */
+		edp_write(edp_base + 0x66c, 0xa); /* UNIPHY_PLL_CAL_CFG0 */
+		edp_write(edp_base + 0x674, 0x1); /* UNIPHY_PLL_CAL_CFG2 */
+		edp_write(edp_base + 0x684, 0x5a); /* UNIPHY_PLL_CAL_CFG6 */
+		edp_write(edp_base + 0x688, 0x0); /* UNIPHY_PLL_CAL_CFG7 */
+		edp_write(edp_base + 0x68c, 0x60); /* UNIPHY_PLL_CAL_CFG8 */
+		edp_write(edp_base + 0x690, 0x0); /* UNIPHY_PLL_CAL_CFG9 */
+		edp_write(edp_base + 0x694, 0x46); /* UNIPHY_PLL_CAL_CFG10 */
+		edp_write(edp_base + 0x698, 0x5); /* UNIPHY_PLL_CAL_CFG11 */
+		edp_write(edp_base + 0x65c, 0x10); /* UNIPHY_PLL_LKDET_CFG0 */
+		edp_write(edp_base + 0x660, 0x1a); /* UNIPHY_PLL_LKDET_CFG1 */
+		edp_write(edp_base + 0x604, 0x0); /* UNIPHY_PLL_POSTDIV1_CFG */
+		edp_write(edp_base + 0x624, 0x0); /* UNIPHY_PLL_POSTDIV2_CFG */
+		edp_write(edp_base + 0x628, 0x0); /* UNIPHY_PLL_POSTDIV3_CFG */
+
+		edp_write(edp_base + 0x620, 0x1); /* UNIPHY_PLL_GLB_CFG */
+		edp_write(edp_base + 0x620, 0x5); /* UNIPHY_PLL_GLB_CFG */
+		edp_write(edp_base + 0x620, 0x7); /* UNIPHY_PLL_GLB_CFG */
+		edp_write(edp_base + 0x620, 0xf); /* UNIPHY_PLL_GLB_CFG */
+	} else {
+		pr_err("%s: Unknown configuration rate\n", __func__);
+	}
+}
+
+void mdss_edp_enable_aux(unsigned char *edp_base, int enable)
+{
+	if (!enable) {
+		edp_write(edp_base + 0x300, 0); /* EDP_AUX_CTRL */
+		return;
+	}
+
+	/*reset AUX */
+	edp_write(edp_base + 0x300, BIT(1)); /* EDP_AUX_CTRL */
+	edp_write(edp_base + 0x300, 0); /* EDP_AUX_CTRL */
+
+	/* Enable AUX */
+	edp_write(edp_base + 0x300, BIT(0)); /* EDP_AUX_CTRL */
+
+	edp_write(edp_base + 0x550, 0x2c); /* AUX_CFG0 */
+	edp_write(edp_base + 0x308, 0xffffffff); /* INTR_STATUS */
+	edp_write(edp_base + 0x568, 0xff); /* INTR_MASK */
+}
+
+void mdss_edp_enable_mainlink(unsigned char *edp_base, int enable)
+{
+	u32 data;
+
+	data = edp_read(edp_base + 0x004);
+	data &= ~BIT(0);
+
+	if (enable) {
+		data |= 0x1;
+		edp_write(edp_base + 0x004, data);
+		edp_write(edp_base + 0x004, 0x1);
+	} else {
+		edp_write(edp_base + 0x004, data);
+	}
+}
+
+void mdss_edp_enable_lane_bist(unsigned char *edp_base, int lane, int enable)
+{
+	unsigned char *addr_ln_bist_cfg, *addr_ln_pd_ctrl;
+
+	/* EDP_PHY_EDPPHY_LNn_PD_CTL */
+	addr_ln_pd_ctrl = edp_base + 0x404 + (0x40 * lane);
+	/* EDP_PHY_EDPPHY_LNn_BIST_CFG0 */
+	addr_ln_bist_cfg = edp_base + 0x408 + (0x40 * lane);
+
+	if (enable) {
+		edp_write(addr_ln_pd_ctrl, 0x0);
+		edp_write(addr_ln_bist_cfg, 0x10);
+
+	} else {
+		edp_write(addr_ln_pd_ctrl, 0xf);
+		edp_write(addr_ln_bist_cfg, 0x10);
+	}
+}
+
+void mdss_edp_clk_deinit(struct mdss_edp_drv_pdata *edp_drv)
+{
+	if (edp_drv->aux_clk)
+		clk_put(edp_drv->aux_clk);
+	if (edp_drv->pixel_clk)
+		clk_put(edp_drv->pixel_clk);
+	if (edp_drv->ahb_clk)
+		clk_put(edp_drv->ahb_clk);
+	if (edp_drv->link_clk)
+		clk_put(edp_drv->link_clk);
+}
+
+int mdss_edp_clk_init(struct mdss_edp_drv_pdata *edp_drv)
+{
+	struct device *dev = &(edp_drv->pdev->dev);
+
+	edp_drv->aux_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(edp_drv->aux_clk)) {
+		pr_err("%s: Can't find aux_clk", __func__);
+		edp_drv->aux_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	edp_drv->pixel_clk = clk_get(dev, "pixel_clk");
+	if (IS_ERR(edp_drv->pixel_clk)) {
+		pr_err("%s: Can't find pixel_clk", __func__);
+		edp_drv->pixel_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	edp_drv->ahb_clk = clk_get(dev, "iface_clk");
+	if (IS_ERR(edp_drv->ahb_clk)) {
+		pr_err("%s: Can't find ahb_clk", __func__);
+		edp_drv->ahb_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	edp_drv->link_clk = clk_get(dev, "link_clk");
+	if (IS_ERR(edp_drv->link_clk)) {
+		pr_err("%s: Can't find link_clk", __func__);
+		edp_drv->link_clk = NULL;
+		goto mdss_edp_clk_err;
+	}
+
+	return 0;
+
+mdss_edp_clk_err:
+	mdss_edp_clk_deinit(edp_drv);
+	return -EPERM;
+}
+
+void mdss_edp_clk_enable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	if (edp_drv->clk_on) {
+		pr_info("%s: edp clks are already ON\n", __func__);
+		return;
+	}
+
+	if (clk_set_rate(edp_drv->aux_clk, 19200000) < 0)
+		pr_err("%s: aux_clk - clk_set_rate failed\n",
+					__func__);
+
+	if (clk_set_rate(edp_drv->pixel_clk, 138500000) < 0)
+		pr_err("%s: pixel_clk - clk_set_rate failed\n",
+					__func__);
+
+	if (clk_set_rate(edp_drv->link_clk, 270000000) < 0)
+		pr_err("%s: link_clk - clk_set_rate failed\n",
+					__func__);
+
+	clk_enable(edp_drv->aux_clk);
+	clk_enable(edp_drv->pixel_clk);
+	clk_enable(edp_drv->ahb_clk);
+	clk_enable(edp_drv->link_clk);
+
+	edp_drv->clk_on = 1;
+}
+
+void mdss_edp_clk_disable(struct mdss_edp_drv_pdata *edp_drv)
+{
+	if (edp_drv->clk_on == 0) {
+		pr_info("%s: edp clks are already OFF\n", __func__);
+		return;
+	}
+
+	clk_disable(edp_drv->aux_clk);
+	clk_disable(edp_drv->pixel_clk);
+	clk_disable(edp_drv->ahb_clk);
+	clk_disable(edp_drv->link_clk);
+
+	edp_drv->clk_on = 0;
+}
+
+void mdss_edp_prepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	clk_prepare(edp_drv->aux_clk);
+	clk_prepare(edp_drv->pixel_clk);
+	clk_prepare(edp_drv->ahb_clk);
+	clk_prepare(edp_drv->link_clk);
+}
+
+void mdss_edp_unprepare_clocks(struct mdss_edp_drv_pdata *edp_drv)
+{
+	clk_unprepare(edp_drv->aux_clk);
+	clk_unprepare(edp_drv->pixel_clk);
+	clk_unprepare(edp_drv->ahb_clk);
+	clk_unprepare(edp_drv->link_clk);
+}
+
+void mdss_edp_enable_pixel_clk(unsigned char *edp_base,
+		unsigned char *mmss_cc_base, int enable)
+{
+	if (!enable) {
+		edp_write(mmss_cc_base + 0x032c, 0); /* CBCR */
+		return;
+	}
+
+	edp_write(edp_base + 0x624, 0x1); /* PostDiv2 */
+
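+	/*
+	 * The pixel clock is derived from the PLL output through the
+	 * RCG's M/N:D counter programmed below (values taken as given;
+	 * the encoding follows the MSM clock-controller convention).
+	 */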
+	/* Configuring MND for Pixel */
+	edp_write(mmss_cc_base + 0x00a8, 0x3f); /* M value */
+	edp_write(mmss_cc_base + 0x00ac, 0xb); /* N value */
+	edp_write(mmss_cc_base + 0x00b0, 0x0); /* D value */
+
+	/* CFG RCGR */
+	edp_write(mmss_cc_base + 0x00a4, (5 << 8) | (2 << 12));
+	edp_write(mmss_cc_base + 0x00a0, 3); /* CMD RCGR */
+
+	edp_write(mmss_cc_base + 0x032c, 1); /* CBCR */
+}
+
+void mdss_edp_enable_link_clk(unsigned char *mmss_cc_base, int enable)
+{
+	if (!enable) {
+		edp_write(mmss_cc_base + 0x0330, 0); /* CBCR */
+		return;
+	}
+
+	edp_write(mmss_cc_base + 0x00c4, (4 << 8)); /* CFG RCGR */
+	edp_write(mmss_cc_base + 0x00c0, 3); /* CMD RCGR */
+
+	edp_write(mmss_cc_base + 0x0330, 1); /* CBCR */
+}
+
+void mdss_edp_config_clk(unsigned char *edp_base, unsigned char *mmss_cc_base)
+{
+	mdss_edp_enable_link_clk(mmss_cc_base, 1);
+	mdss_edp_enable_pixel_clk(edp_base, mmss_cc_base, 1);
+}
+
+void mdss_edp_unconfig_clk(unsigned char *edp_base,
+		unsigned char *mmss_cc_base)
+{
+	mdss_edp_enable_link_clk(mmss_cc_base, 0);
+	mdss_edp_enable_pixel_clk(edp_base, mmss_cc_base, 0);
+}
+
+void mdss_edp_phy_misc_cfg(unsigned char *edp_base)
+{
+	/* EDP_PHY_EDPPHY_GLB_VM_CFG0 */
+	edp_write(edp_base + 0x510, 0x3);
+	/* EDP_PHY_EDPPHY_GLB_VM_CFG1 */
+	edp_write(edp_base + 0x514, 0x64);
+	/* EDP_PHY_EDPPHY_GLB_MISC9 */
+	edp_write(edp_base + 0x518, 0x6c);
+	/* EDP_MISC1_MISC0 */
+	edp_write(edp_base + 0x2c, 0x1);
 }
diff --git a/drivers/video/msm/mdss/qpic_panel_ili_qvga.c b/drivers/video/msm/mdss/qpic_panel_ili_qvga.c
new file mode 100644
index 0000000..4459c6b
--- /dev/null
+++ b/drivers/video/msm/mdss/qpic_panel_ili_qvga.c
@@ -0,0 +1,226 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/memory.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/system.h>
+#include <asm/mach-types.h>
+
+#include "mdss_qpic.h"
+#include "mdss_qpic_panel.h"
+
+enum {
+	OP_ILI9341_TEARING_EFFECT_LINE_ON = OP_SIZE_PAIR(0x35, 1),
+	OP_ILI9341_INTERFACE_CONTROL = OP_SIZE_PAIR(0xf6, 3),
+	OP_ILI9341_WRITE_CTRL_DISPLAY  = OP_SIZE_PAIR(0x53, 1),
+	OP_ILI9341_POWER_CONTROL_A  = OP_SIZE_PAIR(0xcb, 5),
+	OP_ILI9341_POWER_CONTROL_B  = OP_SIZE_PAIR(0xcf, 3),
+	OP_ILI9341_DRIVER_TIMING_CONTROL_A  = OP_SIZE_PAIR(0xe8, 3),
+	OP_ILI9341_DRIVER_TIMING_CONTROL_B  = OP_SIZE_PAIR(0xea, 3),
+	OP_ILI9341_POWER_ON_SEQUENCE_CONTROL  = OP_SIZE_PAIR(0xed, 4),
+	OP_ILI9341_PUMP_RATIO_CONTROL  = OP_SIZE_PAIR(0xf7, 1),
+	OP_ILI9341_POWER_CONTROL_1  = OP_SIZE_PAIR(0xc0, 1),
+	OP_ILI9341_POWER_CONTROL_2  = OP_SIZE_PAIR(0xc1, 1),
+	OP_ILI9341_VCOM_CONTROL_1  = OP_SIZE_PAIR(0xc5, 2),
+	OP_ILI9341_VCOM_CONTROL_2  = OP_SIZE_PAIR(0xc7, 1),
+	OP_ILI9341_MEMORY_ACCESS_CONTROL  = OP_SIZE_PAIR(0x36, 1),
+	OP_ILI9341_FRAME_RATE_CONTROL  = OP_SIZE_PAIR(0xb1, 2),
+	OP_ILI9341_DISPLAY_FUNCTION_CONTROL = OP_SIZE_PAIR(0xb6, 4),
+	OP_ILI9341_ENABLE_3G = OP_SIZE_PAIR(0xf2, 1),
+	OP_ILI9341_COLMOD_PIXEL_FORMAT_SET = OP_SIZE_PAIR(0x3a, 1),
+	OP_ILI9341_GAMMA_SET = OP_SIZE_PAIR(0x26, 1),
+	OP_ILI9341_POSITIVE_GAMMA_CORRECTION = OP_SIZE_PAIR(0xe0, 15),
+	OP_ILI9341_NEGATIVE_GAMMA_CORRECTION = OP_SIZE_PAIR(0xe1, 15),
+	OP_ILI9341_READ_DISPLAY_ID = OP_SIZE_PAIR(0x04, 4),
+	OP_ILI9341_READ_DISPLAY_POWER_MODE = OP_SIZE_PAIR(0x0a, 2),
+	OP_ILI9341_READ_DISPLAY_MADCTL = OP_SIZE_PAIR(0x0b, 2),
+};
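+
+/*
+ * Each opcode above packs the ILI9341 command byte with its parameter
+ * count via OP_SIZE_PAIR(), so the QPIC helpers can derive how many
+ * parameter bytes follow each command (inferred from the pairing).
+ */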
+
+static int rst_gpio;
+static int cs_gpio;
+static int ad8_gpio;
+static int te_gpio;
+static struct regulator *vdd_vreg;
+static struct regulator *avdd_vreg;
+
+int ili9341_init(struct platform_device *pdev,
+			struct device_node *np)
+{
+	int rc = 0;
+	rst_gpio = of_get_named_gpio(np, "qcom,rst-gpio", 0);
+	cs_gpio = of_get_named_gpio(np, "qcom,cs-gpio", 0);
+	ad8_gpio = of_get_named_gpio(np, "qcom,ad8-gpio", 0);
+	te_gpio = of_get_named_gpio(np, "qcom,te-gpio", 0);
+	if (!gpio_is_valid(rst_gpio)) {
+		pr_err("%s: reset gpio not specified\n" , __func__);
+		return -EINVAL;
+	}
+	if (!gpio_is_valid(cs_gpio)) {
+		pr_err("%s: cs gpio not specified\n", __func__);
+		return -EINVAL;
+	}
+	if (!gpio_is_valid(ad8_gpio)) {
+		pr_err("%s: ad8 gpio not specified\n", __func__);
+		return -EINVAL;
+	}
+	if (!gpio_is_valid(te_gpio)) {
+		pr_err("%s: te gpio not specified\n", __func__);
+		return -EINVAL;
+	}
+	vdd_vreg = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(vdd_vreg)) {
+		pr_err("%s could not get vdd,", __func__);
+		return -ENODEV;
+	}
+	avdd_vreg = devm_regulator_get(&pdev->dev, "avdd");
+	if (IS_ERR(avdd_vreg)) {
+		pr_err("%s could not get avdd,", __func__);
+		return -ENODEV;
+	}
+	rc = regulator_set_voltage(vdd_vreg, 1800000, 1800000);
+	if (rc) {
+		pr_err("vdd_vreg->set_voltage failed, rc=%d\n", rc);
+		return -EINVAL;
+	}
+	rc = regulator_set_voltage(avdd_vreg, 2700000, 2700000);
+	if (rc) {
+		pr_err("vdd_vreg->set_voltage failed, rc=%d\n", rc);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int ili9341_panel_power_on(void)
+{
+	int rc;
+	rc = regulator_enable(vdd_vreg);
+	if (rc) {
+		pr_err("enable vdd failed, rc=%d\n", rc);
+		return -ENODEV;
+	}
+	rc = regulator_enable(avdd_vreg);
+	if (rc) {
+		pr_err("enable avdd failed, rc=%d\n", rc);
+		return -ENODEV;
+	}
+
+	if (gpio_request(rst_gpio, "disp_rst_n")) {
+		pr_err("%s request reset gpio failed\n", __func__);
+		return -EINVAL;
+	}
+	if (gpio_request(cs_gpio, "disp_cs_n")) {
+		gpio_free(rst_gpio);
+		pr_err("%s request cs gpio failed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (gpio_request(ad8_gpio, "disp_ad8_n")) {
+		gpio_free(cs_gpio);
+		gpio_free(rst_gpio);
+		pr_err("%s request ad8 gpio failed\n", __func__);
+		return -EINVAL;
+	}
+	if (gpio_request(te_gpio, "disp_te_n")) {
+		gpio_free(ad8_gpio);
+		gpio_free(cs_gpio);
+		gpio_free(rst_gpio);
+		pr_err("%s request te gpio failed\n", __func__);
+		return -EINVAL;
+	}
+	/* wait for 20 ms after enable gpio as suggested by hw */
+	msleep(20);
+	return 0;
+}
+
+static void ili9341_panel_power_off(void)
+{
+	gpio_free(ad8_gpio);
+	gpio_free(cs_gpio);
+	gpio_free(rst_gpio);
+	gpio_free(te_gpio);
+	regulator_disable(vdd_vreg);
+	regulator_disable(avdd_vreg);
+}
+
+int ili9341_on(void)
+{
+	u32 param[20];
+	int ret;
+	ret = ili9341_panel_power_on();
+	if (ret)
+		return ret;
+	qpic_panel_set_cmd_only(OP_SOFT_RESET);
+	/* wait for 120 ms after reset as panel spec suggests */
+	msleep(120);
+	qpic_panel_set_cmd_only(OP_SET_DISPLAY_OFF);
+	/* wait for 20 ms after display off */
+	msleep(20);
+
+	/* set memory access control */
+	param[0] = ((0x48)<<0) | ((0x00)<<8) | ((0x00)<<16) | ((0x00U)<<24U);
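+	/* 0x48 = MX | BGR per the ILI9341 MADCTL bit layout */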
+	qpic_send_panel_cmd(OP_ILI9341_MEMORY_ACCESS_CONTROL, param, 0);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* set COLMOD: Pixel Format Set */
+	param[0] = ((0x66)<<0) | ((0x00)<<8) | ((0x00)<<16) | ((0x00U)<<24U);
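+	/* 0x66 selects the 18-bit/pixel (RGB666) format */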
+	qpic_send_panel_cmd(OP_ILI9341_COLMOD_PIXEL_FORMAT_SET, param, 0);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* set interface */
+	param[0] = ((0x01)<<0) | ((0x00)<<8) | ((0x00)<<16) | ((0x00U)<<24U);
+	qpic_send_panel_cmd(OP_ILI9341_INTERFACE_CONTROL, &param[0], 0);
+	/* wait for 20 ms after command sent */
+	msleep(20);
+
+	/* exit sleep mode */
+	qpic_panel_set_cmd_only(OP_EXIT_SLEEP_MODE);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* normal mode */
+	qpic_panel_set_cmd_only(OP_ENTER_NORMAL_MODE);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* display on */
+	qpic_panel_set_cmd_only(OP_SET_DISPLAY_ON);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	/* tearing effect line on */
+	param[0] = ((0x00)<<0) | ((0x00)<<8) | ((0x00)<<16) | ((0x00U)<<24U);
+	qpic_send_panel_cmd(OP_ILI9341_TEARING_EFFECT_LINE_ON, param, 0);
+	/* wait for 20 ms after command sent as panel spec suggests */
+	msleep(20);
+
+	return 0;
+}
+
+void ili9341_off(void)
+{
+	ili9341_panel_power_off();
+}
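
One note on the qpic panel code above: ili9341_panel_power_on() re-frees every earlier gpio by hand at each failure point, and a failed gpio_request() leaves both regulators enabled. The usual kernel idiom is a goto-unwind ladder; a minimal sketch, assuming the same file-scope gpio and regulator variables:

static int ili9341_panel_power_on(void)
{
	int rc;

	rc = regulator_enable(vdd_vreg);
	if (rc)
		return rc;
	rc = regulator_enable(avdd_vreg);
	if (rc)
		goto err_avdd;

	rc = gpio_request(rst_gpio, "disp_rst_n");
	if (rc)
		goto err_rst;
	rc = gpio_request(cs_gpio, "disp_cs_n");
	if (rc)
		goto err_cs;
	rc = gpio_request(ad8_gpio, "disp_ad8_n");
	if (rc)
		goto err_ad8;
	rc = gpio_request(te_gpio, "disp_te_n");
	if (rc)
		goto err_te;

	/* wait for 20 ms after enabling the gpios, as suggested by hw */
	msleep(20);
	return 0;

err_te:
	gpio_free(ad8_gpio);
err_ad8:
	gpio_free(cs_gpio);
err_cs:
	gpio_free(rst_gpio);
err_rst:
	regulator_disable(avdd_vreg);
err_avdd:
	regulator_disable(vdd_vreg);
	return rc;
}

Each label releases exactly what the steps before the failing one acquired, so the cleanup stays in one place.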
diff --git a/drivers/video/msm/mhl/mhl_8334.c b/drivers/video/msm/mhl/mhl_8334.c
index 1a29677..2acf6f4 100644
--- a/drivers/video/msm/mhl/mhl_8334.c
+++ b/drivers/video/msm/mhl/mhl_8334.c
@@ -37,6 +37,11 @@
 #include "hdmi_msm.h"
 #include "mhl_i2c_utils.h"
 
+#define MSC_START_BIT_MSC_CMD		        (0x01 << 0)
+#define MSC_START_BIT_VS_CMD		        (0x01 << 1)
+#define MSC_START_BIT_READ_REG		        (0x01 << 2)
+#define MSC_START_BIT_WRITE_REG		        (0x01 << 3)
+#define MSC_START_BIT_WRITE_BURST	        (0x01 << 4)
 
 static struct i2c_device_id mhl_sii_i2c_id[] = {
 	{ MHL_DRIVER_NAME, 0 },
@@ -45,6 +50,7 @@
 
 struct mhl_msm_state_t *mhl_msm_state;
 spinlock_t mhl_state_lock;
+struct workqueue_struct *msc_send_workqueue;
 
 static int mhl_i2c_probe(struct i2c_client *client,\
 	const struct i2c_device_id *id);
@@ -55,6 +61,10 @@
 static irqreturn_t mhl_tx_isr(int irq, void *dev_id);
 void (*notify_usb_online)(int online);
 static void mhl_drive_hpd(uint8_t to_state);
+static int mhl_send_msc_command(struct msc_command_struct *req);
+static void list_cmd_put(struct msc_command_struct *cmd);
+static struct msc_command_struct *list_cmd_get(void);
+static void mhl_msc_send_work(struct work_struct *work);
 
 static struct i2c_driver mhl_sii_i2c_driver = {
 	.driver = {
@@ -224,12 +234,6 @@
 	return 0;
 }
 
-bool mhl_is_connected(void)
-{
-	return true;
-}
-
-
 /*  USB_HANDSHAKING FUNCTIONS */
 
 int mhl_device_discovery(const char *name, int *result)
@@ -530,34 +534,62 @@
 static int mhl_i2c_probe(struct i2c_client *client,
 	const struct i2c_device_id *id)
 {
-	int ret = -ENODEV;
+	int ret;
+	struct msm_mhl_platform_data *tmp = client->dev.platform_data;
+	if (!tmp->mhl_enabled) {
+		ret = -ENODEV;
+		pr_warn("MHL feautre left disabled\n");
+		goto probe_early_exit;
+	}
 	mhl_msm_state->mhl_data = kzalloc(sizeof(struct msm_mhl_platform_data),
 		GFP_KERNEL);
 	if (!(mhl_msm_state->mhl_data)) {
 		ret = -ENOMEM;
 		pr_err("MHL I2C Probe failed - no mem\n");
-		goto probe_exit;
+		goto probe_early_exit;
 	}
 	mhl_msm_state->i2c_client = client;
-
 	spin_lock_init(&mhl_state_lock);
-
 	i2c_set_clientdata(client, mhl_msm_state);
 	mhl_msm_state->mhl_data = client->dev.platform_data;
 	pr_debug("MHL: mhl_msm_state->mhl_data->irq=[%d]\n",
 		mhl_msm_state->mhl_data->irq);
+	msc_send_workqueue = create_workqueue("mhl_msc_cmd_queue");
 
+	mhl_msm_state->cur_state = POWER_STATE_D0_MHL;
 	/* Init GPIO stuff here */
 	ret = mhl_sii_gpio_setup(1);
-	if (ret == -1) {
+	if (ret) {
 		pr_err("MHL: mhl_gpio_init has failed\n");
 		ret = -ENODEV;
+		goto probe_early_exit;
+	}
+	mhl_sii_power_on();
+	/* MHL SII 8334 chip specific init */
+	mhl_chip_init();
+	init_completion(&mhl_msm_state->rgnd_done);
+	/* Request IRQ stuff here */
+	pr_debug("MHL: mhl_msm_state->mhl_data->irq=[%d]\n",
+		mhl_msm_state->mhl_data->irq);
+	ret = request_threaded_irq(mhl_msm_state->mhl_data->irq, NULL,
+				   &mhl_tx_isr,
+				 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+				 "mhl_tx_isr", mhl_msm_state);
+	if (ret) {
+		pr_err("request_threaded_irq failed, status: %d\n",
+			ret);
 		goto probe_exit;
+	} else {
+		pr_debug("request_threaded_irq succeeded\n");
 	}
 
-	mhl_sii_power_on();
+	INIT_WORK(&mhl_msm_state->mhl_msc_send_work, mhl_msc_send_work);
+	INIT_LIST_HEAD(&mhl_msm_state->list_cmd);
+	mhl_msm_state->msc_command_put_work = list_cmd_put;
+	mhl_msm_state->msc_command_get_work = list_cmd_get;
+	init_completion(&mhl_msm_state->msc_cmd_done);
 
-	pr_debug("I2C PROBE successful\n");
+	pr_debug("i2c probe successful\n");
 	return 0;
 
 probe_exit:
@@ -567,6 +599,7 @@
 		kfree(mhl_msm_state->mhl_data);
 		mhl_msm_state->mhl_data = NULL;
 	}
+probe_early_exit:
 	return ret;
 }
 
@@ -578,6 +611,46 @@
 	return 0;
 }
 
+static void list_cmd_put(struct msc_command_struct *cmd)
+{
+	struct msc_cmd_envelope *new_cmd;
+	new_cmd = vmalloc(sizeof(struct msc_cmd_envelope));
+	memcpy(&new_cmd->msc_cmd_msg, cmd,
+		sizeof(struct msc_command_struct));
+	/* Need to check for queue getting filled up */
+	list_add_tail(&new_cmd->msc_queue_envelope, &mhl_msm_state->list_cmd);
+}
+
+static struct msc_command_struct *list_cmd_get(void)
+{
+	struct msc_cmd_envelope *cmd_env =
+		list_first_entry(&mhl_msm_state->list_cmd,
+			struct msc_cmd_envelope, msc_queue_envelope);
+	list_del(&cmd_env->msc_queue_envelope);
+	return &cmd_env->msc_cmd_msg;
+}
+
+static void mhl_msc_send_work(struct work_struct *work)
+{
+	int ret;
+	/*
+	 * Remove item from the queue
+	 * and schedule it
+	 */
+	struct msc_command_struct *req;
+	while (!list_empty(&mhl_msm_state->list_cmd)) {
+		req = mhl_msm_state->msc_command_get_work();
+		ret = mhl_send_msc_command(req);
+		if (ret == -EAGAIN)
+			pr_err("MHL: Queue still busy!!\n");
+		else {
+			vfree(req);
+			pr_debug("MESSAGE SENT!!!!\n");
+		}
+	}
+}
+
+
 static int __init mhl_msm_init(void)
 {
 	int32_t     ret;
@@ -589,7 +662,6 @@
 		ret = -ENOMEM;
 		goto init_exit;
 	}
-
 	mhl_msm_state->i2c_client = NULL;
 	ret = i2c_add_driver(&mhl_sii_i2c_driver);
 	if (ret) {
@@ -598,6 +670,7 @@
 		goto init_exit;
 	} else {
 		if (mhl_msm_state->i2c_client == NULL) {
+			i2c_del_driver(&mhl_sii_i2c_driver);
 			pr_err("MHL: I2C driver add failed\n");
 			ret = -ENODEV;
 			goto init_exit;
@@ -605,31 +678,9 @@
 		pr_info("MHL: I2C driver added\n");
 	}
 
-	/* Request IRQ stuff here */
-	pr_debug("MHL: mhl_msm_state->mhl_data->irq=[%d]\n",
-		mhl_msm_state->mhl_data->irq);
-	ret = request_threaded_irq(mhl_msm_state->mhl_data->irq, NULL,
-				   &mhl_tx_isr,
-				 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-				 "mhl_tx_isr", mhl_msm_state);
-	if (ret != 0) {
-		pr_err("request_threaded_irq failed, status: %d\n",
-			ret);
-		ret = -EACCES; /* Error code???? */
-		goto init_exit;
-	} else
-		pr_debug("request_threaded_irq succeeded\n");
-
-	mhl_msm_state->cur_state = POWER_STATE_D0_MHL;
-
-	/* MHL SII 8334 chip specific init */
-	mhl_chip_init();
-	init_completion(&mhl_msm_state->rgnd_done);
 	return 0;
-
 init_exit:
 	pr_err("Exiting from the init with err\n");
-	i2c_del_driver(&mhl_sii_i2c_driver);
 	if (!mhl_msm_state) {
 		kfree(mhl_msm_state);
 		mhl_msm_state = NULL;
@@ -637,6 +688,16 @@
 	 return ret;
 }
 
+static void mhl_msc_sched_work(struct msc_command_struct *req)
+{
+	/*
+	 * Put an item to the queue
+	 * and schedule work
+	 */
+	mhl_msm_state->msc_command_put_work(req);
+	queue_work(msc_send_workqueue, &mhl_msm_state->mhl_msc_send_work);
+}
+
 static void switch_mode(enum mhl_st_type to_mode)
 {
 	unsigned long flags;
@@ -666,7 +727,8 @@
 			mhl_i2c_reg_write(TX_PAGE_3, 0x0030, 0xD0);
 			msleep(50);
 			mhl_i2c_reg_modify(TX_PAGE_3, 0x0010,
-				BIT1 | BIT0, BIT1);
+				BIT1 | BIT0, 0x00);
+			mhl_i2c_reg_modify(TX_PAGE_3, 0x003D, BIT0, 0x00);
 			spin_lock_irqsave(&mhl_state_lock, flags);
 			mhl_msm_state->cur_state = POWER_STATE_D3;
 			spin_unlock_irqrestore(&mhl_state_lock, flags);
@@ -741,7 +803,7 @@
 	 * Need to re-enable here
 	 */
 	val = mhl_i2c_reg_read(TX_PAGE_3, 0x10);
-	mhl_i2c_reg_write(TX_PAGE_3, 0x10, val | BIT(0));
+	mhl_i2c_reg_write(TX_PAGE_3, 0x10, val | BIT0);
 
 	return;
 }
@@ -783,9 +845,6 @@
 	if (0x02 == rgnd_imp) {
 		pr_debug("MHL: MHL DEVICE!!!\n");
 		mhl_i2c_reg_modify(TX_PAGE_3, 0x0018, BIT0, BIT0);
-		/*
-		 * Handling the MHL event in driver
-		 */
 		mhl_msm_state->mhl_mode = TRUE;
 		if (notify_usb_online)
 			notify_usb_online(1);
@@ -922,8 +981,7 @@
 	uint8_t intr_5_stat;
 
 	/*
-	 * Clear INT 5 ??
-	 * Probably need to revisit this later
+	 * Clear INT 5
 	 * INTR5 is related to FIFO underflow/overflow reset
 	 * which is handled in 8334 by auto FIFO reset
 	 */
@@ -959,16 +1017,455 @@
 	return;
 }
 
-/*
- * RCP, RAP messages - mandatory for compliance
- *
- */
+static void mhl_cbus_process_errors(u8 int_status)
+{
+	u8 abort_reason = 0;
+	if (int_status & BIT2) {
+		abort_reason = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0B);
+		pr_debug("%s: CBUS DDC Abort Reason(0x%02x)\n",
+			__func__, abort_reason);
+	}
+	if (int_status & BIT5) {
+		abort_reason = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0D);
+		pr_debug("%s: CBUS MSC Requestor Abort Reason(0x%02x)\n",
+			__func__, abort_reason);
+		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0D, 0xFF);
+	}
+	if (int_status & BIT6) {
+		abort_reason = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0E);
+		pr_debug("%s: CBUS MSC Responder Abort Reason(0x%02x)\n",
+			__func__, abort_reason);
+		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x0E, 0xFF);
+	}
+}
+
+static int mhl_msc_command_done(struct msc_command_struct *req)
+{
+	switch (req->command) {
+	case MHL_WRITE_STAT:
+		if (req->offset == MHL_STATUS_REG_LINK_MODE) {
+			if (req->payload.data[0]
+				& MHL_STATUS_PATH_ENABLED) {
+				/* Enable TMDS output */
+				mhl_i2c_reg_modify(TX_PAGE_L0, 0x0080,
+								BIT4, BIT4);
+			} else
+				/* Disable TMDS output */
+				mhl_i2c_reg_modify(TX_PAGE_L0, 0x0080,
+								BIT4, 0x00);
+		}
+		break;
+	case MHL_READ_DEVCAP:
+		mhl_msm_state->devcap_state |= BIT(req->offset);
+		switch (req->offset) {
+		case MHL_DEV_CATEGORY_OFFSET:
+			if (req->retval & MHL_DEV_CATEGORY_POW_BIT) {
+				/*
+				 * Enable charging
+				 */
+			} else {
+				/*
+				 * Disable charging
+				 */
+			}
+			break;
+		case DEVCAP_OFFSET_MHL_VERSION:
+		case DEVCAP_OFFSET_INT_STAT_SIZE:
+			break;
+		}
+
+		break;
+	}
+	return 0;
+}
+
+static int mhl_send_msc_command(struct msc_command_struct *req)
+{
+	int timeout;
+	u8 start_bit = 0x00;
+	u8 *burst_data;
+	int i;
+
+	if (mhl_msm_state->cur_state != POWER_STATE_D0_MHL) {
+		pr_debug("%s: power_state:%02x CBUS(0x0A):%02x\n",
+		__func__,
+		mhl_msm_state->cur_state, mhl_i2c_reg_read(TX_PAGE_CBUS, 0x0A));
+		return -EFAULT;
+	}
+
+	if (!req)
+		return -EFAULT;
+
+	pr_debug("%s: command=0x%02x offset=0x%02x %02x %02x",
+		__func__,
+		req->command,
+		req->offset,
+		req->payload.data[0],
+		req->payload.data[1]);
+
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x13, req->offset);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x14, req->payload.data[0]);
+
+	switch (req->command) {
+	case MHL_SET_INT:
+	case MHL_WRITE_STAT:
+		start_bit = MSC_START_BIT_WRITE_REG;
+		break;
+	case MHL_READ_DEVCAP:
+		start_bit = MSC_START_BIT_READ_REG;
+		break;
+	case MHL_GET_STATE:
+	case MHL_GET_VENDOR_ID:
+	case MHL_SET_HPD:
+	case MHL_CLR_HPD:
+	case MHL_GET_SC1_ERRORCODE:
+	case MHL_GET_DDC_ERRORCODE:
+	case MHL_GET_MSC_ERRORCODE:
+	case MHL_GET_SC3_ERRORCODE:
+		start_bit = MSC_START_BIT_MSC_CMD;
+		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x13, req->command);
+		break;
+	case MHL_MSC_MSG:
+		start_bit = MSC_START_BIT_VS_CMD;
+		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x15, req->payload.data[1]);
+		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x13, req->command);
+		break;
+	case MHL_WRITE_BURST:
+		start_bit = MSC_START_BIT_WRITE_BURST;
+		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x20, req->length - 1);
+		if (!(req->payload.burst_data)) {
+			pr_err("%s: burst data is null!\n", __func__);
+			goto cbus_send_fail;
+		}
+		burst_data = req->payload.burst_data;
+		for (i = 0; i < req->length; i++, burst_data++)
+			mhl_i2c_reg_write(TX_PAGE_CBUS, 0xC0 + i, *burst_data);
+		break;
+	default:
+		pr_err("%s: unknown command! (%02x)\n",
+			__func__, req->command);
+		goto cbus_send_fail;
+	}
+
+	INIT_COMPLETION(mhl_msm_state->msc_cmd_done);
+	mhl_i2c_reg_write(TX_PAGE_CBUS, 0x12, start_bit);
+	timeout = wait_for_completion_interruptible_timeout
+		(&mhl_msm_state->msc_cmd_done, HZ);
+	if (!timeout) {
+		pr_err("%s: cbus_command_send timed out!\n", __func__);
+		goto cbus_send_fail;
+	}
+
+	switch (req->command) {
+	case MHL_READ_DEVCAP:
+		/* devcap */
+		req->retval = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x16);
+		pr_debug("Read CBUS[0x16]=[%02x]\n", req->retval);
+		break;
+	case MHL_MSC_MSG:
+		/* check if MSC_MSG NACKed */
+		if (mhl_i2c_reg_read(TX_PAGE_CBUS, 0x20) & BIT6)
+			return -EAGAIN; /* otherwise fall through */
+	default:
+		req->retval = 0;
+		break;
+	}
+	mhl_msc_command_done(req);
+	pr_debug("%s: msc cmd done\n", __func__);
+	return 0;
+
+cbus_send_fail:
+	return -EFAULT;
+}
+
+static int mhl_msc_send_set_int(u8 offset, u8 mask)
+{
+	struct msc_command_struct req;
+	req.command = MHL_SET_INT;
+	req.offset = offset;
+	req.payload.data[0] = mask;
+	mhl_msc_sched_work(&req);
+	return 0;
+}
+
+static int mhl_msc_send_write_stat(u8 offset, u8 value)
+{
+	struct msc_command_struct req;
+	req.command = MHL_WRITE_STAT;
+	req.offset = offset;
+	req.payload.data[0] = value;
+	mhl_msc_sched_work(&req);
+	return 0;
+}
+
+static int mhl_msc_send_msc_msg(u8 sub_cmd, u8 cmd_data)
+{
+	struct msc_command_struct req;
+	req.command = MHL_MSC_MSG;
+	req.payload.data[0] = sub_cmd;
+	req.payload.data[1] = cmd_data;
+	mhl_msc_sched_work(&req);
+	return 0;
+}
+
+static int mhl_msc_read_devcap(u8 offset)
+{
+	struct msc_command_struct req;
+	if (offset < 0 || offset > 15)
+		return -EFAULT;
+	req.command = MHL_READ_DEVCAP;
+	req.offset = offset;
+	req.payload.data[0] = 0;
+	mhl_msc_sched_work(&req);
+	return 0;
+}
+
+static int mhl_msc_read_devcap_all(void)
+{
+	int offset;
+	int ret;
+
+	for (offset = 0; offset < DEVCAP_SIZE; offset++) {
+		ret = mhl_msc_read_devcap(offset);
+		msleep(200);
+		if (ret == -EFAULT) {
+			pr_err("%s: queue busy!\n", __func__);
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+/* supported RCP key code */
+static const u8 rcp_key_code_tbl[] = {
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x00~0x07 */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x08~0x0f */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x10~0x17 */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x18~0x1f */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x20~0x27 */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x28~0x2f */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x30~0x37 */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x38~0x3f */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x40~0x47 */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x48~0x4f */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x50~0x57 */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x58~0x5f */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x60~0x67 */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x68~0x6f */
+	0, 0, 0, 0, 0, 0, 0, 0,		/* 0x70~0x77 */
+	0, 0, 0, 0, 0, 0, 0, 0		/* 0x78~0x7f */
+};
+
+static int mhl_rcp_recv(u8 key_code)
+{
+	int rc;
+	if (rcp_key_code_tbl[(key_code & 0x7f)]) {
+		/*
+		 * TODO: Take action for the RCP cmd
+		 */
+
+		/* send ack to rcp cmd*/
+		rc = mhl_msc_send_msc_msg(
+			MHL_MSC_MSG_RCPK,
+			key_code);
+	} else {
+		/* send rcp error */
+		rc = mhl_msc_send_msc_msg(
+			MHL_MSC_MSG_RCPE,
+			MHL_RCPE_UNSUPPORTED_KEY_CODE);
+		if (rc)
+			return rc;
+		/* send rcpk after rcpe send */
+		rc = mhl_msc_send_msc_msg(
+			MHL_MSC_MSG_RCPK,
+			key_code);
+	}
+	return rc;
+}
+
+static int mhl_rap_action(u8 action_code)
+{
+	switch (action_code) {
+	case MHL_RAP_CONTENT_ON:
+		/*
+		 * Enable TMDS on TMDS_CCTRL
+		 */
+		mhl_i2c_reg_modify(TX_PAGE_L0, 0x0080, BIT4, BIT4);
+		break;
+	case MHL_RAP_CONTENT_OFF:
+		/*
+		 * Disable TMDS on TMDS_CCTRL
+		 */
+		mhl_i2c_reg_modify(TX_PAGE_L0, 0x0080, BIT4, 0x00);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static int mhl_rap_recv(u8 action_code)
+{
+	u8 error_code;
+
+	switch (action_code) {
+	/*case MHL_RAP_POLL:*/
+	case MHL_RAP_CONTENT_ON:
+	case MHL_RAP_CONTENT_OFF:
+		mhl_rap_action(action_code);
+		error_code = MHL_RAPK_NO_ERROR;
+		/* notify userspace */
+		break;
+	default:
+		error_code = MHL_RAPK_UNRECOGNIZED_ACTION_CODE;
+		break;
+	}
+	/* finally, send RAPK */
+	return mhl_msc_send_msc_msg(
+		MHL_MSC_MSG_RAPK,
+		error_code);
+}
+
+static int mhl_msc_recv_msc_msg(u8 sub_cmd, u8 cmd_data)
+{
+	int rc = 0;
+	switch (sub_cmd) {
+	case MHL_MSC_MSG_RCP:
+		pr_debug("MHL: receive RCP(0x%02x)\n", cmd_data);
+		rc = mhl_rcp_recv(cmd_data);
+		break;
+	case MHL_MSC_MSG_RCPK:
+		pr_debug("MHL: receive RCPK(0x%02x)\n", cmd_data);
+		break;
+	case MHL_MSC_MSG_RCPE:
+		pr_debug("MHL: receive RCPE(0x%02x)\n", cmd_data);
+		break;
+	case MHL_MSC_MSG_RAP:
+		pr_debug("MHL: receive RAP(0x%02x)\n", cmd_data);
+		rc = mhl_rap_recv(cmd_data);
+		break;
+	case MHL_MSC_MSG_RAPK:
+		pr_debug("MHL: receive RAPK(0x%02x)\n", cmd_data);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+static int mhl_msc_recv_set_int(u8 offset, u8 set_int)
+{
+	if (offset >= 2)
+		return -EFAULT;
+
+	switch (offset) {
+	case 0:
+		/* DCAP_CHG */
+		if (set_int & MHL_INT_DCAP_CHG) {
+			/* peer dcap has changed */
+			if (mhl_msc_read_devcap_all() == -EBUSY) {
+				pr_err("READ DEVCAP FAILED to send successfully\n");
+				break;
+			}
+		}
+		/* DSCR_CHG */
+		if (set_int & MHL_INT_DSCR_CHG)
+			;
+		/* REQ_WRT */
+		if (set_int & MHL_INT_REQ_WRT) {
+			/* SET_INT: GRT_WRT */
+			mhl_msc_send_set_int(
+				MHL_RCHANGE_INT,
+				MHL_INT_GRT_WRT);
+		}
+		/* GRT_WRT */
+		if (set_int & MHL_INT_GRT_WRT)
+			;
+		break;
+	case 1:
+		/* EDID_CHG */
+		if (set_int & MHL_INT_EDID_CHG) {
+			/*
+			 * Peer EDID has changed; toggle HPD to
+			 * read the EDID again. On the 8x30 FLUID
+			 * the HDMI HPD line is not connected to
+			 * the MHL 8334 transmitter.
+			 */
+		}
+	}
+	return 0;
+}
+
+static int mhl_msc_recv_write_stat(u8 offset, u8 value)
+{
+	if (offset >= 2)
+		return -EFAULT;
+
+	switch (offset) {
+	case 0:
+		/* DCAP_RDY */
+		/*
+		 * Connected Device bits changed and DEVCAP READY
+		 */
+		pr_debug("MHL: value [0x%02x]\n", value);
+		pr_debug("MHL: offset [0x%02x]\n", offset);
+		pr_debug("MHL: devcap state [0x%02x]\n",
+			mhl_msm_state->devcap_state);
+		pr_debug("MHL: MHL_STATUS_DCAP_RDY [0x%02x]\n",
+			MHL_STATUS_DCAP_RDY);
+		if (((value ^ mhl_msm_state->devcap_state) &
+			MHL_STATUS_DCAP_RDY)) {
+			if (value & MHL_STATUS_DCAP_RDY) {
+				if (mhl_msc_read_devcap_all() == -EBUSY) {
+					pr_err("READ DEVCAP FAILED to send successfully\n");
+					break;
+				}
+			} else {
+				/* peer dcap turned not ready */
+				/*
+				 * Clear DEVCAP READY state
+				 */
+			}
+		}
+		break;
+	case 1:
+		/* PATH_EN */
+		/*
+		 * Connected Device bits changed and PATH ENABLED
+		 */
+		if ((value ^ mhl_msm_state->path_en_state)
+			& MHL_STATUS_PATH_ENABLED) {
+			if (value & MHL_STATUS_PATH_ENABLED) {
+				mhl_msm_state->path_en_state
+					|= (MHL_STATUS_PATH_ENABLED |
+					MHL_STATUS_CLK_MODE_NORMAL);
+				mhl_msc_send_write_stat(
+					MHL_STATUS_REG_LINK_MODE,
+					mhl_msm_state->path_en_state);
+			} else {
+				mhl_msm_state->path_en_state
+					&= ~(MHL_STATUS_PATH_ENABLED |
+					MHL_STATUS_CLK_MODE_NORMAL);
+				mhl_msc_send_write_stat(
+					MHL_STATUS_REG_LINK_MODE,
+					mhl_msm_state->path_en_state);
+			}
+		}
+		break;
+	}
+	mhl_msm_state->path_en_state = value;
+	return 0;
+}
+
+
+/* MSC, RCP, RAP messages - mandatory for compliance */
 static void mhl_cbus_isr(void)
 {
 	uint8_t regval;
 	int req_done = FALSE;
-	uint8_t sub_cmd;
-	uint8_t cmd_data;
+	uint8_t sub_cmd = 0x0;
+	uint8_t cmd_data = 0x0;
 	int msc_msg_recved = FALSE;
 	int rc = -1;
 
@@ -976,21 +1473,27 @@
 	if (regval == 0xff)
 		return;
 
-	/* clear all interrupts that were raised even if we did not process */
+	/*
+	 * clear all interrupts that were raised
+	 * even if we did not process
+	 */
 	if (regval)
 		mhl_i2c_reg_write(TX_PAGE_CBUS, 0x08, regval);
 
 	pr_debug("%s: CBUS_INT = %02x\n", __func__, regval);
 
 	/* MSC_MSG (RCP/RAP) */
-	if (regval & BIT(3)) {
+	if (regval & BIT3) {
 		sub_cmd = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x18);
 		cmd_data = mhl_i2c_reg_read(TX_PAGE_CBUS, 0x19);
 		msc_msg_recved = TRUE;
 	}
+	/* MSC_MT_ABRT/MSC_MR_ABRT/DDC_ABORT */
+	if (regval & (BIT6 | BIT5 | BIT2))
+		mhl_cbus_process_errors(regval);
 
 	/* MSC_REQ_DONE */
-	if (regval & BIT(4))
+	if (regval & BIT4)
 		req_done = TRUE;
 
 	/* Now look for interrupts on CBUS_MSC_INT2 */
@@ -1004,11 +1507,15 @@
 	pr_debug("%s: CBUS_MSC_INT2 = %02x\n", __func__, regval);
 
 	/* received SET_INT */
-	if (regval & BIT(2)) {
+	if (regval & BIT2) {
 		uint8_t intr;
 		intr = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xA0);
+		mhl_msc_recv_set_int(0, intr);
+
 		pr_debug("%s: MHL_INT_0 = %02x\n", __func__, intr);
 		intr = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xA1);
+		mhl_msc_recv_set_int(1, intr);
+
 		pr_debug("%s: MHL_INT_1 = %02x\n", __func__, intr);
 		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA0, 0xFF);
 		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xA1, 0xFF);
@@ -1017,11 +1524,14 @@
 	}
 
 	/* received WRITE_STAT */
-	if (regval & BIT(3)) {
+	if (regval & BIT3) {
 		uint8_t stat;
 		stat = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xB0);
+		mhl_msc_recv_write_stat(0, stat);
+
 		pr_debug("%s: MHL_STATUS_0 = %02x\n", __func__, stat);
 		stat = mhl_i2c_reg_read(TX_PAGE_CBUS, 0xB1);
+		mhl_msc_recv_write_stat(1, stat);
 		pr_debug("%s: MHL_STATUS_1 = %02x\n", __func__, stat);
 
 		mhl_i2c_reg_write(TX_PAGE_CBUS, 0xB0, 0xFF);
@@ -1033,9 +1543,13 @@
 	/* received MSC_MSG */
 	if (msc_msg_recved) {
 		/*mhl msc recv msc msg*/
+		rc = mhl_msc_recv_msc_msg(sub_cmd, cmd_data);
 		if (rc)
 			pr_err("MHL: mhl msc recv msc msg failed(%d)!\n", rc);
 	}
+	/* complete last command */
+	if (req_done)
+		complete_all(&mhl_msm_state->msc_cmd_done);
 
 	return;
 }
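
Two caveats on the new MSC command queue: list_cmd_put() never checks the vmalloc() return, the list is touched from both process and workqueue context with no lock, and an -EAGAIN from mhl_send_msc_command() leaks the envelope (it is neither freed nor requeued). A sketch of a locked put/get pair, assuming a new file-scope spinlock (msc_lock is hypothetical, not part of this patch) and relying on msc_cmd_msg being the first member of struct msc_cmd_envelope, which the existing vfree(req) in mhl_msc_send_work() already requires:

static DEFINE_SPINLOCK(msc_lock);	/* hypothetical, not in this patch */

static void list_cmd_put(struct msc_command_struct *cmd)
{
	struct msc_cmd_envelope *new_cmd;
	unsigned long flags;

	new_cmd = vmalloc(sizeof(*new_cmd));
	if (!new_cmd) {
		pr_err("%s: out of memory\n", __func__);
		return;
	}
	memcpy(&new_cmd->msc_cmd_msg, cmd, sizeof(*cmd));

	spin_lock_irqsave(&msc_lock, flags);
	list_add_tail(&new_cmd->msc_queue_envelope,
		      &mhl_msm_state->list_cmd);
	spin_unlock_irqrestore(&msc_lock, flags);
}

static struct msc_command_struct *list_cmd_get(void)
{
	struct msc_cmd_envelope *cmd_env = NULL;
	unsigned long flags;

	spin_lock_irqsave(&msc_lock, flags);
	if (!list_empty(&mhl_msm_state->list_cmd)) {
		cmd_env = list_first_entry(&mhl_msm_state->list_cmd,
				struct msc_cmd_envelope,
				msc_queue_envelope);
		list_del(&cmd_env->msc_queue_envelope);
	}
	spin_unlock_irqrestore(&msc_lock, flags);

	return cmd_env ? &cmd_env->msc_cmd_msg : NULL;
}

mhl_msc_send_work() would then loop on list_cmd_get() until it returns NULL instead of testing list_empty() unlocked.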
diff --git a/drivers/video/msm/mhl_api.h b/drivers/video/msm/mhl_api.h
index 26037ce..691771a 100644
--- a/drivers/video/msm/mhl_api.h
+++ b/drivers/video/msm/mhl_api.h
@@ -15,9 +15,9 @@
 #define __MHL_API_H__
 
 #ifdef CONFIG_FB_MSM_HDMI_MHL_8334
-bool mhl_is_connected(void);
+bool mhl_is_enabled(void);
 #else
-static bool mhl_is_connected(void)
+static inline bool mhl_is_enabled(void)
 {
 	return false;
 }
diff --git a/drivers/video/msm/mipi_NT35510.c b/drivers/video/msm/mipi_NT35510.c
index 50a038e..9763c88 100644
--- a/drivers/video/msm/mipi_NT35510.c
+++ b/drivers/video/msm/mipi_NT35510.c
@@ -32,8 +32,8 @@
 static char write_ram[2] = {0x2c, 0x00}; /* write ram */
 
 static struct dsi_cmd_desc nt35510_display_off_cmds[] = {
-	{DTYPE_DCS_WRITE, 1, 0, 0, 150, sizeof(display_off), display_off},
-	{DTYPE_DCS_WRITE, 1, 0, 0, 150, sizeof(enter_sleep), enter_sleep}
+	{DTYPE_DCS_WRITE, 1, 0, 0, 50, sizeof(display_off), display_off},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 50, sizeof(enter_sleep), enter_sleep}
 };
 
 static char cmd0[6] = {
@@ -213,46 +213,46 @@
 };
 static char config_MADCTL[2] = {0x36, 0x00};
 static struct dsi_cmd_desc nt35510_cmd_display_on_cmds[] = {
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd0), cmd0},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd1), cmd1},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd2), cmd2},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd3), cmd3},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd4), cmd4},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd5), cmd5},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd6), cmd6},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd7), cmd7},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd8), cmd8},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd9), cmd9},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd10), cmd10},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd11), cmd11},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd12), cmd12},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd13), cmd13},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd14), cmd14},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd15), cmd15},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd16), cmd16},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd17), cmd17},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd18), cmd18},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd19), cmd19},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd20), cmd20},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd21), cmd21},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd22), cmd22},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd23), cmd23},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd24), cmd24},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd25), cmd25},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd26), cmd26},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(cmd27), cmd27},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd0), cmd0},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd1), cmd1},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd2), cmd2},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd3), cmd3},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd4), cmd4},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd5), cmd5},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd6), cmd6},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd7), cmd7},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd8), cmd8},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd9), cmd9},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd10), cmd10},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd11), cmd11},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd12), cmd12},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd13), cmd13},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd14), cmd14},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd15), cmd15},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd16), cmd16},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd17), cmd17},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd18), cmd18},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd19), cmd19},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd20), cmd20},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd21), cmd21},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd22), cmd22},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd23), cmd23},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd24), cmd24},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd25), cmd25},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd26), cmd26},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(cmd27), cmd27},
 
-	{DTYPE_DCS_WRITE, 1, 0, 0, 150,	sizeof(exit_sleep), exit_sleep},
-	{DTYPE_DCS_WRITE, 1, 0, 0, 10,	sizeof(display_on), display_on},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 0,	sizeof(exit_sleep), exit_sleep},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 0,	sizeof(display_on), display_on},
 
-	{DTYPE_DCS_WRITE1, 1, 0, 0, 150,
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 0,
 		sizeof(config_MADCTL), config_MADCTL},
 
-	{DTYPE_DCS_WRITE, 1, 0, 0, 10,	sizeof(write_ram), write_ram},
+	{DTYPE_DCS_WRITE, 1, 0, 0, 0,	sizeof(write_ram), write_ram},
 };
 
 static struct dsi_cmd_desc nt35510_cmd_display_on_cmds_rotate[] = {
-	{DTYPE_DCS_LWRITE, 1, 0, 0, 50,
+	{DTYPE_DCS_LWRITE, 1, 0, 0, 0,
 		sizeof(cmd19_rotate), cmd19_rotate},
 };
 
@@ -430,34 +430,34 @@
 };
 static char config_video_MADCTL[2] = {0x36, 0xC0};
 static struct dsi_cmd_desc nt35510_video_display_on_cmds[] = {
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video0), video0},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video1), video1},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video2), video2},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video3), video3},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video4), video4},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video5), video5},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video6), video6},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video7), video7},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video8), video8},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video9), video9},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video10), video10},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video11), video11},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video12), video12},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video13), video13},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video14), video14},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video15), video15},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video16), video16},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video17), video17},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video18), video18},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video19), video19},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video20), video20},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video21), video21},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video22), video22},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video23), video23},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video24), video24},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video25), video25},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video26), video26},
-	{DTYPE_GEN_LWRITE, 1, 0, 0, 50, sizeof(video27), video27},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video0), video0},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video1), video1},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video2), video2},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video3), video3},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video4), video4},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video5), video5},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video6), video6},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video7), video7},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video8), video8},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video9), video9},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video10), video10},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video11), video11},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video12), video12},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video13), video13},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video14), video14},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video15), video15},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video16), video16},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video17), video17},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video18), video18},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video19), video19},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video20), video20},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video21), video21},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video22), video22},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video23), video23},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video24), video24},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video25), video25},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video26), video26},
+	{DTYPE_GEN_LWRITE, 1, 0, 0, 0, sizeof(video27), video27},
 	{DTYPE_DCS_WRITE, 1, 0, 0, NT35510_SLEEP_OFF_DELAY, sizeof(exit_sleep),
 			exit_sleep},
 	{DTYPE_DCS_WRITE, 1, 0, 0, NT35510_DISPLAY_ON_DELAY, sizeof(display_on),
@@ -465,7 +465,7 @@
 };
 
 static struct dsi_cmd_desc nt35510_video_display_on_cmds_rotate[] = {
-	{DTYPE_DCS_WRITE1, 1, 0, 0, 150,
+	{DTYPE_DCS_WRITE1, 1, 0, 0, 0,
 		sizeof(config_video_MADCTL), config_video_MADCTL},
 };
 static int mipi_nt35510_lcd_on(struct platform_device *pdev)
@@ -473,6 +473,8 @@
 	struct msm_fb_data_type *mfd;
 	struct mipi_panel_info *mipi;
 	static int rotate;
+	struct dcs_cmd_req cmdreq;
+
 	mfd = platform_get_drvdata(pdev);
 	if (!mfd)
 		return -ENODEV;
@@ -490,25 +492,41 @@
 	if (mipi_nt35510_pdata && mipi_nt35510_pdata->rotate_panel)
 		rotate = mipi_nt35510_pdata->rotate_panel();
 
+	memset(&cmdreq, 0, sizeof(cmdreq));
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(&nt35510_tx_buf,
-			nt35510_video_display_on_cmds,
-			ARRAY_SIZE(nt35510_video_display_on_cmds));
+		cmdreq.cmds = nt35510_video_display_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(nt35510_video_display_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
 
 		if (rotate) {
-			mipi_dsi_cmds_tx(&nt35510_tx_buf,
-				nt35510_video_display_on_cmds_rotate,
-			ARRAY_SIZE(nt35510_video_display_on_cmds_rotate));
+			cmdreq.cmds = nt35510_video_display_on_cmds_rotate;
+			cmdreq.cmds_cnt =
+			ARRAY_SIZE(nt35510_video_display_on_cmds_rotate);
+			cmdreq.flags = CMD_REQ_COMMIT;
+			cmdreq.rlen = 0;
+			cmdreq.cb = NULL;
+			mipi_dsi_cmdlist_put(&cmdreq);
 		}
 	} else if (mipi->mode == DSI_CMD_MODE) {
-		mipi_dsi_cmds_tx(&nt35510_tx_buf,
-			nt35510_cmd_display_on_cmds,
-			ARRAY_SIZE(nt35510_cmd_display_on_cmds));
+		cmdreq.cmds = nt35510_cmd_display_on_cmds;
+		cmdreq.cmds_cnt =
+			ARRAY_SIZE(nt35510_cmd_display_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
 
 		if (rotate) {
-			mipi_dsi_cmds_tx(&nt35510_tx_buf,
-				nt35510_cmd_display_on_cmds_rotate,
-			ARRAY_SIZE(nt35510_cmd_display_on_cmds_rotate));
+			cmdreq.cmds = nt35510_cmd_display_on_cmds_rotate;
+			cmdreq.cmds_cnt =
+				ARRAY_SIZE(nt35510_cmd_display_on_cmds_rotate);
+			cmdreq.flags = CMD_REQ_COMMIT;
+			cmdreq.rlen = 0;
+			cmdreq.cb = NULL;
+			mipi_dsi_cmdlist_put(&cmdreq);
 		}
 	}
 
@@ -518,6 +536,7 @@
 static int mipi_nt35510_lcd_off(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
+	struct dcs_cmd_req cmdreq;
 
 	pr_debug("mipi_nt35510_lcd_off E\n");
 
@@ -528,8 +547,13 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(&nt35510_tx_buf, nt35510_display_off_cmds,
-			ARRAY_SIZE(nt35510_display_off_cmds));
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = nt35510_display_off_cmds;
+	cmdreq.cmds_cnt = ARRAY_SIZE(nt35510_display_off_cmds);
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	pr_debug("mipi_nt35510_lcd_off X\n");
 	return 0;
@@ -592,6 +616,7 @@
 static int __devinit mipi_nt35510_lcd_probe(struct platform_device *pdev)
 {
 	struct platform_device *pthisdev = NULL;
+	struct msm_fb_panel_data *pdata;
 	pr_debug("%s\n", __func__);
 
 	if (pdev->id == 0) {
@@ -601,6 +626,11 @@
 		return 0;
 	}
 
+	pdata = pdev->dev.platform_data;
+	if (mipi_nt35510_pdata && mipi_nt35510_pdata->rotate_panel()
+			&& pdata->panel_info.type == MIPI_CMD_PANEL)
+		pdata->panel_info.lcd.refx100 = 6200;
+
 	pthisdev = msm_fb_add_device(pdev);
 	mipi_nt35510_create_sysfs(pthisdev);
 
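
The nt35510 on/off paths now fill in the same five dcs_cmd_req fields four times over; a small helper would keep that in one place. A sketch (the helper is made up, not part of this patch):

static void nt35510_cmdlist_commit(struct dsi_cmd_desc *cmds, int cnt)
{
	struct dcs_cmd_req cmdreq;

	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.cmds = cmds;
	cmdreq.cmds_cnt = cnt;
	cmdreq.flags = CMD_REQ_COMMIT;
	cmdreq.rlen = 0;
	cmdreq.cb = NULL;
	mipi_dsi_cmdlist_put(&cmdreq);
}

mipi_nt35510_lcd_on() then reduces to one nt35510_cmdlist_commit() call per command table.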
diff --git a/drivers/video/msm/mipi_dsi.c b/drivers/video/msm/mipi_dsi.c
index e94e937..1528d65 100644
--- a/drivers/video/msm/mipi_dsi.c
+++ b/drivers/video/msm/mipi_dsi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -45,8 +45,6 @@
 
 static int mipi_dsi_off(struct platform_device *pdev);
 static int mipi_dsi_on(struct platform_device *pdev);
-static int mipi_dsi_fps_level_change(struct platform_device *pdev,
-					u32 fps_level);
 
 static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
 static int pdev_list_cnt;
@@ -65,14 +63,6 @@
 
 struct device dsi_dev;
 
-static int mipi_dsi_fps_level_change(struct platform_device *pdev,
-					u32 fps_level)
-{
-	mipi_dsi_wait4video_done();
-	mipi_dsi_configure_fb_divider(fps_level);
-	return 0;
-}
-
 static int mipi_dsi_off(struct platform_device *pdev)
 {
 	int ret = 0;
@@ -116,8 +106,6 @@
 
 	ret = panel_next_off(pdev);
 
-	spin_lock_bh(&dsi_clk_lock);
-
 	mipi_dsi_clk_disable();
 
 	/* disbale dsi engine */
@@ -126,7 +114,6 @@
 	mipi_dsi_phy_ctrl(0);
 
 	mipi_dsi_ahb_ctrl(0);
-	spin_unlock_bh(&dsi_clk_lock);
 
 	mipi_dsi_unprepare_clocks();
 	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
@@ -456,9 +443,6 @@
 	if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST)
 		return -ENOMEM;
 
-	if (!mfd->cont_splash_done)
-		cont_splash_clk_ctrl(1);
-
 	mdp_dev = platform_device_alloc("mdp", pdev->id);
 	if (!mdp_dev)
 		return -ENOMEM;
@@ -484,7 +468,6 @@
 	pdata = mdp_dev->dev.platform_data;
 	pdata->on = mipi_dsi_on;
 	pdata->off = mipi_dsi_off;
-	pdata->fps_level_change = mipi_dsi_fps_level_change;
 	pdata->late_init = mipi_dsi_late_init;
 	pdata->next = pdev;
 
@@ -596,6 +579,9 @@
 
 	pdev_list[pdev_list_cnt++] = pdev;
 
+	if (!mfd->cont_splash_done)
+		cont_splash_clk_ctrl(1);
+
 return 0;
 
 mipi_dsi_probe_err:
diff --git a/drivers/video/msm/mipi_dsi.h b/drivers/video/msm/mipi_dsi.h
index 818caa8..7338a5a 100644
--- a/drivers/video/msm/mipi_dsi.h
+++ b/drivers/video/msm/mipi_dsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -117,7 +117,6 @@
 #define DSI_INTR_CMD_DMA_DONE_MASK	BIT(1)
 #define DSI_INTR_CMD_DMA_DONE		BIT(0)
 
-#define DSI_VIDEO_TERM	BIT(16)
 #define DSI_MDP_TERM	BIT(8)
 #define DSI_CMD_TERM	BIT(0)
 
@@ -268,11 +267,13 @@
 #define CMD_REQ_COMMIT	0x0002
 #define CMD_CLK_CTRL	0x0004
 #define CMD_REQ_NO_MAX_PKT_SIZE 0x0008
+#define CMD_MDP3_CMD_PANEL 0x80000000  /* mdp3 only */
 
 struct dcs_cmd_req {
 	struct dsi_cmd_desc *cmds;
 	int cmds_cnt;
 	u32 flags;
+	struct dsi_buf *rbuf;
 	int rlen;	/* rx length */
 	fxn cb;
 };
@@ -292,14 +293,7 @@
 void mipi_dsi_bist_ctrl(void);
 int mipi_dsi_buf_alloc(struct dsi_buf *, int size);
 int mipi_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
-int mipi_dsi_cmds_tx(struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
-
-int mipi_dsi_cmd_dma_tx(struct dsi_buf *dp);
 int mipi_dsi_cmd_reg_tx(uint32 data);
-int mipi_dsi_cmds_rx(struct msm_fb_data_type *mfd,
-			struct dsi_buf *tp, struct dsi_buf *rp,
-			struct dsi_cmd_desc *cmds, int len);
-int mipi_dsi_cmd_dma_rx(struct dsi_buf *tp, int rlen);
 void mipi_dsi_host_init(struct mipi_panel_info *pinfo);
 void mipi_dsi_op_mode_config(int mode);
 void mipi_dsi_cmd_mode_ctrl(int enable);
@@ -313,8 +307,6 @@
 void mipi_dsi_set_tear_off(struct msm_fb_data_type *mfd);
 void mipi_dsi_set_backlight(struct msm_fb_data_type *mfd, int level);
 void mipi_dsi_cmd_backlight_tx(struct dsi_buf *dp);
-void mipi_dsi_clk_enable(void);
-void mipi_dsi_clk_disable(void);
 void mipi_dsi_pre_kickoff_action(void);
 void mipi_dsi_post_kickoff_action(void);
 void mipi_dsi_pre_kickoff_add(struct dsi_kickoff_action *act);
@@ -328,16 +320,47 @@
 irqreturn_t mipi_dsi_isr(int irq, void *ptr);
 
 void mipi_set_tx_power_mode(int mode);
-void mipi_dsi_phy_ctrl(int on);
 void mipi_dsi_phy_init(int panel_ndx, struct msm_panel_info const *panel_info,
 	int target_type);
 int mipi_dsi_clk_div_config(uint8 bpp, uint8 lanes,
 			    uint32 *expected_dsi_pclk);
 int mipi_dsi_clk_init(struct platform_device *pdev);
 void mipi_dsi_clk_deinit(struct device *dev);
+
+#ifdef CONFIG_FB_MSM_MIPI_DSI
+void mipi_dsi_clk_enable(void);
+void mipi_dsi_clk_disable(void);
 void mipi_dsi_prepare_clocks(void);
 void mipi_dsi_unprepare_clocks(void);
 void mipi_dsi_ahb_ctrl(u32 enable);
+void mipi_dsi_phy_ctrl(int on);
+#else
+static inline void mipi_dsi_clk_enable(void)
+{
+	/* empty */
+}
+static inline void mipi_dsi_clk_disable(void)
+{
+	/* empty */
+}
+static inline void mipi_dsi_prepare_clocks(void)
+{
+	/* empty */
+}
+static inline void mipi_dsi_unprepare_clocks(void)
+{
+	/* empty */
+}
+static inline void mipi_dsi_ahb_ctrl(u32 enable)
+{
+	/* empty */
+}
+static inline void mipi_dsi_phy_ctrl(int on)
+{
+	/* empty */
+}
+#endif
+
 void cont_splash_clk_ctrl(int enable);
 void mipi_dsi_turn_on_clks(void);
 void mipi_dsi_turn_off_clks(void);
@@ -347,8 +370,6 @@
 struct dcs_cmd_req *mipi_dsi_cmdlist_get(void);
 void mipi_dsi_cmdlist_commit(int from_mdp);
 void mipi_dsi_cmd_mdp_busy(void);
-void mipi_dsi_configure_fb_divider(u32 fps_level);
-void mipi_dsi_wait4video_done(void);
 
 #ifdef CONFIG_FB_MSM_MDP303
 void update_lane_config(struct msm_panel_info *pinfo);
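
The new rbuf member of struct dcs_cmd_req lets a panel driver receive read-back data in its own dsi_buf rather than the shared dsi_rx_buf; mipi_dsi_cmdlist_rx() falls back to dsi_rx_buf when rbuf is NULL, which is what the novatek change further down does. A usage sketch, where my_rx_buf and status_read_cmd are illustrative names only:

static struct dsi_buf my_rx_buf;		/* sized once at probe with
						 * mipi_dsi_buf_alloc() */
static struct dsi_cmd_desc status_read_cmd;	/* some DTYPE_DCS_READ descriptor */

static void panel_read_status(void)
{
	struct dcs_cmd_req cmdreq;

	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.cmds = &status_read_cmd;
	cmdreq.cmds_cnt = 1;
	cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT;
	cmdreq.rbuf = &my_rx_buf;	/* NULL would fall back to dsi_rx_buf */
	cmdreq.rlen = 1;
	cmdreq.cb = NULL;		/* or a callback that parses my_rx_buf.data */
	mipi_dsi_cmdlist_put(&cmdreq);
}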
diff --git a/drivers/video/msm/mipi_dsi_host.c b/drivers/video/msm/mipi_dsi_host.c
index b717e7a..31883dd 100644
--- a/drivers/video/msm/mipi_dsi_host.c
+++ b/drivers/video/msm/mipi_dsi_host.c
@@ -1,5 +1,5 @@
 
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,7 +24,6 @@
 #include <linux/semaphore.h>
 #include <linux/uaccess.h>
 #include <linux/clk.h>
-#include <linux/iopoll.h>
 #include <linux/platform_device.h>
 #include <linux/iopoll.h>
 
@@ -43,7 +42,6 @@
 
 static struct completion dsi_dma_comp;
 static struct completion dsi_mdp_comp;
-static struct completion dsi_video_comp;
 static struct dsi_buf dsi_tx_buf;
 static struct dsi_buf dsi_rx_buf;
 static spinlock_t dsi_irq_lock;
@@ -96,7 +94,6 @@
 {
 	init_completion(&dsi_dma_comp);
 	init_completion(&dsi_mdp_comp);
-	init_completion(&dsi_video_comp);
 	mipi_dsi_buf_alloc(&dsi_tx_buf, DSI_BUF_SIZE);
 	mipi_dsi_buf_alloc(&dsi_rx_buf, DSI_BUF_SIZE);
 	spin_lock_init(&dsi_irq_lock);
@@ -1016,8 +1013,7 @@
 	dsi_ctrl &= ~0x07;
 	if (mode == DSI_VIDEO_MODE) {
 		dsi_ctrl |= 0x03;
-		intr_ctrl = (DSI_INTR_CMD_DMA_DONE_MASK |
-					DSI_INTR_VIDEO_DONE_MASK);
+		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK;
 	} else {		/* command mode */
 		dsi_ctrl |= 0x05;
 		intr_ctrl = DSI_INTR_CMD_DMA_DONE_MASK | DSI_INTR_ERROR_MASK |
@@ -1031,20 +1027,6 @@
 	wmb();
 }
 
-
-void mipi_dsi_wait4video_done(void)
-{
-	unsigned long flag;
-
-	spin_lock_irqsave(&dsi_mdp_lock, flag);
-	INIT_COMPLETION(dsi_video_comp);
-	mipi_dsi_enable_irq(DSI_VIDEO_TERM);
-	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
-
-	wait_for_completion_timeout(&dsi_video_comp,
-					msecs_to_jiffies(VSYNC_PERIOD * 4));
-}
-
 void mipi_dsi_mdp_busy_wait(void)
 {
 	mutex_lock(&cmd_mutex);
@@ -1148,11 +1130,15 @@
 	return 4;
 }
 
+static int mipi_dsi_cmd_dma_tx(struct dsi_buf *tp);
+static int mipi_dsi_cmd_dma_rx(struct dsi_buf *rp, int rlen);
+
 /*
  * mipi_dsi_cmds_tx:
  * thread context only
  */
-int mipi_dsi_cmds_tx(struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
+static int mipi_dsi_cmds_tx(struct dsi_buf *tp,
+			struct dsi_cmd_desc *cmds, int cnt)
 {
 	struct dsi_cmd_desc *cm;
 	uint32 dsi_ctrl, ctrl;
@@ -1205,122 +1191,8 @@
  * len should be either 4 or 8
  * any return data more than MIPI_DSI_LEN need to be break down
  * to multiple transactions.
- *
- * ov_mutex need to be acquired before call this function.
  */
-int mipi_dsi_cmds_rx(struct msm_fb_data_type *mfd,
-			struct dsi_buf *tp, struct dsi_buf *rp,
-			struct dsi_cmd_desc *cmds, int rlen)
-{
-	int cnt, len, diff, pkt_size;
-	char cmd;
-
-	if (mfd->panel_info.mipi.no_max_pkt_size) {
-		/* Only support rlen = 4*n */
-		rlen += 3;
-		rlen &= ~0x03;
-	}
-
-	len = rlen;
-	diff = 0;
-
-	if (len <= 2)
-		cnt = 4;	/* short read */
-	else {
-		if (len > MIPI_DSI_LEN)
-			len = MIPI_DSI_LEN;	/* 8 bytes at most */
-
-		len = (len + 3) & ~0x03; /* len 4 bytes align */
-		diff = len - rlen;
-		/*
-		 * add extra 2 bytes to len to have overall
-		 * packet size is multipe by 4. This also make
-		 * sure 4 bytes dcs headerlocates within a
-		 * 32 bits register after shift in.
-		 * after all, len should be either 6 or 10.
-		 */
-		len += 2;
-		cnt = len + 6; /* 4 bytes header + 2 bytes crc */
-	}
-
-	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
-		/* make sure mdp dma is not txing pixel data */
-#ifdef CONFIG_FB_MSM_MDP303
-			mdp3_dsi_cmd_dma_busy_wait(mfd);
-#endif
-	}
-
-	if (!mfd->panel_info.mipi.no_max_pkt_size) {
-		/* packet size need to be set at every read */
-		pkt_size = len;
-		max_pktsize[0] = pkt_size;
-		mipi_dsi_enable_irq(DSI_CMD_TERM);
-		mipi_dsi_buf_init(tp);
-		mipi_dsi_cmd_dma_add(tp, pkt_size_cmd);
-		mipi_dsi_cmd_dma_tx(tp);
-	}
-
-	mipi_dsi_enable_irq(DSI_CMD_TERM);
-	mipi_dsi_buf_init(tp);
-	mipi_dsi_cmd_dma_add(tp, cmds);
-
-	/* transmit read comamnd to client */
-	mipi_dsi_cmd_dma_tx(tp);
-
-	/*
-	 * once cmd_dma_done interrupt received,
-	 * return data from client is ready and stored
-	 * at RDBK_DATA register already
-	 */
-	mipi_dsi_buf_init(rp);
-	if (mfd->panel_info.mipi.no_max_pkt_size) {
-		/*
-		 * expect rlen = n * 4
-		 * short alignement for start addr
-		 */
-		rp->data += 2;
-	}
-
-	mipi_dsi_cmd_dma_rx(rp, cnt);
-
-	if (mfd->panel_info.mipi.no_max_pkt_size) {
-		/*
-		 * remove extra 2 bytes from previous
-		 * rx transaction at shift register
-		 * which was inserted during copy
-		 * shift registers to rx buffer
-		 * rx payload start from long alignment addr
-		 */
-		rp->data += 2;
-	}
-
-	cmd = rp->data[0];
-	switch (cmd) {
-	case DTYPE_ACK_ERR_RESP:
-		pr_debug("%s: rx ACK_ERR_PACLAGE\n", __func__);
-		break;
-	case DTYPE_GEN_READ1_RESP:
-	case DTYPE_DCS_READ1_RESP:
-		mipi_dsi_short_read1_resp(rp);
-		break;
-	case DTYPE_GEN_READ2_RESP:
-	case DTYPE_DCS_READ2_RESP:
-		mipi_dsi_short_read2_resp(rp);
-		break;
-	case DTYPE_GEN_LREAD_RESP:
-	case DTYPE_DCS_LREAD_RESP:
-		mipi_dsi_long_read_resp(rp);
-		rp->len -= 2; /* extra 2 bytes added */
-		rp->len -= diff; /* align bytes */
-		break;
-	default:
-		break;
-	}
-
-	return rp->len;
-}
-
-int mipi_dsi_cmds_rx_new(struct dsi_buf *tp, struct dsi_buf *rp,
+static int mipi_dsi_cmds_rx(struct dsi_buf *tp, struct dsi_buf *rp,
 			struct dcs_cmd_req *req, int rlen)
 {
 	struct dsi_cmd_desc *cmds;
@@ -1429,8 +1301,9 @@
 	return rp->len;
 }
 
-int mipi_dsi_cmd_dma_tx(struct dsi_buf *tp)
+static int mipi_dsi_cmd_dma_tx(struct dsi_buf *tp)
 {
+
 	unsigned long flags;
 
 #ifdef DSI_HOST_DEBUG
@@ -1468,17 +1341,14 @@
 	wmb();
 	spin_unlock_irqrestore(&dsi_mdp_lock, flags);
 
-	if (!wait_for_completion_timeout(&dsi_dma_comp,
-					msecs_to_jiffies(200))) {
-		pr_err("%s: dma timeout error\n", __func__);
-	}
+	wait_for_completion(&dsi_dma_comp);
 
 	dma_unmap_single(&dsi_dev, tp->dmap, tp->len, DMA_TO_DEVICE);
 	tp->dmap = 0;
 	return tp->len;
 }
 
-int mipi_dsi_cmd_dma_rx(struct dsi_buf *rp, int rlen)
+static int mipi_dsi_cmd_dma_rx(struct dsi_buf *rp, int rlen)
 {
 	uint32 *lp, data;
 	int i, off, cnt;
@@ -1505,11 +1375,21 @@
 	return rlen;
 }
 
-static void mipi_dsi_wait4video_eng_busy(void)
+static void mipi_dsi_wait_for_video_eng_busy(void)
 {
-	mipi_dsi_wait4video_done();
-	/* delay 4 ms to skip BLLP */
-	usleep(4000);
+	u32 status;
+	int sleep_us = 4000;
+
+	/*
+	 * If the video mode engine is not busy we are in the
+	 * BLLP; wait for it to go busy again (BLLP passed).
+	 */
+
+	/* poll DSI_STATUS for VIDEO_MODE_ENGINE_BUSY */
+	readl_poll((MIPI_DSI_BASE + 0x0004), /* DSI_STATUS */
+				status,
+				(status & 0x08),
+				sleep_us);
 }
 
 void mipi_dsi_cmd_mdp_busy(void)
@@ -1573,12 +1453,17 @@
 	struct dsi_buf *rp;
 
 	mipi_dsi_buf_init(&dsi_tx_buf);
-	mipi_dsi_buf_init(&dsi_rx_buf);
 
 	tp = &dsi_tx_buf;
-	rp = &dsi_rx_buf;
 
-	len = mipi_dsi_cmds_rx_new(tp, rp, req, req->rlen);
+	if (req->rbuf)
+		rp = req->rbuf;
+	else
+		rp = &dsi_rx_buf;
+
+	mipi_dsi_buf_init(rp);
+
+	len = mipi_dsi_cmds_rx(tp, rp, req, req->rlen);
 	dp = (u32 *)rp->data;
 
 	if (req->cb)
@@ -1603,10 +1488,10 @@
 
 	dsi_ctrl = MIPI_INP(MIPI_DSI_BASE + 0x0000);
 	if (dsi_ctrl & 0x02) {
-		/* video mode, make sure video engine is busy
-		 * so dcs command will be sent at start of BLLP
+		/* video mode, make sure the video engine is busy
+		 * so the dcs command is sent at the start of BLLP
 		 */
-		mipi_dsi_wait4video_eng_busy();
+		mipi_dsi_wait_for_video_eng_busy();
 	} else {
 		/* command mode */
 		if (!from_mdp) { /* cmdlist_put */
@@ -1764,10 +1649,9 @@
 	}
 
 	if (isr & DSI_INTR_VIDEO_DONE) {
-		spin_lock(&dsi_mdp_lock);
-		mipi_dsi_disable_irq_nosync(DSI_VIDEO_TERM);
-		complete(&dsi_video_comp);
-		spin_unlock(&dsi_mdp_lock);
+		/*
+		 * video done interrupt: nothing to do here
+		 */
 	}
 
 	if (isr & DSI_INTR_CMD_DMA_DONE) {
diff --git a/drivers/video/msm/mipi_lgit.c b/drivers/video/msm/mipi_lgit.c
deleted file mode 100644
index 5aa8bfb..0000000
--- a/drivers/video/msm/mipi_lgit.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- *  Copyright (C) 2011-2012, LG Eletronics,Inc. All rights reserved.
- *      LGIT LCD device driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-#include <linux/gpio.h>
-
-#include "msm_fb.h"
-#include "mipi_dsi.h"
-#include "mipi_lgit.h"
-#include "mdp4.h"
-
-static struct msm_panel_common_pdata *mipi_lgit_pdata;
-
-static struct dsi_buf lgit_tx_buf;
-static struct dsi_buf lgit_rx_buf;
-static int skip_init;
-
-#define DSV_ONBST 57
-
-static int lgit_external_dsv_onoff(uint8_t on_off)
-{
-	int ret =0;
-	static int init_done=0;
-
-	if (!init_done) {
-		ret = gpio_request(DSV_ONBST,"DSV_ONBST_en");
-		if (ret) {
-			pr_err("%s: failed to request DSV_ONBST gpio \n", __func__);
-			goto out;
-		}
-		ret = gpio_direction_output(DSV_ONBST, 1);
-		if (ret) {
-			pr_err("%s: failed to set DSV_ONBST direction\n", __func__);
-			goto err_gpio;
-		}
-		init_done = 1;
-	}
-
-	gpio_set_value(DSV_ONBST, on_off);
-	mdelay(20);
-	goto out;
-
-err_gpio:
-	gpio_free(DSV_ONBST);
-out:
-	return ret;
-}
-
-static int mipi_lgit_lcd_on(struct platform_device *pdev)
-{
-	struct msm_fb_data_type *mfd;
-	int ret = 0;
-
-	pr_info("%s started\n", __func__);
-
-	mfd = platform_get_drvdata(pdev);
-	if (!mfd)
-		return -ENODEV;
-	if (mfd->key != MFD_KEY)
-		return -EINVAL;
-
-	MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x10000000);
-	ret = mipi_dsi_cmds_tx(&lgit_tx_buf,
-			mipi_lgit_pdata->power_on_set_1,
-			mipi_lgit_pdata->power_on_set_size_1);
-	MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x14000000);
-	if (ret < 0) {
-		pr_err("%s: failed to transmit power_on_set_1 cmds\n", __func__);
-		return ret;
-	}
-
-	if(!skip_init){
-		MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x10000000);
-		ret = mipi_dsi_cmds_tx(&lgit_tx_buf,
-				mipi_lgit_pdata->power_on_set_2,
-				mipi_lgit_pdata->power_on_set_size_2);
-		MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x14000000);
-		if (ret < 0) {
-			pr_err("%s: failed to transmit power_on_set_2 cmds\n", __func__);
-			return ret;
-		}
-	}
-	skip_init = false;
-
-	ret = lgit_external_dsv_onoff(1);
-	if (ret < 0) {
-		pr_err("%s: failed to turn on external dsv\n", __func__);
-		return ret;
-	}
-
-	MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x10000000);
-	ret = mipi_dsi_cmds_tx(&lgit_tx_buf,
-			mipi_lgit_pdata->power_on_set_3,
-			mipi_lgit_pdata->power_on_set_size_3);
-	MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x14000000);
-	if (ret < 0) {
-		pr_err("%s: failed to transmit power_on_set_3 cmds\n", __func__);
-		return ret;
-	}
-
-	pr_info("%s finished\n", __func__);
-	return 0;
-}
-
-static int mipi_lgit_lcd_off(struct platform_device *pdev)
-{
-	struct msm_fb_data_type *mfd;
-	int ret = 0;
-
-	pr_info("%s started\n", __func__);
-
-	if (mipi_lgit_pdata->bl_pwm_disable)
-		mipi_lgit_pdata->bl_pwm_disable();
-
-	mfd = platform_get_drvdata(pdev);
-
-	if (!mfd)
-		return -ENODEV;
-
-	if (mfd->key != MFD_KEY)
-		return -EINVAL;
-
-	MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x10000000);
-	ret = mipi_dsi_cmds_tx(&lgit_tx_buf,
-			mipi_lgit_pdata->power_off_set_1,
-			mipi_lgit_pdata->power_off_set_size_1);
-	MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x14000000);
-	if (ret < 0) {
-		pr_err("%s: failed to transmit power_off_set_1 cmds\n", __func__);
-		return ret;
-	}
-
-	ret = lgit_external_dsv_onoff(0);
-	if (ret < 0) {
-		pr_err("%s: failed to turn off external dsv\n", __func__);
-		return ret;
-	}
-
-	MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x10000000);
-	ret = mipi_dsi_cmds_tx(&lgit_tx_buf,
-			mipi_lgit_pdata->power_off_set_2,
-			mipi_lgit_pdata->power_off_set_size_2);
-	MIPI_OUTP(MIPI_DSI_BASE + 0x38, 0x14000000);
-	if (ret < 0) {
-		pr_err("%s: failed to transmit power_off_set_2 cmds\n", __func__);
-		return ret;
-	}
-
-	pr_info("%s finished\n", __func__);
-	return 0;
-}
-
-static int mipi_lgit_backlight_on_status(void)
-{
-	return (mipi_lgit_pdata->bl_on_status());
-}
-
-static void mipi_lgit_set_backlight_board(struct msm_fb_data_type *mfd)
-{
-	int level;
-
-	level = (int)mfd->bl_level;
-	mipi_lgit_pdata->backlight_level(level, 0, 0);
-}
-
-static int mipi_lgit_lcd_probe(struct platform_device *pdev)
-{
-	if (pdev->id == 0) {
-		mipi_lgit_pdata = pdev->dev.platform_data;
-		return 0;
-	}
-
-	pr_info("%s start\n", __func__);
-
-	skip_init = true;
-	msm_fb_add_device(pdev);
-
-	return 0;
-}
-
-static struct platform_driver this_driver = {
-	.probe = mipi_lgit_lcd_probe,
-	.driver = {
-		.name = "mipi_lgit",
-	},
-};
-
-static struct msm_fb_panel_data lgit_panel_data = {
-	.on = mipi_lgit_lcd_on,
-	.off = mipi_lgit_lcd_off,
-	.set_backlight = mipi_lgit_set_backlight_board,
-	.get_backlight_on_status = mipi_lgit_backlight_on_status,
-};
-
-static int ch_used[3];
-
-int mipi_lgit_device_register(struct msm_panel_info *pinfo,
-		u32 channel, u32 panel)
-{
-	struct platform_device *pdev = NULL;
-	int ret;
-
-	if ((channel >= 3) || ch_used[channel])
-		return -ENODEV;
-
-	ch_used[channel] = TRUE;
-
-	pdev = platform_device_alloc("mipi_lgit", (panel << 8)|channel);
-	if (!pdev)
-		return -ENOMEM;
-
-	lgit_panel_data.panel_info = *pinfo;
-
-	ret = platform_device_add_data(pdev, &lgit_panel_data,
-			sizeof(lgit_panel_data));
-	if (ret) {
-		pr_err("%s: platform_device_add_data failed!\n", __func__);
-		goto err_device_put;
-	}
-
-	ret = platform_device_add(pdev);
-	if (ret) {
-		pr_err("%s: platform_device_register failed!\n", __func__);
-		goto err_device_put;
-	}
-	return 0;
-
-err_device_put:
-	platform_device_put(pdev);
-	return ret;
-}
-
-static int __init mipi_lgit_lcd_init(void)
-{
-	mipi_dsi_buf_alloc(&lgit_tx_buf, DSI_BUF_SIZE);
-	mipi_dsi_buf_alloc(&lgit_rx_buf, DSI_BUF_SIZE);
-
-	return platform_driver_register(&this_driver);
-}
-
-module_init(mipi_lgit_lcd_init);
diff --git a/drivers/video/msm/mipi_lgit.h b/drivers/video/msm/mipi_lgit.h
deleted file mode 100644
index 65e8ca2..0000000
--- a/drivers/video/msm/mipi_lgit.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- *  Copyright (C) 2011-2012, LG Eletronics,Inc. All rights reserved.
- *      LGIT LCD device driver
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- *       copyright notice, this list of conditions and the following
- *       disclaimer in the documentation and/or other materials provided
- *       with the distribution.
- *     * Neither the name of The Linux Foundation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
- * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#ifndef MIPI_LGIT_H
-#define MIPI_LGIT_H
-
-int mipi_lgit_device_register(struct msm_panel_info *pinfo,
-					u32 channel, u32 panel);
-#endif  /* MIPI_LGIT_H */
diff --git a/drivers/video/msm/mipi_lgit_video_wxga_pt.c b/drivers/video/msm/mipi_lgit_video_wxga_pt.c
deleted file mode 100644
index 24b6956..0000000
--- a/drivers/video/msm/mipi_lgit_video_wxga_pt.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- *  Copyright (C) 2011-2012, LG Eletronics,Inc. All rights reserved.
- *      Hitach LCD device driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include "msm_fb.h"
-#include "mipi_dsi.h"
-#include "mipi_lgit.h"
-
-static struct msm_panel_info pinfo;
-
-static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = {
-/* 768*1280, RGB888, 4 Lane 60 fps video mode */
-	/* regulator */
-	{0x03, 0x0a, 0x04, 0x00, 0x20},
-	/* timing */
-	{0x66, 0x26, 0x1D, 0x00, 0x20, 0x95, 0x1E, 0x8F,
-	0x20, 0x03, 0x04, 0xA0},
-	/* phy ctrl */
-	{0x5f, 0x00, 0x00, 0x10},
-	/* strength */
-	{0xff, 0x00, 0x06, 0x00},
-	/* pll control */
-	{0x00, 0xC4, 0x01, 0x1A, 0x00, 0x50, 0x48, 0x63,
-	0x41, 0x0F, 0x03, 0x00, 0x14, 0x03, 0x00, 0x02,
-	0x00, 0x20, 0x00, 0x01 },
-};
-
-static int __init mipi_video_lgit_wxga_pt_init(void)
-{
-	int ret;
-
-#ifdef CONFIG_FB_MSM_MIPI_PANEL_DETECT
-	if (msm_fb_detect_client("mipi_video_lgit_wxga"))
-		return 0;
-#endif
-
-	pinfo.xres = 768;
-	pinfo.yres = 1280;
-	pinfo.type = MIPI_VIDEO_PANEL;
-	pinfo.pdest = DISPLAY_1;
-	pinfo.wait_cycle = 0;
-	pinfo.bpp = 24;
-	pinfo.lcdc.h_back_porch = 180;
-	pinfo.lcdc.h_front_porch = 8;
-	pinfo.lcdc.h_pulse_width = 4;
-	pinfo.lcdc.v_back_porch = 22;
-	pinfo.lcdc.v_front_porch = 8;
-	pinfo.lcdc.v_pulse_width = 2;
-	pinfo.lcdc.border_clr = 0;        /* blk */
-	pinfo.lcdc.underflow_clr = 0x0;  /* black */
-	pinfo.lcdc.hsync_skew = 0;
-	pinfo.bl_max = 0x72;
-	pinfo.bl_min = 0x02;
-	pinfo.fb_num = 2;
-	pinfo.lcd.blt_ctrl = 1;
-
-	pinfo.mipi.mode = DSI_VIDEO_MODE;
-	pinfo.mipi.pulse_mode_hsa_he = FALSE;
-
-	pinfo.mipi.hfp_power_stop = FALSE;
-	pinfo.mipi.hbp_power_stop = FALSE;
-	pinfo.mipi.hsa_power_stop = FALSE;
-
-	pinfo.mipi.eof_bllp_power_stop = FALSE;
-	pinfo.mipi.bllp_power_stop = TRUE;
-	pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_EVENT;
-	pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888;
-	pinfo.mipi.vc = 0;
-	pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB;
-	pinfo.mipi.data_lane0 = TRUE;
-	pinfo.mipi.data_lane1 = TRUE;
-	pinfo.mipi.data_lane2 = TRUE;
-	pinfo.mipi.data_lane3 = TRUE;
-	pinfo.mipi.t_clk_post = 0x22;
-	pinfo.mipi.t_clk_pre = 0x36;
-	pinfo.clk_rate = 453770000;
-	pinfo.mipi.esc_byte_ratio = 6;
-	pinfo.mipi.frame_rate = 60;
-	pinfo.mipi.stream = 0;
-	pinfo.mipi.mdp_trigger = 0;
-	pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW;
-	pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db;
-	ret = mipi_lgit_device_register(&pinfo, MIPI_DSI_PRIM,
-			MIPI_DSI_PANEL_WXGA);
-	if (ret)
-		pr_err("%s: failed to register device!\n", __func__);
-
-	return ret;
-}
-
-module_init(mipi_video_lgit_wxga_pt_init);
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
index 995b779..60d0944 100644
--- a/drivers/video/msm/mipi_novatek.c
+++ b/drivers/video/msm/mipi_novatek.c
@@ -314,10 +314,12 @@
 {
 	struct dcs_cmd_req cmdreq;
 
+	memset(&cmdreq, 0, sizeof(cmdreq));
 	cmdreq.cmds = &novatek_manufacture_id_cmd;
 	cmdreq.cmds_cnt = 1;
 	cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT;
 	cmdreq.rlen = 3;
+	cmdreq.rbuf = NULL;
 	cmdreq.cb = mipi_novatek_manufacture_cb; /* call back */
 	mipi_dsi_cmdlist_put(&cmdreq);
 	/*
@@ -465,6 +467,12 @@
 {
 	struct dcs_cmd_req cmdreq;
 
+	if (mipi_novatek_pdata &&
+	    mipi_novatek_pdata->gpio_set_backlight) {
+		mipi_novatek_pdata->gpio_set_backlight(mfd->bl_level);
+		return;
+	}
+
 	if ((mipi_novatek_pdata->enable_wled_bl_ctrl)
 	    && (wled_trigger_initialized)) {
 		led_trigger_event(bkl_led_trigger, mfd->bl_level);
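Both novatek hunks above are about initialization and ordering rather than new behaviour. The first one closes a real hole: a stack-allocated struct dcs_cmd_req used to reach mipi_dsi_cmdlist_put() with every field the driver did not set (rbuf here, cb on other paths) holding stack garbage. The safe shape of an RX request, as a minimal sketch using only fields this patch itself touches:

	struct dcs_cmd_req cmdreq;

	memset(&cmdreq, 0, sizeof(cmdreq));	/* no stale rbuf/cb/flags */
	cmdreq.cmds = &novatek_manufacture_id_cmd;
	cmdreq.cmds_cnt = 1;
	cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT;
	cmdreq.rlen = 3;	/* expect a 3-byte manufacture id */
	cmdreq.rbuf = NULL;	/* assumption: the cmdlist layer supplies its
				 * own rx buffer when rbuf is NULL */
	cmdreq.cb = mipi_novatek_manufacture_cb;
	mipi_dsi_cmdlist_put(&cmdreq);

The explicit rbuf = NULL is redundant after the memset and reads as documentation that the NULL is intentional. In the second hunk, note that the fall-through path still dereferences mipi_novatek_pdata unconditionally, so the new NULL check only protects the gpio_set_backlight branch.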
diff --git a/drivers/video/msm/mipi_novatek_cmd_qhd_pt.c b/drivers/video/msm/mipi_novatek_cmd_qhd_pt.c
index 616622e..d2820a8 100644
--- a/drivers/video/msm/mipi_novatek_cmd_qhd_pt.c
+++ b/drivers/video/msm/mipi_novatek_cmd_qhd_pt.c
@@ -64,7 +64,7 @@
 	pinfo.is_3d_panel = FB_TYPE_3D_PANEL;
 	pinfo.lcd.vsync_enable = TRUE;
 	pinfo.lcd.hw_vsync_mode = TRUE;
-	pinfo.lcd.refx100 = 6000; /* adjust refx100 to prevent tearing */
+	pinfo.lcd.refx100 = 6200; /* adjust refx100 to prevent tearing */
 	pinfo.lcd.v_back_porch = 11;
 	pinfo.lcd.v_front_porch = 10;
 	pinfo.lcd.v_pulse_width = 5;
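refx100 stores the panel refresh rate times 100, so this raises the rate the MDP assumes from 60.00 Hz (6000 / 100) to 62.00 Hz (6200 / 100). Overstating the rate slightly keeps the read-back of the panel ahead of incoming writes, which is presumably the tearing the comment refers to; the patch itself only changes the constant.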
diff --git a/drivers/video/msm/mipi_orise.c b/drivers/video/msm/mipi_orise.c
index 2fd5ff4..ef3f17d 100644
--- a/drivers/video/msm/mipi_orise.c
+++ b/drivers/video/msm/mipi_orise.c
@@ -53,6 +53,7 @@
 	struct msm_fb_data_type *mfd;
 	struct mipi_panel_info *mipi;
 	struct msm_panel_info *pinfo;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 	if (!mfd)
@@ -63,12 +64,21 @@
 	pinfo = &mfd->panel_info;
 	mipi  = &mfd->panel_info.mipi;
 
+	memset(&cmdreq, 0, sizeof(cmdreq));
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(&orise_tx_buf, orise_video_on_cmds,
-			ARRAY_SIZE(orise_video_on_cmds));
+		cmdreq.cmds = orise_video_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(orise_video_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
 	} else {
-		mipi_dsi_cmds_tx(&orise_tx_buf, orise_cmd_on_cmds,
-			ARRAY_SIZE(orise_cmd_on_cmds));
+		cmdreq.cmds = orise_cmd_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(orise_cmd_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
 
 		mipi_dsi_cmd_bta_sw_trigger(); /* clean up ack_err_status */
 	}
@@ -79,6 +89,7 @@
 static int mipi_orise_lcd_off(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 
@@ -87,8 +98,13 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(&orise_tx_buf, orise_display_off_cmds,
-			ARRAY_SIZE(orise_display_off_cmds));
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = orise_display_off_cmds;
+	cmdreq.cmds_cnt = ARRAY_SIZE(orise_display_off_cmds);
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	return 0;
 }
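mipi_orise is the first of the drivers below (renesas, simulator, tc358764, toshiba, truly, truly_tft540960) converted from direct mipi_dsi_cmds_tx() calls to the serialized cmdlist interface, and every conversion repeats the same block. A hypothetical helper, not part of the patch, that would capture the write-side pattern once:

static void panel_tx_cmds(struct dsi_cmd_desc *cmds, int cnt)
{
	struct dcs_cmd_req cmdreq;

	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.cmds = cmds;
	cmdreq.cmds_cnt = cnt;
	cmdreq.flags = CMD_REQ_COMMIT;	/* send now, as at every call site here */
	cmdreq.rlen = 0;		/* write only, nothing to read back */
	cmdreq.cb = NULL;
	mipi_dsi_cmdlist_put(&cmdreq);
}

With it, the hunk above would shrink to panel_tx_cmds(orise_video_on_cmds, ARRAY_SIZE(orise_video_on_cmds)) and so on; the patch keeps the expansion inline in each driver, presumably to stay textually close to its upstream counterpart.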
diff --git a/drivers/video/msm/mipi_renesas.c b/drivers/video/msm/mipi_renesas.c
index 3fa2606..e2ab01f 100644
--- a/drivers/video/msm/mipi_renesas.c
+++ b/drivers/video/msm/mipi_renesas.c
@@ -1122,6 +1122,7 @@
 {
 	struct msm_fb_data_type *mfd;
 	struct mipi_panel_info *mipi;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 	mipi  = &mfd->panel_info.mipi;
@@ -1131,24 +1132,47 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_sleep_off_cmds,
-			ARRAY_SIZE(renesas_sleep_off_cmds));
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = renesas_sleep_off_cmds;
+	cmdreq.cmds_cnt = ARRAY_SIZE(renesas_sleep_off_cmds);
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	mipi_set_tx_power_mode(1);
-	mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_display_on_cmds,
-			ARRAY_SIZE(renesas_display_on_cmds));
+
+	cmdreq.cmds = renesas_display_on_cmds;
+	cmdreq.cmds_cnt = ARRAY_SIZE(renesas_display_on_cmds);
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	if (cpu_is_msm7x25a() || cpu_is_msm7x25aa() || cpu_is_msm7x25ab()) {
-		mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_hvga_on_cmds,
-			ARRAY_SIZE(renesas_hvga_on_cmds));
+		cmdreq.cmds = renesas_hvga_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(renesas_hvga_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
 	}
 
-	if (mipi->mode == DSI_VIDEO_MODE)
-		mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_video_on_cmds,
-			ARRAY_SIZE(renesas_video_on_cmds));
-	else
-		mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_cmd_on_cmds,
-			ARRAY_SIZE(renesas_cmd_on_cmds));
+	if (mipi->mode == DSI_VIDEO_MODE) {
+		cmdreq.cmds = renesas_video_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(renesas_video_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
+	} else {
+		cmdreq.cmds = renesas_cmd_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(renesas_cmd_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
+	}
 	mipi_set_tx_power_mode(0);
 
 	return 0;
@@ -1157,6 +1181,7 @@
 static int mipi_renesas_lcd_off(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 
@@ -1165,8 +1190,13 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_display_off_cmds,
-			ARRAY_SIZE(renesas_display_off_cmds));
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = renesas_display_off_cmds;
+	cmdreq.cmds_cnt = ARRAY_SIZE(renesas_display_off_cmds);
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	return 0;
 }
diff --git a/drivers/video/msm/mipi_simulator.c b/drivers/video/msm/mipi_simulator.c
index e2d4b15..532b97d 100644
--- a/drivers/video/msm/mipi_simulator.c
+++ b/drivers/video/msm/mipi_simulator.c
@@ -36,6 +36,7 @@
 {
 	struct msm_fb_data_type *mfd;
 	struct mipi_panel_info *mipi;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 	mipi  = &mfd->panel_info.mipi;
@@ -48,9 +49,14 @@
 	pr_debug("%s:%d, debug info (mode) : %d", __func__, __LINE__,
 		 mipi->mode);
 
+	memset(&cmdreq, 0, sizeof(cmdreq));
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(&simulator_tx_buf, display_on_cmds,
-			ARRAY_SIZE(display_on_cmds));
+		cmdreq.cmds = display_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(display_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
 	} else {
 		pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
 		return -EINVAL;
@@ -63,6 +69,7 @@
 {
 	struct msm_fb_data_type *mfd;
 	struct mipi_panel_info *mipi;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 	mipi  = &mfd->panel_info.mipi;
@@ -74,9 +81,14 @@
 
 	pr_debug("%s:%d, debug info", __func__, __LINE__);
 
+	memset(&cmdreq, 0, sizeof(cmdreq));
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(&simulator_tx_buf, display_off_cmds,
-			ARRAY_SIZE(display_off_cmds));
+		cmdreq.cmds = display_off_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(display_off_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
 	} else {
 		pr_debug("%s:%d, DONT REACH HERE", __func__, __LINE__);
 		return -EINVAL;
diff --git a/drivers/video/msm/mipi_tc358764_dsi2lvds.c b/drivers/video/msm/mipi_tc358764_dsi2lvds.c
index c962123..8c02d14 100644
--- a/drivers/video/msm/mipi_tc358764_dsi2lvds.c
+++ b/drivers/video/msm/mipi_tc358764_dsi2lvds.c
@@ -243,20 +243,26 @@
 {
 	u32 data;
 	int len = 4;
+	struct dcs_cmd_req cmdreq;
 	struct dsi_cmd_desc cmd_read_reg = {
 		DTYPE_GEN_READ2, 1, 0, 1, 0, /* cmd 0x24 */
 			sizeof(reg), (char *) &reg};
 
-	mipi_dsi_buf_init(&d2l_tx_buf);
 	mipi_dsi_buf_init(&d2l_rx_buf);
 
-	/* mutex had been acquired at mipi_dsi_on */
-	len = mipi_dsi_cmds_rx(mfd, &d2l_tx_buf, &d2l_rx_buf,
-			       &cmd_read_reg, len);
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = &cmd_read_reg;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT | CMD_REQ_NO_MAX_PKT_SIZE;
+	cmdreq.rbuf = &d2l_rx_buf;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	data = *(u32 *)d2l_rx_buf.data;
 
-	if (len != 4)
+	len = d2l_rx_buf.len;
+	if (len != 4)
 		pr_err("%s: invalid rlen=%d, expecting 4.\n", __func__, len);
 
 	pr_debug("%s: reg=0x%x.data=0x%08x.\n", __func__, reg, data);
@@ -274,6 +279,7 @@
 static int mipi_d2l_write_reg(struct msm_fb_data_type *mfd, u16 reg, u32 data)
 {
 	struct wr_cmd_payload payload;
+	struct dcs_cmd_req cmdreq;
 	struct dsi_cmd_desc cmd_write_reg = {
 		DTYPE_GEN_LWRITE, 1, 0, 0, 0,
 			sizeof(payload), (char *)&payload};
@@ -281,8 +287,13 @@
 	payload.addr = reg;
 	payload.data = data;
 
-	/* mutex had been acquried at dsi_on */
-	mipi_dsi_cmds_tx(&d2l_tx_buf, &cmd_write_reg, 1);
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = &cmd_write_reg;
+	cmdreq.cmds_cnt = 1;
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	pr_debug("%s: reg=0x%x. data=0x%x.\n", __func__, reg, data);
 
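The read side of the conversion adds two things on top of the write pattern: CMD_REQ_RX with a caller-owned struct dsi_buf for the reply, and CMD_REQ_NO_MAX_PKT_SIZE, which going by its name skips the DSI set-maximum-return-packet-size transaction (an assumption; the flag is used here without comment). Condensed from the hunk above:

	struct dcs_cmd_req cmdreq;

	mipi_dsi_buf_init(&d2l_rx_buf);
	memset(&cmdreq, 0, sizeof(cmdreq));
	cmdreq.cmds = &cmd_read_reg;
	cmdreq.cmds_cnt = 1;
	cmdreq.flags = CMD_REQ_RX | CMD_REQ_COMMIT | CMD_REQ_NO_MAX_PKT_SIZE;
	cmdreq.rbuf = &d2l_rx_buf;	/* reply bytes land here */
	cmdreq.cb = NULL;		/* no callback: data valid on return */
	mipi_dsi_cmdlist_put(&cmdreq);

	data = *(u32 *)d2l_rx_buf.data;	/* d2l_rx_buf.len is the byte count */

The request's rlen stays 0 and the actual length is taken from d2l_rx_buf.len afterwards, which is why the length check switches from the old mipi_dsi_cmds_rx() return value to the buffer's len field.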
diff --git a/drivers/video/msm/mipi_toshiba.c b/drivers/video/msm/mipi_toshiba.c
index 6ebbba4..bba2807 100644
--- a/drivers/video/msm/mipi_toshiba.c
+++ b/drivers/video/msm/mipi_toshiba.c
@@ -184,6 +184,7 @@
 static int mipi_toshiba_lcd_on(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 
@@ -192,16 +193,23 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WVGA_PT)
-		mipi_dsi_cmds_tx(&toshiba_tx_buf,
-			toshiba_wvga_display_on_cmds,
-			ARRAY_SIZE(toshiba_wvga_display_on_cmds));
-	else if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WSVGA_PT ||
-		TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WUXGA)
-		mipi_dsi_cmds_tx(&toshiba_tx_buf,
-			toshiba_wsvga_display_on_cmds,
-			ARRAY_SIZE(toshiba_wsvga_display_on_cmds));
-	else
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WVGA_PT) {
+		cmdreq.cmds = toshiba_wvga_display_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(toshiba_wvga_display_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
+	} else if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WSVGA_PT ||
+		TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WUXGA) {
+		cmdreq.cmds = toshiba_wsvga_display_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(toshiba_wsvga_display_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
+	} else
 		return -EINVAL;
 
 	return 0;
@@ -210,6 +218,7 @@
 static int mipi_toshiba_lcd_off(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 
@@ -218,8 +227,13 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(&toshiba_tx_buf, toshiba_display_off_cmds,
-			ARRAY_SIZE(toshiba_display_off_cmds));
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = toshiba_display_off_cmds;
+	cmdreq.cmds_cnt = ARRAY_SIZE(toshiba_display_off_cmds);
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	return 0;
 }
diff --git a/drivers/video/msm/mipi_truly.c b/drivers/video/msm/mipi_truly.c
index 6fcab32..016c815 100644
--- a/drivers/video/msm/mipi_truly.c
+++ b/drivers/video/msm/mipi_truly.c
@@ -107,6 +107,7 @@
 static int mipi_truly_lcd_on(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 
@@ -116,8 +117,14 @@
 		return -EINVAL;
 
 	msleep(20);
-	mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_on_cmds,
-			ARRAY_SIZE(truly_display_on_cmds));
+
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = truly_display_on_cmds;
+	cmdreq.cmds_cnt = ARRAY_SIZE(truly_display_on_cmds);
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	return 0;
 }
@@ -125,6 +132,7 @@
 static int mipi_truly_lcd_off(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 
@@ -133,8 +141,13 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_off_cmds,
-			ARRAY_SIZE(truly_display_off_cmds));
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = truly_display_off_cmds;
+	cmdreq.cmds_cnt = ARRAY_SIZE(truly_display_off_cmds);
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	return 0;
 }
diff --git a/drivers/video/msm/mipi_truly_tft540960_1_e.c b/drivers/video/msm/mipi_truly_tft540960_1_e.c
index 456517a..3b3bcbc 100644
--- a/drivers/video/msm/mipi_truly_tft540960_1_e.c
+++ b/drivers/video/msm/mipi_truly_tft540960_1_e.c
@@ -679,6 +679,7 @@
 {
 	struct msm_fb_data_type *mfd;
 	struct mipi_panel_info *mipi;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 
@@ -692,14 +693,21 @@
 	pr_info("%s: mode = %d\n", __func__, mipi->mode);
 	msleep(120);
 
+	memset(&cmdreq, 0, sizeof(cmdreq));
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(&truly_tx_buf,
-			truly_video_display_on_cmds,
-			ARRAY_SIZE(truly_video_display_on_cmds));
+		cmdreq.cmds = truly_video_display_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(truly_video_display_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
 	} else if (mipi->mode == DSI_CMD_MODE) {
-		mipi_dsi_cmds_tx(&truly_tx_buf,
-			truly_cmd_display_on_cmds,
-			ARRAY_SIZE(truly_cmd_display_on_cmds));
+		cmdreq.cmds = truly_cmd_display_on_cmds;
+		cmdreq.cmds_cnt = ARRAY_SIZE(truly_cmd_display_on_cmds);
+		cmdreq.flags = CMD_REQ_COMMIT;
+		cmdreq.rlen = 0;
+		cmdreq.cb = NULL;
+		mipi_dsi_cmdlist_put(&cmdreq);
 	}
 
 	return 0;
@@ -708,6 +716,7 @@
 static int mipi_truly_lcd_off(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
+	struct dcs_cmd_req cmdreq;
 
 	mfd = platform_get_drvdata(pdev);
 
@@ -716,8 +725,13 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_off_cmds,
-			ARRAY_SIZE(truly_display_off_cmds));
+	memset(&cmdreq, 0, sizeof(cmdreq));
+	cmdreq.cmds = truly_display_off_cmds;
+	cmdreq.cmds_cnt = ARRAY_SIZE(truly_display_off_cmds);
+	cmdreq.flags = CMD_REQ_COMMIT;
+	cmdreq.rlen = 0;
+	cmdreq.cb = NULL;
+	mipi_dsi_cmdlist_put(&cmdreq);
 
 	return 0;
 }
diff --git a/drivers/video/msm/msm_dss_io_8960.c b/drivers/video/msm/msm_dss_io_8960.c
index 944dde0..b1f2b34 100644
--- a/drivers/video/msm/msm_dss_io_8960.c
+++ b/drivers/video/msm/msm_dss_io_8960.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -326,10 +326,7 @@
 }
 
 #define PREF_DIV_RATIO 27
-#define VCO_MINIMUM 600
 struct dsiphy_pll_divider_config pll_divider_config;
-u32 vco_level_100;
-u32 vco_min_allowed;
 
 int mipi_dsi_phy_pll_config(u32 clk_rate)
 {
@@ -371,35 +368,10 @@
 	return 0;
 }
 
-void mipi_dsi_configure_fb_divider(u32 fps_level)
-{
-	u32 fb_div_req, fb_div_req_by_2;
-	u32 vco_required;
-
-	vco_required = vco_level_100 * fps_level/100;
-	if (vco_required < vco_min_allowed) {
-		printk(KERN_WARNING "Can not change fps. Min level allowed is \
-	%d \n", (vco_min_allowed * 100 / vco_level_100) + 1);
-		return;
-	}
-
-	fb_div_req = vco_required * PREF_DIV_RATIO / 27;
-	fb_div_req_by_2 = (fb_div_req / 2) - 1;
-
-	pll_divider_config.fb_divider = fb_div_req;
-
-	/* DSIPHY_PLL_CTRL_1 */
-	MIPI_OUTP(MIPI_DSI_BASE + 0x204, fb_div_req_by_2 & 0xff);
-	wmb();
-}
-
 int mipi_dsi_clk_div_config(uint8 bpp, uint8 lanes,
 			    uint32 *expected_dsi_pclk)
 {
 	u32 fb_divider, rate, vco;
-	u32 fb_div_min, fb_div_by_2_min,
-			 fb_div_by_2;
-	u32 vco_level_75;
 	u32 div_ratio = 0;
 	struct dsi_clk_mnd_table const *mnd_entry = mnd_table;
 	if (pll_divider_config.clk_rate == 0)
@@ -442,17 +414,6 @@
 	pll_divider_config.dsi_clk_divider =
 			(mnd_entry->dsiclk_div) * div_ratio;
 
-	vco_level_100 = vco;
-	fb_div_by_2 = (fb_divider / 2) - 1;
-	fb_div_by_2_min = (fb_div_by_2 / 256) * 256;
-	fb_div_min = (fb_div_by_2_min + 1) * 2;
-	vco_min_allowed = (fb_div_min * 27 / PREF_DIV_RATIO);
-	vco_level_75 = vco_level_100 * 75 / 100;
-	if (vco_min_allowed < VCO_MINIMUM)
-		vco_min_allowed = VCO_MINIMUM;
-	if (vco_min_allowed < vco_level_75)
-		vco_min_allowed = vco_level_75;
-
 	if (mnd_entry->dsiclk_d == 0) {
 		dsicore_clk.mnd_mode = 0;
 		dsicore_clk.src = 0x3;
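For reference, the reverted fps-scaling path scaled the PLL VCO linearly with the requested level: vco_required = vco_level_100 * fps_level / 100, and since PREF_DIV_RATIO is 27, fb_div_req = vco_required * PREF_DIV_RATIO / 27 is numerically just vco_required, of which ((fb_div_req / 2) - 1) & 0xff was written to DSIPHY_PLL_CTRL_1. The floor it enforced was the largest of a divider-granularity minimum, VCO_MINIMUM (600) and 75% of the full-rate VCO, so nothing below fps_level = 75 could be accepted.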
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 7390aaf..797d4a3 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1,5 +1,4 @@
-/*
- * drivers/video/msm/msm_fb.c
+/* drivers/video/msm/msm_fb.c
  *
  * Core MSM framebuffer driver.
  *
@@ -39,7 +38,6 @@
 #include <linux/vmalloc.h>
 #include <linux/debugfs.h>
 #include <linux/console.h>
-#include <linux/android_pmem.h>
 #include <linux/leds.h>
 #include <linux/pm_runtime.h>
 #include <linux/sync.h>
@@ -61,6 +59,11 @@
 static unsigned char *fbram_phys;
 static int fbram_size;
 static boolean bf_supported;
+/* Set the backlight on resume 50 ms after the first
+ * pan display on the panel, to avoid panel-specific
+ * transients during resume.
+ */
+unsigned long backlight_duration = (HZ/20);
 
 static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
 static int pdev_list_cnt;
@@ -122,11 +125,8 @@
 #define MSM_FB_MAX_DBGFS 1024
 #define MAX_BACKLIGHT_BRIGHTNESS 255
 
-/* 800 ms for fence time out */
-#define WAIT_FENCE_TIMEOUT 800
-/* 900 ms for display operation time out */
-#define WAIT_DISP_OP_TIMEOUT 900
-#define MAX_TIMELINE_NAME_LEN 16
+/* 200 ms timeout */
+#define WAIT_FENCE_TIMEOUT 200
 
 int msm_fb_debugfs_file_index;
 struct dentry *msm_fb_debugfs_root;
@@ -182,20 +182,17 @@
 	struct msm_fb_data_type *mfd = dev_get_drvdata(led_cdev->dev->parent);
 	int bl_lvl;
 
-	/* This maps android backlight level 1 to 255 into
-	   driver backlight level bl_min to bl_max with rounding
-	   and maps backlight level 0 to 0. */
-	if (value <= 0)
-		bl_lvl = 0;
-	else if (value >= MAX_BACKLIGHT_BRIGHTNESS)
-		bl_lvl = mfd->panel_info.bl_max;
-	else
-		bl_lvl = mfd->panel_info.bl_min + ((value - 1) * 2 *
-			(mfd->panel_info.bl_max - mfd->panel_info.bl_min) +
-			MAX_BACKLIGHT_BRIGHTNESS - 1) /
-			(MAX_BACKLIGHT_BRIGHTNESS - 1) / 2;
+	if (value > MAX_BACKLIGHT_BRIGHTNESS)
+		value = MAX_BACKLIGHT_BRIGHTNESS;
 
-        down(&mfd->sem);
+	/* This maps android backlight level 0 to 255 into
+	   driver backlight level 0 to bl_max with rounding */
+	bl_lvl = (2 * value * mfd->panel_info.bl_max + MAX_BACKLIGHT_BRIGHTNESS)
+		/(2 * MAX_BACKLIGHT_BRIGHTNESS);
+
+	if (!bl_lvl && value)
+		bl_lvl = 1;
+	down(&mfd->sem);
 	msm_fb_set_backlight(mfd, bl_lvl);
 	up(&mfd->sem);
 }
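The replacement mapping is a plain rounded rescale, bl_lvl = round(value * bl_max / 255), written in integer arithmetic as (2 * value * bl_max + 255) / (2 * 255). A worked example with bl_max = 100: value = 128 gives (25600 + 255) / 510 = 50, i.e. round(50.2). The !bl_lvl && value clamp then keeps any nonzero Android brightness from rounding down to fully off, at the cost of dropping the old bl_min floor.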
@@ -266,28 +263,6 @@
 	return ret;
 }
 
-static ssize_t msm_fb_fps_level_change(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t count)
-{
-	struct fb_info *fbi = dev_get_drvdata(dev);
-	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
-	struct msm_fb_panel_data *pdata =
-		(struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul(buf, 10, &val);
-	if (ret)
-		return ret;
-
-	if ((val <= 0) || (val > 100))
-		return -EINVAL;
-	if (pdata->fps_level_change)
-		pdata->fps_level_change(mfd->pdev, (u32)val);
-	return count;
-}
-
 static ssize_t msm_fb_msm_fb_type(struct device *dev,
 				  struct device_attribute *attr, char *buf)
 {
@@ -343,11 +318,8 @@
 }
 
 static DEVICE_ATTR(msm_fb_type, S_IRUGO, msm_fb_msm_fb_type, NULL);
-static DEVICE_ATTR(msm_fb_fps_level, S_IRUGO | S_IWUGO, NULL, \
-				msm_fb_fps_level_change);
 static struct attribute *msm_fb_attrs[] = {
 	&dev_attr_msm_fb_type.attr,
-	&dev_attr_msm_fb_fps_level.attr,
 	NULL,
 };
 static struct attribute_group msm_fb_attr_group = {
@@ -371,6 +343,8 @@
 	sysfs_remove_group(&mfd->fbi->dev->kobj, &msm_fb_attr_group);
 }
 
+static void bl_workqueue_handler(struct work_struct *work);
+
 static int msm_fb_probe(struct platform_device *pdev)
 {
 	struct msm_fb_data_type *mfd;
@@ -409,6 +383,8 @@
 
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
+	INIT_DELAYED_WORK(&mfd->backlight_worker, bl_workqueue_handler);
+
 	if (!mfd)
 		return -ENODEV;
 
@@ -451,10 +427,7 @@
 	pdev_list[pdev_list_cnt++] = pdev;
 	msm_fb_create_sysfs(pdev);
 	if (mfd->timeline == NULL) {
-		char timeline_name[MAX_TIMELINE_NAME_LEN];
-		snprintf(timeline_name, sizeof(timeline_name),
-			"mdp_fb_%d", mfd->index);
-		mfd->timeline = sw_sync_timeline_create(timeline_name);
+		mfd->timeline = sw_sync_timeline_create("mdp-timeline");
 		if (mfd->timeline == NULL) {
 			pr_err("%s: cannot create time line", __func__);
 			return -ENOMEM;
@@ -463,7 +436,6 @@
 		}
 	}
 
-
 	return 0;
 }
 
@@ -475,8 +447,6 @@
 
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
-	msm_fb_pan_idle(mfd);
-
 	msm_fb_remove_sysfs(pdev);
 
 	pm_runtime_disable(mfd->fbi->dev);
@@ -544,8 +514,6 @@
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
 
-	msm_fb_pan_idle(mfd);
-
 	console_lock();
 	fb_set_suspend(mfd->fbi, FBINFO_STATE_SUSPENDED);
 
@@ -571,6 +539,10 @@
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
 
+	if (mfd->msmfb_no_update_notify_timer.function)
+		del_timer(&mfd->msmfb_no_update_notify_timer);
+	complete(&mfd->msmfb_no_update_notify);
+
 	/*
 	 * suspend this channel
 	 */
@@ -636,9 +608,6 @@
 	mfd->op_enable = mfd->suspend.op_enable;
 
 	if (mfd->suspend.panel_power_on) {
-		if (mfd->panel_driver_on == FALSE)
-			msm_fb_blank_sub(FB_BLANK_POWERDOWN, mfd->fbi,
-				      mfd->op_enable);
 		ret =
 		     msm_fb_blank_sub(FB_BLANK_UNBLANK, mfd->fbi,
 				      mfd->op_enable);
@@ -667,8 +636,6 @@
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
 
-	msm_fb_pan_idle(mfd);
-
 	console_lock();
 	ret = msm_fb_resume_sub(mfd);
 	pdev->dev.power.power_state = PMSG_ON;
@@ -706,15 +673,8 @@
 	struct msm_fb_panel_data *pdata = NULL;
 	int ret = 0;
 
-	if (hdmi_prim_display) {
-		MSM_FB_INFO("%s: hdmi primary handles early suspend only\n",
-			__func__);
-		return 0;
-	}
-
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
-	msm_fb_pan_idle(mfd);
 
 	pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
 	if (mfd->panel_info.type == HDMI_PANEL ||
@@ -738,15 +698,9 @@
 	struct msm_fb_panel_data *pdata = NULL;
 	int ret = 0;
 
-	if (hdmi_prim_display) {
-		MSM_FB_INFO("%s: hdmi primary handles early resume only\n",
-			__func__);
-		return 0;
-	}
-
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
-	msm_fb_pan_idle(mfd);
+
 	pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
 	if (mfd->panel_info.type == HDMI_PANEL ||
 		mfd->panel_info.type == DTV_PANEL) {
@@ -768,8 +722,7 @@
 	.runtime_suspend = msm_fb_runtime_suspend,
 	.runtime_resume = msm_fb_runtime_resume,
 	.runtime_idle = msm_fb_runtime_idle,
-#if (defined(CONFIG_SUSPEND) && defined(CONFIG_FB_MSM_HDMI_MSM_PANEL) && \
-	!defined(CONFIG_FB_MSM_HDMI_AS_PRIMARY))
+#if (defined(CONFIG_SUSPEND) && defined(CONFIG_FB_MSM_HDMI_MSM_PANEL))
 	.suspend = msm_fb_ext_suspend,
 	.resume = msm_fb_ext_resume,
 #endif
@@ -790,79 +743,24 @@
 		   },
 };
 
-#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(CONFIG_FB_MSM_MDP303)
-static void memset32_io(u32 __iomem *_ptr, u32 val, size_t count)
-{
-	count >>= 2;
-	while (count--)
-		writel(val, _ptr++);
-}
-#endif
-
 #ifdef CONFIG_HAS_EARLYSUSPEND
 static void msmfb_early_suspend(struct early_suspend *h)
 {
 	struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
-						early_suspend);
-	struct msm_fb_panel_data *pdata = NULL;
-
-	msm_fb_pan_idle(mfd);
-#if defined(CONFIG_FB_MSM_MDP303)
-	/*
-	* For MDP with overlay, set framebuffer with black pixels
-	* to show black screen on HDMI.
-	*/
-	struct fb_info *fbi = mfd->fbi;
-	switch (mfd->fbi->var.bits_per_pixel) {
-	case 32:
-		memset32_io((void *)fbi->screen_base, 0xFF000000,
-							fbi->fix.smem_len);
-		break;
-	default:
-		memset32_io((void *)fbi->screen_base, 0x00, fbi->fix.smem_len);
-		break;
-	}
-#endif
+						    early_suspend);
 	msm_fb_suspend_sub(mfd);
-
-	pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
-	if (hdmi_prim_display &&
-		(mfd->panel_info.type == HDMI_PANEL ||
-		 mfd->panel_info.type == DTV_PANEL)) {
-		/* Turn off the HPD circuitry */
-		if (pdata->power_ctrl) {
-			MSM_FB_INFO("%s: Turning off HPD circuitry\n",
-				__func__);
-			pdata->power_ctrl(FALSE);
-		}
-	}
 }
 
 static void msmfb_early_resume(struct early_suspend *h)
 {
 	struct msm_fb_data_type *mfd = container_of(h, struct msm_fb_data_type,
-						early_suspend);
-	struct msm_fb_panel_data *pdata = NULL;
-
-	msm_fb_pan_idle(mfd);
-	pdata = (struct msm_fb_panel_data *)mfd->pdev->dev.platform_data;
-	if (hdmi_prim_display &&
-		(mfd->panel_info.type == HDMI_PANEL ||
-		 mfd->panel_info.type == DTV_PANEL)) {
-		/* Turn on the HPD circuitry */
-		if (pdata->power_ctrl) {
-			MSM_FB_INFO("%s: Turning on HPD circuitry\n", __func__);
-			pdata->power_ctrl(TRUE);
-		}
-	}
-
+						    early_suspend);
 	msm_fb_resume_sub(mfd);
 }
 #endif
 
 static int unset_bl_level, bl_updated;
 static int bl_level_old;
-
 static int mdp_bl_scale_config(struct msm_fb_data_type *mfd,
 						struct mdp_bl_scale_data *data)
 {
@@ -904,7 +802,6 @@
 {
 	struct msm_fb_panel_data *pdata;
 	__u32 temp = bkl_lvl;
-
 	if (!mfd->panel_power_on || !bl_updated) {
 		unset_bl_level = bkl_lvl;
 		return;
@@ -945,10 +842,21 @@
 	switch (blank_mode) {
 	case FB_BLANK_UNBLANK:
 		if (!mfd->panel_power_on) {
+			msleep(16);
 			ret = pdata->on(mfd->pdev);
 			if (ret == 0) {
 				mfd->panel_power_on = TRUE;
-				mfd->panel_driver_on = mfd->op_enable;
+
+/* ToDo: possible conflict with android which doesn't expect sw refresher */
+/*
+	  if (!mfd->hw_refresh)
+	  {
+	    if ((ret = msm_fb_resume_sw_refresher(mfd)) != 0)
+	    {
+	      MSM_FB_INFO("msm_fb_blank_sub: msm_fb_resume_sw_refresher failed = %d!\n",ret);
+	    }
+	  }
+*/
 			}
 		}
 		break;
@@ -964,22 +872,25 @@
 			mfd->op_enable = FALSE;
 			curr_pwr_state = mfd->panel_power_on;
 			mfd->panel_power_on = FALSE;
-
-			if (mfd->msmfb_no_update_notify_timer.function)
-				del_timer(&mfd->msmfb_no_update_notify_timer);
-			complete(&mfd->msmfb_no_update_notify);
-
+			cancel_delayed_work_sync(&mfd->backlight_worker);
 			bl_updated = 0;
 
-			/* clean fb to prevent displaying old fb */
-			memset((void *)info->screen_base, 0,
-					info->fix.smem_len);
-
+			msleep(16);
 			ret = pdata->off(mfd->pdev);
 			if (ret)
 				mfd->panel_power_on = curr_pwr_state;
 
-			msm_fb_release_timeline(mfd);
+			if (mfd->timeline) {
+				/* Incrementing by 1 would be enough while
+				 * pan_display is still a blocking call under
+				 * mutex protection, but an async caller
+				 * needs 2. Incrementing by 2 is safe either
+				 * way: it signals all existing fences and
+				 * is otherwise harmless. */
+				sw_sync_timeline_inc(mfd->timeline, 2);
+				mfd->timeline_value += 2;
+			}
+
 			mfd->op_enable = TRUE;
 		}
 		break;
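The increment of 2 follows from how release points are created elsewhere in this patch: sw_sync_pt_create() pins each commit's release fence at timeline_value + 2, while msm_fb_signal_timeline() advances the timeline by only 1 per completed frame, so an outstanding point can be up to two steps away. A point created at T + 2 signals once the timeline value reaches T + 2, which inc(2) guarantees immediately, whatever was still in flight when the panel was blanked.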
@@ -1082,26 +993,7 @@
 static int msm_fb_blank(int blank_mode, struct fb_info *info)
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
-	if (blank_mode == FB_BLANK_POWERDOWN) {
-		struct fb_event event;
-		event.info = info;
-		event.data = &blank_mode;
-		fb_notifier_call_chain(FB_EVENT_BLANK, &event);
-	}
 	msm_fb_pan_idle(mfd);
-	if (mfd->op_enable == 0) {
-		if (blank_mode == FB_BLANK_UNBLANK) {
-			mfd->suspend.panel_power_on = TRUE;
-			/* if unblank is called when system is in suspend,
-			do not unblank but send SUCCESS to hwc, so that hwc
-			keeps pushing frames and display comes up as soon
-			 as system is resumed */
-			return 0;
-		}
-		else
-			mfd->suspend.panel_power_on = FALSE;
-	}
 	return msm_fb_blank_sub(blank_mode, info, mfd->op_enable);
 }
 
@@ -1321,6 +1213,25 @@
 		bpp = 4;
 		break;
 
+	case MDP_BGRA_8888:
+		fix->type = FB_TYPE_PACKED_PIXELS;
+		fix->xpanstep = 1;
+		fix->ypanstep = 1;
+		var->vmode = FB_VMODE_NONINTERLACED;
+		var->blue.offset = 0;
+		var->green.offset = 8;
+		var->red.offset = 16;
+		var->blue.length = 8;
+		var->green.length = 8;
+		var->red.length = 8;
+		var->blue.msb_right = 0;
+		var->green.msb_right = 0;
+		var->red.msb_right = 0;
+		var->transp.offset = 24;
+		var->transp.length = 8;
+		bpp = 4;
+		break;
+
 	case MDP_YCRYCB_H2V1:
 		/* ToDo: need to check TV-Out YUV422i framebuffer format */
 		/*       we might need to create new type define */
@@ -1401,7 +1313,6 @@
 	var->yres_virtual = panel_info->yres * mfd->fb_page +
 		((PAGE_SIZE - remainder)/fix->line_length) * mfd->fb_page;
 	var->bits_per_pixel = bpp * 8;	/* FrameBuffer color depth */
-	var->reserved[3] = mdp_get_panel_framerate(mfd);
 
 		/*
 		 * id field for fb app
@@ -1576,9 +1487,7 @@
 	ret = 0;
 
 #ifdef CONFIG_HAS_EARLYSUSPEND
-
-	if (hdmi_prim_display ||
-	    (mfd->panel_info.type != DTV_PANEL)) {
+	if (mfd->panel_info.type != DTV_PANEL) {
 		mfd->early_suspend.suspend = msmfb_early_suspend;
 		mfd->early_suspend.resume = msmfb_early_resume;
 		mfd->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB - 2;
@@ -1733,12 +1642,6 @@
 			return 0;
 	}
 
-	if (mfd->op_enable == 0) {
-		/* if system is in suspend mode, do not unblank */
-		mfd->ref_cnt++;
-		return 0;
-	}
-
 	if (!mfd->ref_cnt) {
 		if (!bf_supported ||
 			(info->node != 1 && info->node != 2))
@@ -1750,10 +1653,12 @@
 		if (mfd->is_panel_ready && !mfd->is_panel_ready())
 			unblank = false;
 
-		if (unblank && (mfd->panel_info.type != DTV_PANEL)) {
-			if (msm_fb_blank_sub(FB_BLANK_UNBLANK, info, TRUE)) {
-				pr_err("msm_fb_open: can't turn on display\n");
-				return -EINVAL;
+		if (unblank) {
+			if (msm_fb_blank_sub(FB_BLANK_UNBLANK,
+				info, mfd->op_enable)) {
+				MSM_FB_ERR("%s: can't turn on display!\n",
+					__func__);
+				return -EPERM;
 			}
 		}
 	}
@@ -1772,10 +1677,10 @@
 			    mfd->index);
 		return -EINVAL;
 	}
-	msm_fb_pan_idle(mfd);
+
 	mfd->ref_cnt--;
 
-	if ((!mfd->ref_cnt) && (mfd->op_enable)) {
+	if (!mfd->ref_cnt) {
 		if ((ret =
 		     msm_fb_blank_sub(FB_BLANK_POWERDOWN, info,
 				      mfd->op_enable)) != 0) {
@@ -1788,7 +1693,7 @@
 	return ret;
 }
 
-void msm_fb_wait_for_fence(struct msm_fb_data_type *mfd)
+int msm_fb_wait_for_fence(struct msm_fb_data_type *mfd)
 {
 	int i, ret = 0;
 	/* buf sync */
@@ -1801,19 +1706,13 @@
 			break;
 		}
 	}
-	if (ret < 0) {
-		while (i < mfd->acq_fen_cnt) {
-			sync_fence_put(mfd->acq_fen[i]);
-			i++;
-		}
-	}
 	mfd->acq_fen_cnt = 0;
+	return ret;
 }
 int msm_fb_signal_timeline(struct msm_fb_data_type *mfd)
 {
 	mutex_lock(&mfd->sync_mutex);
-	if (mfd->timeline && !list_empty((const struct list_head *)
-				(&(mfd->timeline->obj.active_list_head)))) {
+	if (mfd->timeline) {
 		sw_sync_timeline_inc(mfd->timeline, 1);
 		mfd->timeline_value++;
 	}
@@ -1823,16 +1722,20 @@
 	return 0;
 }
 
-void msm_fb_release_timeline(struct msm_fb_data_type *mfd)
+static void bl_workqueue_handler(struct work_struct *work)
 {
-	mutex_lock(&mfd->sync_mutex);
-	if (mfd->timeline) {
-		sw_sync_timeline_inc(mfd->timeline, 2);
-		mfd->timeline_value += 2;
+	struct msm_fb_data_type *mfd = container_of(to_delayed_work(work),
+				struct msm_fb_data_type, backlight_worker);
+	struct msm_fb_panel_data *pdata = mfd->pdev->dev.platform_data;
+
+	if ((pdata) && (pdata->set_backlight) && (!bl_updated)) {
+		down(&mfd->sem);
+		mfd->bl_level = unset_bl_level;
+		pdata->set_backlight(mfd);
+		bl_level_old = unset_bl_level;
+		bl_updated = 1;
+		up(&mfd->sem);
 	}
-	mfd->last_rel_fence = 0;
-	mfd->cur_rel_fence = 0;
-	mutex_unlock(&mfd->sync_mutex);
 }
 
 DEFINE_SEMAPHORE(msm_fb_pan_sem);
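The handler above pairs with call sites added elsewhere in this patch; the whole life cycle, condensed:

	/* probe */
	INIT_DELAYED_WORK(&mfd->backlight_worker, bl_workqueue_handler);

	/* first pan or overlay play with a pending level */
	if (unset_bl_level && !bl_updated)
		schedule_delayed_work(&mfd->backlight_worker,
				backlight_duration);	/* HZ/20 = 50 ms */

	/* blank / power down */
	cancel_delayed_work_sync(&mfd->backlight_worker);
	bl_updated = 0;

Because the work item sets the backlight only while bl_updated is still 0, and is cancelled synchronously before pdata->off(), the intent is that a blank racing a just-scheduled resume can no longer light the panel after power-off.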
@@ -1845,76 +1748,56 @@
 		mutex_unlock(&mfd->sync_mutex);
 		ret = wait_for_completion_interruptible_timeout(
 				&mfd->commit_comp,
-			msecs_to_jiffies(WAIT_DISP_OP_TIMEOUT));
-		if (ret < 0)
-			ret = -ERESTARTSYS;
-		else if (!ret)
-			pr_err("%s wait for commit_comp timeout %d %d",
-				__func__, ret, mfd->is_committing);
+			msecs_to_jiffies(WAIT_FENCE_TIMEOUT));
+		if (!ret)
+			pr_err("%s wait for commit_comp timeout %d %d",
+				__func__, ret, mfd->is_committing);
+		if (ret <= 0)
+			ret = -ERESTARTSYS;
-		if (ret <= 0) {
-			mutex_lock(&mfd->sync_mutex);
-			mfd->is_committing = 0;
-			complete_all(&mfd->commit_comp);
-			mutex_unlock(&mfd->sync_mutex);
-		}
 	} else {
 		mutex_unlock(&mfd->sync_mutex);
 	}
 	return ret;
 }
-static int msm_fb_pan_display_ex(struct fb_info *info,
-		struct mdp_display_commit *disp_commit)
+static int msm_fb_pan_display_ex(struct fb_var_screeninfo *var,
+			      struct fb_info *info, u32 wait_for_finish)
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 	struct msm_fb_backup_type *fb_backup;
-	struct fb_var_screeninfo *var = &disp_commit->var;
-	u32 wait_for_finish = disp_commit->wait_for_finish;
 	int ret = 0;
-
-	if (disp_commit->flags &
-		MDP_DISPLAY_COMMIT_OVERLAY) {
-		if (!mfd->panel_power_on) /* suspended */
-			return -EPERM;
-	} else {
-		/*
-		 * If framebuffer is 2, io pan display is not allowed.
-		 */
-		if (bf_supported && info->node == 2) {
-			pr_err("%s: no pan display for fb%d!",
-				   __func__, info->node);
-			return -EPERM;
-		}
-
-		if (info->node != 0 || mfd->cont_splash_done)	/* primary */
-			if ((!mfd->op_enable) || (!mfd->panel_power_on))
-				return -EPERM;
-
-		if (var->xoffset > (info->var.xres_virtual - info->var.xres))
-			return -EINVAL;
-
-		if (var->yoffset > (info->var.yres_virtual - info->var.yres))
-			return -EINVAL;
+	/*
+	 * If framebuffer is 2, io pan display is not allowed.
+	 */
+	if (bf_supported && info->node == 2) {
+		pr_err("%s: no pan display for fb%d!",
+		       __func__, info->node);
+		return -EPERM;
 	}
+
+	if (info->node != 0 || mfd->cont_splash_done)	/* primary */
+		if ((!mfd->op_enable) || (!mfd->panel_power_on))
+			return -EPERM;
+
+	if (var->xoffset > (info->var.xres_virtual - info->var.xres))
+		return -EINVAL;
+
+	if (var->yoffset > (info->var.yres_virtual - info->var.yres))
+		return -EINVAL;
 	msm_fb_pan_idle(mfd);
 
 	mutex_lock(&mfd->sync_mutex);
 
-	if (!(disp_commit->flags &
-		MDP_DISPLAY_COMMIT_OVERLAY)) {
-		if (info->fix.xpanstep)
-			info->var.xoffset =
-				(var->xoffset / info->fix.xpanstep) *
-					info->fix.xpanstep;
+	if (info->fix.xpanstep)
+		info->var.xoffset =
+		    (var->xoffset / info->fix.xpanstep) * info->fix.xpanstep;
 
-		if (info->fix.ypanstep)
-			info->var.yoffset =
-				(var->yoffset / info->fix.ypanstep) *
-					info->fix.ypanstep;
-	}
+	if (info->fix.ypanstep)
+		info->var.yoffset =
+		    (var->yoffset / info->fix.ypanstep) * info->fix.ypanstep;
+
 	fb_backup = (struct msm_fb_backup_type *)mfd->msm_fb_backup;
 	memcpy(&fb_backup->info, info, sizeof(struct fb_info));
-	memcpy(&fb_backup->disp_commit, disp_commit,
-		sizeof(struct mdp_display_commit));
+	memcpy(&fb_backup->var, var, sizeof(struct fb_var_screeninfo));
 	mfd->is_committing = 1;
 	INIT_COMPLETION(mfd->commit_comp);
 	schedule_work(&mfd->commit_work);
@@ -1927,10 +1810,7 @@
 static int msm_fb_pan_display(struct fb_var_screeninfo *var,
 			      struct fb_info *info)
 {
-	struct mdp_display_commit disp_commit;
-	memset(&disp_commit, 0, sizeof(disp_commit));
-	disp_commit.wait_for_finish = TRUE;
-	return msm_fb_pan_display_ex(info, &disp_commit);
+	return msm_fb_pan_display_ex(var, info, TRUE);
 }
 
 static int msm_fb_pan_display_sub(struct fb_var_screeninfo *var,
@@ -1939,7 +1819,6 @@
 	struct mdp_dirty_region dirty;
 	struct mdp_dirty_region *dirtyPtr = NULL;
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-	struct msm_fb_panel_data *pdata;
 
 	/*
 	 * If framebuffer is 2, io pen display is not allowed.
@@ -2016,8 +1895,10 @@
 		mdp_set_dma_pan_info(info, NULL, TRUE);
 		if (msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable)) {
 			pr_err("%s: can't turn on display!\n", __func__);
-			up(&msm_fb_pan_sem);
-			msm_fb_release_timeline(mfd);
+			if (mfd->timeline) {
+				sw_sync_timeline_inc(mfd->timeline, 2);
+				mfd->timeline_value += 2;
+			}
 			return -EINVAL;
 		}
 	}
@@ -2025,23 +1906,16 @@
 	mdp_set_dma_pan_info(info, dirtyPtr,
 			     (var->activate & FB_ACTIVATE_VBL));
 	/* async call */
-
 	mdp_dma_pan_update(info);
 	msm_fb_signal_timeline(mfd);
 	up(&msm_fb_pan_sem);
 
-	if (unset_bl_level && !bl_updated) {
-		pdata = (struct msm_fb_panel_data *)mfd->pdev->
-			dev.platform_data;
-		if ((pdata) && (pdata->set_backlight)) {
-			down(&mfd->sem);
-			mfd->bl_level = unset_bl_level;
-			pdata->set_backlight(mfd);
-			bl_level_old = unset_bl_level;
-			up(&mfd->sem);
-			bl_updated = 1;
-		}
-	}
+	if (unset_bl_level && !bl_updated)
+		schedule_delayed_work(&mfd->backlight_worker,
+				backlight_duration);
+
+	if (info->node == 0 && (mfd->cont_splash_done)) /* primary */
+		mdp_free_splash_buffer(mfd);
 
 	++mfd->panel_info.frame_count;
 	return 0;
@@ -2056,14 +1930,9 @@
 
 	mfd = container_of(work, struct msm_fb_data_type, commit_work);
 	fb_backup = (struct msm_fb_backup_type *)mfd->msm_fb_backup;
+	var = &fb_backup->var;
 	info = &fb_backup->info;
-	if (fb_backup->disp_commit.flags &
-		MDP_DISPLAY_COMMIT_OVERLAY) {
-			mdp4_overlay_commit(info);
-	} else {
-		var = &fb_backup->disp_commit.var;
-		msm_fb_pan_display_sub(var, info);
-	}
+	msm_fb_pan_display_sub(var, info);
 	mutex_lock(&mfd->sync_mutex);
 	mfd->is_committing = 0;
 	complete_all(&mfd->commit_comp);
@@ -2221,7 +2090,9 @@
 		break;
 
 	case 32:
-		if (var->transp.offset == 24)
+		if ((var->transp.offset == 24) && (var->blue.offset == 0))
+			mfd->fb_imgType = MDP_BGRA_8888;
+		else if (var->transp.offset == 24)
 			mfd->fb_imgType = MDP_ARGB_8888;
 		else
 			mfd->fb_imgType = MDP_RGBA_8888;
@@ -2245,13 +2116,12 @@
 	mfd->fbi->fix.line_length = msm_fb_line_length(mfd->index, var->xres,
 						       var->bits_per_pixel/8);
 
-	if (mfd->update_panel_info)
-		mfd->update_panel_info(mfd);
-
-	if ((mfd->panel_info.type == DTV_PANEL) && !mfd->panel_power_on) {
-		msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
-	} else if (blank) {
+	if (blank) {
 		msm_fb_blank_sub(FB_BLANK_POWERDOWN, info, mfd->op_enable);
+
+		if (mfd->update_panel_info)
+			mfd->update_panel_info(mfd);
+
 		msm_fb_blank_sub(FB_BLANK_UNBLANK, info, mfd->op_enable);
 	}
 
@@ -3201,15 +3071,16 @@
 	return ret;
 }
 
+static int msmfb_overlay_commit(struct fb_info *info)
+{
+	return mdp4_overlay_commit(info);
+}
+
 static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp)
 {
 	int	ret;
 	struct msmfb_overlay_data req;
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-	struct msm_fb_panel_data *pdata;
-
-	if (!mfd->panel_power_on) /* suspended */
-		return -EPERM;
 
 	if (mfd->overlay_play_enable == 0)	/* nothing to do */
 		return 0;
@@ -3240,18 +3111,12 @@
 
 	ret = mdp4_overlay_play(info, &req);
 
-	if (unset_bl_level && !bl_updated) {
-		pdata = (struct msm_fb_panel_data *)mfd->pdev->
-			dev.platform_data;
-		if ((pdata) && (pdata->set_backlight)) {
-			down(&mfd->sem);
-			mfd->bl_level = unset_bl_level;
-			pdata->set_backlight(mfd);
-			bl_level_old = unset_bl_level;
-			up(&mfd->sem);
-			bl_updated = 1;
-		}
-	}
+	if (unset_bl_level && !bl_updated)
+		schedule_delayed_work(&mfd->backlight_worker,
+				backlight_duration);
+
+	if (info->node == 0 && (mfd->cont_splash_done)) /* primary */
+		mdp_free_splash_buffer(mfd);
 
 	return ret;
 }
@@ -3373,29 +3238,6 @@
 	return mdp4_writeback_terminate(info);
 }
 
-static int msmfb_overlay_ioctl_writeback_set_mirr_hint(struct fb_info *
-		info, void *argp)
-{
-	int ret = 0, hint;
-
-	if (!info) {
-		ret = -EINVAL;
-		goto error;
-	}
-
-	ret = copy_from_user(&hint, argp, sizeof(hint));
-	if (ret)
-		goto error;
-
-	ret = mdp4_writeback_set_mirroring_hint(info, hint);
-	if (ret)
-		goto error;
-error:
-	if (ret)
-		pr_err("%s: ioctl failed\n", __func__);
-	return ret;
-}
-
 #else
 static int msmfb_overlay_ioctl_writeback_init(struct fb_info *info)
 {
@@ -3428,12 +3270,6 @@
 {
 	return -ENOTSUPP;
 }
-
-static int msmfb_overlay_ioctl_writeback_set_mirr_hint(struct fb_info *
-		info, void *argp)
-{
-	return -ENOTSUPP;
-}
 #endif
 
 static int msmfb_overlay_3d_sbys(struct fb_info *info, unsigned long *argp)
@@ -3477,6 +3313,7 @@
 #endif
 
 DEFINE_SEMAPHORE(msm_fb_ioctl_ppp_sem);
+DEFINE_SEMAPHORE(msm_fb_ioctl_vsync_sem);
 DEFINE_MUTEX(msm_fb_ioctl_lut_sem);
 
 /* Set color conversion matrix from user space */
@@ -3606,6 +3443,10 @@
 		ret = mdp4_qseed_cfg((struct mdp_qseed_cfg_data *)
 						&pp_ptr->data.qseed_cfg_data);
 		break;
+	case mdp_op_calib_cfg:
+		ret = mdp4_calib_config((struct mdp_calib_config_data *)
+						&pp_ptr->data.calib_cfg);
+		break;
 #endif
 	case mdp_bl_scale_cfg:
 		ret = mdp_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
@@ -3620,6 +3461,45 @@
 
 	return ret;
 }
+static int msmfb_handle_metadata_ioctl(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata_ptr)
+{
+	int ret;
+	switch (metadata_ptr->op) {
+#ifdef CONFIG_FB_MSM_MDP40
+	case metadata_op_base_blend:
+		ret = mdp4_update_base_blend(mfd,
+						&metadata_ptr->data.blend_cfg);
+		break;
+	case metadata_op_wb_format:
+		ret = mdp4_update_writeback_format(mfd,
+					&metadata_ptr->data.mixer_cfg);
+		break;
+#endif
+	default:
+		pr_warn("Unsupported request to MDP META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int msmfb_get_metadata(struct msm_fb_data_type *mfd,
+				struct msmfb_metadata *metadata_ptr)
+{
+	int ret = 0;
+	switch (metadata_ptr->op) {
+	case metadata_op_frame_rate:
+		metadata_ptr->data.panel_frame_rate =
+			mdp_get_panel_framerate(mfd);
+		break;
+	default:
+		pr_warn("Unsupported request to MDP META IOCTL.\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
 
 static int msmfb_handle_buf_sync_ioctl(struct msm_fb_data_type *mfd,
 						struct mdp_buf_sync *buf_sync)
@@ -3632,9 +3512,6 @@
 		(mfd->timeline == NULL))
 		return -EINVAL;
 
-	if ((!mfd->op_enable) || (!mfd->panel_power_on))
-		return -EPERM;
-
 	if (buf_sync->acq_fen_fd_cnt)
 		ret = copy_from_user(acq_fen_fd, buf_sync->acq_fen_fd,
 				buf_sync->acq_fen_fd_cnt * sizeof(int));
@@ -3657,9 +3534,9 @@
 	if (ret)
 		goto buf_sync_err_1;
 	mfd->acq_fen_cnt = fence_cnt;
-	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT) {
+	if (buf_sync->flags & MDP_BUF_SYNC_FLAG_WAIT)
 		msm_fb_wait_for_fence(mfd);
-	}
+
 	mfd->cur_rel_sync_pt = sw_sync_pt_create(mfd->timeline,
 			mfd->timeline_value + 2);
 	if (mfd->cur_rel_sync_pt == NULL) {
@@ -3679,24 +3556,18 @@
 	}
 	/* create fd */
 	mfd->cur_rel_fen_fd = get_unused_fd_flags(0);
-	if (mfd->cur_rel_fen_fd < 0) {
-		pr_err("%s: get_unused_fd_flags failed", __func__);
-		ret  = -EIO;
-		goto buf_sync_err_2;
-	}
 	sync_fence_install(mfd->cur_rel_fence, mfd->cur_rel_fen_fd);
 	ret = copy_to_user(buf_sync->rel_fen_fd,
 		&mfd->cur_rel_fen_fd, sizeof(int));
 	if (ret) {
 		pr_err("%s:copy_to_user failed", __func__);
-		goto buf_sync_err_3;
+		goto buf_sync_err_2;
 	}
 	mutex_unlock(&mfd->sync_mutex);
 	return ret;
-buf_sync_err_3:
-	put_unused_fd(mfd->cur_rel_fen_fd);
 buf_sync_err_2:
 	sync_fence_put(mfd->cur_rel_fence);
+	put_unused_fd(mfd->cur_rel_fen_fd);
 	mfd->cur_rel_fence = NULL;
 	mfd->cur_rel_fen_fd = 0;
 buf_sync_err_1:
@@ -3707,40 +3578,95 @@
 	return ret;
 }
 
+static int buf_fence_process(struct msm_fb_data_type *mfd,
+						struct mdp_buf_fence *buf_fence)
+{
+	int i, fence_cnt = 0, ret = 0;
+	struct sync_fence *fence;
+
+	if ((buf_fence->acq_fen_fd_cnt == 0) ||
+		(buf_fence->acq_fen_fd_cnt > MDP_MAX_FENCE_FD) ||
+		(mfd->timeline == NULL))
+		return -EINVAL;
+
+	mutex_lock(&mfd->sync_mutex);
+	for (i = 0; i < buf_fence->acq_fen_fd_cnt; i++) {
+		fence = sync_fence_fdget(buf_fence->acq_fen_fd[i]);
+		if (fence == NULL) {
+			pr_info("%s: null fence! i=%d fd=%d\n", __func__, i,
+				buf_fence->acq_fen_fd[i]);
+			ret = -EINVAL;
+			break;
+		}
+		mfd->acq_fen[i] = fence;
+	}
+	fence_cnt = i;
+	if (ret)
+		goto buf_fence_err_1;
+	mfd->cur_rel_sync_pt = sw_sync_pt_create(mfd->timeline,
+			mfd->timeline_value + 2);
+	if (mfd->cur_rel_sync_pt == NULL) {
+		pr_err("%s: cannot create sync point", __func__);
+		ret = -ENOMEM;
+		goto buf_fence_err_1;
+	}
+	/* create fence */
+	mfd->cur_rel_fence = sync_fence_create("mdp-fence",
+			mfd->cur_rel_sync_pt);
+	if (mfd->cur_rel_fence == NULL) {
+		sync_pt_free(mfd->cur_rel_sync_pt);
+		mfd->cur_rel_sync_pt = NULL;
+		pr_err("%s: cannot create fence", __func__);
+		ret = -ENOMEM;
+		goto buf_fence_err_1;
+	}
+	/* create fd */
+	mfd->cur_rel_fen_fd = get_unused_fd_flags(0);
+	sync_fence_install(mfd->cur_rel_fence, mfd->cur_rel_fen_fd);
+	buf_fence->rel_fen_fd[0] = mfd->cur_rel_fen_fd;
+	/* Only one released fd for now, -1 indicates an end */
+	buf_fence->rel_fen_fd[1] = -1;
+	mfd->acq_fen_cnt = buf_fence->acq_fen_fd_cnt;
+	mutex_unlock(&mfd->sync_mutex);
+	return ret;
+buf_fence_err_1:
+	for (i = 0; i < fence_cnt; i++)
+		sync_fence_put(mfd->acq_fen[i]);
+	mfd->acq_fen_cnt = 0;
+	mutex_unlock(&mfd->sync_mutex);
+	return ret;
+}
 static int msmfb_display_commit(struct fb_info *info,
 						unsigned long *argp)
 {
 	int ret;
+	u32 copy_back = FALSE;
+	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 	struct mdp_display_commit disp_commit;
+	struct mdp_buf_fence *buf_fence;
 	ret = copy_from_user(&disp_commit, argp,
 			sizeof(disp_commit));
 	if (ret) {
 		pr_err("%s:copy_from_user failed", __func__);
 		return ret;
 	}
+	buf_fence = &disp_commit.buf_fence;
+	if (buf_fence->acq_fen_fd_cnt > 0)
+		ret = buf_fence_process(mfd, buf_fence);
+	if ((!ret) && (buf_fence->rel_fen_fd[0] > 0))
+		copy_back = TRUE;
 
-	ret = msm_fb_pan_display_ex(info, &disp_commit);
+	ret = msm_fb_pan_display_ex(&disp_commit.var,
+			      info, disp_commit.wait_for_finish);
 
-	return ret;
-}
-
-static int msmfb_get_metadata(struct msm_fb_data_type *mfd,
-				struct msmfb_metadata *metadata_ptr)
-{
-	int ret = 0;
-	switch (metadata_ptr->op) {
-	case metadata_op_frame_rate:
-		metadata_ptr->data.panel_frame_rate =
-			mdp_get_panel_framerate(mfd);
-		break;
-	default:
-		pr_warn("Unsupported request to MDP META IOCTL.\n");
-		ret = -EINVAL;
-		break;
+	if (copy_back) {
+		ret = copy_to_user(argp,
+			&disp_commit, sizeof(disp_commit));
+		if (ret)
+			pr_err("%s:copy_to_user failed", __func__);
 	}
 	return ret;
 }
-
 static int msm_fb_ioctl(struct fb_info *info, unsigned int cmd,
 			unsigned long arg)
 {
@@ -3758,8 +3684,8 @@
 #endif
 	struct mdp_page_protection fb_page_protection;
 	struct msmfb_mdp_pp mdp_pp;
-	struct mdp_buf_sync buf_sync;
 	struct msmfb_metadata mdp_metadata;
+	struct mdp_buf_sync buf_sync;
 	int ret = 0;
 	msm_fb_pan_idle(mfd);
 
@@ -3774,6 +3700,11 @@
 	case MSMFB_OVERLAY_UNSET:
 		ret = msmfb_overlay_unset(info, argp);
 		break;
+	case MSMFB_OVERLAY_COMMIT:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_commit(info);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
 	case MSMFB_OVERLAY_PLAY:
 		ret = msmfb_overlay_play(info, argp);
 		break;
@@ -3814,19 +3745,15 @@
 	case MSMFB_WRITEBACK_TERMINATE:
 		ret = msmfb_overlay_ioctl_writeback_terminate(info);
 		break;
-	case MSMFB_WRITEBACK_SET_MIRRORING_HINT:
-		ret = msmfb_overlay_ioctl_writeback_set_mirr_hint(
-				info, argp);
-		break;
 #endif
 	case MSMFB_VSYNC_CTRL:
 	case MSMFB_OVERLAY_VSYNC_CTRL:
-		down(&msm_fb_ioctl_ppp_sem);
+		down(&msm_fb_ioctl_vsync_sem);
 		if (mdp_rev >= MDP_REV_40)
 			ret = msmfb_overlay_vsync_ctrl(info, argp);
 		else
 			ret = msmfb_vsync_ctrl(info, argp);
-		up(&msm_fb_ioctl_ppp_sem);
+		up(&msm_fb_ioctl_vsync_sem);
 		break;
 	case MSMFB_BLIT:
 		down(&msm_fb_ioctl_ppp_sem);
@@ -4044,6 +3971,8 @@
 			return ret;
 
 		ret = msmfb_handle_pp_ioctl(mfd, &mdp_pp);
+		if (ret == 1)
+			ret = copy_to_user(argp, &mdp_pp, sizeof(mdp_pp));
 		break;
 	case MSMFB_BUFFER_SYNC:
 		ret = copy_from_user(&buf_sync, argp, sizeof(buf_sync));
@@ -4056,6 +3985,12 @@
 			ret = copy_to_user(argp, &buf_sync, sizeof(buf_sync));
 		break;
 
+	case MSMFB_METADATA_SET:
+		ret = copy_from_user(&mdp_metadata, argp, sizeof(mdp_metadata));
+		if (ret)
+			return ret;
+		ret = msmfb_handle_metadata_ioctl(mfd, &mdp_metadata);
+		break;
 	case MSMFB_DISPLAY_COMMIT:
 		ret = msmfb_display_commit(info, argp);
 		break;
@@ -4068,7 +4002,6 @@
 		if (!ret)
 			ret = copy_to_user(argp, &mdp_metadata,
 				sizeof(mdp_metadata));
-
 		break;
 
 	default:
@@ -4332,7 +4265,7 @@
 EXPORT_SYMBOL(msm_fb_v4l2_enable);
 
 /* Called by v4l2 driver to provide a frame for display */
-int msm_fb_v4l2_update(void *par,
+int msm_fb_v4l2_update(void *par, bool bUserPtr,
 	unsigned long srcp0_addr, unsigned long srcp0_size,
 	unsigned long srcp1_addr, unsigned long srcp1_size,
 	unsigned long srcp2_addr, unsigned long srcp2_size)
@@ -4344,9 +4277,9 @@
 		srcp2_addr);
 #else
 #ifdef CONFIG_FB_MSM_MDP30
-	return mdp_ppp_v4l2_overlay_play(fbi_list[0],
-		srcp0_addr, srcp0_size,
-		srcp1_addr, srcp1_size);
+	return mdp_ppp_v4l2_overlay_play(fbi_list[0], bUserPtr,
+			srcp0_addr, srcp0_size,
+			srcp1_addr, srcp1_size);
 #else
 	return -EINVAL;
 #endif
diff --git a/drivers/video/msm/msm_fb.h b/drivers/video/msm/msm_fb.h
index bb34ac2..7519ac7 100644
--- a/drivers/video/msm/msm_fb.h
+++ b/drivers/video/msm/msm_fb.h
@@ -37,9 +37,7 @@
 #include <linux/fb.h>
 #include <linux/list.h>
 #include <linux/types.h>
-#include <linux/switch.h>
 #include <linux/msm_mdp.h>
-
 #ifdef CONFIG_HAS_EARLYSUSPEND
 #include <linux/earlysuspend.h>
 #endif
@@ -82,7 +80,7 @@
 	DISP_TARGET dest;
 	struct fb_info *fbi;
 
-	struct device *dev;
+	struct delayed_work backlight_worker;
 	boolean op_enable;
 	uint32 fb_imgType;
 	boolean sw_currently_refreshing;
@@ -182,7 +180,6 @@
 	struct list_head writeback_busy_queue;
 	struct list_head writeback_free_queue;
 	struct list_head writeback_register_queue;
-	struct switch_dev writeback_sdev;
 	wait_queue_head_t wait_q;
 	struct ion_client *iclient;
 	unsigned long display_iova;
@@ -195,7 +192,11 @@
 	u32 writeback_state;
 	bool writeback_active_cnt;
 	int cont_splash_done;
+	void *copy_splash_buf;
+	unsigned char *copy_splash_phys;
 	void *cpu_pm_hdl;
+	u32 avtimer_phy;
+	int vsync_sysfs_created;
 	u32 acq_fen_cnt;
 	struct sync_fence *acq_fen[MDP_MAX_FENCE_FD];
 	int cur_rel_fen_fd;
@@ -211,12 +212,11 @@
 	u32 is_committing;
 	struct work_struct commit_work;
 	void *msm_fb_backup;
-	boolean panel_driver_on;
-	int vsync_sysfs_created;
 };
 struct msm_fb_backup_type {
 	struct fb_info info;
-	struct mdp_display_commit disp_commit;
+	struct fb_var_screeninfo var;
+	struct msm_fb_data_type mfd;
 };
 
 struct dentry *msm_fb_get_debugfs_root(void);
@@ -236,9 +236,8 @@
 int msm_fb_writeback_terminate(struct fb_info *info);
 int msm_fb_detect_client(const char *name);
 int calc_fb_offset(struct msm_fb_data_type *mfd, struct fb_info *fbi, int bpp);
-void msm_fb_wait_for_fence(struct msm_fb_data_type *mfd);
+int msm_fb_wait_for_fence(struct msm_fb_data_type *mfd);
 int msm_fb_signal_timeline(struct msm_fb_data_type *mfd);
-void msm_fb_release_timeline(struct msm_fb_data_type *mfd);
 #ifdef CONFIG_FB_BACKLIGHT
 void msm_fb_config_backlight(struct msm_fb_data_type *mfd);
 #endif
diff --git a/drivers/video/msm/msm_fb_def.h b/drivers/video/msm/msm_fb_def.h
index dcd648b..34cf427 100644
--- a/drivers/video/msm/msm_fb_def.h
+++ b/drivers/video/msm/msm_fb_def.h
@@ -34,7 +34,6 @@
 #include <linux/vmalloc.h>
 #include <linux/debugfs.h>
 #include <linux/console.h>
-#include <linux/android_pmem.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/time.h>
diff --git a/drivers/video/msm/msm_fb_panel.c b/drivers/video/msm/msm_fb_panel.c
index 69f60df..28698c1 100644
--- a/drivers/video/msm/msm_fb_panel.c
+++ b/drivers/video/msm/msm_fb_panel.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -78,31 +78,6 @@
 	return ret;
 }
 
-int panel_next_fps_level_change(struct platform_device *pdev,
-					 u32 fps_level)
-{
-	int ret = 0;
-	struct msm_fb_panel_data *pdata;
-	struct msm_fb_panel_data *next_pdata;
-	struct platform_device *next_pdev;
-
-	pdata = (struct msm_fb_panel_data *)pdev->dev.platform_data;
-
-	if (pdata) {
-		next_pdev = pdata->next;
-		if (next_pdev) {
-			next_pdata =
-			    (struct msm_fb_panel_data *)next_pdev->dev.
-			    platform_data;
-			if ((next_pdata) && (next_pdata->fps_level_change))
-				ret = next_pdata->fps_level_change(next_pdev,
-							 fps_level);
-		}
-	}
-
-	return ret;
-}
-
 int panel_next_late_init(struct platform_device *pdev)
 {
 	int ret = 0;
diff --git a/drivers/video/msm/msm_fb_panel.h b/drivers/video/msm/msm_fb_panel.h
index 7d550ae..26b88b1 100644
--- a/drivers/video/msm/msm_fb_panel.h
+++ b/drivers/video/msm/msm_fb_panel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -56,6 +56,11 @@
 	MAX_PHYS_TARGET_NUM,
 } DISP_TARGET_PHYS;
 
+enum {
+	BLT_SWITCH_TG_OFF,
+	BLT_SWITCH_TG_ON
+};
+
 /* panel info type */
 struct lcd_panel_info {
 	__u32 vsync_enable;
@@ -66,6 +71,7 @@
 	__u32 hw_vsync_mode;
 	__u32 vsync_notifier_period;
 	__u32 blt_ctrl;
+	__u32 blt_mode;
 	__u32 rev;
 };
 
@@ -190,7 +196,6 @@
 	void (*set_rect) (int x, int y, int xres, int yres);
 	void (*set_vsync_notifier) (msm_fb_vsync_handler_type, void *arg);
 	void (*set_backlight) (struct msm_fb_data_type *);
-	int (*get_backlight_on_status) (void);
 
 	/* function entry chain */
 	int (*on) (struct platform_device *pdev);
@@ -199,8 +204,18 @@
 	int (*power_ctrl) (boolean enable);
 	struct platform_device *next;
 	int (*clk_func) (int enable);
-	int (*fps_level_change) (struct platform_device *pdev,
-					u32 fps_level);
+};
+
+enum {
+	MDP4_OVERLAY_BLT_SWITCH_TG_OFF,
+	MDP4_OVERLAY_BLT_SWITCH_TG_ON,
+	MDP4_OVERLAY_BLT_SWITCH_POLL
+};
+
+enum {
+	MDP4_OVERLAY_MODE_BLT_CTRL,
+	MDP4_OVERLAY_MODE_BLT_ALWAYS_ON,
+	MDP4_OVERLAY_MODE_BLT_ALWAYS_OFF
 };
 
 /*===========================================================================
@@ -210,8 +225,6 @@
 						u32 type, u32 id);
 int panel_next_on(struct platform_device *pdev);
 int panel_next_off(struct platform_device *pdev);
-int panel_next_fps_level_change(struct platform_device *pdev,
-					u32 fps_level);
 int panel_next_late_init(struct platform_device *pdev);
 
 int lcdc_device_register(struct msm_panel_info *pinfo);
diff --git a/drivers/video/msm/tvout_msm.c b/drivers/video/msm/tvout_msm.c
index 5c67e79..bc2b8ad 100644
--- a/drivers/video/msm/tvout_msm.c
+++ b/drivers/video/msm/tvout_msm.c
@@ -232,7 +232,6 @@
 static int tvout_on(struct platform_device *pdev)
 {
 	uint32 reg = 0;
-	uint32 userformat = 0;
 	struct fb_var_screeninfo *var;
 	struct msm_fb_data_type *mfd = platform_get_drvdata(pdev);
 
@@ -253,9 +252,8 @@
 #endif
 
 	var = &mfd->fbi->var;
-	userformat = var->reserved[3] >> 16;
-	if (userformat >= NTSC_M && userformat <= PAL_N)
-		external_common_state->video_resolution = userformat;
+	if (var->reserved[3] >= NTSC_M && var->reserved[3] <= PAL_N)
+		external_common_state->video_resolution = var->reserved[3];
 
 	tvout_msm_state->pdev = pdev;
 	if (del_timer(&tvout_msm_state->hpd_work_timer))
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
index d941654..582744c 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,7 +88,7 @@
 			ddl_context->dram_base_a.align_virtual_addr;
 	}
 	if (!status) {
-		ddl_context->metadata_shared_input.mem_type = DDL_FW_MEM;
+		ddl_context->metadata_shared_input.mem_type = DDL_CMD_MEM;
 		ptr = ddl_pmem_alloc(&ddl_context->metadata_shared_input,
 			DDL_METADATA_TOTAL_INPUTBUFSIZE,
 			DDL_LINEAR_BUFFER_ALIGN_BYTES);
@@ -280,6 +280,25 @@
 		return VCD_ERR_ILLEGAL_OP;
 	}
 	encoder = &ddl->codec_data.encoder;
+	if (DDL_IS_LTR_ENABLED(encoder)) {
+		DDL_MSG_HIGH("LTR enabled, mode %u count %u",
+			(u32)encoder->ltr_control.ltrmode.ltr_mode,
+			(u32)encoder->ltr_control.ltr_count);
+		status = ddl_allocate_ltr_list(&encoder->ltr_control);
+		if (status) {
+			DDL_MSG_ERROR("%s: allocate ltr list failed",
+				__func__);
+			return status;
+		} else {
+			ddl_clear_ltr_list(&encoder->ltr_control, false);
+		}
+		encoder->num_references_for_p_frame = 2;
+		encoder->ltr_control.callback_reqd = false;
+		encoder->ltr_control.curr_ltr_id = (u32)DDL_LTR_FRAME_START_ID;
+		DDL_MSG_HIGH("num_ref_for_p_frames %u, curr_ltr_id = %u",
+			(u32)encoder->num_references_for_p_frame,
+			(u32)encoder->ltr_control.curr_ltr_id);
+	}
 	status = ddl_allocate_enc_hw_buffers(ddl);
 	if (status)
 		return status;
@@ -393,7 +412,7 @@
 		(struct ddl_client_context *) ddl_handle;
 	struct ddl_context *ddl_context;
 	struct ddl_decoder_data *decoder;
-	DDL_MSG_HIGH("ddl_decode_frame");
+	DDL_MSG_MED("ddl_decode_frame");
 	ddl_context = ddl_get_context();
 	if (!DDL_IS_INITIALIZED(ddl_context)) {
 		DDL_MSG_ERROR("ddl_dec_frame:Not_inited");
@@ -472,9 +491,6 @@
 	struct ddl_encoder_data *encoder =
 		&ddl->codec_data.encoder;
 	u32 vcd_status = VCD_S_SUCCESS;
-	struct vcd_transc *transc;
-	transc = (struct vcd_transc *)(ddl->client_data);
-	DDL_MSG_LOW("%s: transc = 0x%x", __func__, (u32)ddl->client_data);
 	if (encoder->slice_delivery_info.enable) {
 		return ddl_encode_frame_batch(ddl_handle,
 					input_frame,
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
index 1ce054a..b6dc085 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl.h
@@ -14,7 +14,6 @@
 #ifndef _VCD_DDL_H_
 #define _VCD_DDL_H_
 
-#include <mach/msm_subsystem_map.h>
 #include "vcd_ddl_api.h"
 #include "vcd_ddl_core.h"
 #include "vcd_ddl_utils.h"
@@ -52,6 +51,19 @@
 #define DDLCLIENT_STATE_IS(ddl, state) \
 	(state == (ddl)->client_state)
 
+#define DDL_IS_LTR_ENABLED(encoder) \
+	((encoder->ltr_control.ltrmode.ltr_mode == \
+		VCD_LTR_MODE_AUTO || \
+	encoder->ltr_control.ltrmode.ltr_mode == \
+		VCD_LTR_MODE_MANUAL) && \
+	(encoder->ltr_control.ltr_count > 0))
+
+#define DDL_IS_LTR_IN_AUTO_MODE(encoder) \
+	((encoder->ltr_control.ltrmode.ltr_mode == \
+		VCD_LTR_MODE_AUTO) && \
+	(encoder->ltr_control.ltr_count > 0) && \
+	(encoder->ltr_control.ltr_period > 0))
+
 #define DDL_DPB_OP_INIT       1
 #define DDL_DPB_OP_MARK_FREE  2
 #define DDL_DPB_OP_MARK_BUSY  3
@@ -72,6 +84,7 @@
 #define DDL_ENC_CHANGE_BITRATE    0x04
 #define DDL_ENC_CHANGE_FRAMERATE  0x08
 #define DDL_ENC_CHANGE_CIR        0x10
+#define DDL_ENC_LTR_USE_FRAME     0x20
 
 #define DDL_DEC_REQ_OUTPUT_FLUSH  0x1
 
@@ -86,6 +99,9 @@
 
 #define MDP_MIN_TILE_HEIGHT			96
 
+#define DDL_MAX_NUM_LTR_FRAMES                  2
+#define DDL_LTR_FRAME_START_ID                  1
+
 enum ddl_mem_area {
 	DDL_FW_MEM	= 0x0,
 	DDL_MM_MEM	= 0x1,
@@ -188,6 +204,7 @@
 	struct ddl_buf_addr h264_nb_ip;
 	struct ddl_buf_addr context;
 	struct ddl_buf_addr extnuserdata;
+	struct ddl_buf_addr meta_hdr[DDL_MAX_BUFFER_COUNT];
 };
 struct ddl_enc_buffer_size{
 	u32  sz_cur_y;
@@ -242,6 +259,43 @@
 	u32 seqdisp_extdump_enable;
 	u32 seq_extdump_enable;
 };
+
+
+struct ddl_ltrlist {
+	bool ltr_in_use;
+	u32 ltr_id;
+};
+
+struct ddl_ltr_encoding_type {
+	struct vcd_property_ltrmode_type  ltrmode;
+	struct vcd_property_ltruse_type  failed_use_cmd;
+	struct ddl_ltrlist *ltr_list;
+	u32 ltr_count;
+	u32 ltr_period;
+	u32 ltr_use_frames;
+	u32 curr_ltr_id;
+	u32 storing_idx;
+	u32 out_frame_cnt_to_use_this_ltr;
+	u32 out_frame_cnt_before_next_idr;
+	bool storing;
+	bool callback_reqd;
+	bool meta_data_reqd;
+	bool using;
+	bool first_ltr_use_arvd;
+	bool use_ltr_reqd;
+	bool store_for_intraframe_insertion;
+	/*
+	 * True if the corresponding driver context of
+	 * out_frame_cnt_to_use_this_ltr is pending to be
+	 * changed with client settings
+	 */
+	bool pending_chg_ltr_useframes;
+	bool store_ltr0;
+	bool store_ltr1;
+	bool use_ltr0;
+	bool use_ltr1;
+};
+
 struct ddl_encoder_data{
 	struct ddl_codec_data_hdr   hdr;
 	struct vcd_property_codec   codec;
@@ -275,6 +329,8 @@
 	struct ddl_enc_buffers  hw_bufs;
 	struct ddl_yuv_buffer_size  input_buf_size;
 	struct vidc_1080p_enc_frame_info enc_frame_info;
+	struct ddl_ltr_encoding_type  ltr_control;
+	u32  plusptype_enable;
 	u32  meta_data_enable_flag;
 	u32  suffix;
 	u32  meta_data_offset;
@@ -290,6 +346,7 @@
 	u32  num_references_for_p_frame;
 	u32  closed_gop;
 	u32  num_slices_comp;
+	bool  intra_period_changed;
 	struct vcd_property_slice_delivery_info slice_delivery_info;
 	struct ddl_batch_frame_data batch_frame;
 	u32 avc_delimiter_enable;
@@ -495,7 +552,7 @@
 #ifdef DDL_BUF_LOG
 void ddl_list_buffers(struct ddl_client_context *ddl);
 #endif
-#ifdef DDL_MSG_LOG
+#if DDL_MSG_LOG
 s8 *ddl_get_state_string(enum ddl_client_state client_state);
 #endif
 extern unsigned char *vidc_video_codec_fw;
@@ -509,4 +566,21 @@
 void ddl_vidc_decode_reset_avg_time(struct ddl_client_context *ddl);
 void ddl_calc_core_proc_time(const char *func_name, u32 index,
 		struct ddl_client_context *ddl);
+s32 ddl_encoder_ltr_control(struct ddl_client_context *ddl);
+void ddl_encoder_use_ltr_fail_callback(
+	struct ddl_client_context *ddl);
+void ddl_handle_ltr_in_framedone(struct ddl_client_context *ddl);
+s32 ddl_clear_ltr_list(struct ddl_ltr_encoding_type *ltr_control,
+	bool only_use_flag);
+s32 ddl_find_oldest_ltr_not_in_use(
+	struct ddl_ltr_encoding_type *ltr_control);
+s32 ddl_find_ltr_in_use(struct ddl_ltr_encoding_type *ltr_control);
+s32 ddl_find_ltr_from_list(struct ddl_ltr_encoding_type *ltr_control,
+	u32 ltr_id);
+s32 ddl_use_ltr_from_list(struct ddl_ltr_encoding_type *ltr_control,
+	u32 ltr_idx);
+s32 ddl_allocate_ltr_list(struct ddl_ltr_encoding_type *ltr_control);
+s32 ddl_free_ltr_list(struct ddl_ltr_encoding_type *ltr_control);
+void ddl_print_ltr_list(struct ddl_ltr_encoding_type *ltr_control);
+
 #endif
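
The helpers declared above implement the encoder's LTR (long-term reference) bookkeeping: a fixed table of at most DDL_MAX_NUM_LTR_FRAMES entries in which "use" marks exactly one slot and stores target the free slot with the smallest LTR ID. A minimal user-space sketch of that policy; the types and the two-entry setup are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

struct ltr_entry {
	bool in_use;
	unsigned int id;
};

/* store target: the not-in-use entry with the smallest LTR ID */
static int find_oldest_not_in_use(struct ltr_entry *list, unsigned int count)
{
	int found = -1;
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (list[i].in_use)
			continue;
		if (found < 0 || list[i].id < list[found].id)
			found = i;
	}
	return found;
}

/* "use" marks exactly one entry, clearing the rest */
static void use_from_list(struct ltr_entry *list, unsigned int count,
	unsigned int idx)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i].in_use = (i == idx);
}

int main(void)
{
	struct ltr_entry list[2] = { { false, 3 }, { false, 1 } };

	printf("store into %d\n", find_oldest_not_in_use(list, 2)); /* 1 */
	use_from_list(list, 2, 1);
	printf("store into %d\n", find_oldest_not_in_use(list, 2)); /* 0 */
	return 0;
}
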
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
index e6d0260..176699e 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -154,4 +154,10 @@
 #define VIDC_SM_ERR_CONCEALMENT_INTER_SLICE_MB_COPY		2
 #define VIDC_SM_ERR_CONCEALMENT_INTRA_SLICE_COLOR_CONCEALMENT	1
 
+#define DDL_MAX_P_FRAMES_IN_INTRA_INTERVAL      0xffff
+
+#define DDL_SATURATE_P_FRAMES_IN_INTRA_INTERVAL(p_frames) \
+	(((p_frames) > (DDL_MAX_P_FRAMES_IN_INTRA_INTERVAL - 1)) ? \
+	(DDL_MAX_P_FRAMES_IN_INTRA_INTERVAL) : (p_frames))
+
 #endif
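
The saturate macro clamps the configured P-frame count at the 16-bit sentinel that marks an infinite intra interval; a quick standalone boundary check with the same macro body:

#include <stdio.h>

#define DDL_MAX_P_FRAMES_IN_INTRA_INTERVAL	0xffff

#define DDL_SATURATE_P_FRAMES_IN_INTRA_INTERVAL(p_frames) \
	(((p_frames) > (DDL_MAX_P_FRAMES_IN_INTRA_INTERVAL - 1)) ? \
	(DDL_MAX_P_FRAMES_IN_INTRA_INTERVAL) : (p_frames))

int main(void)
{
	/* below the sentinel passes through; at or above it clamps */
	printf("%#x\n", DDL_SATURATE_P_FRAMES_IN_INTRA_INTERVAL(0xfffe));
	printf("%#x\n", DDL_SATURATE_P_FRAMES_IN_INTRA_INTERVAL(0xffff));
	printf("%#x\n", DDL_SATURATE_P_FRAMES_IN_INTRA_INTERVAL(0x12345));
	return 0;
}
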
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c
index b2952c5..daafe44 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_errors.c
@@ -70,7 +70,6 @@
 	case VIDC_1080P_ERROR_MEM_ALLOCATION_FAILED:
 	case VIDC_1080P_ERROR_INSUFFICIENT_CONTEXT_SIZE:
 	case VIDC_1080P_ERROR_DIVIDE_BY_ZERO:
-	case VIDC_1080P_ERROR_DESCRIPTOR_BUFFER_EMPTY:
 	case VIDC_1080P_ERROR_DMA_TX_NOT_COMPLETE:
 	case VIDC_1080P_ERROR_VSP_NOT_READY:
 	case VIDC_1080P_ERROR_BUFFER_FULL_STATE:
@@ -219,6 +218,7 @@
 			}
 			break;
 		}
+	case VIDC_1080P_ERROR_NON_IDR_FRAME_TYPE:
 	case VIDC_1080P_ERROR_BIT_STREAM_BUF_EXHAUST:
 	case VIDC_1080P_ERROR_DESCRIPTOR_TABLE_ENTRY_INVALID:
 	case VIDC_1080P_ERROR_MB_COEFF_NOT_DONE:
@@ -242,15 +242,18 @@
 	case VIDC_1080P_ERROR_HEADER_NOT_FOUND:
 	case VIDC_1080P_ERROR_SLICE_PARSE_ERROR:
 	case VIDC_1080P_ERROR_NON_PAIRED_FIELD_NOT_SUPPORTED:
+	case VIDC_1080P_ERROR_DESCRIPTOR_BUFFER_EMPTY:
 		vcd_status = VCD_ERR_BITSTREAM_ERR;
-		DDL_MSG_ERROR("VIDC_BIT_STREAM_ERR");
+		DDL_MSG_ERROR("VIDC_BIT_STREAM_ERR (%u)",
+			(u32)ddl_context->cmd_err_status);
 		break;
 	case VIDC_1080P_ERROR_B_FRAME_NOT_SUPPORTED:
 	case VIDC_1080P_ERROR_UNSUPPORTED_FEATURE_IN_PROFILE:
 	case VIDC_1080P_ERROR_RESOLUTION_NOT_SUPPORTED:
 		if (ddl->decoding) {
 			vcd_status = VCD_ERR_BITSTREAM_ERR;
-			DDL_MSG_ERROR("VIDC_BIT_STREAM_ERR");
+			DDL_MSG_ERROR("VIDC_BIT_STREAM_ERR (%u)",
+				(u32)ddl_context->cmd_err_status);
 		}
 		break;
 	default:
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
index 3c1d896..77faee3 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_helper.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,7 +22,7 @@
 	return &ddl_context;
 }
 
-#ifdef DDL_MSG_LOG
+#if DDL_MSG_LOG
 s8 *ddl_get_state_string(enum ddl_client_state client_state)
 {
 	s8 *ptr;
@@ -265,7 +265,7 @@
 	luma_size = ddl_get_yuv_buf_size(decoder->frame_size.width,
 			decoder->frame_size.height, DDL_YUV_BUF_TYPE_TILE);
 	dpb = decoder->dp_buf.no_of_dec_pic_buf;
-	DDL_MSG_LOW("%s Decoder num DPB buffers = %u Luma Size = %u"
+	DDL_MSG_LOW("%s Decoder num DPB buffers = %u Luma Size = %u",
 				 __func__, dpb, luma_size);
 	if (dpb > DDL_MAX_BUFFER_COUNT)
 		dpb = DDL_MAX_BUFFER_COUNT;
@@ -298,7 +298,7 @@
 		luma[i] = DDL_OFFSET(ddl_context->dram_base_a.
 			align_physical_addr, frame[i].vcd_frm.physical);
 		chroma[i] = luma[i] + luma_size;
-		DDL_MSG_LOW("%s Decoder Luma address = %x Chroma address = %x"
+		DDL_MSG_LOW("%s Decoder Luma address = %x Chroma address = %x",
 					__func__, luma[i], chroma[i]);
 	}
 	switch (decoder->codec.codec) {
@@ -403,6 +403,7 @@
 		ddl_vidc_encode_dynamic_property(ddl, false);
 		encoder->dynamic_prop_change = 0;
 		ddl_free_enc_hw_buffers(ddl);
+		ddl_free_ltr_list(&encoder->ltr_control);
 	}
 	ddl_pmem_free(&ddl->shared_mem[0]);
 	ddl_pmem_free(&ddl->shared_mem[1]);
@@ -505,6 +506,8 @@
 
 	width_round_up  = width;
 	height_round_up = height;
+	align = SZ_4K;
+
 	if (format == DDL_YUV_BUF_TYPE_TILE) {
 		width_round_up  = DDL_ALIGN(width, DDL_TILE_ALIGN_WIDTH);
 		height_round_up = DDL_ALIGN(height, DDL_TILE_ALIGN_HEIGHT);
@@ -780,7 +783,7 @@
 		}
 	}
 	if (buf_size.sz_extnuserdata > 0) {
-		dec_bufs->extnuserdata.mem_type = DDL_FW_MEM;
+		dec_bufs->extnuserdata.mem_type = DDL_CMD_MEM;
 		ptr = ddl_pmem_alloc(&dec_bufs->extnuserdata,
 				buf_size.sz_extnuserdata, DDL_KILO_BYTE(2));
 		if (!ptr)
@@ -812,13 +815,13 @@
 		height, DDL_YUV_BUF_TYPE_TILE);
 	sz_dpb_c = ddl_get_yuv_buf_size(width, height>>1,
 		DDL_YUV_BUF_TYPE_TILE);
-	if ((input_format == VCD_BUFFER_FORMAT_NV12_16M2KA) ||
-		(input_format == VCD_BUFFER_FORMAT_NV21_16M2KA)) {
+	if (input_format ==
+		VCD_BUFFER_FORMAT_NV12_16M2KA) {
 		sz_cur_y = ddl_get_yuv_buf_size(width, height,
 			DDL_YUV_BUF_TYPE_LINEAR);
 		sz_cur_c = ddl_get_yuv_buf_size(width, height>>1,
 			DDL_YUV_BUF_TYPE_LINEAR);
-	} else if (input_format == VCD_BUFFER_FORMAT_TILE_4x2) {
+	} else if (VCD_BUFFER_FORMAT_TILE_4x2 == input_format) {
 		sz_cur_y = sz_dpb_y;
 		sz_cur_c = sz_dpb_c;
 	} else
@@ -999,7 +1002,7 @@
 	u32 luma_size, i, dpb;
 	luma_size = decoder->dpb_buf_size.size_y;
 	dpb = decoder->dp_buf.no_of_dec_pic_buf;
-	DDL_MSG_HIGH("%s Decoder num DPB buffers = %u Luma Size = %u"
+	DDL_MSG_HIGH("%s Decoder num DPB buffers = %u Luma Size = %u",
 			 __func__, dpb, luma_size);
 	if (dpb > DDL_MAX_BUFFER_COUNT)
 		dpb = DDL_MAX_BUFFER_COUNT;
@@ -1137,8 +1140,295 @@
 				vidc_time_out = temp;
 		}
 	}
-	DDL_MSG_HIGH("%s Video core time out value = 0x%x",
+	DDL_MSG_LOW("%s Video core time out value = 0x%x",
 		 __func__, vidc_time_out);
 	vidc_sm_set_video_core_timeout_value(
 		&ddl->shared_mem[ddl->command_channel], vidc_time_out);
 }
+
+void ddl_handle_ltr_in_framedone(struct ddl_client_context *ddl)
+{
+	struct ddl_ltr_encoding_type *ltr_control =
+		&ddl->codec_data.encoder.ltr_control;
+	DDL_MSG_LOW("%s:", __func__);
+	if (ltr_control->storing) {
+		ltr_control->ltr_list[ltr_control->storing_idx].ltr_id =
+			ltr_control->curr_ltr_id;
+		DDL_MSG_MED("Encoder output stores LTR ID %d into entry %d",
+			ltr_control->curr_ltr_id, ltr_control->storing_idx);
+		ltr_control->meta_data_reqd = true;
+		ltr_control->storing = false;
+	}
+	ltr_control->out_frame_cnt_before_next_idr++;
+	if (ltr_control->out_frame_cnt_to_use_this_ltr) {
+		ltr_control->out_frame_cnt_to_use_this_ltr--;
+		if (!ltr_control->out_frame_cnt_to_use_this_ltr)
+			ddl_clear_ltr_list(ltr_control, true);
+	}
+}
+
+s32 ddl_encoder_ltr_control(struct ddl_client_context *ddl)
+{
+	s32 vcd_status = VCD_S_SUCCESS;
+	struct ddl_encoder_data *encoder = &ddl->codec_data.encoder;
+	struct ddl_ltr_encoding_type *ltr_ctrl = &encoder->ltr_control;
+	bool intra_period_reached = false;
+
+	DDL_MSG_LOW("%s:", __func__);
+	ddl_print_ltr_list(ltr_ctrl);
+
+	if (DDL_IS_LTR_IN_AUTO_MODE(encoder)) {
+		bool finite_i_period, infinite_i_period;
+		DDL_MSG_LOW("%s: before LTR encoding: output "\
+			"count before next IDR %d", __func__,
+			ltr_ctrl->out_frame_cnt_before_next_idr);
+		finite_i_period =
+			(DDL_MAX_P_FRAMES_IN_INTRA_INTERVAL !=
+			encoder->i_period.p_frames) &&
+			(!(ltr_ctrl->out_frame_cnt_before_next_idr %
+			(encoder->i_period.p_frames + 1)));
+		infinite_i_period =
+			((DDL_MAX_P_FRAMES_IN_INTRA_INTERVAL ==
+			encoder->i_period.p_frames) &&
+			(!ltr_ctrl->out_frame_cnt_before_next_idr));
+		if (finite_i_period || infinite_i_period) {
+			DDL_MSG_HIGH("%s: Intra period reached. "\
+			"finite_i_period (%u), infinite_i_period (%u)",
+			__func__, (u32)finite_i_period,
+			(u32)infinite_i_period);
+			intra_period_reached = true;
+		}
+		if (intra_period_reached ||
+			ltr_ctrl->store_for_intraframe_insertion ||
+			encoder->intra_period_changed) {
+			ddl_clear_ltr_list(ltr_ctrl, false);
+			ltr_ctrl->out_frame_cnt_before_next_idr = 0;
+			ltr_ctrl->first_ltr_use_arvd = false;
+			ltr_ctrl->store_for_intraframe_insertion = false;
+		} else {
+			if (ltr_ctrl->first_ltr_use_arvd == false) {
+				ddl_use_ltr_from_list(ltr_ctrl, 0);
+				ltr_ctrl->out_frame_cnt_to_use_this_ltr =
+					0xFFFFFFFF;
+				ltr_ctrl->use_ltr_reqd = true;
+			}
+		}
+		if (!(ltr_ctrl->out_frame_cnt_before_next_idr %
+			ltr_ctrl->ltr_period)) {
+			s32 idx;
+			DDL_MSG_HIGH("%s: reached LTR period "\
+				"out_frame_cnt_before_next_idr %d",
+				__func__,
+				ltr_ctrl->out_frame_cnt_before_next_idr);
+			idx = ddl_find_oldest_ltr_not_in_use(
+					ltr_ctrl);
+			if (idx >= 0) {
+				ltr_ctrl->storing = true;
+				ltr_ctrl->storing_idx = idx;
+				if (idx == 0)
+					ltr_ctrl->store_ltr0 = true;
+				else if (idx == 1)
+					ltr_ctrl->store_ltr1 = true;
+			}
+		}
+	}
+	if (encoder->intra_frame_insertion) {
+		DDL_MSG_HIGH("%s: I-frame insertion requested, "\
+			"delay LTR store for one frame", __func__);
+		ltr_ctrl->store_for_intraframe_insertion = true;
+	}
+	if (ltr_ctrl->pending_chg_ltr_useframes) {
+		ltr_ctrl->out_frame_cnt_to_use_this_ltr =
+			ltr_ctrl->ltr_use_frames;
+		ltr_ctrl->pending_chg_ltr_useframes = false;
+	}
+	if (ltr_ctrl->out_frame_cnt_to_use_this_ltr)
+		ltr_ctrl->use_ltr_reqd = true;
+	if (ltr_ctrl->use_ltr_reqd) {
+		s32 idx;
+		idx = ddl_find_ltr_in_use(ltr_ctrl);
+		if (idx == 0)
+			ltr_ctrl->use_ltr0 = true;
+		else if (idx == 1)
+			ltr_ctrl->use_ltr1 = true;
+		ltr_ctrl->using = true;
+		ltr_ctrl->use_ltr_reqd = false;
+	} else {
+		DDL_MSG_HIGH("%s: use_ltr_reqd skipped", __func__);
+	}
+
+	return vcd_status;
+}
+
+
+s32 ddl_allocate_ltr_list(struct ddl_ltr_encoding_type *ltr_control)
+{
+	s32 vcd_status = VCD_S_SUCCESS;
+
+	DDL_MSG_LOW("%s: lrr_cout = %u", __func__, ltr_control->ltr_count);
+	if (!ltr_control->ltr_list) {
+		if (ltr_control->ltr_count) {
+			ltr_control->ltr_list = (struct ddl_ltrlist *)
+				kmalloc(sizeof(struct ddl_ltrlist)*
+					ltr_control->ltr_count, GFP_KERNEL);
+			if (!ltr_control->ltr_list) {
+				DDL_MSG_ERROR("ddl_allocate_ltr_list failed");
+				vcd_status = VCD_ERR_ALLOC_FAIL;
+			}
+		} else {
+			DDL_MSG_ERROR("%s: failed, zero LTR count", __func__);
+			vcd_status = VCD_ERR_FAIL;
+		}
+	} else {
+		DDL_MSG_HIGH("WARN: ltr_list already allocated");
+	}
+
+	return vcd_status;
+}
+
+s32 ddl_free_ltr_list(struct ddl_ltr_encoding_type *ltr_control)
+{
+	s32 vcd_status = VCD_S_SUCCESS;
+
+	DDL_MSG_LOW("%s:", __func__);
+	kfree(ltr_control->ltr_list);
+	ltr_control->ltr_list = NULL;
+
+	return vcd_status;
+}
+
+s32 ddl_clear_ltr_list(struct ddl_ltr_encoding_type *ltr_control,
+	bool only_use_flag)
+{
+	s32 vcd_status = VCD_S_SUCCESS;
+	u32 i;
+
+	DDL_MSG_LOW("%s:", __func__);
+	for (i = 0; i < ltr_control->ltr_count; i++) {
+		ltr_control->ltr_list[i].ltr_in_use = false;
+		if (!only_use_flag)
+			ltr_control->ltr_list[i].ltr_id = 0;
+	}
+
+	return vcd_status;
+}
+
+s32 ddl_find_oldest_ltr_not_in_use(struct ddl_ltr_encoding_type *ltr_control)
+{
+	s32 found_idx = -1;
+	u32 i;
+
+	if (ltr_control->ltr_list) {
+		if (ltr_control->ltr_count == 1)
+			found_idx = 0;
+		else {
+			for (i = 0; i < ltr_control->ltr_count; i++) {
+				if (ltr_control->ltr_list[i].ltr_in_use)
+					continue;
+				if (found_idx < 0 ||
+					(ltr_control->ltr_list[i].ltr_id <
+					ltr_control->ltr_list[found_idx].
+					ltr_id))
+					found_idx = i;
+			}
+		}
+	}
+
+	DDL_MSG_LOW("%s: found_idx = %d", __func__, found_idx);
+	return found_idx;
+}
+
+s32 ddl_find_ltr_in_use(struct ddl_ltr_encoding_type *ltr_control)
+{
+	s32 found_idx = -1;
+	u32 i;
+
+	if (ltr_control->ltr_list) {
+		for (i = 0; i < ltr_control->ltr_count; i++) {
+			if (ltr_control->ltr_list[i].ltr_in_use == true)
+				found_idx = i;
+		}
+	}
+
+	DDL_MSG_LOW("%s: found_idx = %d", __func__, found_idx);
+	return found_idx;
+}
+
+s32 ddl_find_ltr_from_list(struct ddl_ltr_encoding_type *ltr_control,
+	u32 ltr_id)
+{
+	s32 found_idx = -1;
+	u32 i;
+
+	if (ltr_control->ltr_list) {
+		for (i = 0; i < ltr_control->ltr_count; i++) {
+			if (ltr_control->ltr_list[i].ltr_id == ltr_id) {
+				found_idx = i;
+				break;
+			}
+		}
+	} else {
+		DDL_MSG_ERROR("%s: ltr_list is NULL", __func__);
+	}
+
+	DDL_MSG_LOW("%s: found_idx = %d", __func__, found_idx);
+	return found_idx;
+}
+
+s32 ddl_use_ltr_from_list(struct ddl_ltr_encoding_type *ltr_control,
+	u32 ltr_idx)
+{
+	s32 vcd_status = VCD_S_SUCCESS;
+	u32 i;
+
+	DDL_MSG_LOW("%s: ltr_idx = %u", __func__, ltr_idx);
+	if (ltr_idx >= ltr_control->ltr_count) {
+		DDL_MSG_ERROR("%s: fail, idx %u out of range "\
+			"for the list array count %u", __func__,
+			ltr_idx, ltr_control->ltr_count);
+		vcd_status = VCD_ERR_FAIL;
+	} else {
+		for (i = 0; i < ltr_control->ltr_count; i++) {
+			if (i == ltr_idx)
+				ltr_control->ltr_list[ltr_idx].ltr_in_use =
+					true;
+			else
+				ltr_control->ltr_list[i].ltr_in_use = false;
+		}
+	}
+
+	return vcd_status;
+}
+
+void ddl_encoder_use_ltr_fail_callback(struct ddl_client_context *ddl)
+{
+	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
+	struct ddl_context *ddl_context = ddl->ddl_context;
+
+	DDL_MSG_ERROR("%s: LTR use failed, callback "\
+		"requested with LTR ID %d", __func__,
+		encoder->ltr_control.failed_use_cmd.ltr_id);
+
+	ddl_context->ddl_callback(VCD_EVT_IND_INFO_LTRUSE_FAILED,
+			VCD_ERR_ILLEGAL_PARM,
+			&(encoder->ltr_control.failed_use_cmd),
+			sizeof(struct vcd_property_ltruse_type),
+			(u32 *)ddl,
+			ddl->client_data);
+}
+
+void ddl_print_ltr_list(struct ddl_ltr_encoding_type *ltr_control)
+{
+	u32 i;
+
+	for (i = 0; i < ltr_control->ltr_count; i++) {
+		DDL_MSG_MED("%s: ltr_id: %d, ltr_in_use: %d",
+			__func__, ltr_control->ltr_list[i].ltr_id,
+			ltr_control->ltr_list[i].ltr_in_use);
+	}
+}
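
The auto-mode logic in ddl_encoder_ltr_control above treats IDR boundaries differently for finite and infinite intra periods; the two predicates reduce to the check below, sketched with the frame counter and p_frames as plain integers:

#include <stdbool.h>
#include <stdio.h>

#define MAX_P_FRAMES 0xffffu	/* sentinel: infinite intra period */

/* out_cnt counts frames emitted since the last IDR */
static bool intra_period_reached(unsigned int out_cnt, unsigned int p_frames)
{
	if (p_frames == MAX_P_FRAMES)
		return out_cnt == 0;	/* infinite period: IDR only at start */
	return (out_cnt % (p_frames + 1)) == 0;	/* one IDR per GOP */
}

int main(void)
{
	/* p_frames = 3 -> GOP of 4: true at 0, 4, 8, ... */
	printf("%d %d %d\n", intra_period_reached(0, 3),
		intra_period_reached(3, 3), intra_period_reached(4, 3));
	return 0;
}
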
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
index 688ecb6..64cc570 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_interrupt_handler.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -493,6 +493,7 @@
 				ddl_get_state_string(ddl->client_state));
 			ddl_calc_core_proc_time(__func__, DEC_OP_TIME, ddl);
 			ddl_reset_core_time_variables(DEC_OP_TIME);
+			ddl_vidc_decode_reset_avg_time(ddl);
 			ddl->client_state = DDL_CLIENT_WAIT_FOR_FRAME;
 			ddl_vidc_decode_frame_run(ddl);
 			ret_status = false;
@@ -821,13 +822,13 @@
 			pixel_cache_stats;
 			vidc_pix_cache_get_statistics(&pixel_cache_stats);
 
-			DDL_MSG_HIGH(" pixel cache hits = %d,"
+			DDL_MSG_LOW(" pixel cache hits = %d,"
 				"miss = %d", pixel_cache_stats.access_hit,
 				pixel_cache_stats.access_miss);
-			DDL_MSG_HIGH(" pixel cache core reqs = %d,"
+			DDL_MSG_LOW(" pixel cache core reqs = %d,"
 				"axi reqs = %d", pixel_cache_stats.core_req,
 				pixel_cache_stats.axi_req);
-			DDL_MSG_HIGH(" pixel cache core bus stats = %d,"
+			DDL_MSG_LOW(" pixel cache core bus stats = %d,"
 			"axi bus stats = %d", pixel_cache_stats.core_bus,
 				pixel_cache_stats.axi_bus);
 		}
@@ -1312,7 +1313,7 @@
 			DDL_MSG_LOW("%s y_cb_cr_size = %u "
 				"actual_output_buf_req.sz = %u"
 				"min_output_buf_req.sz = %u\n",
-				decoder->y_cb_cr_size,
+				__func__, decoder->y_cb_cr_size,
 				decoder->actual_output_buf_req.sz,
 				decoder->min_output_buf_req.sz);
 			vidc_sm_set_chroma_addr_change(
@@ -1473,7 +1474,7 @@
 		}
 	} else
 		status = false;
-	DDL_MSG_HIGH("Enc Frame Type %u", (u32)frame->frame);
+	DDL_MSG_LOW("Enc Frame Type %u", (u32)frame->frame);
 	return status;
 }
 
@@ -1763,7 +1764,17 @@
 			(unsigned long) output_frame->alloc_len,
 			ION_IOC_INV_CACHES);
 	}
-	ddl_process_encoder_metadata(ddl);
+
+	if ((VIDC_1080P_ENCODE_FRAMETYPE_SKIPPED !=
+		encoder->enc_frame_info.enc_frame) &&
+		(VIDC_1080P_ENCODE_FRAMETYPE_NOT_CODED !=
+		encoder->enc_frame_info.enc_frame)) {
+		if (DDL_IS_LTR_ENABLED(encoder))
+			ddl_handle_ltr_in_framedone(ddl);
+		ddl_process_encoder_metadata(ddl);
+		encoder->ltr_control.meta_data_reqd = false;
+	}
+	encoder->ltr_control.using = false;
 	ddl_vidc_encode_dynamic_property(ddl, false);
 	ddl->input_frame.frm_trans_end = false;
 	input_buffer_address = ddl_context->dram_base_a.align_physical_addr +
@@ -1800,8 +1811,10 @@
 	slice_output = (struct vidc_1080p_enc_slice_batch_out_param *)
 		(encoder->batch_frame.slice_batch_out.align_virtual_addr);
 	DDL_MSG_LOW(" after get no of slices = %d\n", num_slices_comp);
-	if (slice_output == NULL)
+	if (slice_output == NULL) {
 		DDL_MSG_ERROR(" slice_output is NULL\n");
+		return; /* Bail out */
+	}
 	encoder->slice_delivery_info.num_slices_enc += num_slices_comp;
 	if (vidc_msg_timing) {
 		ddl_calc_core_proc_time_cnt(__func__, ENC_SLICE_OP_TIME,
@@ -1827,9 +1840,9 @@
 			encoder->batch_frame.output_frame[actual_idx].vcd_frm);
 		DDL_MSG_LOW("OutBfr: vcd_frm 0x%x frmbfr(virtual) 0x%x"
 			"frmbfr(physical) 0x%x\n",
-			&output_frame,
-			output_frame.virtual_base_addr,
-			output_frame.physical_base_addr);
+			(u32)output_frame,
+			(u32)output_frame->virtual,
+			(u32)output_frame->physical);
 		vidc_1080p_get_encode_frame_info(&encoder->enc_frame_info);
 		vidc_sm_get_frame_tags(&ddl->shared_mem
 			[ddl->command_channel],
@@ -1907,17 +1920,17 @@
 		actual_idx =
 			slice_output->slice_info[start_bfr_idx+index]. \
 			stream_buffer_idx;
-		DDL_MSG_LOW("Slice Info: OutBfrIndex %d SliceSize %d",
+		DDL_MSG_LOW("Slice Info: OutBfrIndex %u SliceSize %u",
 			actual_idx,
 			slice_output->slice_info[start_bfr_idx+index]. \
-			stream_buffer_size, 0);
+			stream_buffer_size);
 		output_frame =
 		&(encoder->batch_frame.output_frame[actual_idx].vcd_frm);
 		DDL_MSG_LOW("OutBfr: vcd_frm 0x%x frmbfr(virtual) 0x%x"
 				"frmbfr(physical) 0x%x",
-				&output_frame,
-				output_frame.virtual_base_addr,
-				output_frame.physical_base_addr);
+				(u32)output_frame,
+				(u32)output_frame->virtual,
+				(u32)output_frame->physical);
 		vidc_1080p_get_encode_frame_info(
 			&encoder->enc_frame_info);
 		vidc_sm_get_frame_tags(&ddl->shared_mem
@@ -1935,8 +1948,8 @@
 			slice_output->slice_info[actual_idx].stream_buffer_size;
 		ddl->output_frame =
 			encoder->batch_frame.output_frame[actual_idx];
-		DDL_MSG_LOW(" %s actual_idx = %d"
-		"encoder->batch_frame.num_output_frames = %d\n", __func__,
+		DDL_MSG_LOW("%s: actual_idx = %u "\
+		"encoder->batch_frame.num_output_frames = %u\n", __func__,
 		actual_idx, encoder->batch_frame.num_output_frames);
 		if (encoder->batch_frame.num_output_frames == (actual_idx+1)) {
 			output_frame->flags |= VCD_FRAME_FLAG_ENDOFFRAME;
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c
index f037765..2b8eddd 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.c
@@ -75,6 +75,9 @@
 		case VCD_METADATA_QCOMFILLER:
 			skip_words = 6;
 		break;
+		case VCD_METADATA_LTR_INFO:
+			skip_words = 9;
+		break;
 		}
 	}
 	buffer += skip_words;
@@ -146,19 +149,24 @@
 		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
 		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
 		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_ENC_SLICE;
+
+		hdr_entry = ddl_metadata_hdr_entry(ddl, VCD_METADATA_LTR_INFO);
+		hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
+		hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
+		hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = VCD_METADATA_LTR_INFO;
 	}
 }
 
 static u32 ddl_supported_metadata_flag(struct ddl_client_context *ddl)
 {
 	u32 flag = 0;
+	enum vcd_codec codec =
+		ddl->codec_data.decoder.codec.codec;
 
 	if (ddl->decoding) {
-		enum vcd_codec codec =
-			ddl->codec_data.decoder.codec.codec;
-
 		flag |= (VCD_METADATA_CONCEALMB | VCD_METADATA_PASSTHROUGH |
-				VCD_METADATA_QPARRAY);
+				VCD_METADATA_QPARRAY |
+				VCD_METADATA_SEPARATE_BUF);
 		if (codec == VCD_CODEC_H264)
 			flag |= (VCD_METADATA_SEI | VCD_METADATA_VUI);
 		else if (codec == VCD_CODEC_VC1 ||
@@ -167,8 +175,12 @@
 		else if (codec == VCD_CODEC_MPEG2)
 			flag |= (VCD_METADATA_USER_DATA |
 				VCD_METADATA_EXT_DATA);
-	} else
-		flag |= VCD_METADATA_ENC_SLICE;
+	} else {
+		if (codec == VCD_CODEC_H264)
+			flag |= VCD_METADATA_ENC_SLICE | VCD_METADATA_LTR_INFO;
+		else
+			flag |= VCD_METADATA_ENC_SLICE;
+	}
 	return flag;
 }
 
@@ -249,6 +261,9 @@
 	DDL_METADATA_ALIGNSIZE(suffix);
 	decoder->suffix = suffix;
 	output_buf_req->sz += suffix;
+	output_buf_req->meta_buffer_size =
+		(suffix + 8191) & (~8191);
 	decoder->meta_data_offset = 0;
 	DDL_MSG_LOW("metadata output buf size : %d", suffix);
 }
@@ -272,6 +287,12 @@
 		DDL_METADATA_ALIGNSIZE(size);
 		suffix += size;
 	}
+	if (flag & VCD_METADATA_LTR_INFO) {
+		size = DDL_METADATA_HDR_SIZE;
+		size += DDL_METADATA_LTR_INFO_PAYLOAD_SIZE;
+		DDL_METADATA_ALIGNSIZE(size);
+		suffix += size;
+	}
 	size = DDL_METADATA_EXTRADATANONE_SIZE;
 	DDL_METADATA_ALIGNSIZE(size);
 	suffix += (size);
@@ -464,13 +485,14 @@
 void ddl_vidc_decode_set_metadata_output(struct ddl_decoder_data *decoder)
 {
 	struct ddl_context *ddl_context;
-	u32 loopc, yuv_size;
+	u32 loopc, yuv_size, dpb;
 	u32 *buffer;
-
+	struct ddl_dec_buffers *dec_buffers = &decoder->hw_bufs;
 	if (!decoder->meta_data_enable_flag) {
 		decoder->meta_data_offset = 0;
 		return;
 	}
+	dpb = decoder->dp_buf.no_of_dec_pic_buf;
 	ddl_context = ddl_get_context();
 	yuv_size = ddl_get_yuv_buffer_size(&decoder->client_frame_size,
 		&decoder->buf_format, !decoder->progressive_only,
@@ -478,15 +500,22 @@
 	decoder->meta_data_offset = DDL_ALIGN_SIZE(yuv_size,
 		DDL_LINEAR_BUF_ALIGN_GUARD_BYTES, DDL_LINEAR_BUF_ALIGN_MASK);
 	buffer = (u32 *) decoder->meta_data_input.align_virtual_addr;
-	*buffer++ = decoder->suffix;
 	DDL_MSG_LOW("Metadata offset & size : %d/%d",
 		decoder->meta_data_offset, decoder->suffix);
-	for (loopc = 0; loopc < decoder->dp_buf.no_of_dec_pic_buf;
-		++loopc) {
-		*buffer++ = (u32)(decoder->meta_data_offset + (u8 *)
+	if (!(decoder->meta_data_enable_flag & VCD_METADATA_SEPARATE_BUF)) {
+		*buffer++ = decoder->suffix;
+		for (loopc = 0; loopc < dpb; ++loopc) {
+			*buffer++ = (u32)(decoder->meta_data_offset + (u8 *)
 			DDL_OFFSET(ddl_context->dram_base_a.
 			align_physical_addr, decoder->dp_buf.
 			dec_pic_buffers[loopc].vcd_frm.physical));
+		}
+	} else {
+		*buffer++ = decoder->actual_output_buf_req.meta_buffer_size;
+		for (loopc = 0; loopc < dpb; ++loopc) {
+			*buffer++ = DDL_ADDR_OFFSET(ddl_context->dram_base_a,
+					dec_buffers->meta_hdr[loopc]);
+		}
 	}
 }
 
@@ -497,27 +526,92 @@
 		&(ddl->output_frame.vcd_frm);
 	u32 *qfiller_hdr, *qfiller, start_addr;
 	u32 qfiller_size;
+	u8 *extradata_addr;
+	u32 metadata_available = false;
+
+	out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
 	if (!encoder->meta_data_enable_flag) {
-		out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
-		return;
+		DDL_MSG_HIGH("meta_data is not enabled");
+		goto exit;
 	}
-	if (!encoder->enc_frame_info.meta_data_exists) {
-		out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
-		return;
-	}
-	out_frame->flags |= VCD_FRAME_FLAG_EXTRADATA;
-	DDL_MSG_LOW("processing metadata for encoder");
 	start_addr = (u32) ((u8 *)out_frame->virtual + out_frame->offset);
-	qfiller = (u32 *)((out_frame->data_len +
+	extradata_addr = (u8 *)((out_frame->data_len +
 				start_addr + 3) & ~3);
+	qfiller = (u32 *)extradata_addr;
 	qfiller_size = (u32)((encoder->meta_data_offset +
 		(u8 *) out_frame->virtual) - (u8 *) qfiller);
+	if (qfiller_size & 3) {
+		DDL_MSG_ERROR("qfiller_size is not 4 bytes aligned");
+		goto exit;
+	}
 	qfiller_hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER);
 	*qfiller++ = qfiller_size;
 	*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX];
 	*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX];
 	*qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX];
 	*qfiller = (u32)(qfiller_size - DDL_METADATA_HDR_SIZE);
+	extradata_addr += qfiller_size;
+	if ((u32)extradata_addr !=
+		(u32)((u8 *)start_addr + encoder->meta_data_offset)) {
+		DDL_MSG_ERROR("wrong qfiller size");
+		goto exit;
+	}
+	if (encoder->meta_data_enable_flag & VCD_METADATA_ENC_SLICE) {
+		u32 *sliceinfo = (u32 *)extradata_addr;
+		if (sliceinfo[3] != VCD_METADATA_ENC_SLICE) {
+			DDL_MSG_ERROR("wrong slice info extradata. " \
+				"data: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x",
+				sliceinfo[0], sliceinfo[1], sliceinfo[2],
+				sliceinfo[3], sliceinfo[4], sliceinfo[5]);
+			goto metadata_end;
+		}
+		extradata_addr += sliceinfo[0];
+		metadata_available = true;
+		DDL_MSG_HIGH("%s: Send %u slices info in metadata",
+			__func__, sliceinfo[5]);
+	}
+	if ((encoder->meta_data_enable_flag & VCD_METADATA_LTR_INFO) &&
+		(encoder->ltr_control.meta_data_reqd == true)) {
+		u32 *ltrinfo_hdr = ddl_metadata_hdr_entry(ddl,
+				VCD_METADATA_LTR_INFO);
+		u32 *ltrinfo = (u32 *)extradata_addr;
+		if ((u32)extradata_addr > (u32)((u8 *)out_frame->virtual +
+			out_frame->alloc_len)) {
+			metadata_available = false;
+			DDL_MSG_ERROR("Error: extradata_addr = 0x%p, "\
+				"buffer_start = 0x%p, size = %u",
+				extradata_addr, out_frame->virtual,
+				out_frame->alloc_len);
+			goto metadata_end;
+		}
+		ltrinfo[0] = DDL_METADATA_LTR_INFO_PAYLOAD_SIZE +
+				DDL_METADATA_HDR_SIZE;
+		ltrinfo[1] = ltrinfo_hdr[DDL_METADATA_HDR_VERSION_INDEX];
+		ltrinfo[2] = ltrinfo_hdr[DDL_METADATA_HDR_PORT_INDEX];
+		ltrinfo[3] = ltrinfo_hdr[DDL_METADATA_HDR_TYPE_INDEX];
+		ltrinfo[4] = DDL_METADATA_LTR_INFO_PAYLOAD_SIZE;
+		ltrinfo[5] = encoder->ltr_control.curr_ltr_id;
+		extradata_addr += ltrinfo[0];
+		encoder->ltr_control.curr_ltr_id++;
+		metadata_available = true;
+		DDL_MSG_HIGH("%s: Send curr_ltr_id = %u in metadata",
+			__func__, (u32)encoder->ltr_control.curr_ltr_id);
+	}
+metadata_end:
+	if (metadata_available == true) {
+		u32 *data_none_hdr = ddl_metadata_hdr_entry(ddl,
+				VCD_METADATA_DATANONE);
+		u32 *data_none = (u32 *)extradata_addr;
+		DDL_MSG_LOW("prepare metadata_none header");
+		data_none[0] = DDL_METADATA_EXTRADATANONE_SIZE;
+		data_none[1] = data_none_hdr[DDL_METADATA_HDR_VERSION_INDEX];
+		data_none[2] = data_none_hdr[DDL_METADATA_HDR_PORT_INDEX];
+		data_none[3] = data_none_hdr[DDL_METADATA_HDR_TYPE_INDEX];
+		data_none[4] = 0;
+		out_frame->flags |= VCD_FRAME_FLAG_EXTRADATA;
+	}
+exit:
+	return;
 }
 
 void ddl_process_decoder_metadata(struct ddl_client_context *ddl)
@@ -545,7 +639,8 @@
 	DDL_MSG_LOW("data_len/metadata_offset : %d/%d",
 		output_frame->data_len, decoder->meta_data_offset);
 	output_frame->flags |= VCD_FRAME_FLAG_EXTRADATA;
-	if (output_frame->data_len != decoder->meta_data_offset) {
+	if (!(decoder->meta_data_enable_flag & VCD_METADATA_SEPARATE_BUF)
+		&& (output_frame->data_len != decoder->meta_data_offset)) {
 		qfiller = (u32 *)((u32)((output_frame->data_len +
 			output_frame->offset  +
 				(u8 *) output_frame->virtual) + 3) & ~3);
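
Every extradata block the encoder appends after the bitstream — QCOMFILLER, ENC_SLICE, LTR_INFO, and the DATANONE terminator — shares the five-word header layout visible in the packing code above: total size, version, port, type, payload size, then the payload. A sketch of the LTR block; the type ID here is a placeholder, the real constants live in the vcd property headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define META_LTR_INFO		0x0a	/* placeholder type ID */
#define LTR_PAYLOAD_SIZE	4
#define HDR_SIZE		(5 * sizeof(uint32_t))

/* header words: total size, version, port, type, payload size */
static uint8_t *pack_ltr_block(uint8_t *p, uint32_t ltr_id)
{
	uint32_t block[6] = {
		HDR_SIZE + LTR_PAYLOAD_SIZE,	/* [0] whole block size */
		0x00000101,			/* [1] version          */
		1,				/* [2] port (output)    */
		META_LTR_INFO,			/* [3] block type       */
		LTR_PAYLOAD_SIZE,		/* [4] payload size     */
		ltr_id,				/* [5] payload: LTR ID  */
	};
	memcpy(p, block, sizeof(block));
	return p + block[0];	/* next block starts right after */
}

int main(void)
{
	uint8_t buf[64];
	uint8_t *end = pack_ltr_block(buf, 1);

	printf("block bytes: %u\n", (unsigned int)(end - buf));	/* 24 */
	return 0;
}
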
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.h
index 657f95f..5be38d0 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_metadata.h
@@ -26,6 +26,7 @@
 #define DDL_METADATA_SEI_PAYLOAD_SIZE          100
 #define DDL_METADATA_SEI_MAX                     5
 #define DDL_METADATA_VUI_PAYLOAD_SIZE          256
+#define DDL_METADATA_LTR_INFO_PAYLOAD_SIZE     (4)
 #define DDL_METADATA_PASSTHROUGH_PAYLOAD_SIZE   68
 #define DDL_METADATA_EXT_PAYLOAD_SIZE         (640)
 #define DDL_METADATA_USER_PAYLOAD_SIZE        (2048)
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
index effcaa5..5cf2f9c 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_properties.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -163,6 +163,28 @@
 		DDL_MSG_ERROR("H264BaseLineCABAC!!");
 		return false;
 	}
+	if (DDL_IS_LTR_ENABLED(encoder)) {
+		if ((encoder->codec.codec != VCD_CODEC_H264) ||
+			(encoder->i_period.b_frames)) {
+			DDL_MSG_ERROR("%s: Only support LTR encoding "\
+				"for H264 without B frame. Current "\
+				"codec %d, B-frame %d", __func__,
+				encoder->codec.codec,
+				encoder->i_period.b_frames);
+			return false;
+		}
+		if (encoder->ltr_control.ltrmode.ltr_mode ==
+				VCD_LTR_MODE_MANUAL) {
+			DDL_MSG_ERROR("%s: Manual LTR mode not supported!",
+				__func__);
+			return false;
+		}
+		DDL_MSG_HIGH("%s: LTR: mode = %u, count = %u, period = %u",
+			__func__, (u32)encoder->ltr_control.ltrmode.ltr_mode,
+			encoder->ltr_control.ltr_count,
+			encoder->ltr_control.ltr_period);
+	}
+
 	return true;
 }
 
@@ -319,6 +341,61 @@
 		}
 	}
 	break;
+	case VCD_I_SET_EXT_METABUFFER:
+	{
+		int index, buffer_size;
+		u8 *phys_addr;
+		u8 *virt_addr;
+		struct vcd_property_meta_buffer *meta_buffer =
+			(struct vcd_property_meta_buffer *) property_value;
+		DDL_MSG_LOW("Entered VCD_I_SET_EXT_METABUFFER Virt: %p,"\
+					"Phys %p, fd: %d size: %d count: %d",
+					meta_buffer->kernel_virtual_addr,
+					meta_buffer->physical_addr,
+					meta_buffer->pmem_fd,
+					meta_buffer->size, meta_buffer->count);
+		if ((property_hdr->sz == sizeof(struct
+			vcd_property_meta_buffer)) &&
+			(DDLCLIENT_STATE_IS(ddl,
+			DDL_CLIENT_WAIT_FOR_INITCODEC) ||
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_WAIT_FOR_DPB) ||
+			DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN))) {
+			phys_addr = meta_buffer->dev_addr;
+			virt_addr = meta_buffer->kernel_virtual_addr;
+			buffer_size = meta_buffer->size/meta_buffer->count;
+
+			for (index = 0; index < meta_buffer->count; index++) {
+				ddl->codec_data.decoder.hw_bufs.
+					meta_hdr[index].align_physical_addr
+					= phys_addr;
+				ddl->codec_data.decoder.hw_bufs.
+					meta_hdr[index].align_virtual_addr
+					= virt_addr;
+				ddl->codec_data.decoder.hw_bufs.
+					meta_hdr[index].buffer_size
+					= buffer_size;
+				ddl->codec_data.decoder.hw_bufs.
+					meta_hdr[index].physical_base_addr
+					= phys_addr;
+				ddl->codec_data.decoder.hw_bufs.
+					meta_hdr[index].virtual_base_addr
+					= virt_addr;
+
+				DDL_MSG_LOW("Meta Buffer: "\
+							"Assigned %d buffer for "
+							"virt: %p, phys %p for "
+							"meta_buffers "
+							"of size: %d\n",
+							index, virt_addr,
+							phys_addr, buffer_size);
+
+				phys_addr += buffer_size;
+				virt_addr += buffer_size;
+			}
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
 	case VCD_I_H264_MV_BUFFER:
 	{
 		int index, buffer_size;
@@ -379,6 +456,13 @@
 			vcd_status = VCD_S_SUCCESS;
 		}
 		break;
+	case VCD_I_FREE_EXT_METABUFFER:
+		{
+			memset(&decoder->hw_bufs.meta_hdr, 0, sizeof(struct
+					ddl_buf_addr) * DDL_MAX_BUFFER_COUNT);
+			vcd_status = VCD_S_SUCCESS;
+		}
+		break;
 	case VCD_I_OUTPUT_ORDER:
 		{
 			if (sizeof(u32) == property_hdr->sz &&
@@ -617,8 +701,9 @@
 		struct vcd_property_multi_slice *multi_slice =
 			(struct vcd_property_multi_slice *)
 				property_value;
-		DDL_MSG_HIGH("VCD_I_MULTI_SLICE eMSliceSel %d  nMSliceSize %d"
-				"Tot#of MB %d encoder->frame_size.width = %d"
+		DDL_MSG_HIGH("VCD_I_MULTI_SLICE eMSliceSel %d  "\
+				"nMSliceSize %d Tot#of MB %d "\
+				"encoder->frame_size.width = %d "\
 				"encoder->frame_size.height = %d",
 				(int)multi_slice->m_slice_sel,
 				multi_slice->m_slice_size,
@@ -836,10 +921,8 @@
 			property_hdr->sz &&
 			((buffer_format->buffer_format ==
 			VCD_BUFFER_FORMAT_NV12_16M2KA) ||
-			(buffer_format->buffer_format ==
-			VCD_BUFFER_FORMAT_TILE_4x2) ||
-			(buffer_format->buffer_format ==
-			VCD_BUFFER_FORMAT_NV21_16M2KA))) {
+			(VCD_BUFFER_FORMAT_TILE_4x2 ==
+			buffer_format->buffer_format))) {
 			if (buffer_format->buffer_format !=
 				encoder->buf_format.buffer_format) {
 				encoder->buf_format = *buffer_format;
@@ -1032,20 +1115,62 @@
 				encoder->client_output_buf_req.sz/num_slices;
 				encoder->client_output_buf_req.sz =
 				DDL_ALIGN(output_buf_size, DDL_KILO_BYTE(4));
+				if (encoder->client_output_buf_req. \
+					actual_count < encoder-> \
+					client_output_buf_req.min_count) {
+					encoder->client_output_buf_req. \
+					actual_count = encoder-> \
+					client_output_buf_req.min_count;
+				}
 				encoder->output_buf_req =
 				encoder->client_output_buf_req;
-				DDL_MSG_HIGH("%s num_mb = %u num_slices = %u "
-				"output_buf_count = %u "
-				"output_buf_size = %u aligned size = %u\n",
+				DDL_MSG_HIGH("%s num_mb = %u num_slices = %u" \
+				" min_count = %u act_count = %u" \
+				" aligned size = %u\n",
 				__func__, num_mb, num_slices,
 				encoder->client_output_buf_req.min_count,
-				output_buf_size,
+				encoder->client_output_buf_req.actual_count,
 				encoder->client_output_buf_req.sz);
 				vcd_status = VCD_S_SUCCESS;
 			}
 		}
 		break;
 	}
+	case VCD_I_LTR_MODE:
+		if (sizeof(struct vcd_property_ltrmode_type) ==
+			property_hdr->sz && encoder->codec.codec ==
+			VCD_CODEC_H264) {
+			struct vcd_property_ltrmode_type *ltrmode =
+				(struct vcd_property_ltrmode_type *)
+				property_value;
+			encoder->ltr_control.ltrmode.ltr_mode =
+				ltrmode->ltr_mode;
+			DDL_MSG_HIGH("%s: set LTR mode = %u", __func__,
+				(u32)encoder->ltr_control.ltrmode.ltr_mode);
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_LTR_COUNT:
+		if (sizeof(struct vcd_property_ltrcount_type) ==
+			property_hdr->sz && encoder->codec.codec ==
+			VCD_CODEC_H264) {
+			struct vcd_property_ltrcount_type *ltrcount =
+				(struct vcd_property_ltrcount_type *)
+				property_value;
+			if (ltrcount->ltr_count > DDL_MAX_NUM_LTR_FRAMES) {
+				DDL_MSG_ERROR("%s: set LTR count failed. "\
+					"LTR count %u beyond maximum of %u",
+					__func__, ltrcount->ltr_count,
+					(u32)DDL_MAX_NUM_LTR_FRAMES);
+			} else {
+				encoder->ltr_control.ltr_count =
+					ltrcount->ltr_count;
+				DDL_MSG_HIGH("%s: set LTR count = %u", __func__,
+					encoder->ltr_control.ltr_count);
+				vcd_status = VCD_S_SUCCESS;
+			}
+		}
+	break;
 	case VCD_REQ_PERF_LEVEL:
 		vcd_status = VCD_S_SUCCESS;
 		break;
@@ -1077,8 +1202,24 @@
 		}
 		break;
 	}
+	case VCD_I_H263_PLUSPTYPE:
+	{
+		struct vcd_property_plusptype *plusptype =
+			(struct vcd_property_plusptype *)property_value;
+
+		if ((sizeof(struct vcd_property_plusptype) ==
+			property_hdr->sz) && encoder->codec.codec ==
+			VCD_CODEC_H263) {
+			encoder->plusptype_enable = plusptype->plusptype_enable;
+			DDL_MSG_LOW("\nencoder->plusptype_enable = %u",
+						encoder->plusptype_enable);
+			vcd_status = VCD_S_SUCCESS;
+		}
+		break;
+	}
 	default:
-		DDL_MSG_ERROR("INVALID ID %d\n", (int)property_hdr->prop_id);
+		DDL_MSG_ERROR("%s: unknown prop_id = 0x%x", __func__,
+			property_hdr->prop_id);
 		vcd_status = VCD_ERR_ILLEGAL_OP;
 	break;
 	}
@@ -1539,7 +1680,7 @@
 	break;
 	case VCD_I_METADATA_ENABLE:
 	case VCD_I_METADATA_HEADER:
-		DDL_MSG_ERROR("Meta Data Interface is Requested");
+		DDL_MSG_HIGH("Meta Data Interface is Requested");
 		vcd_status = ddl_get_metadata_params(ddl, property_hdr,
 			property_value);
 		vcd_status = VCD_S_SUCCESS;
@@ -1578,7 +1719,67 @@
 			vcd_status = VCD_S_SUCCESS;
 		}
 		break;
+	case VCD_I_CAPABILITY_LTR_COUNT:
+		if (sizeof(struct vcd_property_range_type) ==
+			property_hdr->sz) {
+			struct vcd_property_range_type capability_ltr_range;
+			capability_ltr_range.max = DDL_MAX_NUM_LTR_FRAMES;
+			capability_ltr_range.min = 1;
+			capability_ltr_range.step_size = 1;
+			*(struct vcd_property_range_type *)property_value =
+				capability_ltr_range;
+			DDL_MSG_HIGH("%s: capability_ltr_count = %u",
+				__func__, ((struct vcd_property_range_type *)
+				property_value)->max);
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_LTR_MODE:
+		if (sizeof(struct vcd_property_ltrmode_type) ==
+			property_hdr->sz) {
+			((struct vcd_property_ltrmode_type *)
+			property_value)->ltr_mode =
+				encoder->ltr_control.ltrmode.ltr_mode;
+			DDL_MSG_HIGH("%s: ltr_mode = %u", __func__,
+				(u32)(((struct vcd_property_ltrmode_type *)
+				property_value)->ltr_mode));
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_LTR_COUNT:
+		if (sizeof(struct vcd_property_ltrcount_type) ==
+			property_hdr->sz) {
+			struct vcd_property_ltrcount_type ltr_count;
+			ltr_count.ltr_count =
+				encoder->ltr_control.ltr_count;
+			*(struct vcd_property_ltrcount_type *)property_value =
+				ltr_count;
+			DDL_MSG_HIGH("%s: ltr_count = %u", __func__,
+				((struct vcd_property_ltrcount_type *)
+				property_value)->ltr_count);
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
+	case VCD_I_LTR_PERIOD:
+		if (sizeof(struct vcd_property_ltrperiod_type) ==
+			property_hdr->sz) {
+			struct vcd_property_ltrperiod_type ltr_period;
+			if (!encoder->ltr_control.ltr_period)
+				ltr_period.ltr_period = 0;
+			else
+				ltr_period.ltr_period =
+					encoder->ltr_control.ltr_period - 1;
+			*(struct vcd_property_ltrperiod_type *)property_value =
+				ltr_period;
+			DDL_MSG_HIGH("%s: ltr_period = %u", __func__,
+				((struct vcd_property_ltrperiod_type *)
+				property_value)->ltr_period);
+			vcd_status = VCD_S_SUCCESS;
+		}
+	break;
 	default:
+		DDL_MSG_ERROR("%s: unknown prop_id = 0x%x", __func__,
+			property_hdr->prop_id);
 		vcd_status = VCD_ERR_ILLEGAL_OP;
 		break;
 	}
@@ -1667,6 +1868,77 @@
 		}
 	}
 	break;
+	case VCD_I_LTR_PERIOD:
+	{
+		if (sizeof(struct vcd_property_ltrperiod_type) ==
+			property_hdr->sz) {
+			struct vcd_property_ltrperiod_type *ltrperiod =
+				(struct vcd_property_ltrperiod_type *)
+				property_value;
+			encoder->ltr_control.ltr_period =
+				(ltrperiod->ltr_period == 0xFFFFFFFF) ?
+				0xFFFFFFFF : (ltrperiod->ltr_period + 1);
+			DDL_MSG_HIGH("%s: set ltr_period = %u", __func__,
+				encoder->ltr_control.ltr_period);
+			vcd_status = VCD_S_SUCCESS;
+		}
+	}
+	break;
+	case VCD_I_LTR_USE:
+	{
+		if (sizeof(struct vcd_property_ltruse_type) ==
+			property_hdr->sz) {
+			struct vcd_property_ltruse_type *ltruse =
+				(struct vcd_property_ltruse_type *)
+				property_value;
+			if (ltruse->ltr_id >= DDL_LTR_FRAME_START_ID) {
+				struct ddl_ltr_encoding_type *ltr_ctrl =
+					&encoder->ltr_control;
+				s32 idx;
+				idx = ddl_find_ltr_from_list(ltr_ctrl,
+					ltruse->ltr_id);
+				if (idx < 0) {
+					ltr_ctrl->callback_reqd = true;
+					ltr_ctrl->failed_use_cmd.ltr_id =
+						ltruse->ltr_id;
+					ltr_ctrl->failed_use_cmd.ltr_frames =
+						ltruse->ltr_frames;
+					DDL_MSG_ERROR("%s: index (%d) "\
+					"not found. Callback requested. "\
+					"ltr_id = %u, ltr_frames = %u",
+					__func__, idx, ltruse->ltr_id,
+					ltruse->ltr_frames);
+				} else {
+					ddl_use_ltr_from_list(ltr_ctrl, idx);
+					ltr_ctrl->ltr_use_frames =
+						ltruse->ltr_frames;
+					if (ltr_ctrl->using == false)
+						ltr_ctrl->\
+						out_frame_cnt_to_use_this_ltr =
+							ltruse->ltr_frames;
+					else
+						ltr_ctrl->\
+						pending_chg_ltr_useframes =
+							true;
+					ltr_ctrl->first_ltr_use_arvd = true;
+					ltr_ctrl->use_ltr_reqd = true;
+					DDL_MSG_HIGH("%s: index (%d) found. "\
+					"num frames to use this ltr_id (%u) "\
+					"is %u", __func__, idx,
+					ltruse->ltr_id, ltruse->ltr_frames);
+				}
+				dynamic_prop_change = DDL_ENC_LTR_USE_FRAME;
+				vcd_status = VCD_S_SUCCESS;
+			} else {
+				DDL_MSG_ERROR("%s: LTRUse ID %d failed. "\
+					"LTR ID starts from %d", __func__,
+					ltruse->ltr_id,
+					(u32)DDL_LTR_FRAME_START_ID);
+				vcd_status = VCD_ERR_ILLEGAL_OP;
+			}
+		}
+	}
+	break;
 	default:
 		vcd_status = VCD_ERR_ILLEGAL_OP;
 		break;
@@ -1734,6 +2006,9 @@
 	encoder->num_references_for_p_frame = DDL_MIN_NUM_REF_FOR_P_FRAME;
 	if (encoder->codec.codec == VCD_CODEC_MPEG4)
 		encoder->closed_gop = true;
+	encoder->intra_period_changed = false;
+	memset(&encoder->ltr_control, 0,
+		sizeof(struct ddl_ltr_encoding_type));
 	ddl_set_default_metadata_flag(ddl);
 	ddl_set_default_encoder_buffer_req(encoder);
 	encoder->slice_delivery_info.enable = 0;
@@ -1877,14 +2152,12 @@
 		encoder->input_buf_req.min_count;
 	encoder->input_buf_req.max_count    = DDL_MAX_BUFFER_COUNT;
 	encoder->input_buf_req.sz = y_cb_cr_size;
-	if ((encoder->buf_format.buffer_format ==
-		VCD_BUFFER_FORMAT_NV12_16M2KA) ||
-		(encoder->buf_format.buffer_format ==
-		VCD_BUFFER_FORMAT_NV21_16M2KA))
+	if (encoder->buf_format.buffer_format ==
+		VCD_BUFFER_FORMAT_NV12_16M2KA)
 		encoder->input_buf_req.align =
 			DDL_LINEAR_BUFFER_ALIGN_BYTES;
-	else if (encoder->buf_format.buffer_format ==
-		VCD_BUFFER_FORMAT_TILE_4x2)
+	else if (VCD_BUFFER_FORMAT_TILE_4x2 ==
+		encoder->buf_format.buffer_format)
 		encoder->input_buf_req.align = DDL_TILE_BUFFER_ALIGN_BYTES;
 	encoder->client_input_buf_req = encoder->input_buf_req;
 	memset(&encoder->output_buf_req , 0 ,
@@ -1911,6 +2184,7 @@
 	struct vcd_buffer_requirement *input_buf_req;
 	struct vcd_buffer_requirement *output_buf_req;
 	u32  min_dpb, y_cb_cr_size;
+	u32  frame_height_actual = 0;
 
 	if (!decoder->codec.codec)
 		return false;
@@ -1934,6 +2208,7 @@
 		if ((decoder->buf_format.buffer_format ==
 			VCD_BUFFER_FORMAT_TILE_4x2) &&
 			(frame_size->height < MDP_MIN_TILE_HEIGHT)) {
+			frame_height_actual = frame_size->height;
 			frame_size->height = MDP_MIN_TILE_HEIGHT;
 			ddl_calculate_stride(frame_size,
 				!decoder->progressive_only);
@@ -1972,6 +2247,10 @@
 	input_buf_req->sz = (1024 * 1024 * 2);
 	input_buf_req->align = DDL_LINEAR_BUFFER_ALIGN_BYTES;
 	decoder->min_input_buf_req = *input_buf_req;
+	if (frame_height_actual) {
+		frame_size->height = frame_height_actual;
+		ddl_calculate_stride(frame_size, !decoder->progressive_only);
+	}
 	return true;
 }
 
@@ -2167,5 +2446,6 @@
 		encoder->frame_rate.fps_denominator = 1;
 		ddl_set_default_enc_property(ddl);
 		encoder->sps_pps.sps_pps_for_idr_enable_flag = false;
+		encoder->plusptype_enable = 0;
 	}
 }
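
Note the off-by-one convention for VCD_I_LTR_PERIOD above: the setter stores ltr_period + 1 so that an internal value of 0 means "never set", and the getter subtracts it back; 0xFFFFFFFF is passed through as an "every frame" sentinel. The round trip, sketched standalone:

#include <stdint.h>
#include <stdio.h>

#define LTR_PERIOD_SENTINEL 0xFFFFFFFFu

static uint32_t period_to_internal(uint32_t client)
{
	/* store client + 1 so internal 0 means "never set" */
	return client == LTR_PERIOD_SENTINEL ? client : client + 1;
}

static uint32_t period_to_client(uint32_t internal)
{
	return internal ? internal - 1 : 0;
}

int main(void)
{
	printf("%u\n", (unsigned int)period_to_client(period_to_internal(30)));
	printf("%#x\n", (unsigned int)period_to_internal(LTR_PERIOD_SENTINEL));
	return 0;
}
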
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
index d70c8c7..f5e81e1 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.c
@@ -87,6 +87,8 @@
 #define VIDC_SM_ENC_EXT_CTRL_TIMING_INFO_EN_SHFT     14
 #define VIDC_SM_ENC_EXT_CTRL_AU_DELIMITER_EN_BMSK    0x00000800
 #define VIDC_SM_ENC_EXT_CTRL_AU_DELIMITER_EN_SHFT    11
+#define VIDC_SM_ENC_EXT_CTRL_LONG_TERM_REF_ENABLE_BMSK      0x00000400
+#define VIDC_SM_ENC_EXT_CTRL_LONG_TERM_REF_ENABLE_SHFT      10
 #define VIDC_SM_ENC_EXT_CTRL_H263_CPCFC_ENABLE_BMSK  0x80
 #define VIDC_SM_ENC_EXT_CTRL_H263_CPCFC_ENABLE_SHFT  7
 #define VIDC_SM_ENC_EXT_CTRL_SPS_PPS_CONTROL_BMSK    0X100
@@ -458,8 +460,8 @@
 	enum VIDC_SM_frame_skip frame_skip_mode,
 	u32 seq_hdr_in_band, u32 vbv_buffer_size, u32 cpcfc_enable,
 	u32 sps_pps_control, u32 closed_gop_enable,
-	u32 au_delim_enable,
-	u32 vui_timing_info_enable)
+	u32 au_delim_enable, u32 vui_timing_info_enable,
+	u32 ltr_enable)
 {
 	u32 enc_ctrl;
 	enc_ctrl = VIDC_SETFIELD((hec_enable) ? 1 : 0,
@@ -488,8 +490,10 @@
 			VIDC_SM_ENC_EXT_CTRL_AU_DELIMITER_EN_BMSK) |
 			VIDC_SETFIELD((vui_timing_info_enable) ? 1 : 0,
 			VIDC_SM_ENC_EXT_CTRL_TIMING_INFO_EN_SHFT,
-			VIDC_SM_ENC_EXT_CTRL_TIMING_INFO_EN_BMSK);
-
+			VIDC_SM_ENC_EXT_CTRL_TIMING_INFO_EN_BMSK) |
+			VIDC_SETFIELD((ltr_enable) ? 1 : 0,
+			VIDC_SM_ENC_EXT_CTRL_LONG_TERM_REF_ENABLE_SHFT,
+			VIDC_SM_ENC_EXT_CTRL_LONG_TERM_REF_ENABLE_BMSK);
 	DDL_MEM_WRITE_32(shared_mem, VIDC_SM_ENC_EXT_CTRL_ADDR, enc_ctrl);
 }
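
The new LTR enable flag is OR-ed into the same control word as the other encoder extensions. Assuming VIDC_SETFIELD is the usual shift-and-mask helper (its definition is outside this diff), bit 10 carries the flag:

#include <stdio.h>
#include <stdint.h>

/* assumed definition, matching the (value, shift, mask) call sites */
#define VIDC_SETFIELD(val, shift, bmsk)	(((val) << (shift)) & (bmsk))

#define LTR_ENABLE_BMSK	0x00000400
#define LTR_ENABLE_SHFT	10

int main(void)
{
	uint32_t enc_ctrl = 0;

	enc_ctrl |= VIDC_SETFIELD(1, LTR_ENABLE_SHFT, LTR_ENABLE_BMSK);
	printf("%#010x\n", (unsigned int)enc_ctrl);	/* 0x00000400 */
	return 0;
}
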
 
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
index 2a1c772..b7325a8 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_shared_mem.h
@@ -106,7 +106,8 @@
 	struct ddl_buf_addr *shared_mem, u32 hec_enable,
 	enum VIDC_SM_frame_skip  frame_skip_mode, u32 seq_hdr_in_band,
 	u32 vbv_buffer_size, u32 cpcfc_enable, u32 sps_pps_control,
-	u32 closed_gop_enable, u32 au_delim_enable, u32 vui_timing_info_enable);
+	u32 closed_gop_enable, u32 au_delim_enable, u32 vui_timing_info_enable,
+	u32 ltr_enable);
 void vidc_sm_set_encoder_param_change(struct ddl_buf_addr *shared_mem,
 	u32 bit_rate_chg, u32 frame_rate_chg, u32 i_period_chg);
 void vidc_sm_set_encoder_vop_time(struct ddl_buf_addr *shared_mem,
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
index c10c343..9b30ebe 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,8 +12,10 @@
  */
 #include <linux/memory_alloc.h>
 #include <linux/delay.h>
-#include <mach/msm_subsystem_map.h>
+#include <mach/iommu_domains.h>
+#include <mach/subsystem_restart.h>
 #include <mach/peripheral-loader.h>
+
 #include "vcd_ddl_utils.h"
 #include "vcd_ddl.h"
 #include "vcd_res_tracker_api.h"
@@ -25,8 +27,6 @@
 };
 static struct time_data proc_time[MAX_TIME_DATA];
 #define DDL_MSG_TIME(x...) printk(KERN_DEBUG x)
-static unsigned int vidc_mmu_subsystem[] =	{
-		MSM_SUBSYSTEM_VIDEO, MSM_SUBSYSTEM_VIDEO_FWARE};
 
 #ifdef DDL_BUF_LOG
 static void ddl_print_buffer(struct ddl_context *ddl_context,
@@ -39,13 +39,10 @@
 void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
 {
 	u32 alloc_size, offset = 0 ;
-	u32 index = 0;
 	struct ddl_context *ddl_context;
-	struct msm_mapped_buffer *mapped_buffer = NULL;
 	unsigned long iova = 0;
 	unsigned long buffer_size = 0;
 	unsigned long *kernel_vaddr = NULL;
-	unsigned long flags = 0;
 	int ret = 0;
 	ion_phys_addr_t phyaddr = 0;
 	size_t len = 0;
@@ -113,11 +110,6 @@
 				goto unmap_ion_alloc;
 			}
 			addr->alloced_phys_addr = (phys_addr_t) iova;
-
-			msm_ion_do_cache_op(ddl_context->video_ion_client,
-					addr->alloc_handle,
-					addr->virtual_base_addr,
-					sz, ION_IOC_CLEAN_INV_CACHES);
 		}
 		if (!addr->alloced_phys_addr) {
 			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
@@ -133,51 +125,10 @@
 		addr->align_virtual_addr = addr->virtual_base_addr + offset;
 		addr->buffer_size = alloc_size;
 	} else {
-		addr->alloced_phys_addr = (phys_addr_t)
-		allocate_contiguous_memory_nomap(alloc_size,
-			res_trk_get_mem_type(), SZ_4K);
-		if (!addr->alloced_phys_addr) {
-			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
-					 __func__, alloc_size);
-			goto bail_out;
-		}
-		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
-		if (alignment == DDL_KILO_BYTE(128))
-				index = 1;
-		else if (alignment > SZ_4K)
-			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
-
-		addr->mapped_buffer =
-		msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
-			alloc_size, flags, &vidc_mmu_subsystem[index],
-			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
-		if (IS_ERR(addr->mapped_buffer)) {
-			pr_err(" %s() buffer map failed", __func__);
-			goto free_acm_alloc;
-		}
-		mapped_buffer = addr->mapped_buffer;
-		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
-			pr_err("%s() map buffers failed\n", __func__);
-			goto free_map_buffers;
-		}
-		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
-		addr->virtual_base_addr = mapped_buffer->vaddr;
-		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
-			addr->physical_base_addr, alignment);
-		offset = (u32)(addr->align_physical_addr -
-				addr->physical_base_addr);
-		addr->align_virtual_addr = addr->virtual_base_addr + offset;
-		addr->buffer_size = sz;
+		pr_err("ION must be enabled\n");
+		goto bail_out;
 	}
 	return addr->virtual_base_addr;
-free_map_buffers:
-	msm_subsystem_unmap_buffer(addr->mapped_buffer);
-	addr->mapped_buffer = NULL;
-free_acm_alloc:
-		free_contiguous_memory_by_paddr(
-			(unsigned long)addr->alloced_phys_addr);
-		addr->alloced_phys_addr = (phys_addr_t)NULL;
-		return NULL;
 unmap_ion_alloc:
 	ion_unmap_kernel(ddl_context->video_ion_client,
 		addr->alloc_handle);
@@ -212,12 +163,6 @@
 			ion_free(ddl_context->video_ion_client,
 				addr->alloc_handle);
 			}
-	} else {
-		if (addr->mapped_buffer)
-			msm_subsystem_unmap_buffer(addr->mapped_buffer);
-		if (addr->alloced_phys_addr)
-			free_contiguous_memory_by_paddr(
-				(unsigned long)addr->alloced_phys_addr);
 	}
 	memset(addr, 0, sizeof(struct ddl_buf_addr));
 }
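
With the contiguous-memory (acm/msm_subsystem) fallback deleted, ddl_pmem_alloc and ddl_pmem_free have a single backend. A minimal sketch of the surviving non-secure path, with call shapes taken from this diff (error handling trimmed; res_trk_get_mem_type(), VIDEO_DOMAIN and VIDEO_MAIN_POOL are identifiers the surrounding code already uses; the secure-session variant uses ion_phys instead, as in the vdec hunks further down):

/* Sketch of the ION-only allocation ddl_pmem_alloc now performs. */
static void *ddl_ion_alloc_sketch(struct ion_client *client,
				  struct ddl_buf_addr *addr, size_t sz)
{
	unsigned long iova = 0, buffer_size = 0;

	addr->alloc_handle = ion_alloc(client, sz, SZ_4K,
				       res_trk_get_mem_type(), 0);
	if (IS_ERR_OR_NULL(addr->alloc_handle))
		return NULL;
	/* host-side mapping */
	addr->virtual_base_addr = (u8 *)ion_map_kernel(client,
						       addr->alloc_handle);
	if (IS_ERR_OR_NULL(addr->virtual_base_addr)) {
		ion_free(client, addr->alloc_handle);
		return NULL;
	}
	/* device-side mapping through the video IOMMU */
	if (ion_map_iommu(client, addr->alloc_handle, VIDEO_DOMAIN,
			  VIDEO_MAIN_POOL, SZ_4K, 0, &iova, &buffer_size,
			  0, 0)) {
		ion_unmap_kernel(client, addr->alloc_handle);
		ion_free(client, addr->alloc_handle);
		return NULL;
	}
	addr->alloced_phys_addr = (phys_addr_t)iova;
	addr->buffer_size = sz;
	return addr->virtual_base_addr;
}
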
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h
index 8db13c6..1792510 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_utils.h
@@ -34,17 +34,19 @@
 		printk(KERN_DEBUG x); \
 } while (0)
 
-#ifdef DDL_MSG_LOG
+#if DDL_MSG_LOG
 #define DDL_MSG_LOW(x...)    printk(KERN_INFO x)
 #define DDL_MSG_MED(x...)    printk(KERN_INFO x)
 #define DDL_MSG_HIGH(x...)   printk(KERN_INFO x)
+#define DDL_MSG_ERROR(x...)  printk(KERN_INFO x)
 #else
 #define DDL_MSG_LOW(x...)
 #define DDL_MSG_MED(x...)
 #define DDL_MSG_HIGH(x...)
+#define DDL_MSG_ERROR(x...)
 #endif
 
-#define DDL_MSG_ERROR(x...)  printk(KERN_INFO x)
+
 #define DDL_MSG_FATAL(x...)  printk(KERN_INFO x)
 
 #define DDL_ALIGN_SIZE(sz, guard_bytes, align_mask) \
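
Note the guard change from "#ifdef DDL_MSG_LOG" to "#if DDL_MSG_LOG", with DDL_MSG_ERROR pulled inside it: error prints now compile out together with the debug levels unless DDL_MSG_LOG evaluates to a non-zero constant, and the macro must expand to a value at all:

/* Alternatives a translation unit can now pick: */
#define DDL_MSG_LOG 1		/* LOW/MED/HIGH and, after this patch, ERROR */
/* #define DDL_MSG_LOG 0 */	/* all four compile out, errors included     */
/* #define DDL_MSG_LOG   */	/* valueless: "#if DDL_MSG_LOG" fails build  */
/* leaving it undefined is treated as 0 by #if, same as defining it to 0   */
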
diff --git a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
index 9779e1a..52ad4aa 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vcd_ddl_vidc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,6 +15,7 @@
 #include "vcd_ddl_metadata.h"
 #include "vcd_ddl_shared_mem.h"
 #include "vcd_core.h"
+#include "vcd_res_tracker_api.h"
 
 #if defined(PIX_CACHE_DISABLE)
 #define DDL_PIX_CACHE_ENABLE  false
@@ -108,7 +109,8 @@
 		dec_pix_cache = VIDC_1080P_DECODE_PCACHE_DISABLE;
 	const enum vidc_1080p_encode_p_cache_enable
 		enc_pix_cache = VIDC_1080P_ENCODE_PCACHE_ENABLE;
-	u32 pix_cache_ctrl, ctxt_mem_offset, ctxt_mem_size;
+	u32 pix_cache_ctrl, ctxt_mem_offset, ctxt_mem_size, arg1 = 0;
+	u8 *hw_ctxt = NULL;
 
 	if (ddl->decoding) {
 		ddl_set_core_start_time(__func__, DEC_OP_TIME);
@@ -116,6 +118,8 @@
 		pix_cache_ctrl = (u32)dec_pix_cache;
 		ctxt_mem_offset = DDL_ADDR_OFFSET(ddl_context->dram_base_a,
 		ddl->codec_data.decoder.hw_bufs.context) >> 11;
+		hw_ctxt =
+		ddl->codec_data.decoder.hw_bufs.context.virtual_base_addr;
 		ctxt_mem_size =
 			ddl->codec_data.decoder.hw_bufs.context.buffer_size;
 	} else {
@@ -123,9 +127,15 @@
 		pix_cache_ctrl = (u32)enc_pix_cache;
 		ctxt_mem_offset = DDL_ADDR_OFFSET(ddl_context->dram_base_a,
 			ddl->codec_data.encoder.hw_bufs.context) >> 11;
+		hw_ctxt =
+		ddl->codec_data.encoder.hw_bufs.context.virtual_base_addr;
 		ctxt_mem_size =
 			ddl->codec_data.encoder.hw_bufs.context.buffer_size;
 	}
+	if (!res_trk_check_for_sec_session() && hw_ctxt) {
+		memset(hw_ctxt, 0, ctxt_mem_size);
+		arg1 = 1 << 29;
+	}
 	switch (*vcd_codec) {
 	default:
 	case VCD_CODEC_MPEG4:
@@ -184,8 +194,9 @@
 	DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_WAIT_FOR_CHDONE",
 	ddl_get_state_string(ddl->client_state));
 	ddl->client_state = DDL_CLIENT_WAIT_FOR_CHDONE;
+	arg1 |= (u32)codec;
 	vidc_1080p_set_host2risc_cmd(VIDC_1080P_HOST2RISC_CMD_OPEN_CH,
-		(u32)codec, pix_cache_ctrl, ctxt_mem_offset,
+		arg1, pix_cache_ctrl, ctxt_mem_offset,
 		ctxt_mem_size);
 }
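
The OPEN_CH command's first argument now carries two things at once: the codec id in the low bits and, for non-secure sessions where the host pre-cleared the context buffer, a flag at bit 29. Reduced to its essentials (the flag name below is my label, not one from the source; the firmware-side meaning is inferred from this hunk):

/* Hypothetical name for the anonymous "1 << 29" above. */
#define OPEN_CH_HOST_CLEARED_CTXT	(1 << 29)

u32 arg1 = 0;
if (!res_trk_check_for_sec_session() && hw_ctxt) {
	memset(hw_ctxt, 0, ctxt_mem_size);	/* host zeroes the context  */
	arg1 = OPEN_CH_HOST_CLEARED_CTXT;	/* ...and flags that it did */
}
arg1 |= (u32)codec;				/* codec id in the low bits */
vidc_1080p_set_host2risc_cmd(VIDC_1080P_HOST2RISC_CMD_OPEN_CH, arg1,
			     pix_cache_ctrl, ctxt_mem_offset, ctxt_mem_size);
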
 
@@ -320,7 +331,7 @@
 {
 	struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
 	u32 frame_rate_change = false, bit_rate_change = false;
-	u32 i_period_change = false, reset_req = false;
+	u32 reset_req = false;
 
 	if (!enable) {
 		if (encoder->dynmic_prop_change_req) {
@@ -333,6 +344,17 @@
 			encoder->dynamic_prop_change &=
 				~(DDL_ENC_REQ_IFRAME);
 		}
+		if (encoder->dynamic_prop_change & DDL_ENC_LTR_USE_FRAME) {
+			if (encoder->ltr_control.callback_reqd) {
+				DDL_MSG_ERROR("%s: LTR use failed", __func__);
+				ddl_encoder_use_ltr_fail_callback(ddl);
+				encoder->ltr_control.callback_reqd = false;
+			} else {
+				encoder->ltr_control.use_ltr_reqd = true;
+			}
+			encoder->dynamic_prop_change &=
+				~(DDL_ENC_LTR_USE_FRAME);
+		}
 		if ((encoder->dynamic_prop_change &
 			DDL_ENC_CHANGE_BITRATE)) {
 			bit_rate_change = true;
@@ -344,7 +366,7 @@
 		}
 		if ((encoder->dynamic_prop_change
 			& DDL_ENC_CHANGE_IPERIOD)) {
-			i_period_change = true;
+			encoder->intra_period_changed = true;
 			vidc_sm_set_encoder_new_i_period(
 				&ddl->shared_mem[ddl->command_channel],
 				encoder->i_period.p_frames);
@@ -376,7 +398,7 @@
 		vidc_sm_set_encoder_param_change(
 			&ddl->shared_mem[ddl->command_channel],
 			bit_rate_change, frame_rate_change,
-			i_period_change);
+			encoder->intra_period_changed);
 	}
 }
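
The dynamic encoder properties here all follow one sticky-flag pattern: a request sets a bit in encoder->dynamic_prop_change, the bit is acted on when pending changes are committed, then cleared. The new LTR-use branch adds a failure leg, and the I-period change now persists in encoder->intra_period_changed instead of a local so it survives until vidc_sm_set_encoder_param_change reports it. The shape of the LTR branch, condensed:

if (encoder->dynamic_prop_change & DDL_ENC_LTR_USE_FRAME) {
	if (encoder->ltr_control.callback_reqd) {
		/* a client callback is still owed: fail the request */
		ddl_encoder_use_ltr_fail_callback(ddl);
		encoder->ltr_control.callback_reqd = false;
	} else {
		/* re-arm so the LTR use lands on a later frame */
		encoder->ltr_control.use_ltr_reqd = true;
	}
	encoder->dynamic_prop_change &= ~DDL_ENC_LTR_USE_FRAME; /* one-shot */
}
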
 
@@ -569,7 +591,7 @@
 	u32 index, luma[4], chroma[4], hdr_ext_control = false;
 	const u32 recon_bufs = 4;
 	u32 h263_cpfc_enable = false;
-	u32 scaled_frame_rate;
+	u32 scaled_frame_rate, ltr_enable;
 
 	ddl_vidc_encode_set_profile_level(ddl);
 	vidc_1080p_set_encode_frame_size(encoder->frame_size.width,
@@ -588,14 +610,16 @@
 			encoder->frame_rate.fps_denominator;
 	if ((encoder->codec.codec == VCD_CODEC_H263) &&
 		(DDL_FRAMERATE_SCALE(DDL_INITIAL_FRAME_RATE)
-		 != scaled_frame_rate))
+		 != scaled_frame_rate) && encoder->plusptype_enable)
 		h263_cpfc_enable = true;
+	ltr_enable = DDL_IS_LTR_ENABLED(encoder);
+	DDL_MSG_HIGH("ltr_enable = %u", ltr_enable);
 	vidc_sm_set_extended_encoder_control(&ddl->shared_mem
 		[ddl->command_channel], hdr_ext_control,
 		r_cframe_skip, false, 0,
 		h263_cpfc_enable, encoder->sps_pps.sps_pps_for_idr_enable_flag,
 		encoder->closed_gop, encoder->avc_delimiter_enable,
-		encoder->vui_timinginfo_enable);
+		encoder->vui_timinginfo_enable, ltr_enable);
 	if (encoder->vui_timinginfo_enable) {
 		vidc_sm_set_h264_encoder_timing_info(
 			&ddl->shared_mem[ddl->command_channel],
@@ -723,15 +747,8 @@
 	default:
 	break;
 	}
-	if ((encoder->buf_format.buffer_format ==
-			VCD_BUFFER_FORMAT_NV21_16M2KA)) {
-		DDL_MSG_LOW("NV21 Input format is set to the core");
-		vidc_1080p_set_enc_NV21(true);
-	}
-	if ((encoder->buf_format.buffer_format ==
-		VCD_BUFFER_FORMAT_NV12_16M2KA) ||
-		(encoder->buf_format.buffer_format ==
-		VCD_BUFFER_FORMAT_NV21_16M2KA))
+	if (encoder->buf_format.buffer_format ==
+		VCD_BUFFER_FORMAT_NV12_16M2KA)
 		mem_access_method = VIDC_1080P_TILE_LINEAR;
 	else
 		mem_access_method = VIDC_1080P_TILE_64x32;
@@ -805,7 +822,8 @@
 		encoder->dynmic_prop_change_req = true;
 		ddl_vidc_encode_dynamic_property(ddl, true);
 	}
-
+	if (DDL_IS_LTR_ENABLED(encoder))
+		ddl_encoder_ltr_control(ddl);
 	vidc_1080p_set_encode_circular_intra_refresh(
 		encoder->intra_refresh.cir_mb_number);
 	ddl_vidc_encode_set_multi_slice_info(encoder);
@@ -820,13 +838,21 @@
 	ddl_context->dram_base_a.align_physical_addr, stream->physical);
 	enc_param.stream_buffer_size =
 		encoder->client_output_buf_req.sz;
-
 	enc_param.intra_frame = encoder->intra_frame_insertion;
-	if (encoder->intra_frame_insertion)
-		encoder->intra_frame_insertion = false;
 	enc_param.input_flush = false;
 	enc_param.slice_enable = false;
-		vidc_sm_set_encoder_vop_time(
+	enc_param.store_ltr0 = encoder->ltr_control.store_ltr0;
+	enc_param.store_ltr1 = encoder->ltr_control.store_ltr1;
+	enc_param.use_ltr0 = encoder->ltr_control.use_ltr0;
+	enc_param.use_ltr1 = encoder->ltr_control.use_ltr1;
+
+	encoder->intra_frame_insertion = false;
+	encoder->intra_period_changed = false;
+	encoder->ltr_control.store_ltr0 = false;
+	encoder->ltr_control.store_ltr1 = false;
+	encoder->ltr_control.use_ltr0 = false;
+	encoder->ltr_control.use_ltr1 = false;
+	vidc_sm_set_encoder_vop_time(
 			&ddl->shared_mem[ddl->command_channel], true,
 			encoder->vop_timing.vop_time_resolution,
 			ddl->input_frame.frm_delta);
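
This frame-start hunk makes the per-frame controls one-shot: each flag is latched into enc_param for exactly this frame, then cleared on the encoder context before the command is issued, so an intra-frame request or an LTR store/use cannot leak into the next frame:

/* latch into the command block... */
enc_param.intra_frame = encoder->intra_frame_insertion;
enc_param.store_ltr0  = encoder->ltr_control.store_ltr0;
enc_param.use_ltr0    = encoder->ltr_control.use_ltr0;
/* ...then disarm immediately, before the frame-start is sent */
encoder->intra_frame_insertion  = false;
encoder->ltr_control.store_ltr0 = false;
encoder->ltr_control.use_ltr0   = false;
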
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc.c b/drivers/video/msm/vidc/1080p/ddl/vidc.c
index 5bfb184..c60175a 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vidc.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -83,6 +83,17 @@
 #define VIDC_1080P_SI_RG10_ENCODE_SLICE_IF_ENABLE_SHFT      31
 #define VIDC_1080P_MAX_INTRA_PERIOD 0xffff
 
+#define VIDC_1080P_COMMON_CHX_RG6_I_FRAME_BMASK              0x00000001
+#define VIDC_1080P_COMMON_CHX_RG6_I_FRAME_SHIFT              0
+#define VIDC_1080P_COMMON_CHX_RG6_STORE_LTR0_BMASK           0x00000008
+#define VIDC_1080P_COMMON_CHX_RG6_STORE_LTR0_SHIFT           3
+#define VIDC_1080P_COMMON_CHX_RG6_STORE_LTR1_BMASK           0x00000010
+#define VIDC_1080P_COMMON_CHX_RG6_STORE_LTR1_SHIFT           4
+#define VIDC_1080P_COMMON_CHX_RG6_USE_LTR0_BMASK             0x00000020
+#define VIDC_1080P_COMMON_CHX_RG6_USE_LTR0_SHIFT             5
+#define VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_BMASK             0x00000040
+#define VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_SHIFT             6
+
 u8 *VIDC_BASE_PTR;
 
 void vidc_1080p_do_sw_reset(enum vidc_1080p_reset init_flag)
@@ -787,7 +798,9 @@
 void vidc_1080p_encode_frame_start_ch0(
 	struct vidc_1080p_enc_frame_start_param *param)
 {
-	u32 input_flush;
+	u32 input_flush = 0;
+	u32 frame_insertion = 0;
+
 	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
 	VIDC_HWIO_OUT(REG_666957, VIDC_1080P_INIT_CH_INST_ID);
 	VIDC_HWIO_OUT(REG_117192,
@@ -798,7 +811,22 @@
 		VIDC_1080P_BASE_OFFSET_SHIFT);
 	VIDC_HWIO_OUT(REG_175608, param->current_c_addr_offset >>
 		VIDC_1080P_BASE_OFFSET_SHIFT);
-	VIDC_HWIO_OUT(REG_190381, param->intra_frame);
+	frame_insertion = VIDC_SETFIELD(param->intra_frame,
+			VIDC_1080P_COMMON_CHX_RG6_I_FRAME_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_I_FRAME_BMASK);
+	frame_insertion |= VIDC_SETFIELD(param->store_ltr0,
+			VIDC_1080P_COMMON_CHX_RG6_STORE_LTR0_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_STORE_LTR0_BMASK);
+	frame_insertion |= VIDC_SETFIELD(param->store_ltr1,
+			VIDC_1080P_COMMON_CHX_RG6_STORE_LTR1_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_STORE_LTR1_BMASK);
+	frame_insertion |= VIDC_SETFIELD(param->use_ltr0,
+			VIDC_1080P_COMMON_CHX_RG6_USE_LTR0_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_USE_LTR0_BMASK);
+	frame_insertion |= VIDC_SETFIELD(param->use_ltr1,
+			VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_BMASK);
+	VIDC_HWIO_OUT(REG_190381, frame_insertion);
 	VIDC_HWIO_OUT(REG_889944, param->shared_mem_addr_offset);
 	input_flush = VIDC_SETFIELD(param->input_flush,
 			VIDC_1080P_SI_RG10_ENCODE_INPUT_BUFFER_FLUSH_SHFT,
@@ -816,7 +844,9 @@
 void vidc_1080p_encode_frame_start_ch1(
 	struct vidc_1080p_enc_frame_start_param *param)
 {
-	u32 input_flush;
+	u32 input_flush = 0;
+	u32 frame_insertion = 0;
+
 	VIDC_HWIO_OUT(REG_695082, VIDC_1080P_RISC2HOST_CMD_EMPTY);
 	VIDC_HWIO_OUT(REG_313350, VIDC_1080P_INIT_CH_INST_ID);
 	VIDC_HWIO_OUT(REG_980194,
@@ -827,7 +857,22 @@
 		VIDC_1080P_BASE_OFFSET_SHIFT);
 	VIDC_HWIO_OUT(REG_548308,  param->current_c_addr_offset >>
 		VIDC_1080P_BASE_OFFSET_SHIFT);
-	VIDC_HWIO_OUT(REG_887095, param->intra_frame);
+	frame_insertion = VIDC_SETFIELD(param->intra_frame,
+			VIDC_1080P_COMMON_CHX_RG6_I_FRAME_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_I_FRAME_BMASK);
+	frame_insertion |= VIDC_SETFIELD(param->store_ltr0,
+			VIDC_1080P_COMMON_CHX_RG6_STORE_LTR0_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_STORE_LTR0_BMASK);
+	frame_insertion |= VIDC_SETFIELD(param->store_ltr1,
+			VIDC_1080P_COMMON_CHX_RG6_STORE_LTR1_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_STORE_LTR1_BMASK);
+	frame_insertion |= VIDC_SETFIELD(param->use_ltr0,
+			VIDC_1080P_COMMON_CHX_RG6_USE_LTR0_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_USE_LTR0_BMASK);
+	frame_insertion |= VIDC_SETFIELD(param->use_ltr1,
+			VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_SHIFT,
+			VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_BMASK);
+	VIDC_HWIO_OUT(REG_887095, frame_insertion);
 	VIDC_HWIO_OUT(REG_652528, param->shared_mem_addr_offset);
 	input_flush = VIDC_SETFIELD(param->input_flush,
 			VIDC_1080P_SI_RG10_ENCODE_INPUT_BUFFER_FLUSH_SHFT,
@@ -1110,8 +1155,3 @@
 	VIDC_HWIO_OUT(REG_666957,
 		VIDC_1080P_DEC_TYPE_FRAME_START_REALLOC | instance_id);
 }
-
-void vidc_1080p_set_enc_NV21(u32 enc_nv21)
-{
-	VIDC_HWIO_OUT(REG_515664, enc_nv21);
-}
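
Register 6 of the channel command block, which used to carry just the intra-frame request, is now a packed bitfield (bit 0 I-frame, bits 3-6 store/use LTR0/1; bits 1-2 are untouched by this patch). VIDC_SETFIELD is this driver's shift-and-mask helper; assuming its usual definition, packing and decoding look like:

/* Helper shapes assumed from their uses in this file: */
#define VIDC_SETFIELD(val, shift, mask)  (((val) << (shift)) & (mask))
#define VIDC_GETFIELD(reg, mask, shift)  (((reg) & (mask)) >> (shift))

u32 rg6 = 0, use_ltr1;

rg6 |= VIDC_SETFIELD(param->intra_frame,
		     VIDC_1080P_COMMON_CHX_RG6_I_FRAME_SHIFT,
		     VIDC_1080P_COMMON_CHX_RG6_I_FRAME_BMASK);   /* bit 0 */
rg6 |= VIDC_SETFIELD(param->use_ltr1,
		     VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_SHIFT,
		     VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_BMASK);  /* bit 6 */
/* and on the way back: */
use_ltr1 = VIDC_GETFIELD(rg6,
			 VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_BMASK,
			 VIDC_1080P_COMMON_CHX_RG6_USE_LTR1_SHIFT);
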
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc.h b/drivers/video/msm/vidc/1080p/ddl/vidc.h
index aa19766..0d83dd9 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vidc.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc.h
@@ -103,6 +103,7 @@
 #define VIDC_1080P_ERROR_SPS_PARSE_ERROR         129
 #define VIDC_1080P_ERROR_PPS_PARSE_ERROR         130
 #define VIDC_1080P_ERROR_SLICE_PARSE_ERROR       131
+#define VIDC_1080P_ERROR_NON_IDR_FRAME_TYPE      132
 #define VIDC_1080P_ERROR_SYNC_POINT_NOT_RECEIVED  171
 
 #define VIDC_1080P_WARN_COMMAND_FLUSHED                  145
@@ -399,6 +400,10 @@
 	u32 intra_frame;
 	u32 input_flush;
 	u32 slice_enable;
+	u32 store_ltr0;
+	u32 store_ltr1;
+	u32 use_ltr0;
+	u32 use_ltr1;
 	enum vidc_1080p_encode encode;
 };
 struct vidc_1080p_enc_frame_info{
@@ -585,6 +590,4 @@
 	u32 *intermediate_stage_counter);
 void vidc_1080p_get_exception_status(u32 *exception_status);
 void vidc_1080p_frame_start_realloc(u32 instance_id);
-void vidc_1080p_set_enc_NV21(u32 enc_nv21);
-
 #endif
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc_hwio_reg.h b/drivers/video/msm/vidc/1080p/ddl/vidc_hwio_reg.h
index 92e911f..0de06bf 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vidc_hwio_reg.h
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc_hwio_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010,2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -3603,22 +3603,6 @@
 #define HWIO_REG_515564_MSLICE_BIT_BMSK  0xffffffff
 #define HWIO_REG_515564_MSLICE_BIT_SHFT  0
 
-#define HWIO_REG_515664_ADDR  (VIDC_BLACKBIRD_REG_BASE + 0x0000c548)
-#define HWIO_REG_515664_PHYS  (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c548)
-#define HWIO_REG_515664_RMSK  0xffffffff
-#define HWIO_REG_515664_SHFT  0
-#define HWIO_REG_515664_IN    in_dword_masked(\
-	HWIO_REG_515664_ADDR, HWIO_REG_515664_RMSK)
-#define HWIO_REG_515664_INM(m)  in_dword_masked(\
-	HWIO_REG_515664_ADDR, m)
-#define HWIO_REG_515664_OUT(v)  out_dword(HWIO_REG_515664_ADDR, v)
-#define HWIO_REG_515664_OUTM(m, v)  out_dword_masked_ns(\
-	HWIO_REG_515664_ADDR, m, v, HWIO_REG_515664_IN);
-#define HWIO_REG_515664_NV21_SEL_BMSK   0xffffffff
-#define HWIO_REG_515664_NV21_SEL_SHFT   0
-
-
-
 #define HWIO_REG_886210_ADDR (VIDC_BLACKBIRD_REG_BASE + 0x0000c518)
 #define HWIO_REG_886210_PHYS (VIDC_BLACKBIRD_REG_BASE_PHYS + 0x0000c518)
 #define HWIO_REG_886210_RMSK  0xffff
diff --git a/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c b/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c
index 5206182..de294fd 100644
--- a/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c
+++ b/drivers/video/msm/vidc/1080p/ddl/vidc_pix_cache.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved.
+/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -214,7 +214,7 @@
 	VIDC_HWIO_IN(REG_261029, &dmi_cfg_reg);
 	dmi_cfg_reg &= (~HWIO_REG_261029_DMI_RAM_SEL_BMSK);
 	dmi_cfg_reg |= VIDC_SETFIELD(ram_select,
-			HWIO_REG_261029_DMI_RAM_SEL_SHFT,
+			HWIO_REG_261029_AUTO_INC_EN_SHFT,
 			HWIO_REG_261029_DMI_RAM_SEL_BMSK);
 	VIDC_HWIO_OUT(REG_261029, dmi_cfg_reg);
 }
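
One hunk above warrants a second look: the new value is still masked with HWIO_REG_261029_DMI_RAM_SEL_BMSK but is now shifted by HWIO_REG_261029_AUTO_INC_EN_SHFT. If those two fields occupy different bit positions, ram_select lands in the wrong field or is masked away entirely. A field update normally keeps shift and mask from the same field:

VIDC_HWIO_IN(REG_261029, &dmi_cfg_reg);
dmi_cfg_reg &= ~HWIO_REG_261029_DMI_RAM_SEL_BMSK;	/* clear the field  */
dmi_cfg_reg |= VIDC_SETFIELD(ram_select,
		HWIO_REG_261029_DMI_RAM_SEL_SHFT,	/* same field shift */
		HWIO_REG_261029_DMI_RAM_SEL_BMSK);	/* same field mask  */
VIDC_HWIO_OUT(REG_261029, dmi_cfg_reg);
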
diff --git a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
index 60c5249..f5d7947 100644
--- a/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/1080p/resource_tracker/vcd_res_tracker.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,7 @@
 #include <linux/pm_runtime.h>
 #include <mach/clk.h>
 #include <mach/msm_memtypes.h>
+#include <mach/iommu_domains.h>
 #include <linux/interrupt.h>
 #include <linux/memory_alloc.h>
 #include <asm/sizes.h>
@@ -30,8 +31,6 @@
 static unsigned int vidc_clk_table[5] = {
 	48000000, 133330000, 200000000, 228570000, 266670000,
 };
-static unsigned int restrk_mmu_subsystem[] =	{
-		MSM_SUBSYSTEM_VIDEO, MSM_SUBSYSTEM_VIDEO_FWARE};
 static struct res_trk_context resource_context;
 
 #define VIDC_FW	"vidc_1080p.fw"
@@ -56,10 +55,8 @@
 static void *res_trk_pmem_map
 	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
 {
-	u32 offset = 0, flags = 0;
-	u32 index = 0;
+	u32 offset = 0;
 	struct ddl_context *ddl_context;
-	struct msm_mapped_buffer *mapped_buffer = NULL;
 	int ret = 0;
 	unsigned long iova = 0;
 	unsigned long buffer_size  = 0;
@@ -100,53 +97,11 @@
 		addr->align_virtual_addr = addr->virtual_base_addr + offset;
 		addr->buffer_size = buffer_size;
 	} else {
-		if (!res_trk_check_for_sec_session()) {
-			if (!addr->alloced_phys_addr) {
-				pr_err(" %s() alloced addres NULL", __func__);
-				goto bail_out;
-			}
-			flags = MSM_SUBSYSTEM_MAP_IOVA |
-				MSM_SUBSYSTEM_MAP_KADDR;
-			if (alignment == DDL_KILO_BYTE(128))
-					index = 1;
-			else if (alignment > SZ_4K)
-				flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
-			addr->mapped_buffer =
-			msm_subsystem_map_buffer(
-			(unsigned long)addr->alloced_phys_addr,
-			sz, flags, &restrk_mmu_subsystem[index],
-			sizeof(restrk_mmu_subsystem[index])/
-				sizeof(unsigned int));
-			if (IS_ERR(addr->mapped_buffer)) {
-				pr_err(" %s() buffer map failed", __func__);
-				goto bail_out;
-			}
-			mapped_buffer = addr->mapped_buffer;
-			if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
-				pr_err("%s() map buffers failed\n", __func__);
-				goto bail_out;
-			}
-			addr->physical_base_addr =
-				 (u8 *)mapped_buffer->iova[0];
-			addr->virtual_base_addr =
-					mapped_buffer->vaddr;
-		} else {
-			addr->physical_base_addr =
-				(u8 *) addr->alloced_phys_addr;
-			addr->virtual_base_addr =
-				(u8 *)addr->alloced_phys_addr;
-		}
-		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
-		addr->physical_base_addr, alignment);
-		offset = (u32)(addr->align_physical_addr -
-				addr->physical_base_addr);
-		addr->align_virtual_addr = addr->virtual_base_addr + offset;
-		addr->buffer_size = sz;
+		pr_err("ION must be enabled\n");
+		goto bail_out;
 	}
 	return addr->virtual_base_addr;
 bail_out:
-	if (IS_ERR(addr->mapped_buffer))
-		msm_subsystem_unmap_buffer(addr->mapped_buffer);
 	return NULL;
 ion_unmap_bail_out:
 	if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
@@ -160,19 +115,17 @@
 static void res_trk_pmem_free(struct ddl_buf_addr *addr)
 {
 	struct ddl_context *ddl_context;
+
+	if (!addr)
+		return;
+
 	ddl_context = ddl_get_context();
 	if (ddl_context->video_ion_client) {
-		if (addr && addr->alloc_handle) {
+		if (addr->alloc_handle) {
 			ion_free(ddl_context->video_ion_client,
 			 addr->alloc_handle);
 			addr->alloc_handle = NULL;
 		}
-	} else {
-		if (addr->mapped_buffer)
-			msm_subsystem_unmap_buffer(addr->mapped_buffer);
-		if (addr->alloced_phys_addr)
-			free_contiguous_memory_by_paddr(
-			(unsigned long)addr->alloced_phys_addr);
 	}
 	memset(addr, 0 , sizeof(struct ddl_buf_addr));
 }
@@ -259,8 +212,7 @@
 			addr->virtual_base_addr = NULL;
 			addr->physical_base_addr = NULL;
 		}
-	} else if (addr->mapped_buffer)
-		msm_subsystem_unmap_buffer(addr->mapped_buffer);
+	}
 	addr->mapped_buffer = NULL;
 }
 
@@ -453,6 +405,10 @@
 static struct ion_client *res_trk_create_ion_client(void){
 	struct ion_client *video_client;
 	video_client = msm_ion_client_create(-1, "video_client");
+	if (IS_ERR_OR_NULL(video_client)) {
+		VCDRES_MSG_ERROR("%s: Unable to create ION client\n", __func__);
+		video_client = NULL;
+	}
 	return video_client;
 }
 
@@ -844,9 +800,9 @@
 	if (resource_context.vidc_platform_data->enable_ion) {
 		if (res_trk_check_for_sec_session()) {
 			if (resource_context.res_mem_type != DDL_FW_MEM)
-				flags |= ION_SECURE;
+				flags |= ION_FLAG_SECURE;
 			else if (res_trk_is_cp_enabled())
-				flags |= ION_SECURE;
+				flags |= ION_FLAG_SECURE;
 		}
 	}
 	return flags;
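
Two hardening changes in the 1080p resource tracker: msm_ion_client_create can return an ERR_PTR as well as NULL, so the new check normalizes both to NULL for callers, and the secure flag follows ION's rename from ION_SECURE to ION_FLAG_SECURE. The flag selection above, condensed into one equivalent expression:

unsigned int flags = 0;

if (resource_context.vidc_platform_data->enable_ion &&
    res_trk_check_for_sec_session() &&
    (resource_context.res_mem_type != DDL_FW_MEM ||
     res_trk_is_cp_enabled()))
	flags |= ION_FLAG_SECURE;	/* was ION_SECURE before the rename */
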
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
index 7a79a40..383a91b 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl.h
@@ -12,7 +12,6 @@
  */
 #ifndef _VCD_DDL_H_
 #define _VCD_DDL_H_
-#include <mach/msm_subsystem_map.h>
 #include "vcd_ddl_api.h"
 #include "vcd_ddl_utils.h"
 #include "vcd_ddl_firmware.h"
diff --git a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
index f5f5d7f..edc8112 100644
--- a/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
+++ b/drivers/video/msm/vidc/720p/ddl/vcd_ddl_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -87,11 +87,9 @@
 void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
 {
 	u32 guard_bytes, align_mask;
-	u32 physical_addr;
 	u32 align_offset;
-	u32 alloc_size, flags = 0;
+	u32 alloc_size;
 	struct ddl_context *ddl_context;
-	struct msm_mapped_buffer *mapped_buffer = NULL;
 	unsigned long *kernel_vaddr = NULL;
 	ion_phys_addr_t phyaddr = 0;
 	size_t len = 0;
@@ -125,9 +123,8 @@
 					alloc_size,
 					SZ_4K,
 					buff_addr->mem_type, 0);
-		if (!buff_addr->alloc_handle) {
-			ERR("\n%s(): DDL ION alloc failed\n",
-					__func__);
+		if (IS_ERR_OR_NULL(buff_addr->alloc_handle)) {
+			ERR("\n%s(): DDL ION alloc failed\n", __func__);
 			goto bailout;
 		}
 		ret = ion_phys(ddl_context->video_ion_client,
@@ -156,36 +153,8 @@
 			(u32)buff_addr->virtual_base_addr,
 			alloc_size, align, len);
 	} else {
-		physical_addr = (u32)
-			allocate_contiguous_memory_nomap(alloc_size,
-						ddl_context->memtype, SZ_4K);
-		if (!physical_addr) {
-			ERR("\n%s(): DDL pmem allocate failed\n",
-			       __func__);
-			goto bailout;
-		}
-		buff_addr->physical_base_addr = (u32 *) physical_addr;
-		flags = MSM_SUBSYSTEM_MAP_KADDR;
-		buff_addr->mapped_buffer =
-		msm_subsystem_map_buffer((unsigned long)physical_addr,
-		alloc_size, flags, NULL, 0);
-		if (IS_ERR(buff_addr->mapped_buffer)) {
-			ERR("\n%s() buffer map failed\n", __func__);
-			goto free_pmem_buffer;
-		}
-		mapped_buffer = buff_addr->mapped_buffer;
-		if (!mapped_buffer->vaddr) {
-			ERR("\n%s() mapped virtual address is NULL\n",
-				__func__);
-			goto unmap_pmem_buffer;
-		}
-		buff_addr->virtual_base_addr = mapped_buffer->vaddr;
-		DBG("ddl_pmem_alloc: mem_type(0x%x), phys(0x%x),"\
-			" virt(0x%x), sz(%u), align(%u)",
-			(u32)buff_addr->mem_type,
-			(u32)buff_addr->physical_base_addr,
-			(u32)buff_addr->virtual_base_addr,
-			alloc_size, SZ_4K);
+		pr_err("ION must be enabled\n");
+		goto bailout;
 	}
 
 	memset(buff_addr->virtual_base_addr, 0 , sz + guard_bytes);
@@ -206,16 +175,6 @@
 		(u32)buff_addr->align_virtual_addr);
 	return;
 
-unmap_pmem_buffer:
-	if (buff_addr->mapped_buffer)
-		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
-free_pmem_buffer:
-	if (buff_addr->physical_base_addr)
-		free_contiguous_memory_by_paddr((unsigned long)
-			buff_addr->physical_base_addr);
-	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
-	return;
-
 unmap_ion_buffer:
 	if (ddl_context->video_ion_client) {
 		if (buff_addr->alloc_handle)
@@ -254,13 +213,6 @@
 			ion_free(ddl_context->video_ion_client,
 				buff_addr->alloc_handle);
 		}
-	} else {
-		if (buff_addr->mapped_buffer)
-			msm_subsystem_unmap_buffer(
-				buff_addr->mapped_buffer);
-		if (buff_addr->physical_base_addr)
-			free_contiguous_memory_by_paddr((unsigned long)
-				buff_addr->physical_base_addr);
 	}
 	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
 }
diff --git a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
index 9b184e8..a67dc1c 100644
--- a/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
+++ b/drivers/video/msm/vidc/720p/resource_tracker/vcd_res_tracker.c
@@ -681,6 +681,10 @@
 	struct ion_client *video_client;
 	VCDRES_MSG_LOW("%s", __func__);
 	video_client = msm_ion_client_create(-1, "video_client");
+	if (IS_ERR_OR_NULL(video_client)) {
+		VCDRES_MSG_ERROR("%s: Unable to create ION client\n", __func__);
+		video_client = NULL;
+	}
 	return video_client;
 }
 
diff --git a/drivers/video/msm/vidc/common/dec/vdec.c b/drivers/video/msm/vidc/common/dec/vdec.c
index 6938011..ec27b00 100644
--- a/drivers/video/msm/vidc/common/dec/vdec.c
+++ b/drivers/video/msm/vidc/common/dec/vdec.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,13 +25,13 @@
 #include <linux/uaccess.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
-#include <linux/android_pmem.h>
+
 #include <linux/clk.h>
 #include <linux/timer.h>
-#include <mach/msm_subsystem_map.h>
 #include <media/msm/vidc_type.h>
 #include <media/msm/vcd_api.h>
 #include <media/msm/vidc_init.h>
+#include <mach/iommu_domains.h>
 #include "vcd_res_tracker_api.h"
 #include "vdec_internal.h"
 
@@ -48,8 +48,6 @@
 static dev_t vid_dec_dev_num;
 static struct class *vid_dec_class;
 
-static unsigned int vidc_mmu_subsystem[] = {
-	MSM_SUBSYSTEM_VIDEO};
 static s32 vid_dec_get_empty_client_index(void)
 {
 	u32 i, found = false;
@@ -271,6 +269,38 @@
 				      &phy_addr, &pmem_fd, &file,
 				      &buffer_index) ||
 		(vcd_frame_data->flags & VCD_FRAME_FLAG_EOS)) {
+
+		if (res_trk_check_for_sec_session() &&
+				event == VCD_EVT_RESP_OUTPUT_DONE) {
+			DBG("Buffer Index = %d", buffer_index);
+			if (buffer_index != -1) {
+				if (client_ctx->meta_addr_table[buffer_index].
+					kernel_vir_addr_iommu &&
+					client_ctx->
+					meta_addr_table[buffer_index].
+					kernel_vir_addr) {
+
+					memcpy(client_ctx->
+						meta_addr_table[buffer_index].
+						kernel_vir_addr_iommu,
+						client_ctx->
+						meta_addr_table[buffer_index].
+						kernel_vir_addr,
+						client_ctx->meta_buf_size);
+					DBG("Copying Meta Buffer from "\
+						"secure memory "
+						"kernel_virt_iommu = %p "
+						"kernel_virt = %p",
+						client_ctx->
+						meta_addr_table[buffer_index].
+						kernel_vir_addr_iommu,
+						client_ctx->
+						meta_addr_table[buffer_index].
+						kernel_vir_addr);
+				}
+			}
+		}
+
 		/* Buffer address in user space */
 		vdec_msg->vdec_msg_info.msgdata.output_frame.bufferaddr =
 		    (u8 *) user_vaddr;
@@ -838,16 +868,221 @@
 		return false;
 	return true;
 }
+static u32 vid_dec_set_meta_buffers(struct video_client_ctx *client_ctx,
+					struct vdec_meta_buffers *meta_buffers)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_meta_buffer *vcd_meta_buffer = NULL;
+	u32 vcd_status = VCD_ERR_FAIL;
+	u32 len = 0, len_iommu = 0, buf_size = 0;
+	int rc = 0;
+	unsigned long ionflag = 0, ionflag_iommu = 0;
+	unsigned long buffer_size = 0, buffer_size_iommu = 0;
+	unsigned long iova = 0, iova_iommu = 0;
+	int index = -1, num_buffers = 0;
+	u8 *ker_vir_addr = NULL, *ker_vir_addr_iommu = NULL;
 
+	if (!client_ctx || !meta_buffers)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_SET_EXT_METABUFFER;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_meta_buffer);
+	vcd_meta_buffer = &client_ctx->vcd_meta_buffer;
+
+	memset(&client_ctx->vcd_meta_buffer, 0,
+		   sizeof(struct vcd_property_meta_buffer));
+	vcd_meta_buffer->size = meta_buffers->size;
+	vcd_meta_buffer->count = meta_buffers->count;
+	vcd_meta_buffer->pmem_fd = meta_buffers->pmem_fd;
+	vcd_meta_buffer->offset = meta_buffers->offset;
+	vcd_meta_buffer->pmem_fd_iommu = meta_buffers->pmem_fd_iommu;
+
+	if (!vcd_get_ion_status()) {
+		pr_err("PMEM Not available\n");
+		pr_err("PMEM not available\n");
+	} else {
+		client_ctx->meta_buffer_ion_handle = ion_import_dma_buf(
+					client_ctx->user_ion_client,
+					vcd_meta_buffer->pmem_fd);
+		if (IS_ERR_OR_NULL(client_ctx->meta_buffer_ion_handle)) {
+			ERR("%s(): get_ION_handle failed\n", __func__);
+			goto import_ion_error;
+		}
+		rc = ion_handle_get_flags(client_ctx->user_ion_client,
+					client_ctx->meta_buffer_ion_handle,
+					&ionflag);
+		if (rc) {
+			ERR("%s():get_ION_flags fail\n",
+					 __func__);
+			goto import_ion_error;
+		}
+		vcd_meta_buffer->kernel_virtual_addr =
+			(u8 *) ion_map_kernel(
+			client_ctx->user_ion_client,
+			client_ctx->meta_buffer_ion_handle);
+		if (!vcd_meta_buffer->kernel_virtual_addr) {
+			ERR("%s(): get_ION_kernel virtual addr failed\n",
+				 __func__);
+			goto import_ion_error;
+		}
+		if (res_trk_check_for_sec_session() ||
+		   (res_trk_get_core_type() == (u32)VCD_CORE_720P)) {
+			rc = ion_phys(client_ctx->user_ion_client,
+				client_ctx->meta_buffer_ion_handle,
+				(unsigned long *) (&(vcd_meta_buffer->
+				physical_addr)), &len);
+			if (rc) {
+				ERR("%s():get_ION_kernel physical addr fail\n",
+					__func__);
+				goto ion_map_error;
+			}
+			vcd_meta_buffer->client_data = NULL;
+			vcd_meta_buffer->dev_addr = (u8 *)
+				vcd_meta_buffer->physical_addr;
+		} else {
+			rc = ion_map_iommu(client_ctx->user_ion_client,
+				client_ctx->meta_buffer_ion_handle,
+				VIDEO_DOMAIN, VIDEO_MAIN_POOL,
+				SZ_4K, 0, (unsigned long *)&iova,
+				(unsigned long *)&buffer_size,
+				0, 0);
+			if (rc || !iova) {
+				ERR("%s():get_ION_kernel physical addr fail,"\
+					" rc = %d iova = 0x%lx\n",
+					__func__, rc, iova);
+				goto ion_map_error;
+			}
+			vcd_meta_buffer->physical_addr = (u8 *) iova;
+			vcd_meta_buffer->client_data = NULL;
+			vcd_meta_buffer->dev_addr = (u8 *) iova;
+		}
+
+		client_ctx->meta_buffer_iommu_ion_handle = ion_import_dma_buf(
+					client_ctx->user_ion_client,
+					vcd_meta_buffer->pmem_fd_iommu);
+		if (IS_ERR_OR_NULL(client_ctx->meta_buffer_iommu_ion_handle)) {
+			ERR("%s(): get_ION_handle failed\n", __func__);
+			goto import_ion_error;
+		}
+		rc = ion_handle_get_flags(client_ctx->user_ion_client,
+					client_ctx->
+					meta_buffer_iommu_ion_handle,
+					&ionflag_iommu);
+		if (rc) {
+			ERR("%s():get_ION_flags fail\n",
+					 __func__);
+			goto import_ion_error;
+		}
+		vcd_meta_buffer->kernel_virt_addr_iommu =
+			(u8 *) ion_map_kernel(
+			client_ctx->user_ion_client,
+			client_ctx->meta_buffer_iommu_ion_handle);
+		if (!vcd_meta_buffer->kernel_virt_addr_iommu) {
+			ERR("%s(): get_ION_kernel virtual addr failed\n",
+				 __func__);
+			goto import_ion_error;
+		}
+		if (res_trk_get_core_type() == (u32)VCD_CORE_720P) {
+			rc = ion_phys(client_ctx->user_ion_client,
+				client_ctx->meta_buffer_iommu_ion_handle,
+				(unsigned long *) (&(vcd_meta_buffer->
+				physical_addr_iommu)), &len_iommu);
+			if (rc) {
+				ERR("%s():get_ION_kernel physical addr fail\n",
+					__func__);
+				goto ion_map_error_iommu;
+			}
+			vcd_meta_buffer->client_data_iommu = NULL;
+			vcd_meta_buffer->dev_addr_iommu = (u8 *)
+				vcd_meta_buffer->physical_addr_iommu;
+		} else {
+			rc = ion_map_iommu(client_ctx->user_ion_client,
+				client_ctx->meta_buffer_iommu_ion_handle,
+				VIDEO_DOMAIN, VIDEO_MAIN_POOL,
+				SZ_4K, 0, (unsigned long *)&iova_iommu,
+				(unsigned long *)&buffer_size_iommu,
+				0, 0);
+			if (rc || !iova_iommu) {
+				ERR("%s():get_ION_kernel physical addr fail, "\
+					"rc = %d iova = 0x%lx\n",
+					__func__, rc, iova_iommu);
+				goto ion_map_error_iommu;
+			}
+			vcd_meta_buffer->physical_addr_iommu =
+						(u8 *) iova_iommu;
+			vcd_meta_buffer->client_data_iommu = NULL;
+			vcd_meta_buffer->dev_addr_iommu = (u8 *) iova_iommu;
+		}
+	}
+
+	/*fill the meta addr table*/
+	num_buffers = vcd_meta_buffer->count;
+	buf_size = vcd_meta_buffer->size/num_buffers;
+	ker_vir_addr = vcd_meta_buffer->kernel_virtual_addr;
+	ker_vir_addr_iommu = vcd_meta_buffer->kernel_virt_addr_iommu;
+	client_ctx->meta_buf_size = buf_size;
+	for (index = 0; index < num_buffers; index++) {
+		client_ctx->meta_addr_table[index].kernel_vir_addr =
+			ker_vir_addr;
+		client_ctx->meta_addr_table[index].kernel_vir_addr_iommu =
+			ker_vir_addr_iommu;
+		DBG("[%d] kernel_virtual = %p kernel_vir_iommu = %p",
+			index, ker_vir_addr, ker_vir_addr_iommu);
+		ker_vir_addr += buf_size;
+		ker_vir_addr_iommu += buf_size;
+	}
+
+	DBG("Meta Buffer: Virt: %p, Phys %p, fd: %d",
+			vcd_meta_buffer->kernel_virtual_addr,
+			vcd_meta_buffer->physical_addr,
+			vcd_meta_buffer->pmem_fd);
+	DBG("IOMMU Meta Buffer: Virt: %p, Phys %p, fd: %d",
+			vcd_meta_buffer->kernel_virt_addr_iommu,
+			vcd_meta_buffer->physical_addr_iommu,
+			vcd_meta_buffer->pmem_fd_iommu);
+	DBG("Meta_buffer: Dev addr %p", vcd_meta_buffer->dev_addr);
+	DBG("IOMMU Meta_buffer: Dev addr %p",
+			vcd_meta_buffer->dev_addr_iommu);
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					  &vcd_property_hdr,
+					  vcd_meta_buffer);
+
+	if (vcd_status)
+		return false;
+	else
+		return true;
+ion_map_error_iommu:
+	if (vcd_meta_buffer->kernel_virt_addr_iommu) {
+		ion_unmap_kernel(client_ctx->user_ion_client,
+				client_ctx->meta_buffer_iommu_ion_handle);
+		vcd_meta_buffer->kernel_virt_addr_iommu = NULL;
+	}
+	if (!IS_ERR_OR_NULL(client_ctx->meta_buffer_iommu_ion_handle)) {
+		ion_free(client_ctx->user_ion_client,
+			client_ctx->meta_buffer_iommu_ion_handle);
+		client_ctx->meta_buffer_iommu_ion_handle = NULL;
+	}
+ion_map_error:
+	if (vcd_meta_buffer->kernel_virtual_addr) {
+		ion_unmap_kernel(client_ctx->user_ion_client,
+				client_ctx->meta_buffer_ion_handle);
+		vcd_meta_buffer->kernel_virtual_addr = NULL;
+	}
+	if (!IS_ERR_OR_NULL(client_ctx->meta_buffer_ion_handle)) {
+		ion_free(client_ctx->user_ion_client,
+			client_ctx->meta_buffer_ion_handle);
+		client_ctx->meta_buffer_ion_handle = NULL;
+	}
+import_ion_error:
+	return false;
+}
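
vid_dec_set_meta_buffers wires up two buffers per metadata slot: pmem_fd is the buffer the firmware writes (mapped by physical address for secure sessions), pmem_fd_iommu an ordinary IOMMU-mapped shadow the client can read. meta_addr_table keeps both kernel mappings per slot so the copy-out at OUTPUT_DONE (the hunk near the top of this file's diff) is a plain memcpy:

/* One metadata region carved into 'count' equal slots: */
buf_size = vcd_meta_buffer->size / num_buffers;
for (index = 0; index < num_buffers; index++) {
	client_ctx->meta_addr_table[index].kernel_vir_addr =
		ker_vir_addr;		/* firmware-written (secure) side */
	client_ctx->meta_addr_table[index].kernel_vir_addr_iommu =
		ker_vir_addr_iommu;	/* client-readable shadow */
	ker_vir_addr += buf_size;
	ker_vir_addr_iommu += buf_size;
}

/* Consumed on VCD_EVT_RESP_OUTPUT_DONE in a secure session: */
memcpy(client_ctx->meta_addr_table[buffer_index].kernel_vir_addr_iommu,
       client_ctx->meta_addr_table[buffer_index].kernel_vir_addr,
       client_ctx->meta_buf_size);
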
 static u32 vid_dec_set_h264_mv_buffers(struct video_client_ctx *client_ctx,
 					struct vdec_h264_mv *mv_data)
 {
 	struct vcd_property_hdr vcd_property_hdr;
 	struct vcd_property_h264_mv_buffer *vcd_h264_mv_buffer = NULL;
-	struct msm_mapped_buffer *mapped_buffer = NULL;
 	u32 vcd_status = VCD_ERR_FAIL;
-	u32 len = 0, flags = 0;
-	struct file *file;
+	u32 len = 0;
 	int rc = 0;
 	unsigned long ionflag = 0;
 	unsigned long buffer_size = 0;
@@ -868,28 +1103,8 @@
 	vcd_h264_mv_buffer->offset = mv_data->offset;
 
 	if (!vcd_get_ion_status()) {
-		if (get_pmem_file(vcd_h264_mv_buffer->pmem_fd,
-			(unsigned long *) (&(vcd_h264_mv_buffer->
-			physical_addr)),
-			(unsigned long *) (&vcd_h264_mv_buffer->
-						kernel_virtual_addr),
-			(unsigned long *) (&len), &file)) {
-			ERR("%s(): get_pmem_file failed\n", __func__);
-			return false;
-		}
-		put_pmem_file(file);
-		flags = MSM_SUBSYSTEM_MAP_IOVA;
-		mapped_buffer = msm_subsystem_map_buffer(
-			(unsigned long)vcd_h264_mv_buffer->physical_addr, len,
-				flags, vidc_mmu_subsystem,
-				sizeof(vidc_mmu_subsystem)/
-				sizeof(unsigned int));
-		if (IS_ERR(mapped_buffer)) {
-			pr_err("buffer map failed");
-			return false;
-		}
-		vcd_h264_mv_buffer->client_data = (void *) mapped_buffer;
-		vcd_h264_mv_buffer->dev_addr = (u8 *)mapped_buffer->iova[0];
+		pr_err("PMEM not available\n");
+		return false;
 	} else {
 		client_ctx->h264_mv_ion_handle = ion_import_dma_buf(
 					client_ctx->user_ion_client,
@@ -1018,6 +1233,58 @@
 		return true;
 }
 
+static u32 vid_dec_free_meta_buffers(struct video_client_ctx *client_ctx)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_buffer_size meta_buffer_size;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_FREE_EXT_METABUFFER;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_buffer_size);
+
+	vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				      &vcd_property_hdr, &meta_buffer_size);
+
+	if (!IS_ERR_OR_NULL(client_ctx->meta_buffer_ion_handle)) {
+		ion_unmap_kernel(client_ctx->user_ion_client,
+					client_ctx->meta_buffer_ion_handle);
+		if (!res_trk_check_for_sec_session() &&
+		   (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
+			ion_unmap_iommu(client_ctx->user_ion_client,
+				client_ctx->meta_buffer_ion_handle,
+				VIDEO_DOMAIN,
+				VIDEO_MAIN_POOL);
+		}
+		ion_free(client_ctx->user_ion_client,
+					client_ctx->meta_buffer_ion_handle);
+		client_ctx->meta_buffer_ion_handle = NULL;
+	}
+
+	if (!IS_ERR_OR_NULL(client_ctx->meta_buffer_iommu_ion_handle)) {
+		ion_unmap_kernel(client_ctx->user_ion_client,
+			client_ctx->meta_buffer_iommu_ion_handle);
+		if (res_trk_check_for_sec_session() &&
+		   (res_trk_get_core_type() != (u32)VCD_CORE_720P)) {
+			ion_unmap_iommu(client_ctx->user_ion_client,
+				client_ctx->meta_buffer_iommu_ion_handle,
+				VIDEO_DOMAIN,
+				VIDEO_MAIN_POOL);
+		}
+		ion_free(client_ctx->user_ion_client,
+				client_ctx->meta_buffer_iommu_ion_handle);
+		client_ctx->meta_buffer_iommu_ion_handle = NULL;
+	}
+
+	if (vcd_status)
+		return false;
+	else
+		return true;
+}
+
 static u32 vid_dec_free_h264_mv_buffers(struct video_client_ctx *client_ctx)
 {
 	struct vcd_property_hdr vcd_property_hdr;
@@ -1026,9 +1293,6 @@
 
 	if (!client_ctx)
 		return false;
-	if (client_ctx->vcd_h264_mv_buffer.client_data)
-		msm_subsystem_unmap_buffer((struct msm_mapped_buffer *)
-		client_ctx->vcd_h264_mv_buffer.client_data);
 
 	vcd_property_hdr.prop_id = VCD_I_FREE_H264_MV_BUFFER;
 	vcd_property_hdr.sz = sizeof(struct vcd_property_buffer_size);
@@ -1085,6 +1349,7 @@
 		vdec_buf_req->buffer_size = vcd_buf_req.sz;
 		vdec_buf_req->alignment = vcd_buf_req.align;
 		vdec_buf_req->buf_poolid = vcd_buf_req.buf_pool_id;
+		vdec_buf_req->meta_buffer_size = vcd_buf_req.meta_buffer_size;
 
 		return true;
 	}
@@ -1470,7 +1735,6 @@
 	u32 vcd_status;
 	unsigned long kernel_vaddr, phy_addr, len;
 	unsigned long ker_vaddr;
-	struct file *pmem_file;
 	u32 result = true;
 	void __user *arg = (void __user *)u_arg;
 	int rc = 0;
@@ -1785,12 +2049,8 @@
 		}
 
 		if (!vcd_get_ion_status()) {
-			if (get_pmem_file(seq_header.pmem_fd,
-				  &phy_addr, &kernel_vaddr, &len, &pmem_file)) {
-				ERR("%s(): get_pmem_file failed\n", __func__);
-				return false;
-			}
-			put_pmem_file(pmem_file);
+			pr_err("PMEM not available\n");
+			return -EINVAL;
 		} else {
 			client_ctx->seq_hdr_ion_handle = ion_import_dma_buf(
 				client_ctx->user_ion_client,
@@ -1977,6 +2237,29 @@
 			return -EIO;
 		break;
 	}
+	case VDEC_IOCTL_SET_META_BUFFERS:
+	{
+		struct vdec_meta_buffers meta_buffers;
+		DBG("VDEC_IOCTL_SET_META_BUFFERS\n");
+		if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
+			return -EFAULT;
+		if (copy_from_user(&meta_buffers, vdec_msg.in,
+						   sizeof(meta_buffers)))
+			return -EFAULT;
+		result = vid_dec_set_meta_buffers(client_ctx, &meta_buffers);
+
+		if (!result)
+			return -EIO;
+		break;
+	}
+	case VDEC_IOCTL_FREE_META_BUFFERS:
+	{
+		DBG("VDEC_IOCTL_FREE_META_BUFFERS\n");
+		result = vid_dec_free_meta_buffers(client_ctx);
+		if (!result)
+			return -EIO;
+		break;
+	}
 	case VDEC_IOCTL_SET_H264_MV_BUFFER:
 	{
 		struct vdec_h264_mv mv_data;
@@ -2114,7 +2397,7 @@
 	}
 
 	client_index = vid_dec_get_empty_client_index();
-	if (client_index == -1) {
+	if (client_index < 0) {
 		ERR("%s() : No free clients client_index == -1\n", __func__);
 		rc = -ENOMEM;
 		goto client_failure;
@@ -2160,7 +2443,7 @@
 
 static int vid_dec_open_secure(struct inode *inode, struct file *file)
 {
-	int rc = 0, close_client = 0;
+	int rc = 0;
 	struct video_client_ctx *client_ctx;
 	mutex_lock(&vid_dec_device_p->lock);
 	rc = vid_dec_open_client(&client_ctx, VCD_CP_SESSION);
@@ -2174,9 +2457,6 @@
 	file->private_data = client_ctx;
 	if (res_trk_open_secure_session()) {
 		ERR("Secure session operation failure\n");
-		close_client = 1;
-		client_ctx->stop_called = 1;
-		client_ctx->stop_sync_cb = 1;
 		rc = -EACCES;
 		goto error;
 	}
@@ -2184,8 +2464,6 @@
 	return 0;
 error:
 	mutex_unlock(&vid_dec_device_p->lock);
-	if (close_client)
-		vid_dec_close_client(client_ctx);
 	return rc;
 }
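
The new VDEC_IOCTL_SET_META_BUFFERS / VDEC_IOCTL_FREE_META_BUFFERS cases follow this driver's two-stage ioctl convention: copy the vdec_ioctl_msg wrapper first, then copy the real payload through the wrapper's .in pointer (getters mirror results back through .out). Condensed:

case VDEC_IOCTL_SET_META_BUFFERS:
{
	struct vdec_meta_buffers meta_buffers;

	/* stage 1: the wrapper holding the user's in/out pointers */
	if (copy_from_user(&vdec_msg, arg, sizeof(vdec_msg)))
		return -EFAULT;
	/* stage 2: the payload the wrapper points at */
	if (copy_from_user(&meta_buffers, vdec_msg.in,
			   sizeof(meta_buffers)))
		return -EFAULT;
	if (!vid_dec_set_meta_buffers(client_ctx, &meta_buffers))
		return -EIO;
	break;
}
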
 
diff --git a/drivers/video/msm/vidc/common/enc/venc.c b/drivers/video/msm/vidc/common/enc/venc.c
index 74d8221..823626a 100644
--- a/drivers/video/msm/vidc/common/enc/venc.c
+++ b/drivers/video/msm/vidc/common/enc/venc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,7 +25,7 @@
 #include <linux/uaccess.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
-#include <linux/android_pmem.h>
+
 #include <linux/clk.h>
 #include <media/msm/vidc_type.h>
 #include <media/msm/vcd_api.h>
@@ -218,15 +218,15 @@
 
 	switch (event) {
 	case VCD_EVT_RESP_OUTPUT_DONE:
-	   DBG("Send INPUT_DON message to client = %p\n",
+	   DBG("Send OUTPUT_DONE message to client = %p\n",
 			client_ctx);
 	   break;
 	case VCD_EVT_RESP_OUTPUT_FLUSHED:
-	   DBG("Send INPUT_FLUSHED message to client = %p\n",
+	   DBG("Send OUTPUT_FLUSHED message to client = %p\n",
 		   client_ctx);
 	   break;
 	default:
-	   ERR("QVD: vid_enc_output_frame_done invalid cmd type: %d\n", event);
+	   ERR("vid_enc_output_frame_done: invalid cmd type: %d\n", event);
 	   venc_msg->venc_msg_info.statuscode = VEN_S_EFATAL;
 	   break;
 	}
@@ -336,6 +336,12 @@
 		venc_msg->venc_msg_info.msgcode =
 			VEN_MSG_PAUSE;
 		break;
+	case VCD_EVT_IND_INFO_LTRUSE_FAILED:
+		INFO("\n msm_vidc_enc: Sending VEN_MSG_LTRUSE_FAILED"\
+			 " to client");
+		venc_msg->venc_msg_info.msgcode =
+			VEN_MSG_LTRUSE_FAILED;
+		break;
 
 	default:
 		ERR("%s() : unknown event type %u\n",
@@ -394,6 +400,7 @@
 	case VCD_EVT_IND_OUTPUT_RECONFIG:
 	case VCD_EVT_IND_HWERRFATAL:
 	case VCD_EVT_IND_RESOURCES_LOST:
+	case VCD_EVT_IND_INFO_LTRUSE_FAILED:
 		vid_enc_lean_event(client_ctx, event, status);
 		break;
 
@@ -550,7 +557,7 @@
 
 	client_index = vid_enc_get_empty_client_index();
 
-	if (client_index == -1) {
+	if (client_index < 0) {
 		ERR("%s() : No free clients client_index == -1\n",
 			__func__);
 		rc = -ENODEV;
@@ -872,17 +879,18 @@
 		struct venc_buffer enc_buffer;
 		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
 			return -EFAULT;
-		DBG("VEN_IOCTL_CMD_ENCODE_FRAME"
-			"/VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER\n");
 		if (copy_from_user(&enc_buffer, venc_msg.in,
 						   sizeof(enc_buffer)))
 			return -EFAULT;
-		if (cmd == VEN_IOCTL_CMD_ENCODE_FRAME)
+		if (cmd == VEN_IOCTL_CMD_ENCODE_FRAME) {
+			DBG("VEN_IOCTL_CMD_ENCODE_FRAME\n");
 			result = vid_enc_encode_frame(client_ctx,
 					&enc_buffer);
-		else
+		} else {
+			DBG("VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER\n");
 			result = vid_enc_fill_output_buffer(client_ctx,
 					&enc_buffer);
+		}
 		if (!result) {
 			DBG("\n VEN_IOCTL_CMD_ENCODE_FRAME/"
 				"VEN_IOCTL_CMD_FILL_OUTPUT_BUFFER failed");
@@ -948,19 +956,19 @@
 		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
 			return -EFAULT;
 
-		DBG("VEN_IOCTL_SET_INPUT_BUFFER_REQ"
-			"/VEN_IOCTL_SET_OUTPUT_BUFFER_REQ\n");
-
 		if (copy_from_user(&allocatorproperty, venc_msg.in,
 			sizeof(allocatorproperty)))
 			return -EFAULT;
 
-		if (cmd == VEN_IOCTL_SET_OUTPUT_BUFFER_REQ)
-				result = vid_enc_set_buffer_req(client_ctx,
-						&allocatorproperty, false);
-		else
+		if (cmd == VEN_IOCTL_SET_OUTPUT_BUFFER_REQ) {
+			DBG("VEN_IOCTL_SET_OUTPUT_BUFFER_REQ\n");
+			result = vid_enc_set_buffer_req(client_ctx,
+					&allocatorproperty, false);
+		} else {
+			DBG("VEN_IOCTL_SET_INPUT_BUFFER_REQ\n");
 			result = vid_enc_set_buffer_req(client_ctx,
 					&allocatorproperty, true);
+		}
 		if (!result) {
 			DBG("setting VEN_IOCTL_SET_OUTPUT_BUFFER_REQ/"
 			"VEN_IOCTL_SET_INPUT_BUFFER_REQ failed\n");
@@ -975,15 +983,15 @@
 		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
 			return -EFAULT;
 
-		DBG("VEN_IOCTL_GET_INPUT_BUFFER_REQ/"
-			"VEN_IOCTL_GET_OUTPUT_BUFFER_REQ\n");
-
-		if (cmd == VEN_IOCTL_GET_OUTPUT_BUFFER_REQ)
+		if (cmd == VEN_IOCTL_GET_OUTPUT_BUFFER_REQ) {
+			DBG("VEN_IOCTL_GET_OUTPUT_BUFFER_REQ\n");
 			result = vid_enc_get_buffer_req(client_ctx,
 					&allocatorproperty, false);
-		else
+		} else {
+			DBG("VEN_IOCTL_GET_INPUT_BUFFER_REQ\n");
 			result = vid_enc_get_buffer_req(client_ctx,
 					&allocatorproperty, true);
+		}
 		if (!result)
 			return -EIO;
 		if (copy_to_user(venc_msg.out, &allocatorproperty,
@@ -1425,6 +1433,7 @@
 	}
 	case VEN_IOCTL_CMD_REQUEST_IFRAME:
 	{
+		DBG("VEN_IOCTL_CMD_REQUEST_IFRAME\n");
 		result = vid_enc_request_iframe(client_ctx);
 		if (!result) {
 			ERR("setting VEN_IOCTL_CMD_REQUEST_IFRAME failed\n");
@@ -1648,6 +1657,7 @@
 		struct vcd_property_hdr vcd_property_hdr;
 		struct vcd_property_live live_mode;
 
+		DBG("VEN_IOCTL_SET_METABUFFER_MODE\n");
 		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
 			return -EFAULT;
 		if (copy_from_user(&metabuffer_mode, venc_msg.in,
@@ -1697,6 +1707,7 @@
 		struct vcd_property_hdr vcd_property_hdr;
 		u32 vcd_status = VCD_ERR_FAIL;
 		u32 enable = true;
+		DBG("VEN_IOCTL_SET_SLICE_DELIVERY_MODE\n");
 		vcd_property_hdr.prop_id = VCD_I_SLICE_DELIVERY_MODE;
 		vcd_property_hdr.sz = sizeof(u32);
 		vcd_status = vcd_set_property(client_ctx->vcd_handle,
@@ -1707,6 +1718,166 @@
 		}
 		break;
 	}
+	case VEN_IOCTL_SET_H263_PLUSPTYPE:
+	{
+		struct vcd_property_hdr vcd_property_hdr;
+		struct venc_plusptype plusptype;
+		u32 enable;
+		u32 vcd_status = VCD_ERR_FAIL;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		if (copy_from_user(&plusptype, venc_msg.in,
+			sizeof(plusptype)))
+			return -EFAULT;
+		vcd_property_hdr.prop_id = VCD_I_H263_PLUSPTYPE;
+		vcd_property_hdr.sz = sizeof(u32);
+		enable = plusptype.plusptype_enable;
+		DBG("VEN_IOCTL_SET_H263_PLUSPTYPE = %d\n", enable);
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+						&vcd_property_hdr, &enable);
+		if (vcd_status) {
+			pr_err("Setting plusptype failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_LTRMODE:
+	case VEN_IOCTL_GET_LTRMODE:
+	{
+		struct venc_ltrmode encoder_ltrmode;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		if (cmd == VEN_IOCTL_SET_LTRMODE) {
+			DBG("VEN_IOCTL_SET_LTRMODE\n");
+			if (copy_from_user(&encoder_ltrmode, venc_msg.in,
+					sizeof(encoder_ltrmode)))
+				return -EFAULT;
+			result = vid_enc_set_get_ltrmode(client_ctx,
+					&encoder_ltrmode, true);
+		} else {
+			DBG("VEN_IOCTL_GET_LTRMODE\n");
+			result = vid_enc_set_get_ltrmode(client_ctx,
+					&encoder_ltrmode, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out, &encoder_ltrmode,
+						sizeof(encoder_ltrmode)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("VEN_IOCTL_(G)SET_LTRMODE failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_LTRCOUNT:
+	case VEN_IOCTL_GET_LTRCOUNT:
+	{
+		struct venc_ltrcount encoder_ltrcount;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		if (cmd == VEN_IOCTL_SET_LTRCOUNT) {
+			DBG("VEN_IOCTL_SET_LTRCOUNT\n");
+			if (copy_from_user(&encoder_ltrcount, venc_msg.in,
+					sizeof(encoder_ltrcount)))
+				return -EFAULT;
+			result = vid_enc_set_get_ltrcount(client_ctx,
+					&encoder_ltrcount, true);
+		} else {
+			DBG("VEN_IOCTL_GET_LTRCOUNT\n");
+			result = vid_enc_set_get_ltrcount(client_ctx,
+					&encoder_ltrcount, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out,
+					&encoder_ltrcount,
+					sizeof(encoder_ltrcount)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("VEN_IOCTL_(G)SET_LTRCOUNT failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_LTRPERIOD:
+	case VEN_IOCTL_GET_LTRPERIOD:
+	{
+		struct venc_ltrperiod encoder_ltrperiod;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		if (cmd == VEN_IOCTL_SET_LTRPERIOD) {
+			DBG("VEN_IOCTL_SET_LTRPERIOD\n");
+			if (copy_from_user(&encoder_ltrperiod, venc_msg.in,
+					sizeof(encoder_ltrperiod)))
+				return -EFAULT;
+			result = vid_enc_set_get_ltrperiod(client_ctx,
+					&encoder_ltrperiod, true);
+		} else {
+			DBG("VEN_IOCTL_GET_LTRPERIOD\n");
+			result = vid_enc_set_get_ltrperiod(client_ctx,
+					&encoder_ltrperiod, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out,
+					&encoder_ltrperiod,
+					sizeof(encoder_ltrperiod)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("VEN_IOCTL_(G)SET_LTRPERIOD failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_GET_CAPABILITY_LTRCOUNT:
+	{
+		struct venc_range venc_capltrcount;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		DBG("VEN_IOCTL_GET_CAPABILITY_LTRCOUNT\n");
+		result = vid_enc_get_capability_ltrcount(client_ctx,
+					&venc_capltrcount);
+		if (result) {
+			if (copy_to_user(venc_msg.out, &venc_capltrcount,
+					sizeof(venc_capltrcount)))
+				return -EFAULT;
+		} else {
+			ERR("VEN_IOCTL_GET_CAPABILITY_LTRCOUNT failed\n");
+			return -EIO;
+		}
+		break;
+	}
+	case VEN_IOCTL_SET_LTRUSE:
+	case VEN_IOCTL_GET_LTRUSE:
+	{
+		struct venc_ltruse encoder_ltruse;
+		if (copy_from_user(&venc_msg, arg, sizeof(venc_msg)))
+			return -EFAULT;
+		if (cmd == VEN_IOCTL_SET_LTRUSE) {
+			DBG("VEN_IOCTL_SET_LTRUSE\n");
+			if (copy_from_user(&encoder_ltruse, venc_msg.in,
+					sizeof(encoder_ltruse)))
+				return -EFAULT;
+			result = vid_enc_set_get_ltruse(client_ctx,
+						&encoder_ltruse, true);
+		} else {
+			DBG("VEN_IOCTL_GET_LTRUSE\n");
+			result = vid_enc_set_get_ltruse(client_ctx,
+						&encoder_ltruse, false);
+			if (result) {
+				if (copy_to_user(venc_msg.out,
+					&encoder_ltruse,
+					sizeof(encoder_ltruse)))
+					return -EFAULT;
+			}
+		}
+		if (!result) {
+			ERR("VEN_IOCTL_(G)SET_LTRUSE failed\n");
+			return -EIO;
+		}
+		break;
+	}
 	case VEN_IOCTL_SET_SPS_PPS_FOR_IDR:
 	{
 		struct vcd_property_hdr vcd_property_hdr;
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.c b/drivers/video/msm/vidc/common/enc/venc_internal.c
index 72ff8f0..06b690d 100644
--- a/drivers/video/msm/vidc/common/enc/venc_internal.c
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.c
@@ -25,12 +25,12 @@
 #include <linux/uaccess.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
-#include <linux/android_pmem.h>
+
 #include <linux/clk.h>
-#include <mach/msm_subsystem_map.h>
 #include <media/msm/vidc_type.h>
 #include <media/msm/vcd_api.h>
 #include <media/msm/vidc_init.h>
+#include <mach/iommu_domains.h>
 #include "vcd_res_tracker_api.h"
 #include "venc_internal.h"
 
@@ -41,9 +41,6 @@
 #endif
 
 #define ERR(x...) printk(KERN_ERR x)
-static unsigned int vidc_mmu_subsystem[] = {
-	MSM_SUBSYSTEM_VIDEO};
-
 
 u32 vid_enc_set_get_base_cfg(struct video_client_ctx *client_ctx,
 		struct venc_basecfg *base_config, u32 set_flag)
@@ -123,10 +120,6 @@
 			format.buffer_format =
 				VCD_BUFFER_FORMAT_NV12_16M2KA;
 			break;
-		case VEN_INPUTFMT_NV21_16M2KA:
-			format.buffer_format =
-				VCD_BUFFER_FORMAT_NV21_16M2KA;
-			break;
 		default:
 			status = false;
 			break;
@@ -156,9 +149,6 @@
 			case VCD_BUFFER_FORMAT_TILE_4x2:
 				*input_format = VEN_INPUTFMT_NV21;
 				break;
-			case VCD_BUFFER_FORMAT_NV21_16M2KA:
-				*input_format = VEN_INPUTFMT_NV21_16M2KA;
-				break;
 			default:
 				status = false;
 				break;
@@ -335,6 +325,42 @@
 	vcd_property_hdr.prop_id = VCD_I_METADATA_ENABLE;
 	vcd_property_hdr.sz = sizeof(struct vcd_property_meta_data_enable);
 	if (set_flag) {
+		DBG("vcd_set_property: VCD_I_METADATA_ENABLE = %x\n",
+				(u32)*extradata_flag);
+		vcd_meta_data.meta_data_enable_flag = *extradata_flag;
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &vcd_meta_data);
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_METADATA_ENABLE Failed\n",
+				__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &vcd_meta_data);
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_METADATA_ENABLE Failed\n",
+				__func__);
+			return false;
+		}
+		*extradata_flag = vcd_meta_data.meta_data_enable_flag;
+		DBG("vcd_get_property: VCD_I_METADATA_ENABLE = 0x%x\n",
+				(u32)*extradata_flag);
+	}
+	return true;
+}
+
+u32 vid_enc_set_get_extradata_cfg(struct video_client_ctx *client_ctx,
+		u32 *extradata_flag, u32 set_flag)
+{
+	struct vcd_property_hdr vcd_property_hdr;
+	struct vcd_property_meta_data_enable vcd_meta_data;
+	u32 vcd_status = VCD_ERR_FAIL;
+	if (!client_ctx || !extradata_flag)
+		return false;
+	vcd_property_hdr.prop_id = VCD_I_METADATA_ENABLE;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_meta_data_enable);
+	if (set_flag) {
 		DBG("vcd_set_property: VCD_I_METADATA_ENABLE = %d\n",
 				*extradata_flag);
 		vcd_meta_data.meta_data_enable_flag = *extradata_flag;
@@ -1772,11 +1798,9 @@
 		struct venc_recon_addr *venc_recon)
 {
 	u32 vcd_status = VCD_ERR_FAIL;
-	u32 len, i, flags = 0;
-	struct file *file;
+	u32 len, i;
 	struct vcd_property_hdr vcd_property_hdr;
 	struct vcd_property_enc_recon_buffer *control = NULL;
-	struct msm_mapped_buffer *mapped_buffer = NULL;
 	int rc = -1;
 	unsigned long ionflag = 0;
 	unsigned long iova = 0;
@@ -1808,25 +1832,8 @@
 	control->user_virtual_addr = venc_recon->pbuffer;
 
 	if (!vcd_get_ion_status()) {
-		if (get_pmem_file(control->pmem_fd, (unsigned long *)
-			(&(control->physical_addr)), (unsigned long *)
-			(&control->kernel_virtual_addr),
-			(unsigned long *) (&len), &file)) {
-				ERR("%s(): get_pmem_file failed\n", __func__);
-				return false;
-			}
-			put_pmem_file(file);
-			flags = MSM_SUBSYSTEM_MAP_IOVA;
-			mapped_buffer = msm_subsystem_map_buffer(
-			(unsigned long)control->physical_addr, len,
-			flags, vidc_mmu_subsystem,
-			sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
-			if (IS_ERR(mapped_buffer)) {
-				pr_err("buffer map failed");
-				return false;
-			}
-			control->client_data = (void *) mapped_buffer;
-			control->dev_addr = (u8 *)mapped_buffer->iova[0];
+		pr_err("PMEM not available\n");
+		return false;
 	} else {
 		client_ctx->recon_buffer_ion_handle[i] = ion_import_dma_buf(
 				client_ctx->user_ion_client, control->pmem_fd);
@@ -1941,9 +1948,6 @@
 			venc_recon->pbuffer);
 		return false;
 	}
-	if (control->client_data)
-		msm_subsystem_unmap_buffer((struct msm_mapped_buffer *)
-		control->client_data);
 
 	vcd_property_hdr.prop_id = VCD_I_FREE_RECON_BUFFERS;
 	vcd_property_hdr.sz = sizeof(struct vcd_property_buffer_size);
@@ -2002,3 +2006,215 @@
 			return false;
 		}
 }
+
+u32 vid_enc_set_get_ltrmode(struct video_client_ctx *client_ctx,
+		struct venc_ltrmode *venc_ltrmode, u32 set_flag)
+{
+	struct vcd_property_ltrmode_type vcd_property_ltrmode;
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !venc_ltrmode)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_LTR_MODE;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_ltrmode_type);
+
+	if (set_flag) {
+		vcd_property_ltrmode.ltr_mode = (enum vcd_property_ltrmode)
+			venc_ltrmode->ltr_mode;
+		DBG("%s: Set ltr_mode = %u", __func__,
+			(u32)vcd_property_ltrmode.ltr_mode);
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_ltrmode);
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_LTR_MODE Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_ltrmode);
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_LTR_MODE Failed\n",
+					__func__);
+			return false;
+		} else {
+			venc_ltrmode->ltr_mode = (unsigned long)
+				vcd_property_ltrmode.ltr_mode;
+			DBG("%s: Got ltr_mode = %u", __func__,
+				(u32)vcd_property_ltrmode.ltr_mode);
+		}
+	}
+
+	return true;
+}
+
+u32 vid_enc_set_get_ltrcount(struct video_client_ctx *client_ctx,
+		struct venc_ltrcount *venc_ltrcount, u32 set_flag)
+{
+	struct vcd_property_ltrcount_type vcd_property_ltrcount;
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !venc_ltrcount)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_LTR_COUNT;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_ltrcount_type);
+
+	if (set_flag) {
+		vcd_property_ltrcount.ltr_count = (u32)
+			venc_ltrcount->ltr_count;
+		DBG("%s: Set ltr_count = %u", __func__,
+			(u32)vcd_property_ltrcount.ltr_count);
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_ltrcount);
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_LTR_COUNT Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_ltrcount);
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_LTR_COUNT Failed\n",
+					__func__);
+			return false;
+		} else {
+			venc_ltrcount->ltr_count = (unsigned long)
+				vcd_property_ltrcount.ltr_count;
+			DBG("%s: Got ltr_count = %u", __func__,
+				(u32)vcd_property_ltrcount.ltr_count);
+		}
+	}
+
+	return true;
+}
+
+u32 vid_enc_set_get_ltrperiod(struct video_client_ctx *client_ctx,
+		struct venc_ltrperiod *venc_ltrperiod, u32 set_flag)
+{
+	struct vcd_property_ltrperiod_type vcd_property_ltrperiod;
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !venc_ltrperiod)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_LTR_PERIOD;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_ltrperiod_type);
+
+	if (set_flag) {
+		vcd_property_ltrperiod.ltr_period = (u32)
+			venc_ltrperiod->ltr_period;
+		DBG("%s: Set ltr_period = %u", __func__,
+			(u32)vcd_property_ltrperiod.ltr_period);
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_ltrperiod);
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_LTR_PERIOD Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_ltrperiod);
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_LTR_PERIOD Failed\n",
+					__func__);
+			return false;
+		} else {
+			venc_ltrperiod->ltr_period = (unsigned long)
+				vcd_property_ltrperiod.ltr_period;
+			DBG("%s: Got ltr_period = %u", __func__,
+				(u32)vcd_property_ltrperiod.ltr_period);
+		}
+	}
+
+	return true;
+}
+
+u32 vid_enc_set_get_ltruse(struct video_client_ctx *client_ctx,
+		struct venc_ltruse *venc_ltruse, u32 set_flag)
+{
+	struct vcd_property_ltruse_type vcd_property_ltruse;
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !venc_ltruse)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_LTR_USE;
+	vcd_property_hdr.sz =
+		sizeof(struct vcd_property_ltruse_type);
+
+	if (set_flag) {
+		vcd_property_ltruse.ltr_id = (u32)
+			venc_ltruse->ltr_id;
+		vcd_property_ltruse.ltr_frames = (u32)
+			venc_ltruse->ltr_frames;
+		DBG("%s: Set ltr_id = %u, ltr_frames = %u",
+			__func__, vcd_property_ltruse.ltr_id,
+			vcd_property_ltruse.ltr_frames);
+		vcd_status = vcd_set_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_ltruse);
+		if (vcd_status) {
+			ERR("%s(): Set VCD_I_LTR_USE Failed\n",
+					__func__);
+			return false;
+		}
+	} else {
+		vcd_status = vcd_get_property(client_ctx->vcd_handle,
+				&vcd_property_hdr, &vcd_property_ltruse);
+		if (vcd_status) {
+			ERR("%s(): Get VCD_I_LTR_USE Failed\n",
+					__func__);
+			return false;
+		} else {
+			venc_ltruse->ltr_id = (unsigned long)
+				vcd_property_ltruse.ltr_id;
+			venc_ltruse->ltr_frames = (unsigned long)
+				vcd_property_ltruse.ltr_frames;
+			DBG("%s: Got ltr_id = %u, ltr_frames = %u",
+				__func__, vcd_property_ltruse.ltr_id,
+				vcd_property_ltruse.ltr_frames);
+		}
+	}
+
+	return true;
+}
+
+u32 vid_enc_get_capability_ltrcount(struct video_client_ctx *client_ctx,
+		struct venc_range *venc_capltrcount)
+{
+	struct vcd_property_range_type vcd_property_range;
+	struct vcd_property_hdr vcd_property_hdr;
+	u32 vcd_status = VCD_ERR_FAIL;
+
+	if (!client_ctx || !venc_capltrcount)
+		return false;
+
+	vcd_property_hdr.prop_id = VCD_I_CAPABILITY_LTR_COUNT;
+	vcd_property_hdr.sz = sizeof(struct vcd_property_range_type);
+	vcd_status = vcd_get_property(client_ctx->vcd_handle,
+					&vcd_property_hdr, &vcd_property_range);
+	if (vcd_status) {
+		ERR("%s(): Get VCD_I_CAPABILITY_LTR_COUNT Failed\n",
+			__func__);
+		return false;
+	} else {
+		venc_capltrcount->min = vcd_property_range.min;
+		venc_capltrcount->max = vcd_property_range.max;
+		venc_capltrcount->step_size = vcd_property_range.step_size;
+		DBG("%s: Got min: %lu, max: %lu, step_size: %lu", __func__,
+			venc_capltrcount->min, venc_capltrcount->max,
+			venc_capltrcount->step_size);
+	}
+
+	return true;
+}
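
The five LTR helpers above share one shape: validate the client, fill in a
vcd_property_hdr, then call vcd_set_property() or vcd_get_property() and
propagate failure as false. A minimal caller sketch (illustration only; the
mode and count values are assumptions, the real ones come from
enum vcd_property_ltrmode and from the capability query):

/* Sketch only: a hypothetical caller wiring up LTR encoding. */
static u32 example_enable_ltr(struct video_client_ctx *client_ctx)
{
	struct venc_ltrmode mode = { .ltr_mode = 1 };	/* assumed value */
	struct venc_ltrcount count = { .ltr_count = 2 };	/* assumed value */
	struct venc_range cap;

	if (!vid_enc_get_capability_ltrcount(client_ctx, &cap))
		return false;
	if (count.ltr_count > cap.max)
		return false;
	if (!vid_enc_set_get_ltrmode(client_ctx, &mode, true))
		return false;
	return vid_enc_set_get_ltrcount(client_ctx, &count, true);
}
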
diff --git a/drivers/video/msm/vidc/common/enc/venc_internal.h b/drivers/video/msm/vidc/common/enc/venc_internal.h
index e724b21..b7b8c98 100644
--- a/drivers/video/msm/vidc/common/enc/venc_internal.h
+++ b/drivers/video/msm/vidc/common/enc/venc_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -152,4 +152,19 @@
 u32 vid_enc_get_recon_buffer_size(struct video_client_ctx *client_ctx,
 		struct venc_recon_buff_size *venc_recon_size);
 
+u32 vid_enc_set_get_ltrmode(struct video_client_ctx *client_ctx,
+		struct venc_ltrmode *encoder_ltrmode, u32 set_flag);
+
+u32 vid_enc_set_get_ltrcount(struct video_client_ctx *client_ctx,
+		struct venc_ltrcount *encoder_ltrcount, u32 set_flag);
+
+u32 vid_enc_set_get_ltrperiod(struct video_client_ctx *client_ctx,
+		struct venc_ltrperiod *encoder_ltrperiod, u32 set_flag);
+
+u32 vid_enc_get_capability_ltrcount(struct video_client_ctx *client_ctx,
+		struct venc_range *venc_capltrcount);
+
+u32 vid_enc_set_get_ltruse(struct video_client_ctx *client_ctx,
+		struct venc_ltruse *encoder_ltruse, u32 set_flag);
+
 #endif
diff --git a/drivers/video/msm/vidc/common/init/vidc_init.c b/drivers/video/msm/vidc/common/init/vidc_init.c
index 9007145..cf01622 100644
--- a/drivers/video/msm/vidc/common/init/vidc_init.c
+++ b/drivers/video/msm/vidc/common/init/vidc_init.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -24,13 +24,13 @@
 #include <linux/uaccess.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
-#include <linux/android_pmem.h>
+
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/debugfs.h>
 #include <mach/clk.h>
 #include <linux/pm_runtime.h>
-#include <mach/msm_subsystem_map.h>
+#include <mach/iommu_domains.h>
 #include <media/msm/vcd_api.h>
 #include <media/msm/vidc_init.h>
 #include "vidc_init_internal.h"
@@ -49,7 +49,6 @@
 static struct vidc_dev *vidc_device_p;
 static dev_t vidc_dev_num;
 static struct class *vidc_class;
-static unsigned int vidc_mmu_subsystem[] = {MSM_SUBSYSTEM_VIDEO};
 
 static const struct file_operations vidc_fops = {
 	.owner = THIS_MODULE,
@@ -421,12 +420,6 @@
 	if (!client_ctx->user_ion_client)
 		goto bail_out_cleanup;
 	for (i = 0; i < *num_of_buffers; ++i) {
-		if (buf_addr_table[i].client_data) {
-			msm_subsystem_unmap_buffer(
-			(struct msm_mapped_buffer *)
-			buf_addr_table[i].client_data);
-			buf_addr_table[i].client_data = NULL;
-		}
 		if (!IS_ERR_OR_NULL(buf_addr_table[i].buff_ion_handle)) {
 			if (!IS_ERR_OR_NULL(client_ctx->user_ion_client)) {
 				ion_unmap_kernel(client_ctx->user_ion_client,
@@ -449,11 +442,6 @@
 			}
 		}
 	}
-	if (client_ctx->vcd_h264_mv_buffer.client_data) {
-		msm_subsystem_unmap_buffer((struct msm_mapped_buffer *)
-		client_ctx->vcd_h264_mv_buffer.client_data);
-		client_ctx->vcd_h264_mv_buffer.client_data = NULL;
-	}
 	if (!IS_ERR_OR_NULL(client_ctx->h264_mv_ion_handle)) {
 		if (!IS_ERR_OR_NULL(client_ctx->user_ion_client)) {
 			ion_unmap_kernel(client_ctx->user_ion_client,
@@ -565,9 +553,8 @@
 	unsigned long len, phys_addr;
 	struct file *file = NULL;
 	u32 *num_of_buffers = NULL;
-	u32 i, flags;
+	u32 i;
 	struct buf_addr_table *buf_addr_table;
-	struct msm_mapped_buffer *mapped_buffer = NULL;
 	struct ion_handle *buff_ion_handle = NULL;
 	unsigned long ionflag = 0;
 	unsigned long iova = 0;
@@ -609,26 +596,8 @@
 		goto bail_out_add;
 	} else {
 		if (!vcd_get_ion_status()) {
-			if (get_pmem_file(pmem_fd, &phys_addr,
-					kernel_vaddr, &len, &file)) {
-				ERR("%s(): get_pmem_file failed\n", __func__);
-				goto bail_out_add;
-			}
-			put_pmem_file(file);
-			flags = (buffer == BUFFER_TYPE_INPUT)
-			? MSM_SUBSYSTEM_MAP_IOVA :
-			MSM_SUBSYSTEM_MAP_IOVA|MSM_SUBSYSTEM_ALIGN_IOVA_8K;
-			mapped_buffer = msm_subsystem_map_buffer(phys_addr,
-			length, flags, vidc_mmu_subsystem,
-			sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
-			if (IS_ERR(mapped_buffer)) {
-				pr_err("buffer map failed");
-				goto bail_out_add;
-			}
-			buf_addr_table[*num_of_buffers].client_data = (void *)
-				mapped_buffer;
-			buf_addr_table[*num_of_buffers].dev_addr =
-				mapped_buffer->iova[0];
+			pr_err("PMEM not available\n");
+			goto bail_out_add;
 		} else {
 			buff_ion_handle = ion_import_dma_buf(
 				client_ctx->user_ion_client, pmem_fd);
@@ -828,11 +797,6 @@
 			__func__, client_ctx, user_vaddr);
 		goto bail_out_del;
 	}
-	if (buf_addr_table[i].client_data) {
-		msm_subsystem_unmap_buffer(
-		(struct msm_mapped_buffer *)buf_addr_table[i].client_data);
-		buf_addr_table[i].client_data = NULL;
-	}
 	*kernel_vaddr = buf_addr_table[i].kernel_vaddr;
 	if (buf_addr_table[i].buff_ion_handle) {
 		ion_unmap_kernel(client_ctx->user_ion_client,
diff --git a/drivers/video/msm/vidc/common/vcd/vcd.h b/drivers/video/msm/vidc/common/vcd/vcd.h
index 122db76..90f7d0e 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd.h
@@ -398,4 +398,11 @@
 u32 vcd_update_decoder_perf_level(struct vcd_dev_ctxt *dev_ctxt, u32 perf_lvl);
 
 u32 vcd_set_perf_turbo_level(struct vcd_clnt_ctxt *cctxt);
+
+struct vcd_transc *vcd_get_first_in_use_trans_for_clnt(
+	struct vcd_clnt_ctxt *cctxt);
+
+u32 vcd_handle_ltr_use_failed(struct vcd_clnt_ctxt *cctxt,
+	void *payload, size_t sz, u32 status);
+
 #endif
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_api.c b/drivers/video/msm/vidc/common/vcd/vcd_api.c
index fd2d01b..3d7474f 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_api.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_api.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -117,19 +117,19 @@
 	int is_secure;
 	struct client_security_info sec_info;
 	int client_count = 0;
-	int secure_session_running = 0, non_secure_running = 0;
+	int secure_session_running = 0, non_secure_running = 0;
 	is_secure = (flags & VCD_CP_SESSION) ? 1 : 0;
 	client_count = vcd_get_clients_security_info(&sec_info);
 	secure_session_running = (sec_info.secure_enc > 0) ||
 			(sec_info.secure_dec > 0);
-	non_secure_running = sec_info.non_secure_dec + sec_info.non_secure_enc;
+	non_secure_running = sec_info.non_secure_dec + sec_info.non_secure_enc;
 	if (!is_secure) {
 		if (secure_session_running) {
 			pr_err("non secure session failed secure running\n");
 			return -EACCES;
 		}
 	} else {
-		if (non_secure_running) {
+		if (non_secure_running) {
 			pr_err("Secure session failed non secure running\n");
 			return -EACCES;
 		}
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c
index 1d036d2..14c8030 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_client_sm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -964,6 +964,12 @@
 			vcd_handle_ind_info_output_reconfig(cctxt, status);
 			break;
 		}
+	case VCD_EVT_IND_INFO_LTRUSE_FAILED:
+		{
+			rc = vcd_handle_ltr_use_failed(cctxt,
+					payload, sz, status);
+			break;
+		}
 	default:
 		{
 			VCD_MSG_ERROR
@@ -1622,6 +1628,7 @@
 	if (!cctxt || to_state >= VCD_CLIENT_STATE_MAX) {
 		VCD_MSG_ERROR("Bad parameters. cctxt=%p, to_state=%d",
 			      cctxt, to_state);
+		return;
 	}
 
 	state_ctxt = &cctxt->clnt_state;
@@ -1744,7 +1751,7 @@
 	 vcd_get_buffer_requirements_cmn,
 	 NULL,
 	 NULL,
-	 NULL,
+	 vcd_free_buffer_cmn,
 	 vcd_fill_output_buffer_cmn,
 	 vcd_clnt_cb_in_flushing,
 	 },
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_core.h b/drivers/video/msm/vidc/common/vcd/vcd_core.h
index 9508a8d..0eaff74 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_core.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
index a4f9f24..9074358 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,7 @@
 	if (!drv_ctxt || to_state >= VCD_DEVICE_STATE_MAX) {
 		VCD_MSG_ERROR("Bad parameters. drv_ctxt=%p, to_state=%d",
 				  drv_ctxt, to_state);
+		return;
 	}
 
 	state_ctxt = &drv_ctxt->dev_state;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c b/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c
index e392414..fe0e131 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_scheduler.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,8 +88,13 @@
 			prop_hdr.sz = sizeof(cctxt->frm_p_units);
 			rc = ddl_get_property(cctxt->ddl_handle, &prop_hdr,
 						  &cctxt->frm_p_units);
-			VCD_FAILED_RETURN(rc,
-				"Failed: Get DDL_I_FRAME_PROC_UNITS");
+			if (VCD_FAILED(rc)) {
+				kfree(sched_cctxt);
+				VCD_MSG_ERROR(
+					"Failed: Get DDL_I_FRAME_PROC_UNITS");
+				return rc;
+			}
+
 			if (cctxt->decoding) {
 				cctxt->frm_rate.fps_numerator =
 					VCD_DEC_INITIAL_FRAME_RATE;
@@ -99,8 +104,12 @@
 				prop_hdr.sz = sizeof(cctxt->frm_rate);
 				rc = ddl_get_property(cctxt->ddl_handle,
 						&prop_hdr, &cctxt->frm_rate);
-				VCD_FAILED_RETURN(rc,
-					"Failed: Get VCD_I_FRAME_RATE");
+				if (VCD_FAILED(rc)) {
+					kfree(sched_cctxt);
+					VCD_MSG_ERROR(
+						"Failed: Get VCD_I_FRAME_RATE");
+					return rc;
+				}
 			}
 			if (!cctxt->perf_set_by_client)
 				cctxt->reqd_perf_lvl = cctxt->frm_p_units *
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index a6af235..f7424ed 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -11,9 +11,9 @@
  *
  */
 #include <linux/memory_alloc.h>
-#include <mach/msm_subsystem_map.h>
 #include <asm/div64.h>
 #include <media/msm/vidc_type.h>
+#include <mach/iommu_domains.h>
 #include "vcd.h"
 #include "vdec_internal.h"
 
@@ -24,19 +24,17 @@
 
 struct vcd_msm_map_buffer {
 	phys_addr_t phy_addr;
-	struct msm_mapped_buffer *mapped_buffer;
+	void *vaddr;
 	struct ion_handle *alloc_handle;
 	u32 in_use;
 };
 static struct vcd_msm_map_buffer msm_mapped_buffer_table[MAP_TABLE_SZ];
-static unsigned int vidc_mmu_subsystem[] = {MSM_SUBSYSTEM_VIDEO};
 
 static int vcd_pmem_alloc(size_t sz, u8 **kernel_vaddr, u8 **phy_addr,
 			 struct vcd_clnt_ctxt *cctxt)
 {
-	u32 memtype, i = 0, flags = 0;
+	u32 memtype, i = 0;
 	struct vcd_msm_map_buffer *map_buffer = NULL;
-	struct msm_mapped_buffer *mapped_buffer = NULL;
 	unsigned long iova = 0;
 	unsigned long buffer_size = 0;
 	int ret = 0;
@@ -64,36 +62,13 @@
 	res_trk_set_mem_type(DDL_MM_MEM);
 	memtype = res_trk_get_mem_type();
 	if (!cctxt->vcd_enable_ion) {
-		map_buffer->phy_addr = (phys_addr_t)
-		allocate_contiguous_memory_nomap(sz, memtype, SZ_4K);
-		if (!map_buffer->phy_addr) {
-			pr_err("%s() acm alloc failed", __func__);
-			goto free_map_table;
-		}
-		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
-		map_buffer->mapped_buffer =
-		msm_subsystem_map_buffer((unsigned long)map_buffer->phy_addr,
-		sz, flags, vidc_mmu_subsystem,
-		sizeof(vidc_mmu_subsystem)/sizeof(unsigned int));
-		if (IS_ERR(map_buffer->mapped_buffer)) {
-			pr_err(" %s() buffer map failed", __func__);
-			goto free_acm_alloc;
-		}
-		mapped_buffer = map_buffer->mapped_buffer;
-		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
-			pr_err("%s() map buffers failed", __func__);
-			goto free_map_buffers;
-		}
-		*phy_addr = (u8 *) mapped_buffer->iova[0];
-		*kernel_vaddr = (u8 *) mapped_buffer->vaddr;
-		VCD_MSG_LOW("vcd_pmem_alloc: phys(0x%x), virt(0x%x), "\
-			"sz(%u), flags(0x%x)", (u32)*phy_addr,
-			(u32)*kernel_vaddr, sz, (u32)flags);
+		pr_err("ION must be enabled\n");
+		goto free_map_table;
 	} else {
 		map_buffer->alloc_handle = ion_alloc(
 			    cctxt->vcd_ion_client, sz, SZ_4K,
 			    memtype, res_trk_get_ion_flags());
-		if (!map_buffer->alloc_handle) {
+		if (IS_ERR_OR_NULL(map_buffer->alloc_handle)) {
 			pr_err("%s() ION alloc failed", __func__);
 			goto bailout;
 		}
@@ -106,6 +81,7 @@
 		*kernel_vaddr = (u8 *) ion_map_kernel(
 				cctxt->vcd_ion_client,
 				map_buffer->alloc_handle);
+		map_buffer->vaddr = *kernel_vaddr;
 		if (!(*kernel_vaddr)) {
 			pr_err("%s() ION map failed", __func__);
 			goto ion_free_bailout;
@@ -143,8 +119,6 @@
 			goto free_map_table;
 		}
 		*phy_addr = (u8 *)map_buffer->phy_addr;
-		mapped_buffer = NULL;
-		map_buffer->mapped_buffer = NULL;
 		VCD_MSG_LOW("vcd_ion_alloc: phys(0x%x), virt(0x%x), "\
 			"sz(%u), ionflags(0x%x)", (u32)*phy_addr,
 			(u32)*kernel_vaddr, sz, (u32)ionflag);
@@ -152,15 +126,6 @@
 
 	return 0;
 
-free_map_buffers:
-	if (map_buffer->mapped_buffer)
-		msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
-free_acm_alloc:
-	if (!cctxt->vcd_enable_ion) {
-		free_contiguous_memory_by_paddr(
-		(unsigned long)map_buffer->phy_addr);
-	}
-	return -ENOMEM;
 ion_map_bailout:
 	ion_unmap_kernel(cctxt->vcd_ion_client, map_buffer->alloc_handle);
 ion_free_bailout:
@@ -183,8 +148,7 @@
 	}
 	for (i = 0; i  < MAP_TABLE_SZ; i++) {
 		if (msm_mapped_buffer_table[i].in_use &&
-			(msm_mapped_buffer_table[i]
-			.mapped_buffer->vaddr == kernel_vaddr)) {
+			(msm_mapped_buffer_table[i].vaddr == kernel_vaddr)) {
 			map_buffer = &msm_mapped_buffer_table[i];
 			map_buffer->in_use = 0;
 			break;
@@ -194,8 +158,6 @@
 		pr_err("%s() Entry not found", __func__);
 		goto bailout;
 	}
-	if (map_buffer->mapped_buffer)
-		msm_subsystem_unmap_buffer(map_buffer->mapped_buffer);
 	if (cctxt->vcd_enable_ion) {
 		VCD_MSG_LOW("vcd_ion_free: phys(0x%x), virt(0x%x)",
 			(u32)phy_addr, (u32)kernel_vaddr);
@@ -1684,7 +1646,7 @@
 void vcd_send_frame_done_in_eos(struct vcd_clnt_ctxt *cctxt,
 	 struct vcd_frame_data *input_frame, u32 valid_opbuf)
 {
-	VCD_MSG_LOW("vcd_send_frame_done_in_eos:");
+	VCD_MSG_HIGH("vcd_send_frame_done_in_eos:");
 
 	if (!input_frame->virtual && !valid_opbuf) {
 		VCD_MSG_MED("Sending NULL output with EOS");
@@ -1804,12 +1766,41 @@
 	}
 }
 
+struct vcd_transc *vcd_get_first_in_use_trans_for_clnt(
+	struct vcd_clnt_ctxt *cctxt)
+{
+	u32 i;
+	struct vcd_dev_ctxt *dev_ctxt;
+	VCD_MSG_HIGH("%s: ", __func__);
+	dev_ctxt = cctxt->dev_ctxt;
+	if (!dev_ctxt->trans_tbl) {
+		VCD_MSG_ERROR("%s: Null trans_tbl", __func__);
+		return NULL;
+	}
+	i = 0;
+	while (i < dev_ctxt->trans_tbl_size) {
+		if ((cctxt == dev_ctxt->trans_tbl[i].cctxt) &&
+			(dev_ctxt->trans_tbl[i].in_use)) {
+			VCD_MSG_MED("%s: found transc = 0x%p",
+				__func__, &dev_ctxt->trans_tbl[i]);
+			break;
+		}
+		i++;
+	}
+	if (i == dev_ctxt->trans_tbl_size) {
+		VCD_MSG_ERROR("%s: in_use transaction not found",
+			__func__);
+		return NULL;
+	} else
+		return &dev_ctxt->trans_tbl[i];
+}
+
 u32 vcd_handle_recvd_eos(
 	struct vcd_clnt_ctxt *cctxt,
 	 struct vcd_frame_data *input_frame, u32 *pb_eos_handled)
 {
 	u32 rc;
-
+	struct vcd_transc *transc;
 	VCD_MSG_LOW("vcd_handle_recvd_eos:");
 
 	*pb_eos_handled = false;
@@ -1827,13 +1818,21 @@
 		*pb_eos_handled = true;
 	else if (cctxt->decoding && !input_frame->virtual)
 		cctxt->sched_clnt_hdl->tkns++;
-	else if (!cctxt->decoding) {
-		vcd_send_frame_done_in_eos(cctxt, input_frame, false);
-		if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF) {
-			vcd_do_client_state_transition(cctxt,
-				VCD_CLIENT_STATE_EOS,
-				CLIENT_STATE_EVENT_NUMBER
-				(encode_frame));
+	else if (!cctxt->decoding && !cctxt->status.frame_delayed) {
+		if (!cctxt->status.frame_submitted) {
+			vcd_send_frame_done_in_eos(cctxt, input_frame, false);
+			if (cctxt->status.mask & VCD_EOS_WAIT_OP_BUF)
+				vcd_do_client_state_transition(cctxt,
+					VCD_CLIENT_STATE_EOS,
+					CLIENT_STATE_EVENT_NUMBER
+					(encode_frame));
+		} else {
+			transc = vcd_get_first_in_use_trans_for_clnt(cctxt);
+			if (transc) {
+				transc->flags |= VCD_FRAME_FLAG_EOS;
+				VCD_MSG_HIGH("%s: Add EOS flag to transc",
+				       __func__);
+			}
 		}
 		*pb_eos_handled = true;
 	}
@@ -1985,6 +1984,11 @@
 	orig_frame = vcd_find_buffer_pool_entry(&cctxt->in_buf_pool,
 					 transc->ip_buf_entry->virtual);
 
+	if (!orig_frame) {
+		rc = VCD_ERR_ILLEGAL_PARM;
+		VCD_FAILED_RETURN(rc, "Couldn't find buffer");
+	}
+
 	if ((transc->ip_buf_entry->frame.virtual !=
 		 frame->vcd_frm.virtual)
 		|| !transc->ip_buf_entry->in_use) {
@@ -2011,11 +2015,8 @@
 		return VCD_ERR_FAIL;
 	}
 
-	if (orig_frame != transc->ip_buf_entry) {
-		VCD_MSG_HIGH("%s: free duplicate buffer", __func__);
+	if (orig_frame != transc->ip_buf_entry)
 		kfree(transc->ip_buf_entry);
-		transc->ip_buf_entry = NULL;
-	}
 	transc->ip_buf_entry = NULL;
 	transc->input_done = true;
 
@@ -2784,7 +2785,6 @@
 	struct vcd_frame_data *frm_entry;
 	u32 rc = VCD_S_SUCCESS;
 	u32 eos_handled = false;
-	u32 duplicate_buffer = false;
 
 	VCD_MSG_LOW("vcd_handle_input_frame:");
 
@@ -2868,8 +2868,6 @@
 		buf_entry->allocated = orig_frame->allocated;
 		buf_entry->in_use = 1; /* meaningless for the dupe buffers */
 		buf_entry->frame = orig_frame->frame;
-		duplicate_buffer = true;
-		VCD_MSG_HIGH("%s: duplicate buffer", __func__);
 	} else
 		buf_entry = orig_frame;
 
@@ -2893,10 +2891,6 @@
 	if (VCD_FAILED(rc) || eos_handled) {
 		VCD_MSG_HIGH("rc = 0x%x, eos_handled = %d", rc,
 				 eos_handled);
-		if ((duplicate_buffer) && (buf_entry)) {
-			kfree(buf_entry);
-			buf_entry = NULL;
-		}
 
 		return rc;
 	}
@@ -3489,3 +3483,14 @@
 	}
 	return rc;
 }
+
+u32 vcd_handle_ltr_use_failed(struct vcd_clnt_ctxt *cctxt,
+	void *payload, size_t sz, u32 status)
+{
+	u32 rc = VCD_S_SUCCESS;
+	if (payload && cctxt) {
+		cctxt->callback(VCD_EVT_IND_INFO_LTRUSE_FAILED,
+			status, payload, sz, cctxt, cctxt->client_data);
+	}
+	return rc;
+}
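
The new vcd_handle_ltr_use_failed() above just forwards the indication to the
client callback, payload and status untouched. A client-side sketch (the
handler below is hypothetical; its signature mirrors the cctxt->callback()
invocation above):

/* Sketch: minimal client-side handling of the new indication. */
static void example_vcd_cb(u32 event, u32 status, void *info, size_t sz,
		void *handle, void *const client_data)
{
	switch (event) {
	case VCD_EVT_IND_INFO_LTRUSE_FAILED:
		pr_err("LTR use failed, status 0x%x\n", status);
		break;
	default:
		break;
	}
}
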
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_util.h b/drivers/video/msm/vidc/common/vcd/vcd_util.h
index 7571b25..f374ebb 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_util.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd_util.h
@@ -20,16 +20,16 @@
 #define VCD_MSG_LOW(xx_fmt...)		printk(KERN_INFO "\n\t* " xx_fmt)
 #define VCD_MSG_MED(xx_fmt...)		printk(KERN_INFO "\n  * " xx_fmt)
 #define VCD_MSG_HIGH(xx_fmt...)		printk(KERN_WARNING "\n" xx_fmt)
-
+#define VCD_MSG_ERROR(xx_fmt...)	printk(KERN_ERR "\n err: " xx_fmt)
 #else
 
 #define VCD_MSG_LOW(xx_fmt...)
 #define VCD_MSG_MED(xx_fmt...)
 #define VCD_MSG_HIGH(xx_fmt...)
-
+#define VCD_MSG_ERROR(xx_fmt...)
 #endif
 
-#define VCD_MSG_ERROR(xx_fmt...)	printk(KERN_ERR "\n err: " xx_fmt)
+
 #define VCD_MSG_FATAL(xx_fmt...)	printk(KERN_ERR "\n<FATAL> " xx_fmt)
 
 #define VCD_FAILED_RETURN(rc, xx_fmt...)		\
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 313a998..80e4645 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -14,74 +14,11 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include "internal.h"
-#include <linux/ion.h>
-#include <linux/msm_kgsl.h>
 
 void __attribute__((weak)) arch_report_meminfo(struct seq_file *m)
 {
 }
 
-void show_meminfo(void)
-{
-	struct sysinfo i;
-	struct vmalloc_info vmi;
-	long cached;
-	unsigned long pages[NR_LRU_LISTS];
-	int lru;
-	unsigned long ion_alloc  = ion_iommu_heap_dump_size();
-	unsigned long kgsl_alloc = kgsl_get_alloc_size(1);
-	unsigned long subtotal;
-
-#define K(x) ((x) << (PAGE_SHIFT - 10))
-	si_meminfo(&i);
-	si_swapinfo(&i);
-	cached = global_page_state(NR_FILE_PAGES) -
-		total_swapcache_pages - i.bufferram;
-
-	if (cached < 0)
-		cached = 0;
-
-	get_vmalloc_info(&vmi);
-
-	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
-		pages[lru] = global_page_state(NR_LRU_BASE + lru);
-
-	subtotal = K(i.freeram) + K(i.bufferram) +
-		K(cached) + K(global_page_state(NR_SHMEM)) + K(global_page_state(NR_MLOCK)) +
-		K(global_page_state(NR_ANON_PAGES)) +
-		K(global_page_state(NR_SLAB_RECLAIMABLE) + global_page_state(NR_SLAB_UNRECLAIMABLE)) +
-		(global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024) +
-		K(global_page_state(NR_PAGETABLE)) +
-		(vmi.alloc >> 10) + (kgsl_alloc >> 10) + (ion_alloc >> 10);
-
-	printk("MemFree:        %8lu kB\n"
-			"Buffers:        %8lu kB\n"
-			"Cached:         %8lu kB\n"
-			"Shmem:          %8lu kB\n"
-			"Mlocked:        %8lu kB\n"
-			"AnonPages:      %8lu kB\n"
-			"Slab:           %8lu kB\n"
-			"PageTables:     %8lu kB\n"
-			"KernelStack:    %8lu kB\n"
-			"VmallocAlloc:   %8lu kB\n"
-			"ION_Alloc:      %8lu kB\n"
-			"KGSL_Alloc:     %8lu kB\n"
-			"Subtotal:       %8lu kB\n",
-			K(i.freeram),
-			K(i.bufferram),
-			K(cached),
-			K(global_page_state(NR_SHMEM)),
-			K(global_page_state(NR_MLOCK)),
-			K(global_page_state(NR_ANON_PAGES)),
-			K(global_page_state(NR_SLAB_RECLAIMABLE) + global_page_state(NR_SLAB_UNRECLAIMABLE)),
-			K(global_page_state(NR_PAGETABLE)),
-			global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024,
-			(vmi.alloc >> 10),
-			(ion_alloc >> 10),
-			(kgsl_alloc >> 10),
-			subtotal);
-}
-
 static int meminfo_proc_show(struct seq_file *m, void *v)
 {
 	struct sysinfo i;
@@ -91,8 +28,10 @@
 	long cached;
 	unsigned long pages[NR_LRU_LISTS];
 	int lru;
-	unsigned long kgsl_alloc = kgsl_get_alloc_size(0);
 
+/*
+ * display in kilobytes.
+ */
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	si_meminfo(&i);
 	si_swapinfo(&i);
@@ -110,6 +49,9 @@
 	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
 		pages[lru] = global_page_state(NR_LRU_BASE + lru);
 
+	/*
+	 * Tagged format, for easy grepping and expansion.
+	 */
 	seq_printf(m,
 		"MemTotal:       %8lu kB\n"
 		"MemFree:        %8lu kB\n"
@@ -155,14 +97,7 @@
 		"Committed_AS:   %8lu kB\n"
 		"VmallocTotal:   %8lu kB\n"
 		"VmallocUsed:    %8lu kB\n"
-		"VmallocIoRemap: %8lu kB\n"
-		"VmallocAlloc:   %8lu kB\n"
-		"VmallocMap:     %8lu kB\n"
-		"VmallocUserMap: %8lu kB\n"
-		"VmallocVpage:   %8lu kB\n"
 		"VmallocChunk:   %8lu kB\n"
-		"KGSL_ALLOC:     %8lu kB\n"
-		"ION_ALLOC:      %8d kB\n"
 #ifdef CONFIG_MEMORY_FAILURE
 		"HardwareCorrupted: %5lu kB\n"
 #endif
@@ -221,14 +156,7 @@
 		K(committed),
 		(unsigned long)VMALLOC_TOTAL >> 10,
 		vmi.used >> 10,
-		vmi.ioremap >> 10,
-		vmi.alloc >> 10,
-		vmi.map >> 10,
-		vmi.usermap >> 10,
-		vmi.vpages >> 10,
-		vmi.largest_chunk >> 10,
-		kgsl_alloc >> 10,
-		ion_iommu_heap_dump_size() >> 10
+		vmi.largest_chunk >> 10
 #ifdef CONFIG_MEMORY_FAILURE
 		,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
 #endif
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index 41c14c4..abfb268 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -2,10 +2,19 @@
 #define DMA_COHERENT_H
 
 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+/*
+ * These three functions are only for dma allocator.
+ * Don't use them in device drivers.
+ */
 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 				       dma_addr_t *dma_handle, void **ret);
 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
 
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+			    void *cpu_addr, size_t size, int *ret);
+/*
+ * Standard interface
+ */
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
 extern int
 dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
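
dma_mmap_from_coherent() is meant to be called from an architecture's dma_ops
mmap hook, short-circuiting when the buffer came from the device's coherent
pool. A sketch of that call pattern (the enclosing op is hypothetical,
standing in for an arch .mmap implementation):

static int example_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size)
{
	int ret;

	/* Nonzero means the buffer lives in the device's coherent area
	 * and ret already holds the result to hand back. */
	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	return -ENXIO;	/* not from the coherent pool; use the normal path */
}
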
diff --git a/include/linux/device.h b/include/linux/device.h
index f33b8cd..385d25e 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -35,6 +35,7 @@
 struct bus_type;
 struct device_node;
 struct iommu_ops;
+struct iommu_group;
 
 struct bus_attribute {
 	struct attribute	attr;
@@ -462,6 +463,7 @@
 	const struct attribute_group **groups;	
 
 	void	(*release)(struct device *dev);
+	struct iommu_group	*iommu_group;
 };
 
 #include <linux/pm_wakeup.h>
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index e164e70..18513e3 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -5,16 +5,27 @@
 #include <linux/bitops.h>
 #include <linux/bug.h>
 
+/**
+ * an enum dma_attr represents an attribute associated with a DMA
+ * mapping. The semantics of each attribute should be defined in
+ * Documentation/DMA-attributes.txt.
+ */
 enum dma_attr {
 	DMA_ATTR_WRITE_BARRIER,
 	DMA_ATTR_WEAK_ORDERING,
 	DMA_ATTR_WRITE_COMBINE,
 	DMA_ATTR_NON_CONSISTENT,
+	DMA_ATTR_NO_KERNEL_MAPPING,
+	DMA_ATTR_STRONGLY_ORDERED,
 	DMA_ATTR_MAX,
 };
 
 #define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
 
+/**
+ * struct dma_attrs - an opaque container for DMA attributes
+ * @flags - bitmask representing a collection of enum dma_attr
+ */
 struct dma_attrs {
 	unsigned long flags[__DMA_ATTRS_LONGS];
 };
@@ -30,6 +41,11 @@
 }
 
 #ifdef CONFIG_HAVE_DMA_ATTRS
+/**
+ * dma_set_attr - set a specific attribute
+ * @attr: attribute to set
+ * @attrs: struct dma_attrs (may be NULL)
+ */
 static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
 {
 	if (attrs == NULL)
@@ -38,6 +54,11 @@
 	__set_bit(attr, attrs->flags);
 }
 
+/**
+ * dma_get_attr - check for a specific attribute
+ * @attr: attribute to check for
+ * @attrs: struct dma_attrs (may be NULL)
+ */
 static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
 {
 	if (attrs == NULL)
@@ -45,7 +66,7 @@
 	BUG_ON(attr >= DMA_ATTR_MAX);
 	return test_bit(attr, attrs->flags);
 }
-#else 
+#else /* !CONFIG_HAVE_DMA_ATTRS */
 static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
 {
 }
@@ -54,5 +75,5 @@
 {
 	return 0;
 }
-#endif 
-#endif 
+#endif /* CONFIG_HAVE_DMA_ATTRS */
+#endif /* _DMA_ATTR_H */
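
A usage sketch for the two new attributes (illustration only; it assumes the
dma_alloc_attrs()/dma_free_attrs() entry points that arrive with the
HAVE_DMA_ATTRS rework this series pulls in):

static void *example_alloc_wc(struct device *dev, dma_addr_t *handle)
{
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/* With DMA_ATTR_NO_KERNEL_MAPPING the return value is an opaque
	 * cookie for dma_free_attrs()/mmap, not a kernel address. */
	return dma_alloc_attrs(dev, SZ_64K, handle, GFP_KERNEL, &attrs);
}
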
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 95b15d6..97b8e0b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -22,12 +22,14 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/scatterlist.h>
+#include <linux/err.h>
 
 #define IOMMU_READ	(1)
 #define IOMMU_WRITE	(2)
 #define IOMMU_CACHE	(4) /* DMA cache coherency */
 
 struct iommu_ops;
+struct iommu_group;
 struct bus_type;
 struct device;
 struct iommu_domain;
@@ -37,12 +39,13 @@
 #define IOMMU_FAULT_WRITE	0x1
 
 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
-				struct device *, unsigned long, int);
+			struct device *, unsigned long, int, void *);
 
 struct iommu_domain {
 	struct iommu_ops *ops;
 	void *priv;
 	iommu_fault_handler_t handler;
+	void *handler_token;
 };
 
 #define IOMMU_CAP_CACHE_COHERENCY	0x1
@@ -61,6 +64,8 @@
  * @iova_to_phys: translate iova to physical address
  * @domain_has_cap: domain capabilities query
  * @commit: commit iommu domain
+ * @add_device: add device to iommu grouping
+ * @remove_device: remove device from iommu grouping
  * @pgsize_bitmap: bitmap of supported page sizes
  */
 struct iommu_ops {
@@ -81,10 +86,18 @@
 	int (*domain_has_cap)(struct iommu_domain *domain,
 			      unsigned long cap);
 	phys_addr_t (*get_pt_base_addr)(struct iommu_domain *domain);
-	int (*device_group)(struct device *dev, unsigned int *groupid);
+	int (*add_device)(struct device *dev);
+	void (*remove_device)(struct device *dev);
 	unsigned long pgsize_bitmap;
 };
 
+#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
+#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
+#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
+#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
+#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
+#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */
+
 extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
 extern bool iommu_present(struct bus_type *bus);
 extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus, int flags);
@@ -107,8 +120,31 @@
 				unsigned long cap);
 extern phys_addr_t iommu_get_pt_base_addr(struct iommu_domain *domain);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
-					iommu_fault_handler_t handler);
-extern int iommu_device_group(struct device *dev, unsigned int *groupid);
+			iommu_fault_handler_t handler, void *token);
+
+extern int iommu_attach_group(struct iommu_domain *domain,
+			      struct iommu_group *group);
+extern void iommu_detach_group(struct iommu_domain *domain,
+			       struct iommu_group *group);
+extern struct iommu_group *iommu_group_alloc(void);
+extern void *iommu_group_get_iommudata(struct iommu_group *group);
+extern void iommu_group_set_iommudata(struct iommu_group *group,
+				      void *iommu_data,
+				      void (*release)(void *iommu_data));
+extern int iommu_group_set_name(struct iommu_group *group, const char *name);
+extern int iommu_group_add_device(struct iommu_group *group,
+				  struct device *dev);
+extern void iommu_group_remove_device(struct device *dev);
+extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+				    int (*fn)(struct device *, void *));
+extern struct iommu_group *iommu_group_get(struct device *dev);
+extern struct iommu_group *iommu_group_find(const char *name);
+extern void iommu_group_put(struct iommu_group *group);
+extern int iommu_group_register_notifier(struct iommu_group *group,
+					 struct notifier_block *nb);
+extern int iommu_group_unregister_notifier(struct iommu_group *group,
+					   struct notifier_block *nb);
+extern int iommu_group_id(struct iommu_group *group);
 
 /**
  * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
@@ -144,7 +180,8 @@
 	 * invoke it.
 	 */
 	if (domain->handler)
-		ret = domain->handler(domain, dev, iova, flags);
+		ret = domain->handler(domain, dev, iova, flags,
+						domain->handler_token);
 
 	return ret;
 }
@@ -152,6 +189,7 @@
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
+struct iommu_group {};
 
 static inline bool iommu_present(struct bus_type *bus)
 {
@@ -221,15 +259,90 @@
 }
 
 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
-					iommu_fault_handler_t handler)
+				iommu_fault_handler_t handler, void *token)
 {
 }
 
-static inline int iommu_device_group(struct device *dev, unsigned int *groupid)
+static inline int iommu_attach_group(struct iommu_domain *domain,
+				     struct iommu_group *group)
 {
 	return -ENODEV;
 }
 
+static inline void iommu_detach_group(struct iommu_domain *domain,
+				      struct iommu_group *group)
+{
+}
+
+static inline struct iommu_group *iommu_group_alloc(void)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void *iommu_group_get_iommudata(struct iommu_group *group)
+{
+	return NULL;
+}
+
+static inline void iommu_group_set_iommudata(struct iommu_group *group,
+					     void *iommu_data,
+					     void (*release)(void *iommu_data))
+{
+}
+
+static inline int iommu_group_set_name(struct iommu_group *group,
+				       const char *name)
+{
+	return -ENODEV;
+}
+
+static inline int iommu_group_add_device(struct iommu_group *group,
+					 struct device *dev)
+{
+	return -ENODEV;
+}
+
+static inline void iommu_group_remove_device(struct device *dev)
+{
+}
+
+static inline int iommu_group_for_each_dev(struct iommu_group *group,
+					   void *data,
+					   int (*fn)(struct device *, void *))
+{
+	return -ENODEV;
+}
+
+static inline struct iommu_group *iommu_group_get(struct device *dev)
+{
+	return NULL;
+}
+
+static inline struct iommu_group *iommu_group_find(const char *name)
+{
+	return NULL;
+}
+
+static inline void iommu_group_put(struct iommu_group *group)
+{
+}
+
+static inline int iommu_group_register_notifier(struct iommu_group *group,
+				  struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int iommu_group_unregister_notifier(struct iommu_group *group,
+				    struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int iommu_group_id(struct iommu_group *group)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_IOMMU_API */
 
 #endif /* __LINUX_IOMMU_H */
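
The group API and the token-taking fault handler are meant to be used
together: look up the device's group, install a handler with some driver
context, then attach the group. A sketch (error handling trimmed; the token
is whatever context the driver wants handed back on a fault):

static int example_fault(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *token)
{
	dev_err(dev, "iommu fault at 0x%lx (flags 0x%x), token %p\n",
			iova, flags, token);
	return -ENOSYS;	/* not handled; report_iommu_fault() passes it up */
}

static int example_attach(struct iommu_domain *domain, struct device *dev,
		void *token)
{
	struct iommu_group *group = iommu_group_get(dev);
	int ret;

	if (!group)
		return -ENODEV;
	iommu_set_fault_handler(domain, example_fault, token);
	ret = iommu_attach_group(domain, group);
	iommu_group_put(group);
	return ret;
}
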
diff --git a/include/linux/ion.h b/include/linux/ion.h
index b895cbd..67b5e6c 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -2,7 +2,7 @@
  * include/linux/ion.h
  *
  * Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -33,14 +33,12 @@
  * @ION_HEAP_TYPE_CP:	 memory allocated from a prereserved
  *				carveout heap, allocations are physically
  *				contiguous. Used for content protection.
- * @ION_HEAP_TYPE_DMA:          memory allocated via DMA API
  * @ION_HEAP_END:		helper for iterating over heaps
  */
 enum ion_heap_type {
 	ION_HEAP_TYPE_SYSTEM,
 	ION_HEAP_TYPE_SYSTEM_CONTIG,
 	ION_HEAP_TYPE_CARVEOUT,
-	ION_HEAP_TYPE_DMA,
 	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
 				 are at the end of this enum */
 	ION_NUM_HEAPS,
@@ -49,7 +47,6 @@
 #define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
 #define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
 #define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
-#define ION_HEAP_TYPE_DMA_MASK         (1 << ION_HEAP_TYPE_DMA)
 
 /**
  * heap flags - the lower 16 bits are used by core ion, the upper 16
@@ -59,6 +56,9 @@
 					   cached, ion will do cache
 					   maintenance when the buffer is
 					   mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2	/* mappings of this buffer will be
+					   created at mmap time; if set,
+					   caches must be managed manually */
 
 #ifdef __KERNEL__
 #include <linux/err.h>
@@ -74,7 +74,6 @@
    be converted to phys_addr_t.  For the time being many kernel interfaces
    do not accept phys_addr_t's that would have to */
 #define ion_phys_addr_t unsigned long
-#define ion_virt_addr_t unsigned long
 
 /**
  * struct ion_platform_heap - defines a heap in the given platform
@@ -227,11 +226,9 @@
  * ion_map_kernel - create mapping for the given handle
  * @client:	the client
  * @handle:	handle to map
- * @flags:	flags for this mapping
  *
  * Map the given handle into the kernel and return a kernel address that
- * can be used to access this address. If no flags are specified, this
- * will return a non-secure uncached mapping.
+ * can be used to access this address.
  */
 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
 
@@ -378,8 +375,6 @@
 int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
 			void *vaddr, unsigned long len, unsigned int cmd);
 
-int ion_iommu_heap_dump_size(void);
-
 #else
 static inline void ion_reserve(struct ion_platform_data *data)
 {
@@ -425,7 +420,7 @@
 }
 
 static inline void *ion_map_kernel(struct ion_client *client,
-	struct ion_handle *handle, unsigned long flags)
+	struct ion_handle *handle)
 {
 	return ERR_PTR(-ENODEV);
 }
@@ -460,6 +455,12 @@
 	return -ENODEV;
 }
 
+static inline int ion_handle_get_size(struct ion_client *client,
+				struct ion_handle *handle, unsigned long *size)
+{
+	return -ENODEV;
+}
+
 static inline void ion_unmap_iommu(struct ion_client *client,
 			struct ion_handle *handle, int domain_num,
 			int partition_num)
@@ -480,6 +481,10 @@
 	return -ENODEV;
 }
 
+static inline void ion_mark_dangling_buffers_locked(struct ion_device *dev)
+{
+}
+
 static inline int msm_ion_do_cache_op(struct ion_client *client,
 			struct ion_handle *handle, void *vaddr,
 			unsigned long len, unsigned int cmd)
@@ -601,6 +606,16 @@
 #define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
 
 /**
+ * DOC: ION_IOC_SYNC - sync a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary, use it after touching a cached buffer from the CPU;
+ * this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
  * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
  *
  * Takes the argument of the architecture specific ioctl to call and
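
From userspace, the ION_IOC_SYNC fallback looks roughly like this (sketch;
ion_fd is an open /dev/ion client fd and buf_fd a shared dma_buf fd, both
assumed to exist already):

#include <sys/ioctl.h>
#include <linux/ion.h>

/* Make a cached buffer coherent after CPU writes, before a device that
 * bypasses the CPU caches reads it. */
static int example_sync(int ion_fd, int buf_fd)
{
	struct ion_fd_data data = { .fd = buf_fd };

	return ioctl(ion_fd, ION_IOC_SYNC, &data);
}
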
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
index edf2401..3c3c7a9 100644
--- a/include/linux/msm_ion.h
+++ b/include/linux/msm_ion.h
@@ -1,18 +1,3 @@
-/*
- *
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
 #ifndef _LINUX_MSM_ION_H
 #define _LINUX_MSM_ION_H
 
@@ -21,7 +6,9 @@
 enum msm_ion_heap_types {
 	ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
 	ION_HEAP_TYPE_IOMMU = ION_HEAP_TYPE_MSM_START,
+	ION_HEAP_TYPE_DMA,
 	ION_HEAP_TYPE_CP,
+	ION_HEAP_TYPE_SECURE_DMA,
 };
 
 /**
@@ -39,16 +26,19 @@
 	ION_CP_MFC_HEAP_ID = 12,
 	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
 	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
+	ION_SYSTEM_CONTIG_HEAP_ID = 21,
 	ION_ADSP_HEAP_ID = 22,
+	ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
 	ION_SF_HEAP_ID = 24,
 	ION_IOMMU_HEAP_ID = 25,
+	ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
 	ION_QSECOM_HEAP_ID = 27,
 	ION_AUDIO_HEAP_ID = 28,
 
 	ION_MM_FIRMWARE_HEAP_ID = 29,
 	ION_SYSTEM_HEAP_ID = 30,
 
-	ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_SECURE flag */
+	ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_FLAG_SECURE flag */
 };
 
 enum ion_fixed_position {
@@ -67,18 +57,25 @@
 };
 
 #define ION_HEAP_CP_MASK		(1 << ION_HEAP_TYPE_CP)
+#define ION_HEAP_TYPE_DMA_MASK         (1 << ION_HEAP_TYPE_DMA)
 
 /**
  * Flag to use when allocating to indicate that a heap is secure.
  */
-#define ION_SECURE (1 << ION_HEAP_ID_RESERVED)
+#define ION_FLAG_SECURE (1 << ION_HEAP_ID_RESERVED)
 
 /**
  * Flag for clients to force contiguous memory allocation
  *
  * Use of this flag is carefully monitored!
  */
-#define ION_FORCE_CONTIGUOUS (1 << 30)
+#define ION_FLAG_FORCE_CONTIGUOUS (1 << 30)
+
+/**
+ * Deprecated! Please use the corresponding ION_FLAG_*
+ */
+#define ION_SECURE ION_FLAG_SECURE
+#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
 
 /**
  * Macro should be used with ion_heap_ids defined above.
@@ -87,6 +84,7 @@
 
 #define ION_ADSP_HEAP_NAME	"adsp"
 #define ION_VMALLOC_HEAP_NAME	"vmalloc"
+#define ION_KMALLOC_HEAP_NAME	"kmalloc"
 #define ION_AUDIO_HEAP_NAME	"audio"
 #define ION_SF_HEAP_NAME	"sf"
 #define ION_MM_HEAP_NAME	"mm"
@@ -95,6 +93,8 @@
 #define ION_MFC_HEAP_NAME	"mfc"
 #define ION_WB_HEAP_NAME	"wb"
 #define ION_MM_FIRMWARE_HEAP_NAME	"mm_fw"
+#define ION_PIL1_HEAP_NAME  "pil_1"
+#define ION_PIL2_HEAP_NAME  "pil_2"
 #define ION_QSECOM_HEAP_NAME	"qsecom"
 #define ION_FMEM_HEAP_NAME	"fmem"
 
@@ -111,6 +111,12 @@
  */
 #define ION_IOMMU_UNMAP_DELAYED 1
 
+/*
+ * This flag allows clients to defer unsecuring a buffer until the buffer
+ * is actually freed.
+ */
+#define ION_UNSECURE_DELAYED	1
+
 /**
  * struct ion_cp_heap_pdata - defines a content protection heap in the given
  * platform
@@ -136,7 +142,9 @@
  *			goes from 1 -> 0
  * @setup_region:	function to be called upon ion registration
  * @memory_type:Memory type used for the heap
- * @no_nonsecure_alloc: don't allow non-secure allocations from this heap
+ * @allow_nonsecure_alloc: allow non-secure allocations from this heap. For
+ *			secure heaps, this flag must be set to allow non-secure
+ *			allocations. For non-secure heaps, this flag is ignored.
  *
  */
 struct ion_cp_heap_pdata {
@@ -150,12 +158,12 @@
 	enum ion_fixed_position fixed_position;
 	int iommu_map_all;
 	int iommu_2x_map_domain;
-	ion_virt_addr_t *virt_addr;
+	void *virt_addr;
 	int (*request_region)(void *);
 	int (*release_region)(void *);
 	void *(*setup_region)(void);
 	enum ion_memory_types memory_type;
-	int no_nonsecure_alloc;
+	int allow_nonsecure_alloc;
 };
 
 /**
@@ -228,6 +236,26 @@
  * Returns 0 on success
  */
 int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage);
+
+/**
+ * msm_ion_secure_buffer - secure an individual buffer
+ *
+ * @client - client who has access to the buffer
+ * @handle - buffer to secure
+ * @usage - usage hint to TZ
+ * @flags - flags for the securing
+ */
+int msm_ion_secure_buffer(struct ion_client *client, struct ion_handle *handle,
+				enum cp_mem_usage usage, int flags);
+
+/**
+ * msm_ion_unsecure_buffer - unsecure an individual buffer
+ *
+ * @client - client who has access to the buffer
+ * @handle - buffer to secure
+ */
+int msm_ion_unsecure_buffer(struct ion_client *client,
+				struct ion_handle *handle);
 #else
 static inline int msm_ion_secure_heap(int heap_id)
 {
@@ -250,6 +278,20 @@
 {
 	return -ENODEV;
 }
+
+static inline int msm_ion_secure_buffer(struct ion_client *client,
+					struct ion_handle *handle,
+					enum cp_mem_usage usage,
+					int flags)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ion_unsecure_buffer(struct ion_client *client,
+					struct ion_handle *handle)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_ION */
 
 #endif /* __KERNEL */
@@ -274,19 +316,6 @@
 	unsigned int length;
 };
 
-/* struct ion_flag_data - information about flags for this buffer
- *
- * @handle:	handle to get flags from
- * @flags:	flags of this handle
- *
- * Takes handle as an input and outputs the flags from the handle
- * in the flag field.
- */
-struct ion_flag_data {
-	struct ion_handle *handle;
-	unsigned long flags;
-};
-
 #define ION_IOC_MSM_MAGIC 'M'
 
 /**
@@ -311,13 +340,4 @@
 #define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 2, \
 						struct ion_flush_data)
 
-/**
- * DOC: ION_IOC_GET_FLAGS - get the flags of the handle
- *
- * Gets the flags of the current handle which indicate cachability,
- * secure state etc.
- */
-#define ION_IOC_GET_FLAGS		_IOWR(ION_IOC_MSM_MAGIC, 3, \
-						struct ion_flag_data)
-
 #endif
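
The new per-buffer secure calls pair naturally with a content-protection heap
allocation. A sketch (the heap mask and the caller-supplied usage value are
assumptions for illustration; enum cp_mem_usage is not shown in this header):

static struct ion_handle *example_secure_alloc(struct ion_client *client,
		size_t len, enum cp_mem_usage usage)
{
	struct ion_handle *handle;

	handle = ion_alloc(client, len, SZ_4K, ION_HEAP_CP_MASK,
			ION_FLAG_SECURE);
	if (IS_ERR_OR_NULL(handle))
		return handle;

	if (msm_ion_secure_buffer(client, handle, usage, 0)) {
		ion_free(client, handle);
		return ERR_PTR(-EACCES);
	}
	return handle;
}
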
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 440bb92..2ad040e 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -1,6 +1,13 @@
 #ifndef _MSM_KGSL_H
 #define _MSM_KGSL_H
 
+/*
+ * The KGSL version has proven not to be very useful in userspace if features
+ * are cherry picked into other trees out of order, so it is frozen as of 3.14.
+ * It is left here for backwards compatibility and as a reminder that
+ * software releases are never linear. Also, I like pie.
+ */
+
 #define KGSL_VERSION_MAJOR        3
 #define KGSL_VERSION_MINOR        14
 
@@ -14,17 +21,37 @@
 #define KGSL_CONTEXT_PER_CONTEXT_TS	0x00000040
 #define KGSL_CONTEXT_USER_GENERATED_TS	0x00000080
 #define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
+/* bits [12:15] are reserved for future use */
+#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
+#define KGSL_CONTEXT_TYPE_SHIFT         20
 
+#define KGSL_CONTEXT_TYPE_ANY		0
+#define KGSL_CONTEXT_TYPE_GL		1
+#define KGSL_CONTEXT_TYPE_CL		2
+#define KGSL_CONTEXT_TYPE_C2D		3
+#define KGSL_CONTEXT_TYPE_RS		4
 
 #define KGSL_CONTEXT_INVALID 0xffffffff
 
-/* Memory allocayion flags */
-#define KGSL_MEMFLAGS_GPUREADONLY	0x01000000
+/* --- Memory allocation flags --- */
 
+/* General allocation hints */
+#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
+#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000
+
+/* Memory caching hints */
+#define KGSL_CACHEMODE_MASK 0x0C000000
+#define KGSL_CACHEMODE_SHIFT 26
+
+#define KGSL_CACHEMODE_WRITECOMBINE 0
+#define KGSL_CACHEMODE_UNCACHED 1
+#define KGSL_CACHEMODE_WRITETHROUGH 2
+#define KGSL_CACHEMODE_WRITEBACK 3
+
+/* Memory types for which allocations are made */
 #define KGSL_MEMTYPE_MASK		0x0000FF00
 #define KGSL_MEMTYPE_SHIFT		8
 
-/* Memory types for which allocations are made */
 #define KGSL_MEMTYPE_OBJECTANY			0
 #define KGSL_MEMTYPE_FRAMEBUFFER		1
 #define KGSL_MEMTYPE_RENDERBUFFER		2
@@ -55,7 +82,8 @@
 #define KGSL_MEMALIGN_MASK		0x00FF0000
 #define KGSL_MEMALIGN_SHIFT		16
 
-/* generic flag values */
+/* --- generic KGSL flag values --- */
+
 #define KGSL_FLAGS_NORMALMODE  0x00000000
 #define KGSL_FLAGS_SAFEMODE    0x00000001
 #define KGSL_FLAGS_INITIALIZED0 0x00000002
@@ -165,31 +193,6 @@
 	KGSL_PROP_VERSION         = 0x00000008,
 	KGSL_PROP_GPU_RESET_STAT  = 0x00000009,
 	KGSL_PROP_PWRCTRL         = 0x0000000E,
-	KGSL_PROP_FAULT_TOLERANCE = 0x00000011,
-};
-
-/* Fault Tolerance policy flags */
-#define  KGSL_FT_DISABLE                  0x00000001
-#define  KGSL_FT_REPLAY                   0x00000002
-#define  KGSL_FT_SKIPIB                   0x00000004
-#define  KGSL_FT_SKIPFRAME                0x00000008
-#define  KGSL_FT_DEFAULT_POLICY           (KGSL_FT_REPLAY + KGSL_FT_SKIPIB)
-
-/* Pagefault policy flags */
-#define KGSL_FT_PAGEFAULT_INT_ENABLE         0x00000001
-#define KGSL_FT_PAGEFAULT_GPUHALT_ENABLE     0x00000002
-#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE   0x00000004
-#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT    0x00000008
-#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY     (KGSL_FT_PAGEFAULT_INT_ENABLE + \
-					KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
-
-/* Fault tolerance config */
-struct kgsl_ft_config {
-	unsigned int ft_policy;    /* Fault Tolerance policy flags */
-	unsigned int ft_pf_policy; /* Pagefault policy flags */
-	unsigned int ft_pm_dump;   /* KGSL enable postmortem dump */
-	unsigned int ft_detect_ms;
-	unsigned int ft_dos_timeout_ms;
 };
 
 struct kgsl_shadowprop {
@@ -205,6 +208,26 @@
 	unsigned int dev_minor;
 };
 
+/* Performance counter groups */
+
+#define KGSL_PERFCOUNTER_GROUP_CP 0x0
+#define KGSL_PERFCOUNTER_GROUP_RBBM 0x1
+#define KGSL_PERFCOUNTER_GROUP_PC 0x2
+#define KGSL_PERFCOUNTER_GROUP_VFD 0x3
+#define KGSL_PERFCOUNTER_GROUP_HLSQ 0x4
+#define KGSL_PERFCOUNTER_GROUP_VPC 0x5
+#define KGSL_PERFCOUNTER_GROUP_TSE 0x6
+#define KGSL_PERFCOUNTER_GROUP_RAS 0x7
+#define KGSL_PERFCOUNTER_GROUP_UCHE 0x8
+#define KGSL_PERFCOUNTER_GROUP_TP 0x9
+#define KGSL_PERFCOUNTER_GROUP_SP 0xA
+#define KGSL_PERFCOUNTER_GROUP_RB 0xB
+#define KGSL_PERFCOUNTER_GROUP_PWR 0xC
+#define KGSL_PERFCOUNTER_GROUP_VBIF 0xD
+#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE
+
+#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF
+
 /* structure holds list of ibs */
 struct kgsl_ibdesc {
 	unsigned int gpuaddr;
@@ -446,6 +469,14 @@
 #define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
 	_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
 
+/*
+ * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_CACHE_SYNC which
+ * supports both directions (flush and invalidate). This code will still
+ * work, but by definition it will do a flush of the cache which might not be
+ * what you want to have happen on a buffer following a GPU operation.  It is
+ * safer to go with IOCTL_KGSL_GPUMEM_CACHE_SYNC
+ */
+
 #define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
 	_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
 
@@ -538,7 +569,218 @@
 #define IOCTL_KGSL_TIMESTAMP_EVENT \
 	_IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
 
-unsigned int kgsl_get_alloc_size(int detailed);
+/**
+ * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
+ * @id: returned id value for this allocation.
+ * @flags: mask of KGSL_MEM* values requested and actual flags on return.
+ * @size: requested size of the allocation and actual size on return.
+ * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
+ * @gpuaddr: returned GPU address for the allocation
+ *
+ * Allocate memory for access by the GPU. The flags and size fields are echoed
+ * back by the kernel, so that the caller can know if the request was
+ * adjusted.
+ *
+ * Supported flags:
+ * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
+ * KGSL_MEMTYPE*: usage hint, as a debugging aid
+ * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
+ * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
+ * address will be 0. Calling mmap() will set the GPU address.
+ */
+struct kgsl_gpumem_alloc_id {
+	unsigned int id;
+	unsigned int flags;
+	unsigned int size;
+	unsigned int mmapsize;
+	unsigned long gpuaddr;
+/* private: reserved for future use*/
+	unsigned int __pad[2];
+};
+
+#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
+	_IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
+
+/**
+ * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
+ * @id: GPU allocation id to free
+ *
+ * Free an allocation by id, in case a GPU address has not been assigned or
+ * is unknown. Freeing an allocation by id with this ioctl and freeing it by
+ * GPU address with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
+ */
+struct kgsl_gpumem_free_id {
+	unsigned int id;
+/* private: reserved for future use*/
+	unsigned int __pad;
+};
+
+#define IOCTL_KGSL_GPUMEM_FREE_ID \
+	_IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)
+
+/**
+ * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
+ * @gpuaddr: GPU address to query. Also set on return.
+ * @id: GPU allocation id to query. Also set on return.
+ * @flags: returned mask of KGSL_MEM* values.
+ * @size: returned size of the allocation.
+ * @mmapsize: returned size to pass to mmap(), which may be larger than 'size'
+ * @useraddr: returned address of the userspace mapping for this buffer
+ *
+ * This ioctl allows querying of all user visible attributes of an existing
+ * allocation, by either the GPU address or the id returned by a previous
+ * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
+ * return all attributes so this ioctl can be used to look them up if needed.
+ *
+ */
+struct kgsl_gpumem_get_info {
+	unsigned long gpuaddr;
+	unsigned int id;
+	unsigned int flags;
+	unsigned int size;
+	unsigned int mmapsize;
+	unsigned long useraddr;
+/* private: reserved for future use */
+	unsigned int __pad[4];
+};
+
+#define IOCTL_KGSL_GPUMEM_GET_INFO \
+	_IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)
+
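
Continuing the sketch above, looking up attributes by id (buf_id is assumed to come from an earlier IOCTL_KGSL_GPUMEM_ALLOC_ID call; stdio assumed):

	struct kgsl_gpumem_get_info info;

	memset(&info, 0, sizeof(info));
	info.id = buf_id;	/* querying by gpuaddr works the same way */
	if (ioctl(fd, IOCTL_KGSL_GPUMEM_GET_INFO, &info) == 0)
		printf("flags=0x%x size=%u useraddr=0x%lx\n",
		       info.flags, info.size, info.useraddr);
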
+/**
+ * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
+ * @gpuaddr: GPU address of the buffer to sync.
+ * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
+ * @op: a mask of KGSL_GPUMEM_CACHE_* values
+ *
+ * Sync the L2 cache for memory headed to and from the GPU - this replaces
+ * IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for
+ * both directions.
+ *
+ */
+struct kgsl_gpumem_sync_cache {
+	unsigned int gpuaddr;
+	unsigned int id;
+	unsigned int op;
+/* private: reserved for future use */
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
+#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN
+
+#define KGSL_GPUMEM_CACHE_INV (1 << 1)
+#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV
+
+#define KGSL_GPUMEM_CACHE_FLUSH \
+	(KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)
+
+#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
+	_IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
+
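
Continuing the sketch, the direction-aware replacement for the old flush ioctl: clean before the GPU reads, invalidate after it writes (fd and buf_id assumed as above):

	struct kgsl_gpumem_sync_cache sync;

	memset(&sync, 0, sizeof(sync));
	sync.id = buf_id;			/* or fill in sync.gpuaddr instead */
	sync.op = KGSL_GPUMEM_CACHE_TO_GPU;	/* CPU wrote, GPU will read */
	ioctl(fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &sync);

	/* ... after the GPU has written the buffer ... */
	sync.op = KGSL_GPUMEM_CACHE_FROM_GPU;	/* discard stale CPU cache lines */
	ioctl(fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &sync);
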
+/**
+ * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
+ * @groupid: Performance counter group ID
+ * @countable: Countable to select within the group
+ * @offset: Return offset of the reserved counter
+ *
+ * Get an available performance counter from a specified groupid.  The offset
+ * of the performance counter will be returned after successfully assigning
+ * the countable to the counter for the specified group.  An error will be
+ * returned and an offset of 0 if the groupid is invalid or there are no
+ * more counters left.  After successfully getting a perfcounter, the user
+ * must issue IOCTL_KGSL_PERFCOUNTER_PUT with the same groupid and countable
+ * when finished with the perfcounter to release its resources.
+ *
+ */
+struct kgsl_perfcounter_get {
+	unsigned int groupid;
+	unsigned int countable;
+	unsigned int offset;
+/* private: reserved for future use */
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_PERFCOUNTER_GET \
+	_IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)
+
+/**
+ * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
+ * @groupid: Performance counter group ID
+ * @countable: Countable to release within the group
+ *
+ * Put an allocated performance counter to allow others to have access to the
+ * resource that was previously taken.  This is only to be called after
+ * successfully getting a performance counter from kgsl_perfcounter_get().
+ *
+ */
+struct kgsl_perfcounter_put {
+	unsigned int groupid;
+	unsigned int countable;
+/* private: reserved for future use */
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_PERFCOUNTER_PUT \
+	_IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
+
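
The get/put pair is a reserve/release protocol. A hedged sketch (the group and countable values are arbitrary examples):

	struct kgsl_perfcounter_get get;
	struct kgsl_perfcounter_put put;

	memset(&get, 0, sizeof(get));
	get.groupid = KGSL_PERFCOUNTER_GROUP_SP;
	get.countable = 0;	/* illustrative countable */
	if (ioctl(fd, IOCTL_KGSL_PERFCOUNTER_GET, &get) == 0) {
		/* ... sample the counter at register offset get.offset ... */
		memset(&put, 0, sizeof(put));
		put.groupid = get.groupid;
		put.countable = get.countable;
		ioctl(fd, IOCTL_KGSL_PERFCOUNTER_PUT, &put);
	}
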
+/**
+ * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
+ * @groupid: Performance counter group ID
+ * @countables: array in which the active countables are returned
+ * @count: size of the countables array
+ * @max_counters: returns the total number of counters for the group ID
+ *
+ * Query the available performance counters given a groupid.  The array
+ * *countables is used to return the current active countables in counters.
+ * The size of the array is passed in so the kernel will write at most
+ * count entries for the group id.  The total number of available
+ * counters for the group ID is returned in max_counters.
+ * If the array or size passed in are invalid, then only the maximum number
+ * of counters will be returned; no data will be written to *countables.
+ * If the groupid is invalid, an error code will be returned.
+ *
+ */
+struct kgsl_perfcounter_query {
+	unsigned int groupid;
+	/* array used to return the active countables, up to count entries */
+	unsigned int *countables;
+	unsigned int count;
+	unsigned int max_counters;
+/* private: reserved for future use */
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_PERFCOUNTER_QUERY \
+	_IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)
+
+/**
+ * struct kgsl_perfcounter_read - argument to IOCTL_KGSL_PERFCOUNTER_READ
+ * @reads: array of kgsl_perfcounter_read_group entries; each entry selects
+ * a groupid/countable pair and receives that counter's value on return
+ * @count: number of entries in the reads array
+ *
+ * Read the current values of the performance counters identified by the
+ * groupid and countable of each entry.
+ *
+ */
+
+struct kgsl_perfcounter_read_group {
+	unsigned int groupid;
+	unsigned int countable;
+	uint64_t value;
+};
+
+struct kgsl_perfcounter_read {
+	struct kgsl_perfcounter_read_group *reads;
+	unsigned int count;
+/* private: reserved for future use */
+	unsigned int __pad[2]; /* For future binary compatibility */
+};
+
+#define IOCTL_KGSL_PERFCOUNTER_READ \
+	_IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
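
A sketch of a batched read, continuing the example above; each entry names a groupid/countable pair and receives its 64-bit value:

	struct kgsl_perfcounter_read_group grp;
	struct kgsl_perfcounter_read req;

	memset(&grp, 0, sizeof(grp));
	grp.groupid = KGSL_PERFCOUNTER_GROUP_SP;
	grp.countable = 0;	/* illustrative countable */
	memset(&req, 0, sizeof(req));
	req.reads = &grp;
	req.count = 1;
	if (ioctl(fd, IOCTL_KGSL_PERFCOUNTER_READ, &req) == 0)
		printf("value=%llu\n", (unsigned long long)grp.value);
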
 
 #ifdef __KERNEL__
 #ifdef CONFIG_MSM_KGSL_DRM
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index de044f1..7e1a709 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -71,10 +71,10 @@
 #define MSMFB_OVERLAY_VSYNC_CTRL _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
 #define MSMFB_VSYNC_CTRL  _IOW(MSMFB_IOCTL_MAGIC, 161, unsigned int)
 #define MSMFB_BUFFER_SYNC  _IOW(MSMFB_IOCTL_MAGIC, 162, struct mdp_buf_sync)
+#define MSMFB_OVERLAY_COMMIT      _IO(MSMFB_IOCTL_MAGIC, 163)
 #define MSMFB_DISPLAY_COMMIT      _IOW(MSMFB_IOCTL_MAGIC, 164, \
 						struct mdp_display_commit)
-#define MSMFB_WRITEBACK_SET_MIRRORING_HINT _IOW(MSMFB_IOCTL_MAGIC, 165, \
-						unsigned int)
+#define MSMFB_METADATA_SET  _IOW(MSMFB_IOCTL_MAGIC, 165, struct msmfb_metadata)
 #define MSMFB_METADATA_GET  _IOW(MSMFB_IOCTL_MAGIC, 166, struct msmfb_metadata)
 
 #define FB_TYPE_3D_PANEL 0x10101010
@@ -95,6 +95,7 @@
 	MDP_RGB_888,      /* RGB 888 planar */
 	MDP_Y_CRCB_H2V2,  /* Y and CrCb, pseudo planar w/ Cr is in MSB */
 	MDP_YCRYCB_H2V1,  /* YCrYCb interleave */
+	MDP_CBYCRY_H2V1,  /* CbYCrY interleave */
 	MDP_Y_CRCB_H2V1,  /* Y and CrCb, pseudo planar w/ Cr is in MSB */
 	MDP_Y_CBCR_H2V1,   /* Y and CbCr, pseudo planar w/ Cb is in MSB */
 	MDP_Y_CRCB_H1V2,
@@ -112,6 +113,9 @@
 	MDP_YCRCB_H1V1,   /* YCrCb interleave */
 	MDP_YCBCR_H1V1,   /* YCbCr interleave */
 	MDP_BGR_565,      /* BGR 565 planer */
+	MDP_BGR_888,      /* BGR 888 */
+	MDP_Y_CBCR_H2V2_VENUS,
+	MDP_BGRX_8888,   /* BGRX 8888 */
 	MDP_IMGTYPE_LIMIT,
 	MDP_RGB_BORDERFILL,	/* border fill pipe */
 	MDP_FB_FORMAT = MDP_IMGTYPE2_START,    /* framebuffer format */
@@ -144,6 +148,7 @@
 #define MDP_DITHER 0x8
 #define MDP_BLUR 0x10
 #define MDP_BLEND_FG_PREMULT 0x20000
+#define MDP_IS_FG 0x40000
 #define MDP_DEINTERLACE 0x80000000
 #define MDP_SHARPENING  0x40000000
 #define MDP_NO_DMA_BARRIER_START	0x20000000
@@ -163,8 +168,9 @@
 #define MDP_BACKEND_COMPOSITION		0x00040000
 #define MDP_BORDERFILL_SUPPORTED	0x00010000
 #define MDP_SECURE_OVERLAY_SESSION      0x00008000
+#define MDP_OV_PIPE_FORCE_DMA		0x00004000
 #define MDP_MEMORY_ID_TYPE_FB		0x00001000
-
+#define MDP_BWC_EN			0x00000400
 #define MDP_TRANSP_NOP 0xffffffff
 #define MDP_ALPHA_NOP 0xff
 
@@ -276,8 +282,24 @@
 	struct msmfb_img img;
 };
 
+#define MDP_PP_OPS_ENABLE 0x1
 #define MDP_PP_OPS_READ 0x2
 #define MDP_PP_OPS_WRITE 0x4
+#define MDP_PP_OPS_DISABLE 0x8
+#define MDP_PP_IGC_FLAG_ROM0	0x10
+#define MDP_PP_IGC_FLAG_ROM1	0x20
+
+#define MDSS_PP_DSPP_CFG	0x0000
+#define MDSS_PP_SSPP_CFG	0x4000
+#define MDSS_PP_LM_CFG	0x8000
+#define MDSS_PP_WB_CFG	0xC000
+
+#define MDSS_PP_LOCATION_MASK	0xC000
+#define MDSS_PP_LOGICAL_MASK	0x3FFF
+
+#define PP_LOCAT(var) ((var) & MDSS_PP_LOCATION_MASK)
+#define PP_BLOCK(var) ((var) & MDSS_PP_LOGICAL_MASK)
+
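
The new location bits pack a post-processing block's position and logical id into one word. A sketch of how a driver might decode them:

	uint32_t cfg = MDSS_PP_DSPP_CFG | 0x1;	/* DSPP location, logical block 1 */

	switch (PP_LOCAT(cfg)) {
	case MDSS_PP_DSPP_CFG:
		/* program DSPP instance PP_BLOCK(cfg) */
		break;
	case MDSS_PP_SSPP_CFG:
		/* program source pipe PP_BLOCK(cfg) */
		break;
	}
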
 
 struct mdp_qseed_cfg {
 	uint32_t table_num;
@@ -286,13 +308,24 @@
 	uint32_t *data;
 };
 
+struct mdp_sharp_cfg {
+	uint32_t flags;
+	uint32_t strength;
+	uint32_t edge_thr;
+	uint32_t smooth_thr;
+	uint32_t noise_thr;
+};
+
 struct mdp_qseed_cfg_data {
 	uint32_t block;
 	struct mdp_qseed_cfg qseed_data;
 };
 
-#define MDP_OVERLAY_PP_CSC_CFG      0x1
-#define MDP_OVERLAY_PP_QSEED_CFG    0x2
+#define MDP_OVERLAY_PP_CSC_CFG         0x1
+#define MDP_OVERLAY_PP_QSEED_CFG       0x2
+#define MDP_OVERLAY_PP_PA_CFG          0x4
+#define MDP_OVERLAY_PP_IGC_CFG         0x8
+#define MDP_OVERLAY_PP_SHARP_CFG       0x10
 
 #define MDP_CSC_FLAG_ENABLE	0x1
 #define MDP_CSC_FLAG_YUV_IN	0x2
@@ -313,10 +346,28 @@
 	struct mdp_csc_cfg csc_data;
 };
 
+struct mdp_pa_cfg {
+	uint32_t flags;
+	uint32_t hue_adj;
+	uint32_t sat_adj;
+	uint32_t val_adj;
+	uint32_t cont_adj;
+};
+
+struct mdp_igc_lut_data {
+	uint32_t block;
+	uint32_t len, ops;
+	uint32_t *c0_c1_data;
+	uint32_t *c2_data;
+};
+
 struct mdp_overlay_pp_params {
 	uint32_t config_ops;
 	struct mdp_csc_cfg csc_cfg;
 	struct mdp_qseed_cfg qseed_cfg[2];
+	struct mdp_pa_cfg pa_cfg;
+	struct mdp_igc_lut_data igc_cfg;
+	struct mdp_sharp_cfg sharp_cfg;
 };
 
 struct mdp_overlay {
@@ -397,7 +448,7 @@
 	uint32_t block;
 	uint8_t frame_cnt;
 	uint8_t bit_mask;
-	uint8_t num_bins;
+	uint16_t num_bins;
 };
 
 /*
@@ -407,7 +458,7 @@
 
 struct mdp_histogram_data {
 	uint32_t block;
-	uint8_t bin_cnt;
+	uint32_t bin_cnt;
 	uint32_t *c0;
 	uint32_t *c1;
 	uint32_t *c2;
@@ -424,6 +475,8 @@
 	struct mdp_pcc_coeff r, g, b;
 };
 
+#define MDP_GAMUT_TABLE_NUM		8
+
 enum {
 	mdp_lut_igc,
 	mdp_lut_pgc,
@@ -431,13 +484,6 @@
 	mdp_lut_max,
 };
 
-struct mdp_igc_lut_data {
-	uint32_t block;
-	uint32_t len, ops;
-	uint32_t *c0_c1_data;
-	uint32_t *c2_data;
-};
-
 struct mdp_ar_gc_lut_data {
 	uint32_t x_start;
 	uint32_t slope;
@@ -477,15 +523,57 @@
 	uint32_t scale;
 };
 
+struct mdp_pa_cfg_data {
+	uint32_t block;
+	struct mdp_pa_cfg pa_data;
+};
+
+struct mdp_dither_cfg_data {
+	uint32_t block;
+	uint32_t flags;
+	uint32_t g_y_depth;
+	uint32_t r_cr_depth;
+	uint32_t b_cb_depth;
+};
+
+struct mdp_gamut_cfg_data {
+	uint32_t block;
+	uint32_t flags;
+	uint32_t gamut_first;
+	uint32_t tbl_size[MDP_GAMUT_TABLE_NUM];
+	uint16_t *r_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *g_tbl[MDP_GAMUT_TABLE_NUM];
+	uint16_t *b_tbl[MDP_GAMUT_TABLE_NUM];
+};
+
+struct mdp_calib_config_data {
+	uint32_t ops;
+	uint32_t addr;
+	uint32_t data;
+};
+
 enum {
 	mdp_op_pcc_cfg,
 	mdp_op_csc_cfg,
 	mdp_op_lut_cfg,
 	mdp_op_qseed_cfg,
 	mdp_bl_scale_cfg,
+	mdp_op_pa_cfg,
+	mdp_op_dither_cfg,
+	mdp_op_gamut_cfg,
+	mdp_op_calib_cfg,
 	mdp_op_max,
 };
 
+enum {
+	WB_FORMAT_NV12,
+	WB_FORMAT_RGB_565,
+	WB_FORMAT_RGB_888,
+	WB_FORMAT_xRGB_8888,
+	WB_FORMAT_ARGB_8888,
+	WB_FORMAT_ARGB_8888_INPUT_ALPHA /* not yet supported */
+};
+
 struct msmfb_mdp_pp {
 	uint32_t op;
 	union {
@@ -494,13 +582,21 @@
 		struct mdp_lut_cfg_data lut_cfg_data;
 		struct mdp_qseed_cfg_data qseed_cfg_data;
 		struct mdp_bl_scale_data bl_scale_data;
+		struct mdp_pa_cfg_data pa_cfg_data;
+		struct mdp_dither_cfg_data dither_cfg_data;
+		struct mdp_gamut_cfg_data gamut_cfg_data;
+		struct mdp_calib_config_data calib_cfg;
 	} data;
 };
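
A hedged userspace sketch of driving one of the new ops through MSMFB_MDP_PP (that ioctl is defined earlier in this header; fb_fd, the block id, and the adjustment values are illustrative assumptions):

	struct msmfb_mdp_pp pp;

	memset(&pp, 0, sizeof(pp));
	pp.op = mdp_op_pa_cfg;
	pp.data.pa_cfg_data.block = MDP_BLOCK_DMA_P;	/* assumed block id */
	pp.data.pa_cfg_data.pa_data.flags = MDP_PP_OPS_WRITE | MDP_PP_OPS_ENABLE;
	pp.data.pa_cfg_data.pa_data.hue_adj = 10;	/* illustrative value */
	ioctl(fb_fd, MSMFB_MDP_PP, &pp);
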
 
+#define FB_METADATA_VIDEO_INFO_CODE_SUPPORT 1
 enum {
 	metadata_op_none,
 	metadata_op_base_blend,
 	metadata_op_frame_rate,
+	metadata_op_vic,
+	metadata_op_wb_format,
+	metadata_op_get_caps,
 	metadata_op_max
 };
 
@@ -508,12 +604,27 @@
 	uint32_t is_premultiplied;
 };
 
+struct mdp_mixer_cfg {
+	uint32_t writeback_format;
+	uint32_t alpha;
+};
+
+struct mdss_hw_caps {
+	uint32_t mdp_rev;
+	uint8_t rgb_pipes;
+	uint8_t vig_pipes;
+	uint8_t dma_pipes;
+};
+
 struct msmfb_metadata {
 	uint32_t op;
 	uint32_t flags;
 	union {
 		struct mdp_blend_cfg blend_cfg;
+		struct mdp_mixer_cfg mixer_cfg;
 		uint32_t panel_frame_rate;
+		uint32_t video_info_code;
+		struct mdss_hw_caps caps;
 	} data;
 };
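
The new metadata ops ride on the struct's union. A sketch of querying hardware capabilities (fb_fd and stdio assumed):

	struct msmfb_metadata meta;

	memset(&meta, 0, sizeof(meta));
	meta.op = metadata_op_get_caps;
	if (ioctl(fb_fd, MSMFB_METADATA_GET, &meta) == 0)
		printf("mdp rev %u: %u rgb / %u vig / %u dma pipes\n",
		       meta.data.caps.mdp_rev, meta.data.caps.rgb_pipes,
		       meta.data.caps.vig_pipes, meta.data.caps.dma_pipes);
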
 
@@ -527,6 +638,7 @@
 	int *rel_fen_fd;
 };
 
+#define MDP_DISPLAY_COMMIT_OVERLAY	1
 struct mdp_buf_fence {
 	uint32_t flags;
 	uint32_t acq_fen_fd_cnt;
@@ -534,12 +646,12 @@
 	int rel_fen_fd[MDP_MAX_FENCE_FD];
 };
 
-#define MDP_DISPLAY_COMMIT_OVERLAY 0x00000001
 
 struct mdp_display_commit {
 	uint32_t flags;
 	uint32_t wait_for_finish;
 	struct fb_var_screeninfo var;
+	struct mdp_buf_fence buf_fence;
 };
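
With the fence structure folded into the commit argument, one ioctl can both queue the flip and exchange sync fds. A hedged sketch; the exact fence semantics (release fd signaled on retire) are an assumption:

	struct mdp_display_commit commit;
	int release_fd;

	memset(&commit, 0, sizeof(commit));
	commit.flags = MDP_DISPLAY_COMMIT_OVERLAY;
	commit.wait_for_finish = 0;
	commit.buf_fence.acq_fen_fd_cnt = 0;	/* no acquire fences in this sketch */
	if (ioctl(fb_fd, MSMFB_DISPLAY_COMMIT, &commit) == 0)
		release_fd = commit.buf_fence.rel_fen_fd[0];
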
 
 struct mdp_page_protection {
@@ -569,14 +681,12 @@
 };
 
 enum {
-	MDP_WRITEBACK_MIRROR_OFF,
-	MDP_WRITEBACK_MIRROR_ON,
-	MDP_WRITEBACK_MIRROR_PAUSE,
-	MDP_WRITEBACK_MIRROR_RESUME,
+	MDP_IOMMU_DOMAIN_CP,
+	MDP_IOMMU_DOMAIN_NS,
 };
 
 #ifdef __KERNEL__
-
+int msm_fb_get_iommu_domain(struct fb_info *info, int domain);
 /* get the framebuffer physical address information */
 int get_fb_phys_info(unsigned long *start, unsigned long *len, int fb_num,
 	int subsys_id);
@@ -589,6 +699,7 @@
 		struct msmfb_data *data);
 int msm_fb_writeback_stop(struct fb_info *info);
 int msm_fb_writeback_terminate(struct fb_info *info);
+int msm_fb_writeback_set_secure(struct fb_info *info, int enable);
 #endif
 
 #endif /*_MSM_MDP_H_*/
diff --git a/include/linux/msm_vidc_dec.h b/include/linux/msm_vidc_dec.h
index 3c99562..35279bf 100644
--- a/include/linux/msm_vidc_dec.h
+++ b/include/linux/msm_vidc_dec.h
@@ -78,6 +78,7 @@
 
 #define VDEC_EXTRADATA_EXT_DATA          0x0800
 #define VDEC_EXTRADATA_USER_DATA         0x1000
+#define VDEC_EXTRADATA_EXT_BUFFER        0x2000
 
 #define VDEC_CMDBASE	0x800
 #define VDEC_CMD_SET_INTF_VERSION	(VDEC_CMDBASE)
@@ -213,6 +214,12 @@
 #define VDEC_IOCTL_SET_PERF_CLK \
 	_IOR(VDEC_IOCTL_MAGIC, 38, struct vdec_ioctl_msg)
 
+#define VDEC_IOCTL_SET_META_BUFFERS \
+	_IOW(VDEC_IOCTL_MAGIC, 39, struct vdec_ioctl_msg)
+
+#define VDEC_IOCTL_FREE_META_BUFFERS \
+	_IO(VDEC_IOCTL_MAGIC, 40)
+
 enum vdec_picture {
 	PICTURE_TYPE_I,
 	PICTURE_TYPE_P,
@@ -236,6 +243,7 @@
 	size_t buffer_size;
 	uint32_t alignment;
 	uint32_t buf_poolid;
+	size_t meta_buffer_size;
 };
 
 struct vdec_bufferpayload {
@@ -274,7 +282,8 @@
 	VDEC_CODECTYPE_MPEG1 = 0x9,
 	VDEC_CODECTYPE_MPEG2 = 0xa,
 	VDEC_CODECTYPE_VC1 = 0xb,
-	VDEC_CODECTYPE_VC1_RCV = 0xc
+	VDEC_CODECTYPE_VC1_RCV = 0xc,
+	VDEC_CODECTYPE_HEVC = 0xd,
 };
 
 enum vdec_mpeg2_profile {
@@ -526,6 +535,11 @@
 	uint32_t par_height;
 };
 
+struct vdec_sep_metadatainfo {
+	void __user *metabufaddr;
+	uint32_t size;
+};
+
 struct vdec_output_frameinfo {
 	void __user *bufferaddr;
 	size_t offset;
@@ -538,6 +552,7 @@
 	struct vdec_framesize framesize;
 	enum vdec_interlaced_format interlaced_format;
 	struct vdec_aspectratioinfo aspect_ratio_info;
+	struct vdec_sep_metadatainfo metadata_info;
 };
 
 union vdec_msgdata {
@@ -571,4 +586,12 @@
 	int alignment;
 };
 
+struct vdec_meta_buffers {
+	size_t size;
+	int count;
+	int pmem_fd;
+	int pmem_fd_iommu;
+	int offset;
+};
+
 #endif /* end of macro _VDECDECODER_H_ */
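
For the new decoder ioctls, a hedged sketch of registering the metadata pool. struct vdec_ioctl_msg (defined earlier in this header) is assumed to be the usual in/out pointer pair, and the ion fds come from the caller:

	struct vdec_meta_buffers meta;
	struct vdec_ioctl_msg msg;

	memset(&meta, 0, sizeof(meta));
	meta.size = meta_buffer_size;	/* from vdec_allocatorproperty */
	meta.count = 2;
	meta.pmem_fd = ion_fd;
	meta.pmem_fd_iommu = ion_fd_iommu;
	msg.in = &meta;
	msg.out = NULL;
	ioctl(vdec_fd, VDEC_IOCTL_SET_META_BUFFERS, &msg);
	/* ... decode ... */
	ioctl(vdec_fd, VDEC_IOCTL_FREE_META_BUFFERS);
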
diff --git a/include/linux/msm_vidc_enc.h b/include/linux/msm_vidc_enc.h
index 9d714f0..dcc2353 100644
--- a/include/linux/msm_vidc_enc.h
+++ b/include/linux/msm_vidc_enc.h
@@ -44,6 +44,8 @@
 #define VEN_MSG_PAUSE	8
 #define VEN_MSG_RESUME	9
 #define VEN_MSG_STOP_READING_MSG	10
+#define VEN_MSG_LTRUSE_FAILED	    11
+
 
 /*Buffer flags bits masks*/
 #define VEN_BUFFLAG_EOS	0x00000001
@@ -56,6 +58,7 @@
 #define VEN_EXTRADATA_NONE          0x001
 #define VEN_EXTRADATA_QCOMFILLER    0x002
 #define VEN_EXTRADATA_SLICEINFO     0x100
+#define VEN_EXTRADATA_LTRINFO       0x200
 
 /*ENCODER CONFIGURATION CONSTANTS*/
 
@@ -144,8 +147,6 @@
 #define VEN_INPUTFMT_NV12	1/* NV12 Linear */
 #define VEN_INPUTFMT_NV21	2/* NV21 Linear */
 #define VEN_INPUTFMT_NV12_16M2KA	3/* NV12 Linear */
-#define VEN_INPUTFMT_NV21_16M2KA	4
-
 
 /*Different allowed rotation modes.*/
 #define VEN_ROTATION_0	1/* 0 degrees */
@@ -460,9 +461,60 @@
 #define VEN_IOCTL_SET_SLICE_DELIVERY_MODE \
 	_IO(VEN_IOCTLBASE_ENC, 50)
 
+#define VEN_IOCTL_SET_H263_PLUSPTYPE \
+	_IOW(VEN_IOCTLBASE_ENC, 51, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_range, OutputData - NULL.*/
+#define VEN_IOCTL_SET_CAPABILITY_LTRCOUNT \
+	_IOW(VEN_IOCTLBASE_ENC, 52, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_range.*/
+#define VEN_IOCTL_GET_CAPABILITY_LTRCOUNT \
+	_IOR(VEN_IOCTLBASE_ENC, 53, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmode, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMODE \
+	_IOW(VEN_IOCTLBASE_ENC, 54, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmode.*/
+#define VEN_IOCTL_GET_LTRMODE \
+	_IOR(VEN_IOCTLBASE_ENC, 55, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrcount, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRCOUNT \
+	_IOW(VEN_IOCTLBASE_ENC, 56, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrcount.*/
+#define VEN_IOCTL_GET_LTRCOUNT \
+	_IOR(VEN_IOCTLBASE_ENC, 57, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrperiod, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRPERIOD \
+	_IOW(VEN_IOCTLBASE_ENC, 58, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrperiod.*/
+#define VEN_IOCTL_GET_LTRPERIOD \
+	_IOR(VEN_IOCTLBASE_ENC, 59, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltruse, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRUSE \
+	_IOW(VEN_IOCTLBASE_ENC, 60, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltruse.*/
+#define VEN_IOCTL_GET_LTRUSE \
+	_IOR(VEN_IOCTLBASE_ENC, 61, struct venc_ioctl_msg)
+
+/*IOCTL params:SET: InputData - venc_ltrmark, OutputData - NULL.*/
+#define VEN_IOCTL_SET_LTRMARK \
+	_IOW(VEN_IOCTLBASE_ENC, 62, struct venc_ioctl_msg)
+/*IOCTL params:GET: InputData - NULL, OutputData - venc_ltrmark.*/
+#define VEN_IOCTL_GET_LTRMARK \
+	_IOR(VEN_IOCTLBASE_ENC, 63, struct venc_ioctl_msg)
+
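
A hedged sketch of configuring long-term reference frames through the new ioctls. struct venc_ioctl_msg is assumed to be the usual in/out pointer pair, and the mode encoding (1 = manual) is an assumption:

	struct venc_ltrmode ltrmode = { .ltr_mode = 1 };	/* assumed manual mode */
	struct venc_ltrcount ltrcount = { .ltr_count = 2 };
	struct venc_ioctl_msg msg = { .in = &ltrmode, .out = NULL };

	ioctl(venc_fd, VEN_IOCTL_SET_LTRMODE, &msg);
	msg.in = &ltrcount;
	ioctl(venc_fd, VEN_IOCTL_SET_LTRCOUNT, &msg);
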
 /*IOCTL params:SET: InputData - unsigned int, OutputData - NULL*/
 #define VEN_IOCTL_SET_SPS_PPS_FOR_IDR \
-	_IOW(VEN_IOCTLBASE_ENC, 51, struct venc_ioctl_msg)
+	_IOW(VEN_IOCTLBASE_ENC, 64, struct venc_ioctl_msg)
+
+struct venc_range {
+	unsigned long	max;
+	unsigned long	min;
+	unsigned long	step_size;
+};
 
 struct venc_switch{
 	unsigned char	status;
@@ -525,6 +577,11 @@
 	unsigned long	maxqp;
 	unsigned long	minqp;
 };
+
+struct venc_plusptype {
+	unsigned long	plusptype_enable;
+};
+
 struct venc_intraperiod{
 	unsigned long	num_pframes;
 	unsigned long	num_bframes;
@@ -620,4 +677,21 @@
 	int alignment;
 };
 
+struct venc_ltrmode {
+	unsigned long   ltr_mode;
+};
+
+struct venc_ltrcount {
+	unsigned long   ltr_count;
+};
+
+struct venc_ltrperiod {
+	unsigned long   ltr_period;
+};
+
+struct venc_ltruse {
+	unsigned long   ltr_id;
+	unsigned long   ltr_frames;
+};
+
 #endif /* _MSM_VIDC_ENC_H_ */
diff --git a/include/media/msm/vcd_api.h b/include/media/msm/vcd_api.h
index 7983a01..09e1a53 100644
--- a/include/media/msm/vcd_api.h
+++ b/include/media/msm/vcd_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
  */
 #ifndef _VCD_API_H_
 #define _VCD_API_H_
+#include <linux/types.h>
 #include "vcd_property.h"
 #include "vcd_status.h"
 
@@ -100,6 +101,7 @@
 	size_t sz;
 	u32 align;
 	u32 buf_pool_id;
+	size_t meta_buffer_size;
 };
 
 struct vcd_init_config {
diff --git a/include/media/msm/vcd_property.h b/include/media/msm/vcd_property.h
index 36133dd..ce6c479 100644
--- a/include/media/msm/vcd_property.h
+++ b/include/media/msm/vcd_property.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,8 @@
 #ifndef _VCD_DRIVER_PROPERTY_H_
 #define _VCD_DRIVER_PROPERTY_H_
 
+#include <linux/types.h>
+
 #define VCD_START_BASE       0x0
 #define VCD_I_LIVE           (VCD_START_BASE + 0x1)
 #define VCD_I_CODEC          (VCD_START_BASE + 0x2)
@@ -57,7 +59,15 @@
 #define VCD_I_SET_TURBO_CLK (VCD_START_BASE + 0x29)
 #define VCD_I_ENABLE_DELIMITER_FLAG (VCD_START_BASE + 0x2A)
 #define VCD_I_ENABLE_VUI_TIMING_INFO (VCD_START_BASE + 0x2B)
-
+#define VCD_I_H263_PLUSPTYPE (VCD_START_BASE + 0x2C)
+#define VCD_I_LTR_MODE (VCD_START_BASE + 0x2D)
+#define VCD_I_LTR_COUNT (VCD_START_BASE + 0x2E)
+#define VCD_I_LTR_PERIOD (VCD_START_BASE + 0x2F)
+#define VCD_I_LTR_USE (VCD_START_BASE + 0x30)
+#define VCD_I_CAPABILITY_LTR_COUNT (VCD_START_BASE + 0x31)
+#define VCD_I_LTR_MARK (VCD_START_BASE + 0x32)
+#define VCD_I_SET_EXT_METABUFFER (VCD_START_BASE + 0x33)
+#define VCD_I_FREE_EXT_METABUFFER (VCD_START_BASE + 0x34)
 
 #define VCD_START_REQ      (VCD_START_BASE + 0x1000)
 #define VCD_I_REQ_IFRAME   (VCD_START_REQ + 0x1)
@@ -115,9 +125,12 @@
 #define VCD_METADATA_VC1            0x040
 #define VCD_METADATA_PASSTHROUGH    0x080
 #define VCD_METADATA_ENC_SLICE      0x100
+#define VCD_METADATA_LTR_INFO       0x200
 
 #define VCD_METADATA_EXT_DATA       0x0800
 #define VCD_METADATA_USER_DATA      0x1000
+#define VCD_METADATA_SEPARATE_BUF   0x2000
+
 
 struct vcd_property_meta_data_enable {
 	u32 meta_data_enable_flag;
@@ -147,8 +160,7 @@
 	VCD_BUFFER_FORMAT_NV12      = 0x1,
 	VCD_BUFFER_FORMAT_TILE_4x2    = 0x2,
 	VCD_BUFFER_FORMAT_NV12_16M2KA = 0x3,
-	VCD_BUFFER_FORMAT_TILE_1x1    = 0x4,
-	VCD_BUFFER_FORMAT_NV21_16M2KA = 0x5
+	VCD_BUFFER_FORMAT_TILE_1x1    = 0x4
 };
 
 struct vcd_property_buffer_format {
@@ -295,6 +307,10 @@
 	u32              min_qp;
 };
 
+struct vcd_property_plusptype {
+	u32              plusptype_enable;
+};
+
 struct vcd_property_session_qp {
 	u32 i_frame_qp;
 	u32 p_frame_qp;
@@ -385,4 +401,49 @@
 	u32 vui_timing_info;
 };
 
+struct vcd_property_range_type {
+	u32 min;
+	u32 max;
+	u32 step_size;
+};
+
+enum vcd_property_ltrmode {
+	VCD_LTR_MODE_DISABLE = 0,
+	VCD_LTR_MODE_MANUAL  = 1,
+	VCD_LTR_MODE_AUTO    = 2,
+	VCD_LTR_MODE_MAX     = 0x7fffffff
+};
+
+struct vcd_property_ltrmode_type {
+	enum vcd_property_ltrmode ltr_mode;
+};
+
+struct vcd_property_ltrcount_type {
+	u32 ltr_count;
+};
+
+struct vcd_property_ltrperiod_type {
+	u32 ltr_period;
+};
+
+struct vcd_property_ltruse_type {
+	u32 ltr_id;
+	u32 ltr_frames;
+};
+
+struct vcd_property_meta_buffer {
+	u8 *kernel_virtual_addr;
+	u8 *physical_addr;
+	u32 size;
+	u32 count;
+	int pmem_fd;
+	u32 offset;
+	u8 *dev_addr;
+	void *client_data;
+	u8 *kernel_virt_addr_iommu;
+	u8 *physical_addr_iommu;
+	int pmem_fd_iommu;
+	u8 *dev_addr_iommu;
+	void *client_data_iommu;
+};
 #endif
diff --git a/include/media/msm/vcd_status.h b/include/media/msm/vcd_status.h
index 9b67ed0..7419b23 100644
--- a/include/media/msm/vcd_status.h
+++ b/include/media/msm/vcd_status.h
@@ -33,6 +33,7 @@
 #define VCD_EVT_IND_RESOURCES_LOST        (VCD_EVT_IND_BASE + 0x4)
 #define VCD_EVT_IND_INFO_OUTPUT_RECONFIG  (VCD_EVT_IND_BASE + 0x5)
 #define VCD_EVT_IND_INFO_FIELD_DROPPED    (VCD_EVT_IND_BASE + 0x6)
+#define VCD_EVT_IND_INFO_LTRUSE_FAILED    (VCD_EVT_IND_BASE + 0x7)
 
 #define VCD_S_SUCCESS           0x0
 
diff --git a/include/media/msm/vidc_init.h b/include/media/msm/vidc_init.h
index 84e3381..bcc0370 100644
--- a/include/media/msm/vidc_init.h
+++ b/include/media/msm/vidc_init.h
@@ -19,6 +19,7 @@
 
 #define VIDC_MAX_NUM_CLIENTS 4
 #define MAX_VIDEO_NUM_OF_BUFF 100
+#define MAX_META_BUFFERS 32
 
 enum buffer_dir {
 	BUFFER_TYPE_INPUT,
@@ -37,6 +38,11 @@
 	void *client_data;
 };
 
+struct meta_buffer_addr_table {
+	u8 *kernel_vir_addr;
+	u8 *kernel_vir_addr_iommu;
+};
+
 struct video_client_ctx {
 	void *vcd_handle;
 	u32 num_of_input_buffers;
@@ -49,17 +55,22 @@
 	wait_queue_head_t msg_wait;
 	struct completion event;
 	struct vcd_property_h264_mv_buffer vcd_h264_mv_buffer;
+	struct vcd_property_meta_buffer vcd_meta_buffer;
 	struct vcd_property_enc_recon_buffer recon_buffer[4];
 	u32 event_status;
 	u32 seq_header_set;
 	u32 stop_msg;
 	u32 stop_called;
 	u32 stop_sync_cb;
+	size_t meta_buf_size;
 	struct ion_client *user_ion_client;
 	struct ion_handle *seq_hdr_ion_handle;
 	struct ion_handle *h264_mv_ion_handle;
 	struct ion_handle *recon_buffer_ion_handle[4];
+	struct ion_handle *meta_buffer_ion_handle;
+	struct ion_handle *meta_buffer_iommu_ion_handle;
 	u32 dmx_disable;
+	struct meta_buffer_addr_table meta_addr_table[MAX_META_BUFFERS];
 };
 
 void __iomem *vidc_get_ioaddr(void);
diff --git a/include/media/msm/vidc_type.h b/include/media/msm/vidc_type.h
index 04d28b2..5463fc6 100644
--- a/include/media/msm/vidc_type.h
+++ b/include/media/msm/vidc_type.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -21,10 +21,11 @@
 #include <linux/list.h>
 #include <linux/time.h>
 #include <linux/dma-mapping.h>
-#include <linux/android_pmem.h>
 
-#define DEBUG   0
+
+#define DDL_MSG_LOG 0
+#define DEBUG 0
 #define VIDC_ENABLE_DBGFS
-
 #define USE_RES_TRACKER
+
 #endif
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 68ec117..da1e4a6 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -302,6 +302,166 @@
 		__entry->alloc_migratetype == __entry->fallback_migratetype)
 );
 
+
+DECLARE_EVENT_CLASS(ion_alloc,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags),
+
+	TP_STRUCT__entry(
+		__field(const char *,	client_name)
+		__field(const char *,	heap_name)
+		__field(size_t,		len)
+		__field(unsigned int,	mask)
+		__field(unsigned int,	flags)
+	),
+
+	TP_fast_assign(
+		__entry->client_name	= client_name;
+		__entry->heap_name	= heap_name;
+		__entry->len		= len;
+		__entry->mask		= mask;
+		__entry->flags		= flags;
+	),
+
+	TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x",
+		__entry->client_name,
+		__entry->heap_name,
+		__entry->len,
+		__entry->mask,
+		__entry->flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DECLARE_EVENT_CLASS(ion_alloc_error,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error),
+
+	TP_STRUCT__entry(
+		__field(const char *,	client_name)
+		__field(const char *,	heap_name)
+		__field(size_t,		len)
+		__field(unsigned int,	mask)
+		__field(unsigned int,	flags)
+		__field(long,		error)
+	),
+
+	TP_fast_assign(
+		__entry->client_name	= client_name;
+		__entry->heap_name	= heap_name;
+		__entry->len		= len;
+		__entry->mask		= mask;
+		__entry->flags		= flags;
+		__entry->error		= error;
+	),
+
+	TP_printk(
+	"client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld",
+		__entry->client_name,
+		__entry->heap_name,
+		__entry->len,
+		__entry->mask,
+		__entry->flags,
+		__entry->error)
+);
+
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+DECLARE_EVENT_CLASS(alloc_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries),
+
+	TP_STRUCT__entry(
+		__field(int, tries)
+	),
+
+	TP_fast_assign(
+		__entry->tries = tries;
+	),
+
+	TP_printk("tries=%d",
+		__entry->tries)
+);
+
+DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, migrate_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
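
These tracepoints are meant to bracket the ion allocator. A sketch of the kernel-side call sites (the surrounding names, and where in drivers/gpu/ion they would live, are assumptions):

	trace_ion_alloc_buffer_start(client_name, heap_name, len, mask, flags);
	/* ... perform the heap allocation, yielding buffer or an ERR_PTR ... */
	if (IS_ERR_OR_NULL(buffer))
		trace_ion_alloc_buffer_fail(client_name, heap_name, len,
					    mask, flags, PTR_ERR(buffer));
	else
		trace_ion_alloc_buffer_end(client_name, heap_name, len, mask, flags);
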
 #endif 
 
 #include <trace/define_trace.h>
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6c6ce4a..46bf2ed 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -44,8 +44,15 @@
 int sysctl_oom_dump_tasks = 1;
 static DEFINE_SPINLOCK(zone_scan_lock);
 
-extern void show_meminfo(void);
-
+/*
+ * compare_swap_oom_score_adj() - compare and swap current's oom_score_adj
+ * @old_val: old oom_score_adj for compare
+ * @new_val: new oom_score_adj for swap
+ *
+ * Sets the oom_score_adj value for current to @new_val iff its present value is
+ * @old_val.  Usually used to reinstate a previous value to prevent racing with
+ * userspace tuning the value in the interim.
+ */
 void compare_swap_oom_score_adj(int old_val, int new_val)
 {
 	struct sighand_struct *sighand = current->sighand;
@@ -57,6 +64,14 @@
 	spin_unlock_irq(&sighand->siglock);
 }
 
+/**
+ * test_set_oom_score_adj() - set current's oom_score_adj and return old value
+ * @new_val: new oom_score_adj value
+ *
+ * Sets the oom_score_adj value for current to @new_val with proper
+ * synchronization and returns the old value.  Usually used to temporarily
+ * set a value, save the old value in the caller, and then reinstate it later.
+ */
 int test_set_oom_score_adj(int new_val)
 {
 	struct sighand_struct *sighand = current->sighand;
@@ -72,6 +87,15 @@
 }
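
The two helpers pair up. A sketch of the intended save/restore pattern (the caller context is assumed; swapoff-style paths use it this way):

	int old_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);

	/* ... do work during which current should be the preferred victim ... */

	compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, old_adj);
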
 
 #ifdef CONFIG_NUMA
+/**
+ * has_intersects_mems_allowed() - check task eligibility for kill
+ * @tsk: task struct of which task to consider
+ * @mask: nodemask passed to page allocator for mempolicy ooms
+ *
+ * Task eligibility is determined by whether or not a candidate task, @tsk,
+ * shares the same mempolicy nodes as current if it is bound by such a policy
+ * and whether or not it has the same set of allowed cpuset nodes.
+ */
 static bool has_intersects_mems_allowed(struct task_struct *tsk,
 					const nodemask_t *mask)
 {
@@ -79,9 +103,19 @@
 
 	do {
 		if (mask) {
+			/*
+			 * If this is a mempolicy constrained oom, tsk's
+			 * cpuset is irrelevant.  Only return true if its
+			 * mempolicy intersects current, otherwise it may be
+			 * needlessly killed.
+			 */
 			if (mempolicy_nodemask_intersects(tsk, mask))
 				return true;
 		} else {
+			/*
+			 * This is not a mempolicy constrained oom, so only
+			 * check the mems of tsk's cpuset.
+			 */
 			if (cpuset_mems_allowed_intersects(current, tsk))
 				return true;
 		}
@@ -95,8 +129,14 @@
 {
 	return true;
 }
-#endif 
+#endif /* CONFIG_NUMA */
 
+/*
+ * The process p may have detached its own ->mm while exiting or through
+ * use_mm(), but one or more of its subthreads may still have a valid
+ * pointer.  Return p, or any of its subthreads with a valid ->mm, with
+ * task_lock() held.
+ */
 struct task_struct *find_lock_task_mm(struct task_struct *p)
 {
 	struct task_struct *t = p;
@@ -111,6 +151,7 @@
 	return NULL;
 }
 
+/* return true if the task is not adequate as candidate victim task. */
 static bool oom_unkillable_task(struct task_struct *p,
 		const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
@@ -119,17 +160,26 @@
 	if (p->flags & PF_KTHREAD)
 		return true;
 
-	
+	/* When mem_cgroup_out_of_memory() and p is not member of the group */
 	if (memcg && !task_in_mem_cgroup(p, memcg))
 		return true;
 
-	
+	/* p may not have freeable memory in nodemask */
 	if (!has_intersects_mems_allowed(p, nodemask))
 		return true;
 
 	return false;
 }
 
+/**
+ * oom_badness - heuristic function to determine which candidate task to kill
+ * @p: task struct of the task to score
+ * @memcg: task's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
+ * @totalpages: total present RAM allowed for page allocation
+ *
+ * The heuristic for determining which task to kill is made to be as simple and
+ * predictable as possible.  The goal is to return the highest value for the
+ * task consuming the most memory to avoid subsequent oom failures.
+ */
 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 		      const nodemask_t *nodemask, unsigned long totalpages)
 {
@@ -147,9 +197,17 @@
 		return 0;
 	}
 
+	/*
+	 * The memory controller may have a limit of 0 bytes, so avoid a divide
+	 * by zero, if necessary.
+	 */
 	if (!totalpages)
 		totalpages = 1;
 
+	/*
+	 * The baseline for the badness score is the proportion of RAM that each
+	 * task's rss, pagetable and swap space use.
+	 */
 	points = get_mm_rss(p->mm) + p->mm->nr_ptes;
 	points += get_mm_counter(p->mm, MM_SWAPENTS);
 
@@ -157,16 +215,33 @@
 	points /= totalpages;
 	task_unlock(p);
 
+	/*
+	 * Root processes get 3% bonus, just like the __vm_enough_memory()
+	 * implementation used by LSMs.
+	 */
 	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
 		points -= 30;
 
+	/*
+	 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
+	 * either completely disable oom killing or always prefer a certain
+	 * task.
+	 */
 	points += p->signal->oom_score_adj;
 
+	/*
+	 * Never return 0 for an eligible task that may be killed since it's
+	 * possible that no single user task uses more than 0.1% of memory and
+	 * no single admin task uses more than 3.0%.
+	 */
 	if (points <= 0)
 		return 1;
 	return (points < 1000) ? points : 1000;
 }
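
To make the arithmetic above concrete, an illustrative (assumed) calculation:

	/*
	 * A root-owned task whose rss, page tables and swap add up to one
	 * tenth of RAM, with oom_score_adj = 100:
	 *
	 *	points = 1/10 * 1000 = 100	(per-mille of total pages)
	 *	points -= 30			(3% root bonus)   -> 70
	 *	points += 100			(oom_score_adj)   -> 170
	 *
	 * The result is finally clamped to the range [1, 1000].
	 */
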
 
+/*
+ * Determine the type of allocation constraint.
+ */
 #ifdef CONFIG_NUMA
 static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 				gfp_t gfp_mask, nodemask_t *nodemask,
@@ -178,14 +253,24 @@
 	bool cpuset_limited = false;
 	int nid;
 
-	
+	/* Default to all available memory */
 	*totalpages = totalram_pages + total_swap_pages;
 
 	if (!zonelist)
 		return CONSTRAINT_NONE;
+	/*
+	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
+	 * killing current.  We have to kill some random task in this case.
+	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way to
+	 * handle it yet.
+	 */
 	if (gfp_mask & __GFP_THISNODE)
 		return CONSTRAINT_NONE;
 
+	/*
+	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
+	 * the page allocator means a mempolicy is in effect.  Cpuset policy
+	 * is enforced in get_page_from_freelist().
+	 */
 	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
 		*totalpages = total_swap_pages;
 		for_each_node_mask(nid, *nodemask)
@@ -193,7 +278,7 @@
 		return CONSTRAINT_MEMORY_POLICY;
 	}
 
-	
+	/* Check this allocation failure is caused by cpuset's wall function */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
 			high_zoneidx, nodemask)
 		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
@@ -217,6 +302,12 @@
 }
 #endif
 
+/*
+ * Simple selection loop. We chose the process with the highest
+ * number of 'points'. We expect the caller will lock the tasklist.
+ *
+ * (not docbooked, we don't want this one cluttering up the manual)
+ */
 static struct task_struct *select_bad_process(unsigned int *ppoints,
 		unsigned long totalpages, struct mem_cgroup *memcg,
 		const nodemask_t *nodemask, bool force_kill)
@@ -233,6 +324,15 @@
 		if (oom_unkillable_task(p, memcg, nodemask))
 			continue;
 
+		/*
+		 * This task already has access to memory reserves and is
+		 * being killed. Don't allow any other task access to the
+		 * memory reserve.
+		 *
+		 * Note: this may have a chance of deadlock if it gets
+		 * blocked waiting for another task which itself is waiting
+		 * for memory. Is there a better alternative?
+		 */
 		if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
 			if (unlikely(frozen(p)))
 				__thaw_task(p);
@@ -243,10 +343,24 @@
 			continue;
 
 		if (p->flags & PF_EXITING) {
+			/*
+			 * If p is the current task and is in the process of
+			 * releasing memory, we allow the "kill" to set
+			 * TIF_MEMDIE, which will allow it to gain access to
+			 * memory reserves.  Otherwise, it may stall forever.
+			 *
+			 * The loop isn't broken here, however, in case other
+			 * threads are found to have already been oom killed.
+			 */
 			if (p == current) {
 				chosen = p;
 				*ppoints = 1000;
 			} else if (!force_kill) {
+				/*
+				 * If this task is not being ptraced on exit,
+				 * then wait for it to finish before killing
+				 * some other task unnecessarily.
+				 */
 				if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
 					return ERR_PTR(-1UL);
 			}
@@ -262,6 +376,19 @@
 	return chosen;
 }
 
+/**
+ * dump_tasks - dump current memory state of all system tasks
+ * @mem: current's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
+ *
+ * Dumps the current memory state of all eligible tasks.  Tasks not in the same
+ * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
+ * are not shown.
+ * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
+ * value, oom_score_adj value, and name.
+ *
+ * Call with tasklist_lock read-locked.
+ */
 static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 {
 	struct task_struct *p;
@@ -274,6 +401,11 @@
 
 		task = find_lock_task_mm(p);
 		if (!task) {
+			/*
+			 * This is a kthread or all of p's threads have already
+			 * detached their mm's.  There's no need to report
+			 * them; they can't be oom killed anyway.
+			 */
 			continue;
 		}
 
@@ -297,7 +429,6 @@
 	cpuset_print_task_mems_allowed(current);
 	task_unlock(current);
 	dump_stack();
-	show_meminfo();
 	mem_cgroup_print_oom_info(memcg, p);
 	show_mem(SHOW_MEM_FILTER_NODES);
 	if (sysctl_oom_dump_tasks)
@@ -318,6 +449,10 @@
 	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 					      DEFAULT_RATELIMIT_BURST);
 
+	/*
+	 * If the task is already exiting, don't alarm the sysadmin or kill
+	 * its children or threads, just set TIF_MEMDIE so it can die quickly
+	 */
 	if (p->flags & PF_EXITING) {
 		set_tsk_thread_flag(p, TIF_MEMDIE);
 		return;
@@ -331,12 +466,21 @@
 		message, task_pid_nr(p), p->comm, points);
 	task_unlock(p);
 
+	/*
+	 * If any of p's children has a different mm and is eligible for kill,
+	 * the one with the highest oom_badness() score is sacrificed for its
+	 * parent.  This attempts to lose the minimal amount of work done while
+	 * still freeing memory.
+	 */
 	do {
 		list_for_each_entry(child, &t->children, sibling) {
 			unsigned int child_points;
 
 			if (child->mm == p->mm)
 				continue;
+			/*
+			 * oom_badness() returns 0 if the thread is unkillable
+			 */
 			child_points = oom_badness(child, memcg, nodemask,
 								totalpages);
 			if (child_points > victim_points) {
@@ -350,7 +494,7 @@
 	if (!victim)
 		return;
 
-	
+	/* mm cannot safely be dereferenced after task_unlock(victim) */
 	mm = victim->mm;
 	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
 		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
@@ -358,13 +502,22 @@
 		K(get_mm_counter(victim->mm, MM_FILEPAGES)));
 	task_unlock(victim);
 
+	/*
+	 * Kill all user processes sharing victim->mm in other thread groups, if
+	 * any.  They don't get access to memory reserves, though, to avoid
+	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
+	 * oom killed thread cannot exit because it requires the semaphore and
+	 * it's contended by another thread trying to allocate memory itself.
+	 * That thread will now get access to memory reserves since it has a
+	 * pending fatal signal.
+	 */
 	for_each_process(p)
 		if (p->mm == mm && !same_thread_group(p, victim) &&
 		    !(p->flags & PF_KTHREAD)) {
 			if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
 				continue;
 
-			task_lock(p);	
+			task_lock(p);	/* Protect ->comm from prctl() */
 			pr_err("Kill process %d (%s) sharing same memory\n",
 				task_pid_nr(p), p->comm);
 			task_unlock(p);
@@ -376,12 +529,20 @@
 }
 #undef K
 
+/*
+ * Determines whether the kernel must panic because of the panic_on_oom sysctl.
+ */
 static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 				int order, const nodemask_t *nodemask)
 {
 	if (likely(!sysctl_panic_on_oom))
 		return;
 	if (sysctl_panic_on_oom != 2) {
+		/*
+		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
+		 * does not panic for cpuset, mempolicy, or memcg allocation
+		 * failures.
+		 */
 		if (constraint != CONSTRAINT_NONE)
 			return;
 	}
@@ -400,6 +561,11 @@
 	unsigned int points = 0;
 	struct task_struct *p;
 
+	/*
+	 * If current has a pending SIGKILL, then automatically select it.  The
+	 * goal is to allow it to allocate so that it may quickly exit and free
+	 * its memory.
+	 */
 	if (fatal_signal_pending(current)) {
 		set_thread_flag(TIF_MEMDIE);
 		return;
@@ -430,6 +596,11 @@
 }
 EXPORT_SYMBOL_GPL(unregister_oom_notifier);
 
+/*
+ * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
+ * if a parallel OOM killing is already taking place that includes a zone in
+ * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
+ */
 int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 {
 	struct zoneref *z;
@@ -445,6 +616,11 @@
 	}
 
 	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+		/*
+		 * Lock each zone in the zonelist under zone_scan_lock so a
+		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
+		 * when it shouldn't.
+		 */
 		zone_set_flag(zone, ZONE_OOM_LOCKED);
 	}
 
@@ -453,6 +629,11 @@
 	return ret;
 }
 
+/*
+ * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
+ * allocation attempts with zonelists containing them may now recall the OOM
+ * killer, if necessary.
+ */
 void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 {
 	struct zoneref *z;
@@ -465,6 +646,11 @@
 	spin_unlock(&zone_scan_lock);
 }
 
+/*
+ * Try to acquire the oom killer lock for all system zones.  Returns zero if a
+ * parallel oom killing is taking place, otherwise locks all zones and returns
+ * non-zero.
+ */
 static int try_set_system_oom(void)
 {
 	struct zone *zone;
@@ -483,6 +669,10 @@
 	return ret;
 }
 
+/*
+ * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
+ * attempts or page faults may now recall the oom killer, if necessary.
+ */
 static void clear_system_oom(void)
 {
 	struct zone *zone;
@@ -493,6 +683,19 @@
 	spin_unlock(&zone_scan_lock);
 }
 
+/**
+ * out_of_memory - kill the "best" process when we run out of memory
+ * @zonelist: zonelist pointer
+ * @gfp_mask: memory allocation flags
+ * @order: amount of memory being requested as a power of 2
+ * @nodemask: nodemask passed to page allocator
+ * @force_kill: true if a task must be killed, even if others are exiting
+ *
+ * If we run out of memory, we have the choice between either
+ * killing a random task (bad), letting the system crash (worse)
+ * OR try to be smart about which process to kill. Note that we
+ * don't have to be perfect here, we just have to be good.
+ */
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		int order, nodemask_t *nodemask, bool force_kill)
 {
@@ -506,14 +709,23 @@
 
 	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 	if (freed > 0)
-		
+		/* Got some memory back in the last second. */
 		return;
 
+	/*
+	 * If current has a pending SIGKILL, then automatically select it.  The
+	 * goal is to allow it to allocate so that it may quickly exit and free
+	 * its memory.
+	 */
 	if (fatal_signal_pending(current)) {
 		set_thread_flag(TIF_MEMDIE);
 		return;
 	}
 
+	/*
+	 * Check if there were limitations on the allocation (only relevant for
+	 * NUMA) that may require different handling.
+	 */
 	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
 						&totalpages);
 	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
@@ -531,7 +743,7 @@
 
 	p = select_bad_process(&points, totalpages, NULL, mpol_mask,
 			       force_kill);
-	
+	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
 		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
 		read_unlock(&tasklist_lock);
@@ -545,10 +757,20 @@
 out:
 	read_unlock(&tasklist_lock);
 
+	/*
+	 * Give "p" a good chance of killing itself before we
+	 * retry to allocate memory unless "p" is current
+	 */
 	if (killed && !test_thread_flag(TIF_MEMDIE))
 		schedule_timeout_uninterruptible(1);
 }
 
+/*
+ * The pagefault handler calls here because it is out of memory, so kill a
+ * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
+ * oom killing is already in progress so do nothing.  If a task is found with
+ * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
+ */
 void pagefault_out_of_memory(void)
 {
 	if (try_set_system_oom()) {