Initial Contribution

msm-2.6.38: tag AU_LINUX_ANDROID_GINGERBREAD.02.03.04.00.142

Signed-off-by: Bryan Huntsman <bryanh@codeaurora.org>
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
new file mode 100644
index 0000000..64cbc30
--- /dev/null
+++ b/drivers/gpu/msm/Kconfig
@@ -0,0 +1,105 @@
+config MSM_KGSL
+	tristate "MSM 3D Graphics driver"
+	default n
+	depends on ARCH_MSM && !ARCH_MSM7X00A && !ARCH_MSM7X25
+	select GENERIC_ALLOCATOR
+	select FW_LOADER
+	---help---
+	  3D graphics driver. Required to use hardware accelerated
+	  OpenGL ES 2.0 and 1.1.
+
+config MSM_KGSL_CFF_DUMP
+	bool "Enable KGSL Common File Format (CFF) Dump Feature [Use with caution]"
+	default n
+	depends on MSM_KGSL
+	select RELAY
+	---help---
+	  This is an analysis and diagnostic feature only, and should only be
+	  turned on during KGSL GPU diagnostics. It slows down KGSL
+	  performance significantly, so *do not use it in production builds*.
+	  When enabled, CFF Dump is on at boot. It can be turned off at runtime
+	  via 'echo 0 > /d/kgsl/cff_dump'.  The log can be captured via
+	  /d/kgsl-cff/cpu[0|1].
+
+config MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
+	bool "When selected will disable KGSL CFF Dump for context switches"
+	default n
+	depends on MSM_KGSL_CFF_DUMP
+	---help---
+	  Dumping all the memory for every context switch can produce very
+	  large log files; turn this feature on to reduce their size.
+
+config MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+	bool "Disable human readable CP_STAT fields in post-mortem dump"
+	default n
+	depends on MSM_KGSL
+	---help---
+	  For a more compact kernel log, the human-readable output of
+	  CP_STAT can be turned off with this option.
+
+config MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
+	bool "Disable dumping current IB1 and IB2 in post-mortem dump"
+	default n
+	depends on MSM_KGSL
+	---help---
+	  For a more compact kernel log, the embedded IB1 and IB2 dump
+	  can be turned off with this option.  Some IB dumps take up
+	  so much space that other vital information gets cut from the
+	  post-mortem dump.
+
+config MSM_KGSL_PSTMRTMDMP_RB_HEX
+	bool "Use hex version for ring-buffer in post-mortem dump"
+	default n
+	depends on MSM_KGSL
+	---help---
+	  Use the hex version of the ring-buffer in the post-mortem dump
+	  instead of the human-readable version.
+
+config MSM_KGSL_2D
+	tristate "MSM 2D graphics driver. Required for OpenVG"
+	default y
+	depends on MSM_KGSL && !ARCH_MSM7X27 && !ARCH_MSM7X27A && !(ARCH_QSD8X50 && !MSM_SOC_REV_A)
+
+config MSM_KGSL_DRM
+	bool "Build a DRM interface for the MSM_KGSL driver"
+	depends on MSM_KGSL && DRM
+
+config MSM_KGSL_MMU
+	bool "Enable the GPU MMU in the MSM_KGSL driver"
+	depends on MSM_KGSL && MMU && !MSM_KGSL_CFF_DUMP
+	default y
+
+config KGSL_PER_PROCESS_PAGE_TABLE
+	bool "Enable Per Process page tables for the KGSL driver"
+	default n
+	depends on MSM_KGSL_MMU && !MSM_KGSL_DRM
+	---help---
+	  When enabled, the MMU will use per-process pagetables.
+
+config MSM_KGSL_PAGE_TABLE_SIZE
+	hex "Size of pagetables"
+	default 0xFFF0000
+	depends on MSM_KGSL_MMU
+	---help---
+	  Sets the pagetable size used by the MMU.  The max value
+	  is 0xFFF0000 or (256M - 64K).
+
+config MSM_KGSL_PAGE_TABLE_COUNT
+	int "Minimum of concurrent pagetables to support"
+	default 8
+	depends on KGSL_PER_PROCESS_PAGE_TABLE
+	---help---
+	  Specify the number of pagetables to allocate at init time.
+	  This is the number of concurrent processes that are guaranteed
+	  to run at any time.  Additional processes can be created dynamically,
+	  assuming there is enough contiguous memory to allocate the pagetable.
+
+config MSM_KGSL_MMU_PAGE_FAULT
+	bool "Force the GPU MMU to page fault for unmapped regions"
+	default y
+	depends on MSM_KGSL_MMU
+
+config MSM_KGSL_DISABLE_SHADOW_WRITES
+	bool "Disable register shadow writes for context switches"
+	default n
+	depends on MSM_KGSL
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
new file mode 100644
index 0000000..e31a3dd
--- /dev/null
+++ b/drivers/gpu/msm/Makefile
@@ -0,0 +1,31 @@
+ccflags-y := -Iinclude/drm
+
+msm_kgsl_core-y = \
+	kgsl.o \
+	kgsl_sharedmem.o \
+	kgsl_pwrctrl.o \
+	kgsl_pwrscale.o
+
+msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_MMU) += kgsl_mmu.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
+msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
+
+msm_adreno-y += \
+	adreno_ringbuffer.o \
+	adreno_drawctxt.o \
+	adreno_postmortem.o \
+	adreno.o
+
+msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
+
+msm_z180-y += z180.o
+
+msm_kgsl_core-objs = $(msm_kgsl_core-y)
+msm_adreno-objs = $(msm_adreno-y)
+msm_z180-objs = $(msm_z180-y)
+
+obj-$(CONFIG_MSM_KGSL) += msm_kgsl_core.o
+obj-$(CONFIG_MSM_KGSL) += msm_adreno.o
+obj-$(CONFIG_MSM_KGSL_2D) += msm_z180.o
diff --git a/drivers/gpu/msm/a200_reg.h b/drivers/gpu/msm/a200_reg.h
new file mode 100644
index 0000000..e1681f9
--- /dev/null
+++ b/drivers/gpu/msm/a200_reg.h
@@ -0,0 +1,408 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A200_REG_H
+#define __A200_REG_H
+
+enum VGT_EVENT_TYPE {
+	VS_DEALLOC = 0,
+	PS_DEALLOC = 1,
+	VS_DONE_TS = 2,
+	PS_DONE_TS = 3,
+	CACHE_FLUSH_TS = 4,
+	CONTEXT_DONE = 5,
+	CACHE_FLUSH = 6,
+	VIZQUERY_START = 7,
+	VIZQUERY_END = 8,
+	SC_WAIT_WC = 9,
+	RST_PIX_CNT = 13,
+	RST_VTX_CNT = 14,
+	TILE_FLUSH = 15,
+	CACHE_FLUSH_AND_INV_TS_EVENT = 20,
+	ZPASS_DONE = 21,
+	CACHE_FLUSH_AND_INV_EVENT = 22,
+	PERFCOUNTER_START = 23,
+	PERFCOUNTER_STOP = 24,
+	VS_FETCH_DONE = 27,
+	FACENESS_FLUSH = 28,
+};
+
+enum COLORFORMATX {
+	COLORX_4_4_4_4 = 0,
+	COLORX_1_5_5_5 = 1,
+	COLORX_5_6_5 = 2,
+	COLORX_8 = 3,
+	COLORX_8_8 = 4,
+	COLORX_8_8_8_8 = 5,
+	COLORX_S8_8_8_8 = 6,
+	COLORX_16_FLOAT = 7,
+	COLORX_16_16_FLOAT = 8,
+	COLORX_16_16_16_16_FLOAT = 9,
+	COLORX_32_FLOAT = 10,
+	COLORX_32_32_FLOAT = 11,
+	COLORX_32_32_32_32_FLOAT = 12,
+	COLORX_2_3_3 = 13,
+	COLORX_8_8_8 = 14,
+};
+
+enum SURFACEFORMAT {
+	FMT_1_REVERSE                  = 0,
+	FMT_1                          = 1,
+	FMT_8                          = 2,
+	FMT_1_5_5_5                    = 3,
+	FMT_5_6_5                      = 4,
+	FMT_6_5_5                      = 5,
+	FMT_8_8_8_8                    = 6,
+	FMT_2_10_10_10                 = 7,
+	FMT_8_A                        = 8,
+	FMT_8_B                        = 9,
+	FMT_8_8                        = 10,
+	FMT_Cr_Y1_Cb_Y0                = 11,
+	FMT_Y1_Cr_Y0_Cb                = 12,
+	FMT_5_5_5_1                    = 13,
+	FMT_8_8_8_8_A                  = 14,
+	FMT_4_4_4_4                    = 15,
+	FMT_10_11_11                   = 16,
+	FMT_11_11_10                   = 17,
+	FMT_DXT1                       = 18,
+	FMT_DXT2_3                     = 19,
+	FMT_DXT4_5                     = 20,
+	FMT_24_8                       = 22,
+	FMT_24_8_FLOAT                 = 23,
+	FMT_16                         = 24,
+	FMT_16_16                      = 25,
+	FMT_16_16_16_16                = 26,
+	FMT_16_EXPAND                  = 27,
+	FMT_16_16_EXPAND               = 28,
+	FMT_16_16_16_16_EXPAND         = 29,
+	FMT_16_FLOAT                   = 30,
+	FMT_16_16_FLOAT                = 31,
+	FMT_16_16_16_16_FLOAT          = 32,
+	FMT_32                         = 33,
+	FMT_32_32                      = 34,
+	FMT_32_32_32_32                = 35,
+	FMT_32_FLOAT                   = 36,
+	FMT_32_32_FLOAT                = 37,
+	FMT_32_32_32_32_FLOAT          = 38,
+	FMT_32_AS_8                    = 39,
+	FMT_32_AS_8_8                  = 40,
+	FMT_16_MPEG                    = 41,
+	FMT_16_16_MPEG                 = 42,
+	FMT_8_INTERLACED               = 43,
+	FMT_32_AS_8_INTERLACED         = 44,
+	FMT_32_AS_8_8_INTERLACED       = 45,
+	FMT_16_INTERLACED              = 46,
+	FMT_16_MPEG_INTERLACED         = 47,
+	FMT_16_16_MPEG_INTERLACED      = 48,
+	FMT_DXN                        = 49,
+	FMT_8_8_8_8_AS_16_16_16_16     = 50,
+	FMT_DXT1_AS_16_16_16_16        = 51,
+	FMT_DXT2_3_AS_16_16_16_16      = 52,
+	FMT_DXT4_5_AS_16_16_16_16      = 53,
+	FMT_2_10_10_10_AS_16_16_16_16  = 54,
+	FMT_10_11_11_AS_16_16_16_16    = 55,
+	FMT_11_11_10_AS_16_16_16_16    = 56,
+	FMT_32_32_32_FLOAT             = 57,
+	FMT_DXT3A                      = 58,
+	FMT_DXT5A                      = 59,
+	FMT_CTX1                       = 60,
+	FMT_DXT3A_AS_1_1_1_1           = 61
+};
+
+#define REG_PERF_MODE_CNT	0x0
+#define REG_PERF_STATE_RESET	0x0
+#define REG_PERF_STATE_ENABLE	0x1
+#define REG_PERF_STATE_FREEZE	0x2
+
+#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE                      4
+#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE              2
+#define RB_EDRAM_INFO_UNUSED0_SIZE                         8
+#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE                     18
+
+struct rb_edram_info_t {
+	unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
+	unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
+	unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
+	unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
+};
+
+union reg_rb_edram_info {
+	unsigned int val;
+	struct rb_edram_info_t f;
+};
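+/* The driver sets the .f bitfields and then writes the aggregate .val,
+ * composing the packed register value without manual shift/mask arithmetic
+ * (see adreno_gmeminit() in adreno.c for RB_EDRAM_INFO). */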
+
+#define RBBM_READ_ERROR_UNUSED0_SIZE		2
+#define RBBM_READ_ERROR_READ_ADDRESS_SIZE	15
+#define RBBM_READ_ERROR_UNUSED1_SIZE		13
+#define RBBM_READ_ERROR_READ_REQUESTER_SIZE	1
+#define RBBM_READ_ERROR_READ_ERROR_SIZE		1
+
+struct rbbm_read_error_t {
+	unsigned int unused0:RBBM_READ_ERROR_UNUSED0_SIZE;
+	unsigned int read_address:RBBM_READ_ERROR_READ_ADDRESS_SIZE;
+	unsigned int unused1:RBBM_READ_ERROR_UNUSED1_SIZE;
+	unsigned int read_requester:RBBM_READ_ERROR_READ_REQUESTER_SIZE;
+	unsigned int read_error:RBBM_READ_ERROR_READ_ERROR_SIZE;
+};
+
+union rbbm_read_error_u {
+	unsigned int val:32;
+	struct rbbm_read_error_t f;
+};
+
+#define CP_RB_CNTL_RB_BUFSZ_SIZE                           6
+#define CP_RB_CNTL_UNUSED0_SIZE                            2
+#define CP_RB_CNTL_RB_BLKSZ_SIZE                           6
+#define CP_RB_CNTL_UNUSED1_SIZE                            2
+#define CP_RB_CNTL_BUF_SWAP_SIZE                           2
+#define CP_RB_CNTL_UNUSED2_SIZE                            2
+#define CP_RB_CNTL_RB_POLL_EN_SIZE                         1
+#define CP_RB_CNTL_UNUSED3_SIZE                            6
+#define CP_RB_CNTL_RB_NO_UPDATE_SIZE                       1
+#define CP_RB_CNTL_UNUSED4_SIZE                            3
+#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE                     1
+
+struct cp_rb_cntl_t {
+	unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
+	unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
+	unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
+	unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
+	unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
+	unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
+	unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
+	unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
+	unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
+	unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
+	unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
+};
+
+union reg_cp_rb_cntl {
+	unsigned int val:32;
+	struct cp_rb_cntl_t f;
+};
+
+#define RB_COLOR_INFO__COLOR_FORMAT_MASK                   0x0000000fL
+#define RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT         0x00000004
+
+
+#define SQ_INT_CNTL__PS_WATCHDOG_MASK                      0x00000001L
+#define SQ_INT_CNTL__VS_WATCHDOG_MASK                      0x00000002L
+
+#define RBBM_INT_CNTL__RDERR_INT_MASK                      0x00000001L
+#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK             0x00000002L
+#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK                   0x00080000L
+
+#define RBBM_STATUS__CMDFIFO_AVAIL_MASK                    0x0000001fL
+#define RBBM_STATUS__TC_BUSY_MASK                          0x00000020L
+#define RBBM_STATUS__HIRQ_PENDING_MASK                     0x00000100L
+#define RBBM_STATUS__CPRQ_PENDING_MASK                     0x00000200L
+#define RBBM_STATUS__CFRQ_PENDING_MASK                     0x00000400L
+#define RBBM_STATUS__PFRQ_PENDING_MASK                     0x00000800L
+#define RBBM_STATUS__VGT_BUSY_NO_DMA_MASK                  0x00001000L
+#define RBBM_STATUS__RBBM_WU_BUSY_MASK                     0x00004000L
+#define RBBM_STATUS__CP_NRT_BUSY_MASK                      0x00010000L
+#define RBBM_STATUS__MH_BUSY_MASK                          0x00040000L
+#define RBBM_STATUS__MH_COHERENCY_BUSY_MASK                0x00080000L
+#define RBBM_STATUS__SX_BUSY_MASK                          0x00200000L
+#define RBBM_STATUS__TPC_BUSY_MASK                         0x00400000L
+#define RBBM_STATUS__SC_CNTX_BUSY_MASK                     0x01000000L
+#define RBBM_STATUS__PA_BUSY_MASK                          0x02000000L
+#define RBBM_STATUS__VGT_BUSY_MASK                         0x04000000L
+#define RBBM_STATUS__SQ_CNTX17_BUSY_MASK                   0x08000000L
+#define RBBM_STATUS__SQ_CNTX0_BUSY_MASK                    0x10000000L
+#define RBBM_STATUS__RB_CNTX_BUSY_MASK                     0x40000000L
+#define RBBM_STATUS__GUI_ACTIVE_MASK                       0x80000000L
+
+#define CP_INT_CNTL__SW_INT_MASK                           0x00080000L
+#define CP_INT_CNTL__T0_PACKET_IN_IB_MASK                  0x00800000L
+#define CP_INT_CNTL__OPCODE_ERROR_MASK                     0x01000000L
+#define CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK             0x02000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_MASK               0x04000000L
+#define CP_INT_CNTL__IB_ERROR_MASK                         0x08000000L
+#define CP_INT_CNTL__IB2_INT_MASK                          0x20000000L
+#define CP_INT_CNTL__IB1_INT_MASK                          0x40000000L
+#define CP_INT_CNTL__RB_INT_MASK                           0x80000000L
+
+#define MASTER_INT_SIGNAL__MH_INT_STAT                     0x00000020L
+#define MASTER_INT_SIGNAL__SQ_INT_STAT                     0x04000000L
+#define MASTER_INT_SIGNAL__CP_INT_STAT                     0x40000000L
+#define MASTER_INT_SIGNAL__RBBM_INT_STAT                   0x80000000L
+
+#define RB_EDRAM_INFO__EDRAM_SIZE_MASK                     0x0000000fL
+#define RB_EDRAM_INFO__EDRAM_RANGE_MASK                    0xffffc000L
+
+#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT    0x00000006
+#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT            0x00000007
+#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT       0x00000008
+#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT           0x00000009
+#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT                0x0000000a
+#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT        0x0000000d
+#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT       0x0000000e
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT   0x0000000f
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT          0x00000010
+#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT           0x00000016
+#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT          0x00000017
+#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT           0x00000018
+#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT           0x00000019
+#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT           0x0000001a
+
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT                        0x00000000
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT                        0x00000008
+#define CP_RB_CNTL__RB_POLL_EN__SHIFT                      0x00000014
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT                    0x0000001b
+
+#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT                 0x00000000
+#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT           0x00000004
+#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT                  0x0000000e
+
+#define REG_CP_CSQ_IB1_STAT              0x01FE
+#define REG_CP_CSQ_IB2_STAT              0x01FF
+#define REG_CP_CSQ_RB_STAT               0x01FD
+#define REG_CP_DEBUG                     0x01FC
+#define REG_CP_IB1_BASE                  0x0458
+#define REG_CP_IB1_BUFSZ                 0x0459
+#define REG_CP_IB2_BASE                  0x045A
+#define REG_CP_IB2_BUFSZ                 0x045B
+#define REG_CP_INT_ACK                   0x01F4
+#define REG_CP_INT_CNTL                  0x01F2
+#define REG_CP_INT_STATUS                0x01F3
+#define REG_CP_ME_CNTL                   0x01F6
+#define REG_CP_ME_RAM_DATA               0x01FA
+#define REG_CP_ME_RAM_WADDR              0x01F8
+#define REG_CP_ME_STATUS                 0x01F7
+#define REG_CP_PFP_UCODE_ADDR            0x00C0
+#define REG_CP_PFP_UCODE_DATA            0x00C1
+#define REG_CP_QUEUE_THRESHOLDS          0x01D5
+#define REG_CP_RB_BASE                   0x01C0
+#define REG_CP_RB_CNTL                   0x01C1
+#define REG_CP_RB_RPTR                   0x01C4
+#define REG_CP_RB_RPTR_ADDR              0x01C3
+#define REG_CP_RB_RPTR_WR                0x01C7
+#define REG_CP_RB_WPTR                   0x01C5
+#define REG_CP_RB_WPTR_BASE              0x01C8
+#define REG_CP_RB_WPTR_DELAY             0x01C6
+#define REG_CP_STAT                      0x047F
+#define REG_CP_STATE_DEBUG_DATA          0x01ED
+#define REG_CP_STATE_DEBUG_INDEX         0x01EC
+#define REG_CP_ST_BASE                   0x044D
+#define REG_CP_ST_BUFSZ                  0x044E
+
+#define REG_CP_PERFMON_CNTL              0x0444
+#define REG_CP_PERFCOUNTER_SELECT        0x0445
+#define REG_CP_PERFCOUNTER_LO            0x0446
+#define REG_CP_PERFCOUNTER_HI            0x0447
+
+#define REG_RBBM_PERFCOUNTER1_SELECT     0x0395
+#define REG_RBBM_PERFCOUNTER1_HI         0x0398
+#define REG_RBBM_PERFCOUNTER1_LO         0x0397
+
+#define REG_MASTER_INT_SIGNAL            0x03B7
+
+#define REG_MH_ARBITER_CONFIG            0x0A40
+#define REG_MH_CLNT_INTF_CTRL_CONFIG1    0x0A54
+#define REG_MH_CLNT_INTF_CTRL_CONFIG2    0x0A55
+
+#define REG_PA_CL_VPORT_XSCALE           0x210F
+#define REG_PA_CL_VPORT_ZOFFSET          0x2114
+#define REG_PA_CL_VPORT_ZSCALE           0x2113
+#define REG_PA_CL_CLIP_CNTL              0x2204
+#define REG_PA_CL_VTE_CNTL               0x2206
+#define REG_PA_SC_AA_MASK                0x2312
+#define REG_PA_SC_LINE_CNTL              0x2300
+#define REG_PA_SC_SCREEN_SCISSOR_BR      0x200F
+#define REG_PA_SC_SCREEN_SCISSOR_TL      0x200E
+#define REG_PA_SC_VIZ_QUERY              0x2293
+#define REG_PA_SC_VIZ_QUERY_STATUS       0x0C44
+#define REG_PA_SC_WINDOW_OFFSET          0x2080
+#define REG_PA_SC_WINDOW_SCISSOR_BR      0x2082
+#define REG_PA_SC_WINDOW_SCISSOR_TL      0x2081
+#define REG_PA_SU_FACE_DATA              0x0C86
+#define REG_PA_SU_POINT_SIZE             0x2280
+#define REG_PA_SU_LINE_CNTL              0x2282
+#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383
+#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380
+#define REG_PA_SU_SC_MODE_CNTL           0x2205
+
+#define REG_PC_INDEX_OFFSET              0x2102
+
+#define REG_RBBM_CNTL                    0x003B
+#define REG_RBBM_INT_ACK                 0x03B6
+#define REG_RBBM_INT_CNTL                0x03B4
+#define REG_RBBM_INT_STATUS              0x03B5
+#define REG_RBBM_PATCH_RELEASE           0x0001
+#define REG_RBBM_PERIPHID1               0x03F9
+#define REG_RBBM_PERIPHID2               0x03FA
+#define REG_RBBM_DEBUG                   0x039B
+#define REG_RBBM_DEBUG_OUT               0x03A0
+#define REG_RBBM_DEBUG_CNTL              0x03A1
+#define REG_RBBM_PM_OVERRIDE1            0x039C
+#define REG_RBBM_PM_OVERRIDE2            0x039D
+#define REG_RBBM_READ_ERROR              0x03B3
+#define REG_RBBM_SOFT_RESET              0x003C
+#define REG_RBBM_STATUS                  0x05D0
+
+#define REG_RB_COLORCONTROL              0x2202
+#define REG_RB_COLOR_DEST_MASK           0x2326
+#define REG_RB_COLOR_MASK                0x2104
+#define REG_RB_COPY_CONTROL              0x2318
+#define REG_RB_DEPTHCONTROL              0x2200
+#define REG_RB_EDRAM_INFO                0x0F02
+#define REG_RB_MODECONTROL               0x2208
+#define REG_RB_SURFACE_INFO              0x2000
+#define REG_RB_SAMPLE_POS                0x220a
+
+#define REG_SCRATCH_ADDR                 0x01DD
+#define REG_SCRATCH_REG0                 0x0578
+#define REG_SCRATCH_REG2                 0x057A
+#define REG_SCRATCH_UMSK                 0x01DC
+
+#define REG_SQ_CF_BOOLEANS               0x4900
+#define REG_SQ_CF_LOOP                   0x4908
+#define REG_SQ_GPR_MANAGEMENT            0x0D00
+#define REG_SQ_INST_STORE_MANAGMENT      0x0D02
+#define REG_SQ_INT_ACK                   0x0D36
+#define REG_SQ_INT_CNTL                  0x0D34
+#define REG_SQ_INT_STATUS                0x0D35
+#define REG_SQ_PROGRAM_CNTL              0x2180
+#define REG_SQ_PS_PROGRAM                0x21F6
+#define REG_SQ_VS_PROGRAM                0x21F7
+#define REG_SQ_WRAPPING_0                0x2183
+#define REG_SQ_WRAPPING_1                0x2184
+
+#define REG_VGT_ENHANCE                  0x2294
+#define REG_VGT_INDX_OFFSET              0x2102
+#define REG_VGT_MAX_VTX_INDX             0x2100
+#define REG_VGT_MIN_VTX_INDX             0x2101
+
+#define REG_TP0_CHICKEN                  0x0E1E
+#define REG_TC_CNTL_STATUS               0x0E00
+#define REG_PA_SC_AA_CONFIG              0x2301
+#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL  0x2316
+#define REG_SQ_INTERPOLATOR_CNTL         0x2182
+#define REG_RB_DEPTH_INFO                0x2002
+#define REG_COHER_DEST_BASE_0            0x2006
+#define REG_RB_FOG_COLOR                 0x2109
+#define REG_RB_STENCILREFMASK_BF         0x210C
+#define REG_PA_SC_LINE_STIPPLE           0x2283
+#define REG_SQ_PS_CONST                  0x2308
+#define REG_RB_DEPTH_CLEAR               0x231D
+#define REG_RB_SAMPLE_COUNT_CTL          0x2324
+#define REG_SQ_CONSTANT_0                0x4000
+#define REG_SQ_FETCH_0                   0x4800
+
+#define REG_MH_DEBUG_CTRL                0xA4E
+#define REG_MH_DEBUG_DATA                0xA4F
+#define REG_COHER_BASE_PM4               0xA2A
+#define REG_COHER_STATUS_PM4             0xA2B
+#define REG_COHER_SIZE_PM4               0xA29
+
+#endif /* __A200_REG_H */
diff --git a/drivers/gpu/msm/a220_reg.h b/drivers/gpu/msm/a220_reg.h
new file mode 100644
index 0000000..7cfe705
--- /dev/null
+++ b/drivers/gpu/msm/a220_reg.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A205_REG_H
+#define __A205_REG_H
+
+#define REG_LEIA_PC_INDX_OFFSET          REG_VGT_INDX_OFFSET
+#define REG_LEIA_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
+#define REG_LEIA_PC_MAX_VTX_INDX         REG_VGT_MAX_VTX_INDX
+#define REG_LEIA_RB_LRZ_VSC_CONTROL	 0x2209
+#define REG_LEIA_GRAS_CONTROL            0x2210
+#define REG_LEIA_VSC_BIN_SIZE            0x0C01
+#define REG_LEIA_VSC_PIPE_DATA_LENGTH_7  0x0C1D
+
+#endif /* __A205_REG_H */
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
new file mode 100644
index 0000000..6b9adf8
--- /dev/null
+++ b/drivers/gpu/msm/adreno.c
@@ -0,0 +1,1329 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_sharedmem.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_debugfs.h"
+#include "adreno_postmortem.h"
+
+#include "a200_reg.h"
+
+#define DRIVER_VERSION_MAJOR   3
+#define DRIVER_VERSION_MINOR   1
+
+#define KGSL_RBBM_INT_MASK \
+	 (RBBM_INT_CNTL__RDERR_INT_MASK |  \
+	  RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK)
+
+/* Adreno MH arbiter config */
+#define ADRENO_CFG_MHARB \
+	(0x10 \
+		| (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
+		| (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
+		| (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
+		| (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
+
+#define ADRENO_MMU_CONFIG						\
+	(0x01								\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT)	\
+	 | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
+
+/* max time to wait for the GPU to finish its operation(s), in jiffies (1.5 s) */
+#define MAX_WAITGPU_SECS (HZ + HZ/2)
+
+static const struct kgsl_functable adreno_functable;
+
+static struct adreno_device device_3d0 = {
+	.dev = {
+		.name = DEVICE_3D0_NAME,
+		.id = KGSL_DEVICE_3D0,
+		.ver_major = DRIVER_VERSION_MAJOR,
+		.ver_minor = DRIVER_VERSION_MINOR,
+		.mmu = {
+			.config = ADRENO_MMU_CONFIG,
+			/* turn off memory protection unit by setting
+			   acceptable physical address range to include
+			   all pages. */
+			.mpu_base = 0x00000000,
+			.mpu_range =  0xFFFFF000,
+		},
+		.pwrctrl = {
+			.regulator_name = "fs_gfx3d",
+			.irq_name = KGSL_3D0_IRQ,
+			.src_clk_name = "grp_src_clk",
+		},
+		.mutex = __MUTEX_INITIALIZER(device_3d0.dev.mutex),
+		.state = KGSL_STATE_INIT,
+		.active_cnt = 0,
+		.iomemname = KGSL_3D0_REG_MEMORY,
+		.ftbl = &adreno_functable,
+		.display_off = {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+			.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+			.suspend = kgsl_early_suspend_driver,
+			.resume = kgsl_late_resume_driver,
+#endif
+		},
+	},
+	.gmemspace = {
+		.gpu_base = 0,
+		.sizebytes = SZ_256K,
+	},
+	.pfp_fw = NULL,
+	.pm4_fw = NULL,
+	.mharb  = ADRENO_CFG_MHARB,
+};
+
+static int adreno_gmeminit(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	union reg_rb_edram_info rb_edram_info;
+	unsigned int gmem_size;
+	unsigned int edram_value = 0;
+
+	/* make sure edram range is aligned to size */
+	BUG_ON(adreno_dev->gmemspace.gpu_base &
+				(adreno_dev->gmemspace.sizebytes - 1));
+
+	/* get edram_size value equivalent */
+	gmem_size = (adreno_dev->gmemspace.sizebytes >> 14);
+	while (gmem_size >>= 1)
+		edram_value++;
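+	/* e.g. with 256K of GMEM: SZ_256K >> 14 == 16, so the loop above
+	 * leaves edram_value == log2(16) == 4 */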
+
+	rb_edram_info.val = 0;
+
+	rb_edram_info.f.edram_size = edram_value;
+	if (!adreno_is_a220(adreno_dev))
+		rb_edram_info.f.edram_mapping_mode = 0; /* EDRAM_MAP_UPPER */
+
+	/* must be aligned to size */
+	rb_edram_info.f.edram_range = (adreno_dev->gmemspace.gpu_base >> 14);
+
+	adreno_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val);
+
+	return 0;
+}
+
+static int adreno_gmemclose(struct kgsl_device *device)
+{
+	adreno_regwrite(device, REG_RB_EDRAM_INFO, 0x00000000);
+
+	return 0;
+}
+
+static void adreno_rbbm_intrcallback(struct kgsl_device *device)
+{
+	unsigned int status = 0;
+	unsigned int rderr = 0;
+
+	adreno_regread(device, REG_RBBM_INT_STATUS, &status);
+
+	if (status & RBBM_INT_CNTL__RDERR_INT_MASK) {
+		union rbbm_read_error_u rerr;
+		adreno_regread(device, REG_RBBM_READ_ERROR, &rderr);
+		rerr.val = rderr;
+		if (rerr.f.read_address == REG_CP_INT_STATUS &&
+			rerr.f.read_error &&
+			rerr.f.read_requester)
+			KGSL_DRV_WARN(device,
+				"rbbm read error interrupt: %08x\n", rderr);
+		else
+			KGSL_DRV_CRIT(device,
+				"rbbm read error interrupt: %08x\n", rderr);
+	} else if (status & RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK) {
+		KGSL_DRV_INFO(device, "rbbm display update interrupt\n");
+	} else if (status & RBBM_INT_CNTL__GUI_IDLE_INT_MASK) {
+		KGSL_DRV_INFO(device, "rbbm gui idle interrupt\n");
+	} else {
+		KGSL_CMD_WARN(device,
+			"bad bits in REG_CP_INT_STATUS %08x\n", status);
+	}
+
+	status &= KGSL_RBBM_INT_MASK;
+	adreno_regwrite(device, REG_RBBM_INT_ACK, status);
+}
+
+irqreturn_t adreno_isr(int irq, void *data)
+{
+	irqreturn_t result = IRQ_NONE;
+	struct kgsl_device *device;
+	unsigned int status;
+
+	device = (struct kgsl_device *) data;
+
+	BUG_ON(device == NULL);
+	BUG_ON(device->regspace.sizebytes == 0);
+	BUG_ON(device->regspace.mmio_virt_base == 0);
+
+	adreno_regread(device, REG_MASTER_INT_SIGNAL, &status);
+
+	if (status & MASTER_INT_SIGNAL__MH_INT_STAT) {
+		kgsl_mh_intrcallback(device);
+		result = IRQ_HANDLED;
+	}
+
+	if (status & MASTER_INT_SIGNAL__CP_INT_STAT) {
+		kgsl_cp_intrcallback(device);
+		result = IRQ_HANDLED;
+	}
+
+	if (status & MASTER_INT_SIGNAL__RBBM_INT_STAT) {
+		adreno_rbbm_intrcallback(device);
+		result = IRQ_HANDLED;
+	}
+
+	if (device->requested_state == KGSL_STATE_NONE) {
+		if (device->pwrctrl.nap_allowed == true) {
+			device->requested_state = KGSL_STATE_NAP;
+			queue_work(device->work_queue, &device->idle_check_ws);
+		} else if (device->pwrscale.policy != NULL) {
+			queue_work(device->work_queue, &device->idle_check_ws);
+		}
+	}
+
+	/* Reset the time-out in our idle timer */
+	mod_timer(&device->idle_timer,
+		jiffies + device->pwrctrl.interval_timeout);
+	return result;
+}
+
+static int adreno_cleanup_pt(struct kgsl_device *device,
+			struct kgsl_pagetable *pagetable)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
+
+	kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
+
+	kgsl_mmu_unmap(pagetable, &device->memstore);
+
+	kgsl_mmu_unmap(pagetable, &device->mmu.dummyspace);
+
+	return 0;
+}
+
+static int adreno_setup_pt(struct kgsl_device *device,
+			struct kgsl_pagetable *pagetable)
+{
+	int result = 0;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	BUG_ON(rb->buffer_desc.physaddr == 0);
+	BUG_ON(rb->memptrs_desc.physaddr == 0);
+	BUG_ON(device->memstore.physaddr == 0);
+#ifdef CONFIG_MSM_KGSL_MMU
+	BUG_ON(device->mmu.dummyspace.physaddr == 0);
+#endif
+	result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc,
+				     GSL_PT_PAGE_RV);
+	if (result)
+		goto error;
+
+	result = kgsl_mmu_map_global(pagetable, &rb->memptrs_desc,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	if (result)
+		goto unmap_buffer_desc;
+
+	result = kgsl_mmu_map_global(pagetable, &device->memstore,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	if (result)
+		goto unmap_memptrs_desc;
+
+	result = kgsl_mmu_map_global(pagetable, &device->mmu.dummyspace,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	if (result)
+		goto unmap_memstore_desc;
+
+	return result;
+
+unmap_memstore_desc:
+	kgsl_mmu_unmap(pagetable, &device->memstore);
+
+unmap_memptrs_desc:
+	kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
+
+unmap_buffer_desc:
+	kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
+
+error:
+	return result;
+}
+
+static void adreno_setstate(struct kgsl_device *device, uint32_t flags)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int link[32];
+	unsigned int *cmds = &link[0];
+	int sizedwords = 0;
+	unsigned int mh_mmu_invalidate = 0x00000003; /* invalidate all and tc */
+
+	if (!kgsl_mmu_enabled() || !flags)
+		return;
+
+	/* If possible, then set the state via the command stream to avoid
+	   a CPU idle.  Otherwise, use the default setstate which uses register
+	   writes */
+
+	if (adreno_dev->drawctxt_active) {
+		if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+			/* wait for graphics pipe to be idle */
+			*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+			*cmds++ = 0x00000000;
+
+			/* set page table base */
+			*cmds++ = pm4_type0_packet(MH_MMU_PT_BASE, 1);
+			*cmds++ = device->mmu.hwpagetable->base.gpuaddr;
+			sizedwords += 4;
+		}
+
+		if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+			if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) {
+				*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE,
+								1);
+				*cmds++ = 0x00000000;
+				sizedwords += 2;
+			}
+			*cmds++ = pm4_type0_packet(MH_MMU_INVALIDATE, 1);
+			*cmds++ = mh_mmu_invalidate;
+			sizedwords += 2;
+		}
+
+		if (flags & KGSL_MMUFLAGS_PTUPDATE &&
+			!adreno_is_a220(adreno_dev)) {
+			/* HW workaround: to resolve MMU page fault interrupts
+			* caused by the VGT. It prevents the CP PFP from filling
+			* the VGT DMA request fifo too early, thereby ensuring
+			* that the VGT will not fetch vertex/bin data until
+			* after the page table base register has been updated.
+			*
+			* Two null DRAW_INDX_BIN packets are inserted right
+			* after the page table base update, followed by a
+			* wait for idle. The null packets will fill up the
+			* VGT DMA request fifo and prevent any further
+			* vertex/bin updates from occurring until the wait
+			* has finished. */
+			*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+			*cmds++ = (0x4 << 16) |
+				(REG_PA_SU_SC_MODE_CNTL - 0x2000);
+			*cmds++ = 0;	  /* disable faceness generation */
+			*cmds++ = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
+			*cmds++ = device->mmu.dummyspace.gpuaddr;
+			*cmds++ = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
+			*cmds++ = 0;	  /* viz query info */
+			*cmds++ = 0x0003C004; /* draw indicator */
+			*cmds++ = 0;	  /* bin base */
+			*cmds++ = 3;	  /* bin size */
+			*cmds++ = device->mmu.dummyspace.gpuaddr; /* dma base */
+			*cmds++ = 6;	  /* dma size */
+			*cmds++ = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
+			*cmds++ = 0;	  /* viz query info */
+			*cmds++ = 0x0003C004; /* draw indicator */
+			*cmds++ = 0;	  /* bin base */
+			*cmds++ = 3;	  /* bin size */
+			/* dma base */
+			*cmds++ = device->mmu.dummyspace.gpuaddr;
+			*cmds++ = 6;	  /* dma size */
+			*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+			*cmds++ = 0x00000000;
+			sizedwords += 21;
+		}
+
+		if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) {
+			*cmds++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
+			*cmds++ = 0x7fff; /* invalidate all base pointers */
+			sizedwords += 2;
+		}
+
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+					&link[0], sizedwords);
+	} else
+		kgsl_default_setstate(device, flags);
+}
+
+static unsigned int
+adreno_getchipid(struct kgsl_device *device)
+{
+	unsigned int chipid = 0;
+	unsigned int coreid, majorid, minorid, patchid, revid;
+
+	adreno_regread(device, REG_RBBM_PERIPHID1, &coreid);
+	adreno_regread(device, REG_RBBM_PERIPHID2, &majorid);
+	adreno_regread(device, REG_RBBM_PATCH_RELEASE, &revid);
+
+	/*
+	* adreno 22x gpus are indicated by coreid 2,
+	* but REG_RBBM_PERIPHID1 always contains 0 for this field
+	*/
+	if (cpu_is_msm8960() || cpu_is_msm8x60())
+		chipid = 2 << 24;
+	else
+		chipid = (coreid & 0xF) << 24;
+
+	chipid |= ((majorid >> 4) & 0xF) << 16;
+
+	minorid = ((revid >> 0)  & 0xFF);
+
+	patchid = ((revid >> 16) & 0xFF);
+
+	/* 8x50 returns 0 for patch release, but it should be 1 */
+	if (cpu_is_qsd8x50())
+		patchid = 1;
+	/* userspace isn't prepared to deal with patch id for these chips yet */
+	else if (cpu_is_msm8960() || cpu_is_msm8x60())
+		patchid = 0;
+
+	chipid |= (minorid << 8) | patchid;
+
+	return chipid;
+}
+
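+/* chip_id layout produced above: (core << 24) | (major << 16) |
+ * (minor << 8) | patchid; adreno_identify_gpu() decodes the same fields. */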
+/* all chipid fields are 8 bits wide so 256 won't occur in a real chipid */
+#define DONT_CARE 256
+static const struct {
+	unsigned int core;
+	unsigned int major;
+	unsigned int minor;
+	enum adreno_gpurev gpurev;
+} gpurev_table[] = {
+	/* major and minor may be DONT_CARE, but core must not be */
+	{0, 2, DONT_CARE, ADRENO_REV_A200},
+	{0, 1, 0, ADRENO_REV_A205},
+	{2, 1, DONT_CARE, ADRENO_REV_A220},
+	{2, 2, DONT_CARE, ADRENO_REV_A225},
+};
+
+static inline bool _rev_match(unsigned int id, unsigned int entry)
+{
+	return (entry == DONT_CARE || entry == id);
+}
+#undef DONT_CARE
+
+static void
+adreno_identify_gpu(struct adreno_device *adreno_dev)
+{
+	enum adreno_gpurev gpurev = ADRENO_REV_UNKNOWN;
+	unsigned int i, core, major, minor;
+
+	adreno_dev->chip_id = adreno_getchipid(&adreno_dev->dev);
+
+	core = (adreno_dev->chip_id >> 24) & 0xff;
+	major = (adreno_dev->chip_id >> 16) & 0xff;
+	minor = (adreno_dev->chip_id >> 8) & 0xff;
+
+	for (i = 0; i < ARRAY_SIZE(gpurev_table); i++) {
+		if (core == gpurev_table[i].core &&
+		    _rev_match(major, gpurev_table[i].major) &&
+		    _rev_match(minor, gpurev_table[i].minor)) {
+			gpurev = gpurev_table[i].gpurev;
+			break;
+		}
+	}
+
+	adreno_dev->gpurev = gpurev;
+}
+
+static int __devinit
+adreno_probe(struct platform_device *pdev)
+{
+	struct kgsl_device *device;
+	struct adreno_device *adreno_dev;
+	int status = -EINVAL;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+	adreno_dev = ADRENO_DEVICE(device);
+	device->parentdev = &pdev->dev;
+
+	init_completion(&device->recovery_gate);
+
+	status = adreno_ringbuffer_init(device);
+	if (status != 0)
+		goto error;
+
+	status = kgsl_device_platform_probe(device, adreno_isr);
+	if (status)
+		goto error_close_rb;
+
+	adreno_debugfs_init(device);
+
+	kgsl_pwrscale_init(device);
+	kgsl_pwrscale_attach_policy(device, ADRENO_DEFAULT_PWRSCALE_POLICY);
+
+	device->flags &= ~KGSL_FLAGS_SOFT_RESET;
+	return 0;
+
+error_close_rb:
+	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
+error:
+	device->parentdev = NULL;
+	return status;
+}
+
+static int __devexit adreno_remove(struct platform_device *pdev)
+{
+	struct kgsl_device *device;
+	struct adreno_device *adreno_dev;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+	adreno_dev = ADRENO_DEVICE(device);
+
+	kgsl_pwrscale_detach_policy(device);
+	kgsl_pwrscale_close(device);
+
+	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
+	kgsl_device_platform_remove(device);
+
+	return 0;
+}
+
+static int adreno_start(struct kgsl_device *device, unsigned int init_ram)
+{
+	int status = -EINVAL;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int init_reftimestamp = 0x7fffffff;
+
+	device->state = KGSL_STATE_INIT;
+	device->requested_state = KGSL_STATE_NONE;
+
+	/* Power up the device */
+	kgsl_pwrctrl_enable(device);
+
+	/* Identify the specific GPU */
+	adreno_identify_gpu(adreno_dev);
+
+	if (kgsl_mmu_start(device))
+		goto error_clk_off;
+
+	/* We need to make sure all blocks are powered up and clocked before
+	 * issuing a soft reset.  The overrides will then be turned off
+	 * (set to 0).
+	 */
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0xfffffffe);
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+	/* Only reset CP block if all blocks have previously been reset */
+	if (!(device->flags & KGSL_FLAGS_SOFT_RESET) ||
+		!adreno_is_a220(adreno_dev)) {
+		adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0xFFFFFFFF);
+		device->flags |= KGSL_FLAGS_SOFT_RESET;
+	} else
+		adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000001);
+
+	/* The core is in an indeterminate state until the reset completes
+	 * after 30ms.
+	 */
+	msleep(30);
+
+	adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000);
+
+	adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
+
+	adreno_regwrite(device, REG_MH_ARBITER_CONFIG,
+				adreno_dev->mharb);
+
+	/* Remove 1k boundary check in z470 to avoid GPU hang.
+	   Note that this solution won't work if both EBI and SMI are used */
+	if (adreno_is_a220(adreno_dev)) {
+		adreno_regwrite(device, REG_MH_CLNT_INTF_CTRL_CONFIG1,
+				 0x00032f07);
+	}
+
+	adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
+	adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
+
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0);
+	if (!adreno_is_a220(adreno_dev))
+		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0);
+	else
+		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80);
+
+	kgsl_sharedmem_writel(&device->memstore,
+			      KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
+			      init_reftimestamp);
+
+	adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000);
+
+	/* Make sure interrupts are disabled */
+
+	adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
+	adreno_regwrite(device, REG_CP_INT_CNTL, 0);
+	adreno_regwrite(device, REG_SQ_INT_CNTL, 0);
+
+	if (adreno_is_a220(adreno_dev))
+		adreno_dev->gmemspace.sizebytes = SZ_512K;
+	else
+		adreno_dev->gmemspace.sizebytes = SZ_256K;
+	adreno_gmeminit(adreno_dev);
+
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+
+	status = adreno_ringbuffer_start(&adreno_dev->ringbuffer, init_ram);
+	if (status != 0)
+		goto error_irq_off;
+
+	mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+	return status;
+
+error_irq_off:
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+error_clk_off:
+	kgsl_pwrctrl_disable(device);
+	kgsl_mmu_stop(device);
+
+	return status;
+}
+
+static int adreno_stop(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	del_timer(&device->idle_timer);
+
+	adreno_dev->drawctxt_active = NULL;
+
+	adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+
+	adreno_gmemclose(device);
+
+	kgsl_mmu_stop(device);
+
+	/* Power down the device */
+	kgsl_pwrctrl_disable(device);
+
+	return 0;
+}
+
+static int
+adreno_recover_hang(struct kgsl_device *device)
+{
+	int ret;
+	unsigned int *rb_buffer;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	unsigned int timestamp;
+	unsigned int num_rb_contents;
+	unsigned int bad_context;
+	unsigned int reftimestamp;
+	unsigned int enable_ts;
+	unsigned int soptimestamp;
+	unsigned int eoptimestamp;
+	struct adreno_context *drawctxt;
+
+	KGSL_DRV_ERR(device, "Starting recovery from 3D GPU hang....\n");
+	rb_buffer = vmalloc(rb->buffer_desc.size);
+	if (!rb_buffer) {
+		KGSL_MEM_ERR(device,
+			"Failed to allocate memory for recovery: %x\n",
+			rb->buffer_desc.size);
+		return -ENOMEM;
+	}
+	/* Extract valid contents from rb which can still be executed after
+	 * hang */
+	ret = adreno_ringbuffer_extract(rb, rb_buffer, &num_rb_contents);
+	if (ret)
+		goto done;
+	timestamp = rb->timestamp;
+	KGSL_DRV_ERR(device, "Last issued timestamp: %x\n", timestamp);
+	kgsl_sharedmem_readl(&device->memstore, &bad_context,
+				KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
+	kgsl_sharedmem_readl(&device->memstore, &reftimestamp,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts));
+	kgsl_sharedmem_readl(&device->memstore, &enable_ts,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable));
+	kgsl_sharedmem_readl(&device->memstore, &soptimestamp,
+				KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));
+	kgsl_sharedmem_readl(&device->memstore, &eoptimestamp,
+				KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
+	/* Make sure memory is synchronized before restarting the GPU */
+	mb();
+	KGSL_CTXT_ERR(device,
+		"Context that caused a GPU hang: %x\n", bad_context);
+	/* restart device */
+	ret = adreno_stop(device);
+	if (ret)
+		goto done;
+	ret = adreno_start(device, true);
+	if (ret)
+		goto done;
+	KGSL_DRV_ERR(device, "Device has been restarted after hang\n");
+	/* Restore timestamp states */
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp),
+			soptimestamp);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp),
+			eoptimestamp);
+	kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp),
+			soptimestamp);
+	if (num_rb_contents) {
+		kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
+			reftimestamp);
+		kgsl_sharedmem_writel(&device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
+			enable_ts);
+	}
+	/* Make sure all writes are posted before the GPU reads them */
+	wmb();
+	/* Mark the invalid context so no more commands are accepted from
+	 * that context */
+
+	drawctxt = (struct adreno_context *) bad_context;
+
+	KGSL_CTXT_ERR(device,
+		"Context that caused a GPU hang: %x\n", bad_context);
+
+	drawctxt->flags |= CTXT_FLAGS_GPU_HANG;
+
+	/* Restore valid commands in ringbuffer */
+	adreno_ringbuffer_restore(rb, rb_buffer, num_rb_contents);
+	rb->timestamp = timestamp;
+done:
+	vfree(rb_buffer);
+	return ret;
+}
+
+static int
+adreno_dump_and_recover(struct kgsl_device *device)
+{
+	static int recovery;
+	int result = -ETIMEDOUT;
+
+	if (device->state == KGSL_STATE_HUNG)
+		goto done;
+	if (device->state == KGSL_STATE_DUMP_AND_RECOVER && !recovery) {
+		mutex_unlock(&device->mutex);
+		wait_for_completion(&device->recovery_gate);
+		mutex_lock(&device->mutex);
+		if (!(device->state & KGSL_STATE_HUNG))
+			/* recovery success */
+			result = 0;
+	} else {
+		INIT_COMPLETION(device->recovery_gate);
+		/* Detected a hang - trigger an automatic dump */
+		adreno_postmortem_dump(device, 0);
+		if (!recovery) {
+			recovery = 1;
+			result = adreno_recover_hang(device);
+			if (result)
+				device->state = KGSL_STATE_HUNG;
+			recovery = 0;
+			complete_all(&device->recovery_gate);
+		} else
+			KGSL_DRV_ERR(device,
+				"Cannot recover from another hang while "
+				"recovering from a hang\n");
+	}
+done:
+	return result;
+}
+
+static int adreno_getproperty(struct kgsl_device *device,
+				enum kgsl_property_type type,
+				void *value,
+				unsigned int sizebytes)
+{
+	int status = -EINVAL;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	switch (type) {
+	case KGSL_PROP_DEVICE_INFO:
+		{
+			struct kgsl_devinfo devinfo;
+
+			if (sizebytes != sizeof(devinfo)) {
+				status = -EINVAL;
+				break;
+			}
+
+			memset(&devinfo, 0, sizeof(devinfo));
+			devinfo.device_id = device->id+1;
+			devinfo.chip_id = adreno_dev->chip_id;
+			devinfo.mmu_enabled = kgsl_mmu_enabled();
+			devinfo.gpu_id = adreno_dev->gpurev;
+			devinfo.gmem_gpubaseaddr = adreno_dev->gmemspace.
+					gpu_base;
+			devinfo.gmem_sizebytes = adreno_dev->gmemspace.
+					sizebytes;
+
+			if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
+					0) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+	case KGSL_PROP_DEVICE_SHADOW:
+		{
+			struct kgsl_shadowprop shadowprop;
+
+			if (sizebytes != sizeof(shadowprop)) {
+				status = -EINVAL;
+				break;
+			}
+			memset(&shadowprop, 0, sizeof(shadowprop));
+			if (device->memstore.hostptr) {
+				/* NOTE: with mmu enabled, gpuaddr doesn't mean
+				 * anything to mmap().
+				 */
+				shadowprop.gpuaddr = device->memstore.physaddr;
+				shadowprop.size = device->memstore.size;
+				/* GSL needs this to be set, even if it
+				   appears to be meaningless */
+				shadowprop.flags = KGSL_FLAGS_INITIALIZED;
+			}
+			if (copy_to_user(value, &shadowprop,
+				sizeof(shadowprop))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+	case KGSL_PROP_MMU_ENABLE:
+		{
+#ifdef CONFIG_MSM_KGSL_MMU
+			int mmuProp = 1;
+#else
+			int mmuProp = 0;
+#endif
+			if (sizebytes != sizeof(int)) {
+				status = -EINVAL;
+				break;
+			}
+			if (copy_to_user(value, &mmuProp, sizeof(mmuProp))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+	case KGSL_PROP_INTERRUPT_WAITS:
+		{
+			int int_waits = 1;
+			if (sizebytes != sizeof(int)) {
+				status = -EINVAL;
+				break;
+			}
+			if (copy_to_user(value, &int_waits, sizeof(int))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+	default:
+		status = -EINVAL;
+	}
+
+	return status;
+}
+
+/* Caller must hold the device mutex. */
+int adreno_idle(struct kgsl_device *device, unsigned int timeout)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	unsigned int rbbm_status;
+	unsigned long wait_time = jiffies + MAX_WAITGPU_SECS;
+
+	kgsl_cffdump_regpoll(device->id, REG_RBBM_STATUS << 2,
+		0x00000000, 0x80000000);
+	/* first, wait until the CP has consumed all the commands in
+	 * the ring buffer
+	 */
+retry:
+	if (rb->flags & KGSL_FLAGS_STARTED) {
+		do {
+			GSL_RB_GET_READPTR(rb, &rb->rptr);
+			if (time_after(jiffies, wait_time)) {
+				KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n",
+					rb->rptr, rb->wptr);
+				goto err;
+			}
+		} while (rb->rptr != rb->wptr);
+	}
+
+	/* now, wait for the GPU to finish its operations */
+	wait_time = jiffies + MAX_WAITGPU_SECS;
+	while (time_before(jiffies, wait_time)) {
+		adreno_regread(device, REG_RBBM_STATUS, &rbbm_status);
+		if (rbbm_status == 0x110)
+			return 0;
+	}
+
+err:
+	KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
+	if (!adreno_dump_and_recover(device)) {
+		wait_time = jiffies + MAX_WAITGPU_SECS;
+		goto retry;
+	}
+	return -ETIMEDOUT;
+}
+
+static unsigned int adreno_isidle(struct kgsl_device *device)
+{
+	int status = false;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+	unsigned int rbbm_status;
+
+	if (rb->flags & KGSL_FLAGS_STARTED) {
+		/* Is the ring buffer empty? */
+		GSL_RB_GET_READPTR(rb, &rb->rptr);
+		if (!device->active_cnt && (rb->rptr == rb->wptr)) {
+			/* Is the core idle? */
+			adreno_regread(device, REG_RBBM_STATUS,
+					    &rbbm_status);
+			if (rbbm_status == 0x110)
+				status = true;
+		}
+	} else {
+		KGSL_DRV_ERR(device, "ringbuffer not started\n");
+		BUG();
+	}
+	return status;
+}
+
+/* Caller must hold the device mutex. */
+static int adreno_suspend_context(struct kgsl_device *device)
+{
+	int status = 0;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	/* switch to NULL ctxt */
+	if (adreno_dev->drawctxt_active != NULL) {
+		adreno_drawctxt_switch(adreno_dev, NULL, 0);
+		status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+	}
+
+	return status;
+}
+
+uint8_t *kgsl_sharedmem_convertaddr(struct kgsl_device *device,
+	unsigned int pt_base, unsigned int gpuaddr, unsigned int *size)
+{
+	uint8_t *result = NULL;
+	struct kgsl_mem_entry *entry;
+	struct kgsl_process_private *priv;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *ringbuffer = &adreno_dev->ringbuffer;
+
+	if (kgsl_gpuaddr_in_memdesc(&ringbuffer->buffer_desc, gpuaddr)) {
+		return kgsl_gpuaddr_to_vaddr(&ringbuffer->buffer_desc,
+					gpuaddr, size);
+	}
+
+	if (kgsl_gpuaddr_in_memdesc(&ringbuffer->memptrs_desc, gpuaddr)) {
+		return kgsl_gpuaddr_to_vaddr(&ringbuffer->memptrs_desc,
+					gpuaddr, size);
+	}
+
+	if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr)) {
+		return kgsl_gpuaddr_to_vaddr(&device->memstore,
+					gpuaddr, size);
+	}
+
+	mutex_lock(&kgsl_driver.process_mutex);
+	list_for_each_entry(priv, &kgsl_driver.process_list, list) {
+		if (pt_base != 0
+			&& priv->pagetable
+			&& priv->pagetable->base.gpuaddr != pt_base) {
+			continue;
+		}
+
+		spin_lock(&priv->mem_lock);
+		entry = kgsl_sharedmem_find_region(priv, gpuaddr,
+						sizeof(unsigned int));
+		if (entry) {
+			result = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
+							gpuaddr, size);
+			spin_unlock(&priv->mem_lock);
+			mutex_unlock(&kgsl_driver.process_mutex);
+			return result;
+		}
+		spin_unlock(&priv->mem_lock);
+	}
+	mutex_unlock(&kgsl_driver.process_mutex);
+
+	BUG_ON(!mutex_is_locked(&device->mutex));
+	list_for_each_entry(entry, &device->memqueue, list) {
+		if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr)) {
+			result = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
+							gpuaddr, size);
+			break;
+		}
+
+	}
+	return result;
+}
+
+void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int *value)
+{
+	unsigned int *reg;
+	BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes);
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+				+ (offsetwords << 2));
+
+	if (!in_interrupt())
+		kgsl_pre_hwaccess(device);
+
+	/* ensure this read finishes before the next one.
+	 * i.e. act like normal readl() */
+	*value = __raw_readl(reg);
+	rmb();
+}
+
+void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int value)
+{
+	unsigned int *reg;
+
+	BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes);
+
+	if (!in_interrupt())
+		kgsl_pre_hwaccess(device);
+
+	kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+				+ (offsetwords << 2));
+
+	/* ensure previous writes post before this one,
+	 * i.e. act like normal writel() */
+	wmb();
+	__raw_writel(value, reg);
+}
+
+static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
+					unsigned int timestamp)
+{
+	int status;
+	unsigned int ref_ts, enableflag;
+
+	status = kgsl_check_timestamp(device, timestamp);
+	if (!status) {
+		mutex_lock(&device->mutex);
+		kgsl_sharedmem_readl(&device->memstore, &enableflag,
+			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable));
+		mb();
+
+		if (enableflag) {
+			kgsl_sharedmem_readl(&device->memstore, &ref_ts,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts));
+			mb();
+			if (timestamp_cmp(ref_ts, timestamp)) {
+				kgsl_sharedmem_writel(&device->memstore,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
+				timestamp);
+				wmb();
+			}
+		} else {
+			unsigned int cmds[2];
+			kgsl_sharedmem_writel(&device->memstore,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
+				timestamp);
+			enableflag = 1;
+			kgsl_sharedmem_writel(&device->memstore,
+				KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
+				enableflag);
+			wmb();
+			/* submit a dummy packet so that even if all
+			* commands up to the timestamp get executed we will still
+			* get an interrupt */
+			cmds[0] = pm4_type3_packet(PM4_NOP, 1);
+			cmds[1] = 0;
+			adreno_ringbuffer_issuecmds(device, 0, &cmds[0], 2);
+		}
+		mutex_unlock(&device->mutex);
+	}
+
+	return status;
+}
+
+/*
+ wait_io_event_interruptible_timeout checks for the exit condition before
+ placing a process on the wait queue. For conditional interrupts we expect
+ the process to already be on its wait queue when its exit-condition
+ checking function is called.
+*/
+#define kgsl_wait_io_event_interruptible_timeout(wq, condition, timeout)\
+({									\
+	long __ret = timeout;						\
+	__wait_io_event_interruptible_timeout(wq, condition, __ret);	\
+	__ret;								\
+})
+
+/* MUST be called with the device mutex held */
+static int adreno_waittimestamp(struct kgsl_device *device,
+				unsigned int timestamp,
+				unsigned int msecs)
+{
+	long status = 0;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	if (timestamp != adreno_dev->ringbuffer.timestamp &&
+		timestamp_cmp(timestamp,
+		adreno_dev->ringbuffer.timestamp)) {
+		KGSL_DRV_ERR(device, "Cannot wait for invalid ts: %x, "
+			"rb->timestamp: %x\n",
+			timestamp, adreno_dev->ringbuffer.timestamp);
+		status = -EINVAL;
+		goto done;
+	}
+	if (!kgsl_check_timestamp(device, timestamp)) {
+		mutex_unlock(&device->mutex);
+		/* We need to make sure that the process is placed on the
+		 * wait queue before its condition is checked */
+		status = kgsl_wait_io_event_interruptible_timeout(
+				device->wait_queue,
+				kgsl_check_interrupt_timestamp(device,
+					timestamp), msecs_to_jiffies(msecs));
+		mutex_lock(&device->mutex);
+
+		if (status > 0)
+			status = 0;
+		else if (status == 0) {
+			if (!kgsl_check_timestamp(device, timestamp)) {
+				status = -ETIMEDOUT;
+				KGSL_DRV_ERR(device,
+					"Device hang detected while waiting "
+					"for timestamp: %x, last "
+					"submitted(rb->timestamp): %x, wptr: "
+					"%x\n", timestamp,
+					adreno_dev->ringbuffer.timestamp,
+					adreno_dev->ringbuffer.wptr);
+				if (!adreno_dump_and_recover(device)) {
+					/* wait for idle after recovery as the
+					 * timestamp that this process wanted
+					 * to wait on may be invalid */
+					if (!adreno_idle(device,
+						KGSL_TIMEOUT_DEFAULT))
+						status = 0;
+				}
+			}
+		}
+	}
+
+done:
+	return (int)status;
+}
+
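+/*
+ * Return the last timestamp of the requested type: CONSUMED is read from
+ * the CP timestamp register, RETIRED from the eoptimestamp field in the
+ * memstore shared memory.
+ */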
+static unsigned int adreno_readtimestamp(struct kgsl_device *device,
+			     enum kgsl_timestamp_type type)
+{
+	unsigned int timestamp = 0;
+
+	if (type == KGSL_TIMESTAMP_CONSUMED)
+		adreno_regread(device, REG_CP_TIMESTAMP, &timestamp);
+	else if (type == KGSL_TIMESTAMP_RETIRED)
+		kgsl_sharedmem_readl(&device->memstore, &timestamp,
+				 KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
+	rmb();
+
+	return timestamp;
+}
+
+static long adreno_ioctl(struct kgsl_device_private *dev_priv,
+			      unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_drawctxt_set_bin_base_offset *binbase;
+	struct kgsl_context *context;
+
+	switch (cmd) {
+	case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET:
+		binbase = data;
+
+		context = kgsl_find_context(dev_priv, binbase->drawctxt_id);
+		if (context) {
+			adreno_drawctxt_set_bin_base_offset(
+				dev_priv->device, context, binbase->offset);
+		} else {
+			result = -EINVAL;
+			KGSL_DRV_ERR(dev_priv->device,
+				"invalid drawctxt_id %d "
+				"device_id=%d\n",
+				binbase->drawctxt_id, dev_priv->device->id);
+		}
+		break;
+
+	default:
+		KGSL_DRV_INFO(dev_priv->device,
+			"invalid ioctl code %08x\n", cmd);
+		result = -EINVAL;
+		break;
+	}
+	return result;
+
+}
+
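+/*
+ * Convert a busy-cycle count to microseconds.  gpu_freq is in Hz, so
+ * gpu_freq / 1000000 is the number of ticks per microsecond.  Example
+ * (illustrative values only): 2000000 ticks at 200000000 Hz ->
+ * 2000000 / 200 = 10000 us.
+ */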
+static inline s64 adreno_ticks_to_us(u32 ticks, u32 gpu_freq)
+{
+	gpu_freq /= 1000000;
+	return ticks / gpu_freq;
+}
+
+static void adreno_power_stats(struct kgsl_device *device,
+				struct kgsl_power_stats *stats)
+{
+	unsigned int reg;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	/* Idle time can only be calculated once the algorithm has run at
+	 * least once and recorded a start time. */
+	if (pwr->time != 0) {
+		s64 tmp;
+		/* Stop the performance monitor and read the current
+		 * busy cycles. */
+		adreno_regwrite(device,
+			REG_CP_PERFMON_CNTL,
+			REG_PERF_MODE_CNT |
+			REG_PERF_STATE_FREEZE);
+		adreno_regread(device, REG_RBBM_PERFCOUNTER1_LO, &reg);
+		tmp = ktime_to_us(ktime_get());
+		stats->total_time = tmp - pwr->time;
+		pwr->time = tmp;
+		stats->busy_time = adreno_ticks_to_us(reg,
+			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+
+		adreno_regwrite(device,
+			REG_CP_PERFMON_CNTL,
+			REG_PERF_MODE_CNT |
+			REG_PERF_STATE_RESET);
+	} else {
+		stats->total_time = 0;
+		stats->busy_time = 0;
+		pwr->time = ktime_to_us(ktime_get());
+	}
+
+	/* re-enable the performance monitors */
+	adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &reg);
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, (reg | 0x40));
+	adreno_regwrite(device, REG_RBBM_PERFCOUNTER1_SELECT, 0x1);
+	adreno_regwrite(device,
+		REG_CP_PERFMON_CNTL,
+		REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
+}
+
+void adreno_irqctrl(struct kgsl_device *device, int state)
+{
+	/* Enable or disable GPU and GPUMMU interrupts */
+
+	if (state) {
+		adreno_regwrite(device, REG_RBBM_INT_CNTL, KGSL_RBBM_INT_MASK);
+		adreno_regwrite(device, REG_CP_INT_CNTL, KGSL_CP_INT_MASK);
+		adreno_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
+	} else {
+		adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
+		adreno_regwrite(device, REG_CP_INT_CNTL, 0);
+		adreno_regwrite(device, MH_INTERRUPT_MASK, 0);
+	}
+}
+
+static const struct kgsl_functable adreno_functable = {
+	/* Mandatory functions */
+	.regread = adreno_regread,
+	.regwrite = adreno_regwrite,
+	.idle = adreno_idle,
+	.isidle = adreno_isidle,
+	.suspend_context = adreno_suspend_context,
+	.start = adreno_start,
+	.stop = adreno_stop,
+	.getproperty = adreno_getproperty,
+	.waittimestamp = adreno_waittimestamp,
+	.readtimestamp = adreno_readtimestamp,
+	.issueibcmds = adreno_ringbuffer_issueibcmds,
+	.ioctl = adreno_ioctl,
+	.setup_pt = adreno_setup_pt,
+	.cleanup_pt = adreno_cleanup_pt,
+	.power_stats = adreno_power_stats,
+	.irqctrl = adreno_irqctrl,
+	/* Optional functions */
+	.setstate = adreno_setstate,
+	.drawctxt_create = adreno_drawctxt_create,
+	.drawctxt_destroy = adreno_drawctxt_destroy,
+};
+
+static struct platform_device_id adreno_id_table[] = {
+	{ DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, adreno_id_table);
+
+static struct platform_driver adreno_platform_driver = {
+	.probe = adreno_probe,
+	.remove = __devexit_p(adreno_remove),
+	.suspend = kgsl_suspend_driver,
+	.resume = kgsl_resume_driver,
+	.id_table = adreno_id_table,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DEVICE_3D_NAME,
+		.pm = &kgsl_pm_ops,
+	}
+};
+
+static int __init kgsl_3d_init(void)
+{
+	return platform_driver_register(&adreno_platform_driver);
+}
+
+static void __exit kgsl_3d_exit(void)
+{
+	platform_driver_unregister(&adreno_platform_driver);
+}
+
+module_init(kgsl_3d_init);
+module_exit(kgsl_3d_exit);
+
+MODULE_DESCRIPTION("3D Graphics driver");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kgsl_3d");
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
new file mode 100644
index 0000000..597c6b8
--- /dev/null
+++ b/drivers/gpu/msm/adreno.h
@@ -0,0 +1,116 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_H
+#define __ADRENO_H
+
+#include "kgsl_device.h"
+#include "adreno_drawctxt.h"
+#include "adreno_ringbuffer.h"
+
+#define DEVICE_3D_NAME "kgsl-3d"
+#define DEVICE_3D0_NAME "kgsl-3d0"
+
+#define ADRENO_DEVICE(device) \
+		KGSL_CONTAINER_OF(device, struct adreno_device, dev)
+
+/* Flags to control command packet settings */
+#define KGSL_CMD_FLAGS_PMODE		0x00000001
+#define KGSL_CMD_FLAGS_NO_TS_CMP	0x00000002
+#define KGSL_CMD_FLAGS_NOT_KERNEL_CMD	0x00000004
+
+/* Command identifiers */
+#define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0xDEADBEEF
+#define KGSL_CMD_IDENTIFIER		0xFEEDFACE
+
+#ifdef CONFIG_MSM_SCM
+#define ADRENO_DEFAULT_PWRSCALE_POLICY  (&kgsl_pwrscale_policy_tz)
+#else
+#define ADRENO_DEFAULT_PWRSCALE_POLICY  NULL
+#endif
+
+#define KGSL_CP_INT_MASK \
+	(CP_INT_CNTL__SW_INT_MASK | \
+	CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \
+	CP_INT_CNTL__OPCODE_ERROR_MASK | \
+	CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK | \
+	CP_INT_CNTL__RESERVED_BIT_ERROR_MASK | \
+	CP_INT_CNTL__IB_ERROR_MASK | \
+	CP_INT_CNTL__IB2_INT_MASK | \
+	CP_INT_CNTL__IB1_INT_MASK | \
+	CP_INT_CNTL__RB_INT_MASK)
+
+enum adreno_gpurev {
+	ADRENO_REV_UNKNOWN = 0,
+	ADRENO_REV_A200 = 200,
+	ADRENO_REV_A205 = 205,
+	ADRENO_REV_A220 = 220,
+	ADRENO_REV_A225 = 225,
+};
+
+struct adreno_device {
+	struct kgsl_device dev;    /* Must be first field in this struct */
+	unsigned int chip_id;
+	enum adreno_gpurev gpurev;
+	struct kgsl_memregion gmemspace;
+	struct adreno_context *drawctxt_active;
+	wait_queue_head_t ib1_wq;
+	unsigned int *pfp_fw;
+	size_t pfp_fw_size;
+	unsigned int *pm4_fw;
+	size_t pm4_fw_size;
+	struct adreno_ringbuffer ringbuffer;
+	unsigned int mharb;
+};
+
+int adreno_idle(struct kgsl_device *device, unsigned int timeout);
+void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int *value);
+void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
+				unsigned int value);
+
+uint8_t *kgsl_sharedmem_convertaddr(struct kgsl_device *device,
+	unsigned int pt_base, unsigned int gpuaddr, unsigned int *size);
+
+static inline int adreno_is_a200(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A200);
+}
+
+static inline int adreno_is_a205(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A205);
+}
+
+static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev  == ADRENO_REV_A200 ||
+		adreno_dev->gpurev == ADRENO_REV_A205);
+}
+
+static inline int adreno_is_a220(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A220);
+}
+
+static inline int adreno_is_a225(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev  == ADRENO_REV_A220 ||
+		adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
new file mode 100644
index 0000000..b897e10
--- /dev/null
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -0,0 +1,451 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "kgsl.h"
+#include "adreno_postmortem.h"
+#include "adreno.h"
+
+#include "a200_reg.h"
+
+unsigned int kgsl_cff_dump_enable;
+int kgsl_pm_regs_enabled;
+
+static uint32_t kgsl_ib_base;
+static uint32_t kgsl_ib_size;
+
+static struct dentry *pm_d_debugfs;
+
+static int pm_dump_set(void *data, u64 val)
+{
+	struct kgsl_device *device = data;
+
+	if (val) {
+		mutex_lock(&device->mutex);
+		adreno_postmortem_dump(device, 1);
+		mutex_unlock(&device->mutex);
+	}
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_dump_fops,
+			NULL,
+			pm_dump_set, "%llu\n");
+
+static int pm_regs_enabled_set(void *data, u64 val)
+{
+	kgsl_pm_regs_enabled = val ? 1 : 0;
+	return 0;
+}
+
+static int pm_regs_enabled_get(void *data, u64 *val)
+{
+	*val = kgsl_pm_regs_enabled;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops,
+			pm_regs_enabled_get,
+			pm_regs_enabled_set, "%llu\n");
+
+
+static int kgsl_cff_dump_enable_set(void *data, u64 val)
+{
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+	kgsl_cff_dump_enable = (val != 0);
+	return 0;
+#else
+	return -EINVAL;
+#endif
+}
+
+static int kgsl_cff_dump_enable_get(void *data, u64 *val)
+{
+	*val = kgsl_cff_dump_enable;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kgsl_cff_dump_enable_fops, kgsl_cff_dump_enable_get,
+			kgsl_cff_dump_enable_set, "%llu\n");
+
+static int kgsl_dbgfs_open(struct inode *inode, struct file *file)
+{
+	file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static int kgsl_dbgfs_release(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
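+/*
+ * Format one row of a hex dump into a user buffer.  'prefix' is a printf
+ * format consuming 'c' (typically an offset, e.g. "IB: %05x: ").  Returns
+ * the length of the line copied to 'buff' (the NUL is copied too), or
+ * -EFAULT if the copy to user space fails.
+ */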
+static int kgsl_hex_dump(const char *prefix, int c, uint8_t *data,
+	int rowc, int linec, char __user *buff)
+{
+	int ss;
+	/* Prefix of 20 chars max, 32 bytes per row, in groups of four - that's
+	 * 8 groups at 8 chars per group plus a space, plus new-line, plus
+	 * ending character */
+	char linebuf[20 + 64 + 1 + 1];
+
+	ss = snprintf(linebuf, sizeof(linebuf), prefix, c);
+	hex_dump_to_buffer(data, linec, rowc, 4, linebuf+ss,
+		sizeof(linebuf)-ss, 0);
+	strlcat(linebuf, "\n", sizeof(linebuf));
+	linebuf[sizeof(linebuf)-1] = 0;
+	ss = strlen(linebuf);
+	if (copy_to_user(buff, linebuf, ss+1))
+		return -EFAULT;
+	return ss;
+}
+
+static ssize_t kgsl_ib_dump_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	int i, count = kgsl_ib_size, remaining, pos = 0, tot = 0, ss;
+	struct kgsl_device *device = file->private_data;
+	const int rowc = 32;
+	unsigned int pt_base, ib_memsize;
+	uint8_t *base_addr;
+	char linebuf[80];
+
+	if (!ppos || !device || !kgsl_ib_base)
+		return 0;
+
+	kgsl_regread(device, MH_MMU_PT_BASE, &pt_base);
+	base_addr = kgsl_sharedmem_convertaddr(device, pt_base, kgsl_ib_base,
+		&ib_memsize);
+
+	if (!base_addr)
+		return 0;
+
+	pr_info("%s ppos=%ld, buff_count=%zu, count=%d\n", __func__, (long)*ppos,
+		buff_count, count);
+	ss = snprintf(linebuf, sizeof(linebuf), "IB: base=%08x(%08x"
+		"), size=%d, memsize=%d\n", kgsl_ib_base,
+		(uint32_t)base_addr, kgsl_ib_size, ib_memsize);
+	if (*ppos == 0) {
+		if (copy_to_user(buff, linebuf, ss+1))
+			return -EFAULT;
+		tot += ss;
+		buff += ss;
+		*ppos += ss;
+	}
+	pos += ss;
+	remaining = count;
+	for (i = 0; i < count; i += rowc) {
+		int linec = min(remaining, rowc);
+
+		remaining -= rowc;
+		ss = kgsl_hex_dump("IB: %05x: ", i, base_addr, rowc, linec,
+			buff);
+		if (ss < 0)
+			return ss;
+
+		if (pos >= *ppos) {
+			if (tot+ss >= buff_count) {
+				ss = copy_to_user(buff, "", 1);
+				return tot;
+			}
+			tot += ss;
+			buff += ss;
+			*ppos += ss;
+		}
+		pos += ss;
+		base_addr += linec;
+	}
+
+	return tot;
+}
+
+static ssize_t kgsl_ib_dump_write(
+	struct file *file,
+	const char __user *buff,
+	size_t count,
+	loff_t *ppos)
+{
+	char local_buff[64];
+
+	if (count >= sizeof(local_buff))
+		return -EFAULT;
+
+	if (copy_from_user(local_buff, buff, count))
+		return -EFAULT;
+
+	local_buff[count] = 0;	/* end of string */
+	sscanf(local_buff, "%x %d", &kgsl_ib_base, &kgsl_ib_size);
+
+	pr_info("%s: base=%08X size=%d\n", __func__, kgsl_ib_base,
+		kgsl_ib_size);
+
+	return count;
+}
+
+static const struct file_operations kgsl_ib_dump_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_ib_dump_read,
+	.write = kgsl_ib_dump_write,
+};
+
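+/*
+ * Read a register directly from the mapped register space, used by the
+ * istore dump below; out-of-range offsets return -ERANGE rather than
+ * triggering a BUG().
+ */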
+static int kgsl_regread_nolock(struct kgsl_device *device,
+	unsigned int offsetwords, unsigned int *value)
+{
+	unsigned int *reg;
+
+	if (offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes) {
+		KGSL_DRV_ERR(device, "invalid offset %d\n", offsetwords);
+		return -ERANGE;
+	}
+
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+				+ (offsetwords << 2));
+	*value = __raw_readl(reg);
+	return 0;
+}
+
+#define KGSL_ISTORE_START 0x5000
+#define KGSL_ISTORE_LENGTH 0x600
+static ssize_t kgsl_istore_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	int i, count = KGSL_ISTORE_LENGTH, remaining, pos = 0, tot = 0;
+	struct kgsl_device *device = file->private_data;
+	const int rowc = 8;
+
+	if (!ppos || !device)
+		return 0;
+
+	remaining = count;
+	for (i = 0; i < count; i += rowc) {
+		unsigned int vals[rowc];
+		int j, ss;
+		int linec = min(remaining, rowc);
+		remaining -= rowc;
+
+		if (pos >= *ppos) {
+			for (j = 0; j < linec; ++j)
+				kgsl_regread_nolock(device,
+					KGSL_ISTORE_START+i+j, vals+j);
+		} else
+			memset(vals, 0, sizeof(vals));
+
+		ss = kgsl_hex_dump("IS: %04x: ", i, (uint8_t *)vals, rowc*4,
+			linec*4, buff);
+		if (ss < 0)
+			return ss;
+
+		if (pos >= *ppos) {
+			if (tot+ss >= buff_count)
+				return tot;
+			tot += ss;
+			buff += ss;
+			*ppos += ss;
+		}
+		pos += ss;
+	}
+
+	return tot;
+}
+
+static const struct file_operations kgsl_istore_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_istore_read,
+	.llseek = default_llseek,
+};
+
+typedef void (*reg_read_init_t)(struct kgsl_device *device);
+typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
+	unsigned int *vals, int linec);
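+/*
+ * Generic debugfs register dump: 'reg_read_init' programs the debug bus
+ * once with the device mutex held, then 'reg_read_fill' collects 'rowc'
+ * values per row, which kgsl_hex_dump() formats into the user buffer.
+ */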
+static ssize_t kgsl_reg_read(struct kgsl_device *device, int count,
+	reg_read_init_t reg_read_init,
+	reg_read_fill_t reg_read_fill, const char *prefix, char __user *buff,
+	loff_t *ppos)
+{
+	int i, remaining;
+	const int rowc = 8;
+
+	if (!ppos || *ppos || !device)
+		return 0;
+
+	mutex_lock(&device->mutex);
+	reg_read_init(device);
+	remaining = count;
+	for (i = 0; i < count; i += rowc) {
+		unsigned int vals[rowc];
+		int ss;
+		int linec = min(remaining, rowc);
+		remaining -= rowc;
+
+		reg_read_fill(device, i, vals, linec);
+		ss = kgsl_hex_dump(prefix, i, (uint8_t *)vals, rowc*4, linec*4,
+			buff);
+		if (ss < 0) {
+			mutex_unlock(&device->mutex);
+			return ss;
+		}
+		buff += ss;
+		*ppos += ss;
+	}
+	mutex_unlock(&device->mutex);
+
+	return *ppos;
+}
+
+
+static void kgsl_sx_reg_read_init(struct kgsl_device *device)
+{
+	kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF);
+	kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+}
+
+static void kgsl_sx_reg_read_fill(struct kgsl_device *device, int i,
+	unsigned int *vals, int linec)
+{
+	int j;
+
+	for (j = 0; j < linec; ++j) {
+		kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1B00 | i);
+		kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j);
+	}
+}
+
+static ssize_t kgsl_sx_debug_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	struct kgsl_device *device = file->private_data;
+	return kgsl_reg_read(device, 0x1B, kgsl_sx_reg_read_init,
+			     kgsl_sx_reg_read_fill, "SX: %02x: ", buff, ppos);
+}
+
+static const struct file_operations kgsl_sx_debug_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_sx_debug_read,
+};
+
+static void kgsl_cp_reg_read_init(struct kgsl_device *device)
+{
+	kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+}
+
+static void kgsl_cp_reg_read_fill(struct kgsl_device *device, int i,
+	unsigned int *vals, int linec)
+{
+	int j;
+
+	for (j = 0; j < linec; ++j) {
+		kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628);
+		kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j);
+		msleep(100);
+	}
+}
+
+static ssize_t kgsl_cp_debug_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	struct kgsl_device *device = file->private_data;
+	return kgsl_reg_read(device, 20, kgsl_cp_reg_read_init,
+		kgsl_cp_reg_read_fill,
+		"CP: %02x: ", buff, ppos);
+}
+
+static const struct file_operations kgsl_cp_debug_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_cp_debug_read,
+};
+
+static void kgsl_mh_reg_read_init(struct kgsl_device *device)
+{
+	kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+}
+
+static void kgsl_mh_reg_read_fill(struct kgsl_device *device, int i,
+	unsigned int *vals, int linec)
+{
+	int j;
+
+	for (j = 0; j < linec; ++j) {
+		kgsl_regwrite(device, REG_MH_DEBUG_CTRL, i+j);
+		kgsl_regread(device, REG_MH_DEBUG_DATA, vals+j);
+	}
+}
+
+static ssize_t kgsl_mh_debug_read(
+	struct file *file,
+	char __user *buff,
+	size_t buff_count,
+	loff_t *ppos)
+{
+	struct kgsl_device *device = file->private_data;
+	return kgsl_reg_read(device, 0x40, kgsl_mh_reg_read_init,
+		kgsl_mh_reg_read_fill,
+		"MH: %02x: ", buff, ppos);
+}
+
+static const struct file_operations kgsl_mh_debug_fops = {
+	.open = kgsl_dbgfs_open,
+	.release = kgsl_dbgfs_release,
+	.read = kgsl_mh_debug_read,
+};
+
+void adreno_debugfs_init(struct kgsl_device *device)
+{
+	if (!device->d_debugfs || IS_ERR(device->d_debugfs))
+		return;
+
+	debugfs_create_file("ib_dump",  0600, device->d_debugfs, device,
+			    &kgsl_ib_dump_fops);
+	debugfs_create_file("istore",   0400, device->d_debugfs, device,
+			    &kgsl_istore_fops);
+	debugfs_create_file("sx_debug", 0400, device->d_debugfs, device,
+			    &kgsl_sx_debug_fops);
+	debugfs_create_file("cp_debug", 0400, device->d_debugfs, device,
+			    &kgsl_cp_debug_fops);
+	debugfs_create_file("mh_debug", 0400, device->d_debugfs, device,
+			    &kgsl_mh_debug_fops);
+	debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
+			    &kgsl_cff_dump_enable_fops);
+
+	/* Create post mortem control files */
+
+	pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
+
+	if (IS_ERR(pm_d_debugfs))
+		return;
+
+	debugfs_create_file("dump",  0600, pm_d_debugfs, device,
+			    &pm_dump_fops);
+	debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device,
+			    &pm_regs_enabled_fops);
+}
diff --git a/drivers/gpu/msm/adreno_debugfs.h b/drivers/gpu/msm/adreno_debugfs.h
new file mode 100644
index 0000000..0356ac6
--- /dev/null
+++ b/drivers/gpu/msm/adreno_debugfs.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_DEBUGFS_H
+#define __ADRENO_DEBUGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+
+void adreno_debugfs_init(struct kgsl_device *device);
+
+extern int kgsl_pm_regs_enabled;
+
+static inline int kgsl_pmregs_enabled(void)
+{
+	return kgsl_pm_regs_enabled;
+}
+
+#else
+static inline void adreno_debugfs_init(struct kgsl_device *device)
+{
+}
+
+static inline int kgsl_pmregs_enabled(void)
+{
+	/* If debugfs is turned off, then always print registers */
+	return 1;
+}
+#endif
+
+#endif /* __ADRENO_DEBUGFS_H */
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
new file mode 100644
index 0000000..4db3966
--- /dev/null
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -0,0 +1,1645 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/slab.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_drawctxt.h"
+
+/*
+ *
+ *  Memory Map for Register, Constant & Instruction Shadow, and Command Buffers
+ *  (34.5KB)
+ *
+ *  +---------------------+------------+-------------+---+---------------------+
+ *  | ALU Constant Shadow | Reg Shadow | C&V Buffers |Tex| Shader Instr Shadow |
+ *  +---------------------+------------+-------------+---+---------------------+
+ *    ________________________________/               \____________________
+ *   /                                                                     |
+ *  +--------------+-----------+------+-----------+------------------------+
+ *  | Restore Regs | Save Regs | Quad | Gmem Save | Gmem Restore | unused  |
+ *  +--------------+-----------+------+-----------+------------------------+
+ *
+ *              8K - ALU Constant Shadow (8K aligned)
+ *              4K - H/W Register Shadow (8K aligned)
+ *              4K - Command and Vertex Buffers
+ *                         - Indirect command buffer : Const/Reg restore
+ *                               - includes Loop & Bool const shadows
+ *                         - Indirect command buffer : Const/Reg save
+ *                         - Quad vertices & texture coordinates
+ *                         - Indirect command buffer : Gmem save
+ *                         - Indirect command buffer : Gmem restore
+ *                         - Unused (padding to 8KB boundary)
+ *             <1K - Texture Constant Shadow (768 bytes) (8K aligned)
+ *       18K - Shader Instruction Shadow
+ *               - 6K vertex (32 byte aligned)
+ *               - 6K pixel  (32 byte aligned)
+ *               - 6K shared (32 byte aligned)
+ *
+ *  Note: Reading constants into a shadow, one at a time using REG_TO_MEM, takes
+ *  3 DWORDS per DWORD transferred, plus 1 DWORD for the shadow, for a total of
+ *  16 bytes per constant.  If the texture constants were transferred this way,
+ *  the Command & Vertex Buffers section would extend past the 16K boundary.
+ *  By moving the texture constant shadow area to start at 16KB boundary, we
+ *  only require approximately 40 bytes more memory, but are able to use the
+ *  LOAD_CONSTANT_CONTEXT shadowing feature for the textures, speeding up
+ *  context switching.
+ *
+ *  [Using LOAD_CONSTANT_CONTEXT shadowing feature for the Loop and/or Bool
+ *  constants would require an additional 8KB each, for alignment.]
+ *
+ */
+
+/* Constants */
+
+#define ALU_CONSTANTS	2048	/* DWORDS */
+#define NUM_REGISTERS	1024	/* DWORDS */
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+#define CMD_BUFFER_LEN	9216	/* DWORDS */
+#else
+#define CMD_BUFFER_LEN	3072	/* DWORDS */
+#endif
+#define TEX_CONSTANTS		(32*6)	/* DWORDS */
+#define BOOL_CONSTANTS		8	/* DWORDS */
+#define LOOP_CONSTANTS		56	/* DWORDS */
+#define SHADER_INSTRUCT_LOG2	9U	/* 2^n == SHADER_INSTRUCTIONS */
+
+#if defined(PM4_IM_STORE)
+/* 96-bit instructions */
+#define SHADER_INSTRUCT		(1<<SHADER_INSTRUCT_LOG2)
+#else
+#define SHADER_INSTRUCT		0
+#endif
+
+/* LOAD_CONSTANT_CONTEXT shadow size */
+#define LCC_SHADOW_SIZE		0x2000	/* 8KB */
+
+#define ALU_SHADOW_SIZE		LCC_SHADOW_SIZE	/* 8KB */
+#define REG_SHADOW_SIZE		0x1000	/* 4KB */
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+#define CMD_BUFFER_SIZE		0x9000	/* 36KB */
+#else
+#define CMD_BUFFER_SIZE		0x3000	/* 12KB */
+#endif
+#define TEX_SHADOW_SIZE		(TEX_CONSTANTS*4)	/* 768 bytes */
+#define SHADER_SHADOW_SIZE	(SHADER_INSTRUCT*12)	/* 6KB */
+
+#define REG_OFFSET		LCC_SHADOW_SIZE
+#define CMD_OFFSET		(REG_OFFSET + REG_SHADOW_SIZE)
+#define TEX_OFFSET		(CMD_OFFSET + CMD_BUFFER_SIZE)
+#define SHADER_OFFSET		((TEX_OFFSET + TEX_SHADOW_SIZE + 32) & ~31)
+
+#define CONTEXT_SIZE		(SHADER_OFFSET + 3 * SHADER_SHADOW_SIZE)
+
+/* temporary work structure */
+struct tmp_ctx {
+	unsigned int *start;	/* Command & Vertex buffer start */
+	unsigned int *cmd;	/* Next available dword in C&V buffer */
+
+	/* address of buffers, needed when creating IB1 command buffers. */
+	uint32_t bool_shadow;	/* bool constants */
+	uint32_t loop_shadow;	/* loop constants */
+
+#if defined(PM4_IM_STORE)
+	uint32_t shader_shared;	/* shared shader instruction shadow */
+	uint32_t shader_vertex;	/* vertex shader instruction shadow */
+	uint32_t shader_pixel;	/* pixel shader instruction shadow */
+#endif
+
+	/* Addresses in command buffer where separately handled registers
+	 * are saved
+	 */
+	uint32_t reg_values[33];
+	uint32_t chicken_restore;
+
+	uint32_t gmem_base;	/* Base gpu address of GMEM */
+
+};
+
+/* Helper function to calculate IEEE754 single precision float values
+*  without FPU
+*/
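+/* Example: uint2float(12) -> exp = ilog2(12) = 3,
+ * frac = (12 & ~(1 << 3)) << 20 = 0x00400000,
+ * biased exp = (3 + 127) << 23 = 0x41000000,
+ * result = 0x41400000 == 12.0f.
+ */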
+unsigned int uint2float(unsigned int uintval)
+{
+	unsigned int exp, frac = 0;
+
+	if (uintval == 0)
+		return 0;
+
+	exp = ilog2(uintval);
+
+	/* Calculate fraction */
+	if (23 > exp)
+		frac = (uintval & (~(1 << exp))) << (23 - exp);
+
+	/* Exp is biased by 127 and shifted 23 bits */
+	exp = (exp + 127) << 23;
+
+	return exp | frac;
+}
+
+/* context save (gmem -> sys) */
+
+/* pre-compiled vertex shader program
+*
+*  attribute vec4  P;
+*  void main(void)
+*  {
+*    gl_Position = P;
+*  }
+*/
+#define GMEM2SYS_VTX_PGM_LEN	0x12
+
+static unsigned int gmem2sys_vtx_pgm[GMEM2SYS_VTX_PGM_LEN] = {
+	0x00011003, 0x00001000, 0xc2000000,
+	0x00001004, 0x00001000, 0xc4000000,
+	0x00001005, 0x00002000, 0x00000000,
+	0x1cb81000, 0x00398a88, 0x00000003,
+	0x140f803e, 0x00000000, 0xe2010100,
+	0x14000000, 0x00000000, 0xe2000000
+};
+
+/* pre-compiled fragment shader program
+*
+*  precision highp float;
+*  uniform   vec4  clear_color;
+*  void main(void)
+*  {
+*     gl_FragColor = clear_color;
+*  }
+*/
+
+#define GMEM2SYS_FRAG_PGM_LEN	0x0c
+
+static unsigned int gmem2sys_frag_pgm[GMEM2SYS_FRAG_PGM_LEN] = {
+	0x00000000, 0x1002c400, 0x10000000,
+	0x00001003, 0x00002000, 0x00000000,
+	0x140f8000, 0x00000000, 0x22000000,
+	0x14000000, 0x00000000, 0xe2000000
+};
+
+/* context restore (sys -> gmem) */
+/* pre-compiled vertex shader program
+*
+*  attribute vec4 position;
+*  attribute vec4 texcoord;
+*  varying   vec4 texcoord0;
+*  void main()
+*  {
+*     gl_Position = position;
+*     texcoord0 = texcoord;
+*  }
+*/
+
+#define SYS2GMEM_VTX_PGM_LEN	0x18
+
+static unsigned int sys2gmem_vtx_pgm[SYS2GMEM_VTX_PGM_LEN] = {
+	0x00052003, 0x00001000, 0xc2000000, 0x00001005,
+	0x00001000, 0xc4000000, 0x00001006, 0x10071000,
+	0x20000000, 0x18981000, 0x0039ba88, 0x00000003,
+	0x12982000, 0x40257b08, 0x00000002, 0x140f803e,
+	0x00000000, 0xe2010100, 0x140f8000, 0x00000000,
+	0xe2020200, 0x14000000, 0x00000000, 0xe2000000
+};
+
+/* pre-compiled fragment shader program
+*
+*  precision mediump   float;
+*  uniform   sampler2D tex0;
+*  varying   vec4      texcoord0;
+*  void main()
+*  {
+*     gl_FragColor = texture2D(tex0, texcoord0.xy);
+*  }
+*/
+
+#define SYS2GMEM_FRAG_PGM_LEN	0x0f
+
+static unsigned int sys2gmem_frag_pgm[SYS2GMEM_FRAG_PGM_LEN] = {
+	0x00011002, 0x00001000, 0xc4000000, 0x00001003,
+	0x10041000, 0x20000000, 0x10000001, 0x1ffff688,
+	0x00000002, 0x140f8000, 0x00000000, 0xe2000000,
+	0x14000000, 0x00000000, 0xe2000000
+};
+
+/* shader texture constants (sysmem -> gmem)  */
+#define SYS2GMEM_TEX_CONST_LEN	6
+
+static unsigned int sys2gmem_tex_const[SYS2GMEM_TEX_CONST_LEN] = {
+	/* Texture, FormatXYZW=Unsigned, ClampXYZ=Wrap/Repeat,
+	 * RFMode=ZeroClamp-1, Dim=1:2d
+	 */
+	0x00000002,		/* Pitch = TBD */
+
+	/* Format=6:8888_WZYX, EndianSwap=0:None, ReqSize=0:256bit, DimHi=0,
+	 * NearestClamp=1:OGL Mode
+	 */
+	0x00000800,		/* Address[31:12] = TBD */
+
+	/* Width, Height, EndianSwap=0:None */
+	0,			/* Width & Height = TBD */
+
+	/* NumFormat=0:RF, DstSelXYZW=XYZW, ExpAdj=0, MagFilt=MinFilt=0:Point,
+	 * Mip=2:BaseMap
+	 */
+	0 << 1 | 1 << 4 | 2 << 7 | 3 << 10 | 2 << 23,
+
+	/* VolMag=VolMin=0:Point, MinMipLvl=0, MaxMipLvl=1, LodBiasH=V=0,
+	 * Dim3d=0
+	 */
+	0,
+
+	/* BorderColor=0:ABGRBlack, ForceBC=0:disable, TriJuice=0, Aniso=0,
+	 * Dim=1:2d, MipPacking=0
+	 */
+	1 << 9			/* Mip Address[31:12] = TBD */
+};
+
+/* quad for copying GMEM to context shadow */
+#define QUAD_LEN				12
+
+static unsigned int gmem_copy_quad[QUAD_LEN] = {
+	0x00000000, 0x00000000, 0x3f800000,
+	0x00000000, 0x00000000, 0x3f800000,
+	0x00000000, 0x00000000, 0x3f800000,
+	0x00000000, 0x00000000, 0x3f800000
+};
+
+#define TEXCOORD_LEN			8
+
+static unsigned int gmem_copy_texcoord[TEXCOORD_LEN] = {
+	0x00000000, 0x3f800000,
+	0x3f800000, 0x3f800000,
+	0x00000000, 0x00000000,
+	0x3f800000, 0x00000000
+};
+
+#define NUM_COLOR_FORMATS   13
+
+static enum SURFACEFORMAT surface_format_table[NUM_COLOR_FORMATS] = {
+	FMT_4_4_4_4,		/* COLORX_4_4_4_4 */
+	FMT_1_5_5_5,		/* COLORX_1_5_5_5 */
+	FMT_5_6_5,		/* COLORX_5_6_5 */
+	FMT_8,			/* COLORX_8 */
+	FMT_8_8,		/* COLORX_8_8 */
+	FMT_8_8_8_8,		/* COLORX_8_8_8_8 */
+	FMT_8_8_8_8,		/* COLORX_S8_8_8_8 */
+	FMT_16_FLOAT,		/* COLORX_16_FLOAT */
+	FMT_16_16_FLOAT,	/* COLORX_16_16_FLOAT */
+	FMT_16_16_16_16_FLOAT,	/* COLORX_16_16_16_16_FLOAT */
+	FMT_32_FLOAT,		/* COLORX_32_FLOAT */
+	FMT_32_32_FLOAT,	/* COLORX_32_32_FLOAT */
+	FMT_32_32_32_32_FLOAT,	/* COLORX_32_32_32_32_FLOAT */
+};
+
+static unsigned int format2bytesperpixel[NUM_COLOR_FORMATS] = {
+	2,			/* COLORX_4_4_4_4 */
+	2,			/* COLORX_1_5_5_5 */
+	2,			/* COLORX_5_6_5 */
+	1,			/* COLORX_8 */
+	2,			/* COLORX_8_8 */
+	4,			/* COLORX_8_8_8_8 */
+	4,			/* COLORX_S8_8_8_8 */
+	2,			/* COLORX_16_FLOAT */
+	4,			/* COLORX_16_16_FLOAT */
+	8,			/* COLORX_16_16_16_16_FLOAT */
+	4,			/* COLORX_32_FLOAT */
+	8,			/* COLORX_32_32_FLOAT */
+	16,			/* COLORX_32_32_32_32_FLOAT */
+};
+
+/* shader linkage info */
+#define SHADER_CONST_ADDR	(11 * 6 + 3)
+
+/* register offset encoding used in PM4 constant/register packets */
+#define PM4_REG(reg)		((0x4 << 16) | (GSL_HAL_SUBBLOCK_OFFSET(reg)))
+
+/* functions */
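+/* Pick the smallest roughly-square shadow surface that covers the GMEM
+ * contents.  Example (illustrative only): a 256KB GMEM is 65536 words,
+ * which the loop below grows to a 256x256 surface
+ * (256 * 256 * 4 bytes = 256KB).
+ */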
+static void config_gmemsize(struct gmem_shadow_t *shadow, int gmem_size)
+{
+	int w = 64, h = 64;	/* 16KB surface, minimum */
+
+	shadow->format = COLORX_8_8_8_8;
+	/* convert from bytes to 32-bit words */
+	gmem_size = (gmem_size + 3) / 4;
+
+	/* find the right surface size, close to a square. */
+	while (w * h < gmem_size)
+		if (w < h)
+			w *= 2;
+		else
+			h *= 2;
+
+	shadow->width = w;
+	shadow->pitch = w;
+	shadow->height = h;
+	shadow->gmem_pitch = shadow->pitch;
+
+	shadow->size = shadow->pitch * shadow->height * 4;
+}
+
+static unsigned int gpuaddr(unsigned int *cmd, struct kgsl_memdesc *memdesc)
+{
+	return memdesc->gpuaddr + ((char *)cmd - (char *)memdesc->hostptr);
+}
+
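+/* Build the three-dword IB1 descriptor (packet header, GPU address of
+ * 'start', length in dwords) pointing at the command sequence between
+ * 'start' and 'end'.
+ */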
+static void
+create_ib1(struct adreno_context *drawctxt, unsigned int *cmd,
+	   unsigned int *start, unsigned int *end)
+{
+	cmd[0] = PM4_HDR_INDIRECT_BUFFER_PFD;
+	cmd[1] = gpuaddr(start, &drawctxt->gpustate);
+	cmd[2] = end - start;
+}
+
+static unsigned int *program_shader(unsigned int *cmds, int vtxfrag,
+				    unsigned int *shader_pgm, int dwords)
+{
+	/* load the patched vertex shader stream */
+	*cmds++ = pm4_type3_packet(PM4_IM_LOAD_IMMEDIATE, 2 + dwords);
+	/* 0=vertex shader, 1=fragment shader */
+	*cmds++ = vtxfrag;
+	/* instruction start & size (in 32-bit words) */
+	*cmds++ = ((0 << 16) | dwords);
+
+	memcpy(cmds, shader_pgm, dwords << 2);
+	cmds += dwords;
+
+	return cmds;
+}
+
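+/* Emit one PM4_REG_TO_MEM packet per dword, copying 'dwords' consecutive
+ * registers starting at 'src' into GPU memory starting at 'dst'.
+ */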
+static unsigned int *reg_to_mem(unsigned int *cmds, uint32_t dst,
+				uint32_t src, int dwords)
+{
+	while (dwords-- > 0) {
+		*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+		*cmds++ = src++;
+		*cmds++ = dst;
+		dst += 4;
+	}
+
+	return cmds;
+}
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+
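+/* Emit PM4_REG_TO_MEM packets saving every register in [start, end] to its
+ * slot in the register shadow; registers are offset from 0x2000 and
+ * shadowed four bytes apart.
+ */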
+static void build_reg_to_mem_range(unsigned int start, unsigned int end,
+				   unsigned int **cmd,
+				   struct adreno_context *drawctxt)
+{
+	unsigned int i = start;
+
+	for (i = start; i <= end; i++) {
+		*(*cmd)++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+		*(*cmd)++ = i;
+		*(*cmd)++ =
+		    ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) +
+		    (i - 0x2000) * 4;
+	}
+}
+
+#endif
+
+/* chicken restore */
+static unsigned int *build_chicken_restore_cmds(
+					struct adreno_context *drawctxt,
+					struct tmp_ctx *ctx)
+{
+	unsigned int *start = ctx->cmd;
+	unsigned int *cmds = start;
+
+	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmds++ = 0;
+
+	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+	ctx->chicken_restore = gpuaddr(cmds, &drawctxt->gpustate);
+	*cmds++ = 0x00000000;
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->chicken_restore, start, cmds);
+
+	return cmds;
+}
+
+/****************************************************************************/
+/* context save                                                             */
+/****************************************************************************/
+
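+/* Register ranges saved and restored on context switch, listed as
+ * inclusive {start, end} pairs.
+ */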
+static const unsigned int register_ranges_a20x[] = {
+	REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
+	REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+	REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+	REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+	REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+	REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+	REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+	REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+	REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+	REG_VGT_MAX_VTX_INDX, REG_RB_FOG_COLOR,
+	REG_RB_DEPTHCONTROL, REG_RB_MODECONTROL,
+	REG_PA_SU_POINT_SIZE, REG_PA_SC_LINE_STIPPLE,
+	REG_PA_SC_VIZ_QUERY, REG_PA_SC_VIZ_QUERY,
+	REG_VGT_VERTEX_REUSE_BLOCK_CNTL, REG_RB_DEPTH_CLEAR
+};
+
+static const unsigned int register_ranges_a22x[] = {
+	REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
+	REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+	REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+	REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+	REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+	REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+	REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+	REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+	REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+	/* all the below registers are specific to Leia */
+	REG_LEIA_PC_MAX_VTX_INDX, REG_LEIA_PC_INDX_OFFSET,
+	REG_RB_COLOR_MASK, REG_RB_FOG_COLOR,
+	REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL,
+	REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL,
+	REG_RB_MODECONTROL, REG_RB_SAMPLE_POS,
+	REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL,
+	REG_LEIA_PC_VERTEX_REUSE_BLOCK_CNTL,
+	REG_LEIA_PC_VERTEX_REUSE_BLOCK_CNTL,
+	REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR
+};
+
+
+/* save h/w regs, alu constants, texture constants, etc. ...
+*  requires: bool_shadow_gpuaddr, loop_shadow_gpuaddr
+*/
+static void build_regsave_cmds(struct adreno_device *adreno_dev,
+			       struct adreno_context *drawctxt,
+			       struct tmp_ctx *ctx)
+{
+	unsigned int *start = ctx->cmd;
+	unsigned int *cmd = start;
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	/* Make sure the HW context has the correct register values
+	 * before reading them. */
+	*cmd++ = pm4_type3_packet(PM4_CONTEXT_UPDATE, 1);
+	*cmd++ = 0;
+
+	{
+		unsigned int i = 0;
+		unsigned int reg_array_size = 0;
+		const unsigned int *ptr_register_ranges;
+
+		/* Based on chip id choose the register ranges */
+		if (adreno_is_a220(adreno_dev)) {
+			ptr_register_ranges = register_ranges_a22x;
+			reg_array_size = ARRAY_SIZE(register_ranges_a22x);
+		} else {
+			ptr_register_ranges = register_ranges_a20x;
+			reg_array_size = ARRAY_SIZE(register_ranges_a20x);
+		}
+
+
+		/* Write HW registers into shadow */
+		for (i = 0; i < (reg_array_size/2) ; i++) {
+			build_reg_to_mem_range(ptr_register_ranges[i*2],
+					ptr_register_ranges[i*2+1],
+					&cmd, drawctxt);
+		}
+	}
+
+	/* Copy ALU constants */
+	cmd =
+	    reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000,
+		       REG_SQ_CONSTANT_0, ALU_CONSTANTS);
+
+	/* Copy Tex constants */
+	cmd =
+	    reg_to_mem(cmd,
+		       (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000,
+		       REG_SQ_FETCH_0, TEX_CONSTANTS);
+#else
+
+	/* Insert a wait for idle packet before reading the registers.
+	 * This is to fix a hang/reset seen during stress testing.  In this
+	 * hang, CP encountered a timeout reading SQ's boolean constant
+	 * register. There is logic in the HW that blocks reading of this
+	 * register when the SQ block is not idle, which we believe is
+	 * contributing to the hang.*/
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	/* H/w registers are already shadowed; just need to disable shadowing
+	 * to prevent corruption.
+	 */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+	*cmd++ = 4 << 16;	/* regs, start=0 */
+	*cmd++ = 0x0;		/* count = 0 */
+
+	/* ALU constants are already shadowed; just need to disable shadowing
+	 * to prevent corruption.
+	 */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+	*cmd++ = 0 << 16;	/* ALU, start=0 */
+	*cmd++ = 0x0;		/* count = 0 */
+
+	/* Tex constants are already shadowed; just need to disable shadowing
+	 *  to prevent corruption.
+	 */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+	*cmd++ = 1 << 16;	/* Tex, start=0 */
+	*cmd++ = 0x0;		/* count = 0 */
+#endif
+
+	/* Need to handle some of the registers separately */
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_SQ_GPR_MANAGEMENT;
+	*cmd++ = ctx->reg_values[0];
+
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_TP0_CHICKEN;
+	*cmd++ = ctx->reg_values[1];
+
+	if (adreno_is_a220(adreno_dev)) {
+		unsigned int i;
+		unsigned int j = 2;
+		for (i = REG_LEIA_VSC_BIN_SIZE; i <=
+				REG_LEIA_VSC_PIPE_DATA_LENGTH_7; i++) {
+			*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+			*cmd++ = i;
+			*cmd++ = ctx->reg_values[j];
+			j++;
+		}
+	}
+
+	/* Copy Boolean constants */
+	cmd = reg_to_mem(cmd, ctx->bool_shadow, REG_SQ_CF_BOOLEANS,
+			 BOOL_CONSTANTS);
+
+	/* Copy Loop constants */
+	cmd = reg_to_mem(cmd, ctx->loop_shadow, REG_SQ_CF_LOOP, LOOP_CONSTANTS);
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->reg_save, start, cmd);
+
+	ctx->cmd = cmd;
+}
+
+/* copy colour, depth, & stencil buffers from graphics memory to system memory */
+static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
+					 struct adreno_context *drawctxt,
+					 struct tmp_ctx *ctx,
+					 struct gmem_shadow_t *shadow)
+{
+	unsigned int *cmds = shadow->gmem_save_commands;
+	unsigned int *start = cmds;
+	/* Calculate the new offset based on the adjusted base */
+	unsigned int bytesperpixel = format2bytesperpixel[shadow->format];
+	unsigned int addr = shadow->gmemshadow.gpuaddr;
+	unsigned int offset = (addr - (addr & 0xfffff000)) / bytesperpixel;
+
+	/* Store TP0_CHICKEN register */
+	*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmds++ = REG_TP0_CHICKEN;
+	if (ctx)
+		*cmds++ = ctx->chicken_restore;
+	else
+		cmds++;
+
+	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmds++ = 0;
+
+	/* Set TP0_CHICKEN to zero */
+	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+	*cmds++ = 0x00000000;
+
+	/* Set PA_SC_AA_CONFIG to 0 */
+	*cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+	*cmds++ = 0x00000000;
+
+	/* program shader */
+
+	/* load shader vtx constants ... 5 dwords */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+	*cmds++ = (0x1 << 16) | SHADER_CONST_ADDR;
+	*cmds++ = 0;
+	/* valid(?) vtx constant flag & addr */
+	*cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
+	/* limit = 12 dwords */
+	*cmds++ = 0x00000030;
+
+	/* Invalidate L2 cache to make sure vertices are updated */
+	*cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
+	*cmds++ = 0x1;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+	*cmds++ = PM4_REG(REG_VGT_MAX_VTX_INDX);
+	*cmds++ = 0x00ffffff;	/* REG_VGT_MAX_VTX_INDX */
+	*cmds++ = 0x0;		/* REG_VGT_MIN_VTX_INDX */
+	*cmds++ = 0x00000000;	/* REG_VGT_INDX_OFFSET */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SC_AA_MASK);
+	*cmds++ = 0x0000ffff;	/* REG_PA_SC_AA_MASK */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLORCONTROL);
+	*cmds++ = 0x00000c20;
+
+	/* load the patched vertex shader stream */
+	cmds = program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN);
+
+	/* Load the patched fragment shader stream */
+	cmds =
+	    program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN);
+
+	/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_SQ_PROGRAM_CNTL);
+	if (adreno_is_a220(adreno_dev))
+		*cmds++ = 0x10018001;
+	else
+		*cmds++ = 0x10010001;
+	*cmds++ = 0x00000008;
+
+	/* resolve */
+
+	/* PA_CL_VTE_CNTL */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_CL_VTE_CNTL);
+	/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
+	*cmds++ = 0x00000b00;
+
+	/* program surface info */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_RB_SURFACE_INFO);
+	*cmds++ = shadow->gmem_pitch;	/* pitch, MSAA = 1 */
+
+	/* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
+	 *                Base=gmem_base
+	 */
+	/* gmem base assumed 4K aligned. */
+	if (ctx) {
+		BUG_ON(ctx->gmem_base & 0xFFF);
+		*cmds++ = (shadow->format <<
+			   RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->gmem_base;
+	} else {
+		unsigned int temp = *cmds;
+		*cmds++ = (temp & ~RB_COLOR_INFO__COLOR_FORMAT_MASK) |
+			(shadow->format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT);
+	}
+
+	/* disable Z */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_DEPTHCONTROL);
+	if (adreno_is_a220(adreno_dev))
+		*cmds++ = 0x08;
+	else
+		*cmds++ = 0;
+
+	/* set REG_PA_SU_SC_MODE_CNTL
+	 *              Front_ptype = draw triangles
+	 *              Back_ptype = draw triangles
+	 *              Provoking vertex = last
+	 */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SU_SC_MODE_CNTL);
+	*cmds++ = 0x00080240;
+
+	/* Use maximum scissor values -- quad vertices already have the
+	 * correct bounds */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
+	*cmds++ = (0 << 16) | 0;
+	*cmds++ = (0x1fff << 16) | (0x1fff);
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
+	*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
+	*cmds++ = (0x1fff << 16) | (0x1fff);
+
+	/* load the viewport so that z scale = clear depth and
+	 *  z offset = 0.0f
+	 */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_CL_VPORT_ZSCALE);
+	*cmds++ = 0xbf800000;	/* -1.0f */
+	*cmds++ = 0x0;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLOR_MASK);
+	*cmds++ = 0x0000000f;	/* R = G = B = 1:enabled */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLOR_DEST_MASK);
+	*cmds++ = 0xffffffff;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_SQ_WRAPPING_0);
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+
+	/* load the stencil ref value
+	 * $AAM - do this later
+	 */
+
+	/* load the COPY state */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 6);
+	*cmds++ = PM4_REG(REG_RB_COPY_CONTROL);
+	*cmds++ = 0;		/* RB_COPY_CONTROL */
+	*cmds++ = addr & 0xfffff000;	/* RB_COPY_DEST_BASE */
+	*cmds++ = shadow->pitch >> 5;	/* RB_COPY_DEST_PITCH */
+
+	/* Endian=none, Linear, Format=RGBA8888,Swap=0,!Dither,
+	 *  MaskWrite:R=G=B=A=1
+	 */
+	*cmds++ = 0x0003c008 |
+	    (shadow->format << RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT);
+	/* Make sure we stay in offsetx field. */
+	BUG_ON(offset & 0xfffff000);
+	*cmds++ = offset;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_MODECONTROL);
+	*cmds++ = 0x6;		/* EDRAM copy */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_CL_CLIP_CNTL);
+	*cmds++ = 0x00010000;
+
+	if (adreno_is_a220(adreno_dev)) {
+		*cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
+		*cmds++ = 0;
+
+		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+		*cmds++ = PM4_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
+		*cmds++ = 0x0000000;
+
+		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
+		*cmds++ = 0;           /* viz query info. */
+		/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore */
+		*cmds++ = 0x00004088;
+		*cmds++ = 3;	       /* NumIndices=3 */
+	} else {
+		/* queue the draw packet */
+		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
+		*cmds++ = 0;		/* viz query info. */
+		/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
+		*cmds++ = 0x00030088;
+	}
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, shadow->gmem_save, start, cmds);
+
+	return cmds;
+}
+
+/* context restore */
+
+/* copy colour, depth, & stencil buffers from system memory to graphics memory */
+static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
+					 struct adreno_context *drawctxt,
+					 struct tmp_ctx *ctx,
+					 struct gmem_shadow_t *shadow)
+{
+	unsigned int *cmds = shadow->gmem_restore_commands;
+	unsigned int *start = cmds;
+
+	/* Store TP0_CHICKEN register */
+	*cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmds++ = REG_TP0_CHICKEN;
+	if (ctx)
+		*cmds++ = ctx->chicken_restore;
+	else
+		cmds++;
+
+	*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmds++ = 0;
+
+	/* Set TP0_CHICKEN to zero */
+	*cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+	*cmds++ = 0x00000000;
+
+	/* Set PA_SC_AA_CONFIG to 0 */
+	*cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+	*cmds++ = 0x00000000;
+	/* shader constants */
+
+	/* vertex buffer constants */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 7);
+
+	*cmds++ = (0x1 << 16) | (9 * 6);
+	/* valid(?) vtx constant flag & addr */
+	*cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
+	/* limit = 12 dwords */
+	*cmds++ = 0x00000030;
+	/* valid(?) vtx constant flag & addr */
+	*cmds++ = shadow->quad_texcoords.gpuaddr | 0x3;
+	/* limit = 8 dwords */
+	*cmds++ = 0x00000020;
+	*cmds++ = 0;
+	*cmds++ = 0;
+
+	/* Invalidate L2 cache to make sure vertices are updated */
+	*cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
+	*cmds++ = 0x1;
+
+	cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN);
+
+	/* Load the patched fragment shader stream */
+	cmds =
+	    program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN);
+
+	/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_SQ_PROGRAM_CNTL);
+	*cmds++ = 0x10030002;
+	*cmds++ = 0x00000008;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SC_AA_MASK);
+	*cmds++ = 0x0000ffff;	/* REG_PA_SC_AA_MASK */
+
+	if (!adreno_is_a220(adreno_dev)) {
+		/* PA_SC_VIZ_QUERY */
+		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+		*cmds++ = PM4_REG(REG_PA_SC_VIZ_QUERY);
+		*cmds++ = 0x0;		/*REG_PA_SC_VIZ_QUERY */
+	}
+
+	/* RB_COLORCONTROL */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLORCONTROL);
+	*cmds++ = 0x00000c20;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+	*cmds++ = PM4_REG(REG_VGT_MAX_VTX_INDX);
+	*cmds++ = 0x00ffffff;	/* mmVGT_MAX_VTX_INDX */
+	*cmds++ = 0x0;		/* mmVGT_MIN_VTX_INDX */
+	*cmds++ = 0x00000000;	/* mmVGT_INDX_OFFSET */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_VGT_VERTEX_REUSE_BLOCK_CNTL);
+	*cmds++ = 0x00000002;	/* mmVGT_VERTEX_REUSE_BLOCK_CNTL */
+	*cmds++ = 0x00000002;	/* mmVGT_OUT_DEALLOC_CNTL */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_SQ_INTERPOLATOR_CNTL);
+	*cmds++ = 0xffffffff;	/* mmSQ_INTERPOLATOR_CNTL */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SC_AA_CONFIG);
+	*cmds++ = 0x00000000;	/* REG_PA_SC_AA_CONFIG */
+
+	/* set REG_PA_SU_SC_MODE_CNTL
+	 * Front_ptype = draw triangles
+	 * Back_ptype = draw triangles
+	 * Provoking vertex = last
+	 */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_SU_SC_MODE_CNTL);
+	*cmds++ = 0x00080240;
+
+	/* texture constants */
+	*cmds++ =
+	    pm4_type3_packet(PM4_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
+	*cmds++ = (0x1 << 16) | (0 * 6);
+	memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN << 2);
+	cmds[0] |= (shadow->pitch >> 5) << 22;
+	cmds[1] |=
+	    shadow->gmemshadow.gpuaddr | surface_format_table[shadow->format];
+	cmds[2] |= (shadow->width - 1) | (shadow->height - 1) << 13;
+	cmds += SYS2GMEM_TEX_CONST_LEN;
+
+	/* program surface info */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_RB_SURFACE_INFO);
+	*cmds++ = shadow->gmem_pitch;	/* pitch, MSAA = 1 */
+
+	/* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
+	 *                Base=gmem_base
+	 */
+	if (ctx)
+		*cmds++ = (shadow->format <<
+			   RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | ctx->gmem_base;
+	else {
+		unsigned int temp = *cmds;
+		*cmds++ = (temp & ~RB_COLOR_INFO__COLOR_FORMAT_MASK) |
+			(shadow->format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT);
+	}
+
+	/* RB_DEPTHCONTROL */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_DEPTHCONTROL);
+
+	if (adreno_is_a220(adreno_dev))
+		*cmds++ = 8;		/* disable Z */
+	else
+		*cmds++ = 0;		/* disable Z */
+
+	/* Use maximum scissor values -- quad vertices already
+	 * have the correct bounds */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
+	*cmds++ = (0 << 16) | 0;
+	*cmds++ = ((0x1fff) << 16) | 0x1fff;
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
+	*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
+	*cmds++ = ((0x1fff) << 16) | 0x1fff;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_CL_VTE_CNTL);
+	/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
+	*cmds++ = 0x00000b00;
+
+	/*load the viewport so that z scale = clear depth and z offset = 0.0f */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_PA_CL_VPORT_ZSCALE);
+	*cmds++ = 0xbf800000;
+	*cmds++ = 0x0;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLOR_MASK);
+	*cmds++ = 0x0000000f;	/* R = G = B = 1:enabled */
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_COLOR_DEST_MASK);
+	*cmds++ = 0xffffffff;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+	*cmds++ = PM4_REG(REG_SQ_WRAPPING_0);
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+
+	/* load the stencil ref value
+	 *  $AAM - do this later
+	 */
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_RB_MODECONTROL);
+	/* draw pixels with color and depth/stencil component */
+	*cmds++ = 0x4;
+
+	*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+	*cmds++ = PM4_REG(REG_PA_CL_CLIP_CNTL);
+	*cmds++ = 0x00010000;
+
+	if (adreno_is_a220(adreno_dev)) {
+		*cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
+		*cmds++ = 0;
+
+		*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+		*cmds++ = PM4_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
+		*cmds++ = 0x0000000;
+
+		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
+		*cmds++ = 0;           /* viz query info. */
+		/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore */
+		*cmds++ = 0x00004088;
+		*cmds++ = 3;	       /* NumIndices=3 */
+	} else {
+		/* queue the draw packet */
+		*cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
+		*cmds++ = 0;		/* viz query info. */
+		/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
+		*cmds++ = 0x00030088;
+	}
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
+
+	return cmds;
+}
+
+/* restore h/w regs, alu constants, texture constants, etc. ... */
+static unsigned *reg_range(unsigned int *cmd, unsigned int start,
+			   unsigned int end)
+{
+	*cmd++ = PM4_REG(start);	/* h/w regs, start addr */
+	*cmd++ = end - start + 1;	/* count */
+	return cmd;
+}
+
+static void build_regrestore_cmds(struct adreno_device *adreno_dev,
+				  struct adreno_context *drawctxt,
+				  struct tmp_ctx *ctx)
+{
+	unsigned int *start = ctx->cmd;
+	unsigned int *cmd = start;
+
+	unsigned int i = 0;
+	unsigned int reg_array_size = 0;
+	const unsigned int *ptr_register_ranges;
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	/* H/W Registers */
+	/* deferred pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, ???); */
+	cmd++;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	/* Force mismatch */
+	*cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
+#else
+	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+#endif
+
+	/* Based on chip id choose the registers ranges*/
+	if (adreno_is_a220(adreno_dev)) {
+		ptr_register_ranges = register_ranges_a22x;
+		reg_array_size = ARRAY_SIZE(register_ranges_a22x);
+	} else {
+		ptr_register_ranges = register_ranges_a20x;
+		reg_array_size = ARRAY_SIZE(register_ranges_a20x);
+	}
+
+
+	for (i = 0; i < (reg_array_size/2); i++) {
+		cmd = reg_range(cmd, ptr_register_ranges[i*2],
+				ptr_register_ranges[i*2+1]);
+	}
+
+	/* Now we know how many register blocks we have, we can compute command
+	 * length
+	 */
+	start[2] =
+	    pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
+	/* Enable shadowing for the entire register block. */
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	start[4] |= (0 << 24) | (4 << 16);	/* Disable shadowing. */
+#else
+	start[4] |= (1 << 24) | (4 << 16);
+#endif
+
+	/* Need to handle some of the registers separately */
+	*cmd++ = pm4_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
+	ctx->reg_values[0] = gpuaddr(cmd, &drawctxt->gpustate);
+	*cmd++ = 0x00040400;
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+	*cmd++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+	ctx->reg_values[1] = gpuaddr(cmd, &drawctxt->gpustate);
+	*cmd++ = 0x00000000;
+
+	if (adreno_is_a220(adreno_dev)) {
+		unsigned int i;
+		unsigned int j = 2;
+		for (i = REG_LEIA_VSC_BIN_SIZE; i <=
+				REG_LEIA_VSC_PIPE_DATA_LENGTH_7; i++) {
+			*cmd++ = pm4_type0_packet(i, 1);
+			ctx->reg_values[j] = gpuaddr(cmd, &drawctxt->gpustate);
+			*cmd++ = 0x00000000;
+			j++;
+		}
+	}
+
+	/* ALU Constants */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	*cmd++ = (0 << 24) | (0 << 16) | 0;	/* Disable shadowing */
+#else
+	*cmd++ = (1 << 24) | (0 << 16) | 0;
+#endif
+	*cmd++ = ALU_CONSTANTS;
+
+	/* Texture Constants */
+	*cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	/* Disable shadowing */
+	*cmd++ = (0 << 24) | (1 << 16) | 0;
+#else
+	*cmd++ = (1 << 24) | (1 << 16) | 0;
+#endif
+	*cmd++ = TEX_CONSTANTS;
+
+	/* Boolean Constants */
+	*cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + BOOL_CONSTANTS);
+	*cmd++ = (2 << 16) | 0;
+
+	/* the next BOOL_CONSTANTS dwords are the shadow area for
+	 * boolean constants.
+	 */
+	ctx->bool_shadow = gpuaddr(cmd, &drawctxt->gpustate);
+	cmd += BOOL_CONSTANTS;
+
+	/* Loop Constants */
+	*cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + LOOP_CONSTANTS);
+	*cmd++ = (3 << 16) | 0;
+
+	/* the next LOOP_CONSTANTS dwords are the shadow area for
+	 * loop constants.
+	 */
+	ctx->loop_shadow = gpuaddr(cmd, &drawctxt->gpustate);
+	cmd += LOOP_CONSTANTS;
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
+
+	ctx->cmd = cmd;
+}
+
+/* quad for saving/restoring gmem */
+static void set_gmem_copy_quad(struct gmem_shadow_t *shadow)
+{
+	/* set vertex buffer values */
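+	/* corners of a rectangle covering the full width x height surface */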
+	gmem_copy_quad[1] = uint2float(shadow->height);
+	gmem_copy_quad[3] = uint2float(shadow->width);
+	gmem_copy_quad[4] = uint2float(shadow->height);
+	gmem_copy_quad[9] = uint2float(shadow->width);
+
+	gmem_copy_quad[0] = uint2float(0);
+	gmem_copy_quad[6] = uint2float(0);
+	gmem_copy_quad[7] = uint2float(0);
+	gmem_copy_quad[10] = uint2float(0);
+
+	memcpy(shadow->quad_vertices.hostptr, gmem_copy_quad, QUAD_LEN << 2);
+
+	memcpy(shadow->quad_texcoords.hostptr, gmem_copy_texcoord,
+	       TEXCOORD_LEN << 2);
+}
+
+/* quad for saving/restoring gmem */
+static void build_quad_vtxbuff(struct adreno_context *drawctxt,
+		       struct tmp_ctx *ctx, struct gmem_shadow_t *shadow)
+{
+	unsigned int *cmd = ctx->cmd;
+
+	/* quad vertex buffer location (in GPU space) */
+	shadow->quad_vertices.hostptr = cmd;
+	shadow->quad_vertices.gpuaddr = gpuaddr(cmd, &drawctxt->gpustate);
+
+	cmd += QUAD_LEN;
+
+	/* tex coord buffer location (in GPU space) */
+	shadow->quad_texcoords.hostptr = cmd;
+	shadow->quad_texcoords.gpuaddr = gpuaddr(cmd, &drawctxt->gpustate);
+
+	cmd += TEXCOORD_LEN;
+
+	set_gmem_copy_quad(shadow);
+
+	ctx->cmd = cmd;
+}
+
+static void
+build_shader_save_restore_cmds(struct adreno_context *drawctxt,
+			       struct tmp_ctx *ctx)
+{
+	unsigned int *cmd = ctx->cmd;
+	unsigned int *save, *restore, *fixup;
+#if defined(PM4_IM_STORE)
+	unsigned int *startSizeVtx, *startSizePix, *startSizeShared;
+#endif
+	unsigned int *partition1;
+	unsigned int *shaderBases, *partition2;
+
+#if defined(PM4_IM_STORE)
+	/* compute vertex, pixel and shared instruction shadow GPU addresses */
+	ctx->shader_vertex = drawctxt->gpustate.gpuaddr + SHADER_OFFSET;
+	ctx->shader_pixel = ctx->shader_vertex + SHADER_SHADOW_SIZE;
+	ctx->shader_shared = ctx->shader_pixel + SHADER_SHADOW_SIZE;
+#endif
+
+	/* restore shader partitioning and instructions */
+
+	restore = cmd;		/* start address */
+
+	/* Invalidate Vertex & Pixel instruction code address and sizes */
+	*cmd++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
+	*cmd++ = 0x00000300;	/* 0x100 = Vertex, 0x200 = Pixel */
+
+	/* Restore previous shader vertex & pixel instruction bases. */
+	*cmd++ = pm4_type3_packet(PM4_SET_SHADER_BASES, 1);
+	shaderBases = cmd++;	/* TBD #5: shader bases (from fixup) */
+
+	/* restore the saved shader partition information */
+	*cmd++ = pm4_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+	partition1 = cmd++;	/* TBD #4a: partition info (from save) */
+
+#if defined(PM4_IM_STORE)
+	/* load vertex shader instructions from the shadow. */
+	*cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+	*cmd++ = ctx->shader_vertex + 0x0;	/* 0x0 = Vertex */
+	startSizeVtx = cmd++;	/* TBD #1: start/size (from save) */
+
+	/* load pixel shader instructions from the shadow. */
+	*cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+	*cmd++ = ctx->shader_pixel + 0x1;	/* 0x1 = Pixel */
+	startSizePix = cmd++;	/* TBD #2: start/size (from save) */
+
+	/* load shared shader instructions from the shadow. */
+	*cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+	*cmd++ = ctx->shader_shared + 0x2;	/* 0x2 = Shared */
+	startSizeShared = cmd++;	/* TBD #3: start/size (from save) */
+#endif
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->shader_restore, restore, cmd);
+
+	/*
+	 *  fixup SET_SHADER_BASES data
+	 *
+	 *  since self-modifying PM4 code is being used here, a separate
+	 *  command buffer is used for this fixup operation, to ensure the
+	 *  commands are not read by the PM4 engine before the data fields
+	 *  have been written.
+	 */
+
+	fixup = cmd;		/* start address */
+
+	/* write the shader partition information to a scratch register */
+	*cmd++ = pm4_type0_packet(REG_SCRATCH_REG2, 1);
+	partition2 = cmd++;	/* TBD #4b: partition info (from save) */
+
+	/* mask off unused bits, then OR with shader instruction memory size */
+	*cmd++ = pm4_type3_packet(PM4_REG_RMW, 3);
+	*cmd++ = REG_SCRATCH_REG2;
+	/* AND off invalid bits. */
+	*cmd++ = 0x0FFF0FFF;
+	/* OR in instruction memory size */
+	*cmd++ = (unsigned int)((SHADER_INSTRUCT_LOG2 - 5U) << 29);
+
+	/* write the computed value to the SET_SHADER_BASES data field */
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_SCRATCH_REG2;
+	/* TBD #5: shader bases (to restore) */
+	*cmd++ = gpuaddr(shaderBases, &drawctxt->gpustate);
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->shader_fixup, fixup, cmd);
+
+	/* save shader partitioning and instructions */
+
+	save = cmd;		/* start address */
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	/* fetch the SQ_INST_STORE_MANAGMENT register value and store it in
+	 * the partition data fields (TBD #4a and #4b) of the restore and
+	 * fixup command buffers above.
+	 */
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_SQ_INST_STORE_MANAGMENT;
+	/* TBD #4a: partition info (to restore) */
+	*cmd++ = gpuaddr(partition1, &drawctxt->gpustate);
+	*cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+	*cmd++ = REG_SQ_INST_STORE_MANAGMENT;
+	/* TBD #4b: partition info (to fixup) */
+	*cmd++ = gpuaddr(partition2, &drawctxt->gpustate);
+
+#if defined(PM4_IM_STORE)
+
+	/* store the vertex shader instructions */
+	*cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+	*cmd++ = ctx->shader_vertex + 0x0;	/* 0x0 = Vertex */
+	/* TBD #1: start/size (to restore) */
+	*cmd++ = gpuaddr(startSizeVtx, &drawctxt->gpustate);
+
+	/* store the pixel shader instructions */
+	*cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+	*cmd++ = ctx->shader_pixel + 0x1;	/* 0x1 = Pixel */
+	/* TBD #2: start/size (to restore) */
+	*cmd++ = gpuaddr(startSizePix, &drawctxt->gpustate);
+
+	/* store the shared shader instructions if vertex base is nonzero */
+
+	*cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+	*cmd++ = ctx->shader_shared + 0x2;	/* 0x2 = Shared */
+	/* TBD #3: start/size (to restore) */
+	*cmd++ = gpuaddr(startSizeShared, &drawctxt->gpustate);
+
+#endif
+
+	*cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	/* create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->shader_save, save, cmd);
+
+	ctx->cmd = cmd;
+}
+
+/* create buffers for saving/restoring registers, constants, & GMEM */
+static int
+create_gpustate_shadow(struct kgsl_device *device,
+		       struct adreno_context *drawctxt,
+		       struct tmp_ctx *ctx)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	int result;
+
+	/* Allocate vmalloc memory to store the gpustate */
+	result = kgsl_allocate(&drawctxt->gpustate,
+		drawctxt->pagetable, CONTEXT_SIZE);
+
+	if (result)
+		return result;
+
+	drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
+
+	/* Blank out h/w register, constant, and command buffer shadows. */
+	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, CONTEXT_SIZE);
+
+	/* set-up command and vertex buffer pointers */
+	ctx->cmd = ctx->start
+	    = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);
+
+	/* build indirect command buffers to save & restore regs/constants */
+	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+	build_regrestore_cmds(adreno_dev, drawctxt, ctx);
+	build_regsave_cmds(adreno_dev, drawctxt, ctx);
+
+	build_shader_save_restore_cmds(drawctxt, ctx);
+
+	kgsl_cache_range_op(&drawctxt->gpustate,
+			    KGSL_CACHE_OP_FLUSH);
+
+	return 0;
+}
+
+/* create buffers for saving/restoring registers, constants, & GMEM */
+static int
+create_gmem_shadow(struct adreno_device *adreno_dev,
+		   struct adreno_context *drawctxt,
+		   struct tmp_ctx *ctx)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	int result;
+
+	config_gmemsize(&drawctxt->context_gmem_shadow,
+			adreno_dev->gmemspace.sizebytes);
+	ctx->gmem_base = adreno_dev->gmemspace.gpu_base;
+
+	result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
+		drawctxt->pagetable, drawctxt->context_gmem_shadow.size);
+
+	if (result)
+		return result;
+
+	/* we've allocated the shadow, when swapped out, GMEM must be saved. */
+	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW | CTXT_FLAGS_GMEM_SAVE;
+
+	/* blank out gmem shadow. */
+	kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
+			   drawctxt->context_gmem_shadow.size);
+
+	/* build quad vertex buffer */
+	build_quad_vtxbuff(drawctxt, ctx, &drawctxt->context_gmem_shadow);
+
+	/* build TP0_CHICKEN register restore command buffer */
+	ctx->cmd = build_chicken_restore_cmds(drawctxt, ctx);
+
+	/* build indirect command buffers to save & restore gmem */
+	/* Idle because we are reading PM override registers */
+	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+	drawctxt->context_gmem_shadow.gmem_save_commands = ctx->cmd;
+	ctx->cmd =
+	    build_gmem2sys_cmds(adreno_dev, drawctxt, ctx,
+				&drawctxt->context_gmem_shadow);
+	drawctxt->context_gmem_shadow.gmem_restore_commands = ctx->cmd;
+	ctx->cmd =
+	    build_sys2gmem_cmds(adreno_dev, drawctxt, ctx,
+				&drawctxt->context_gmem_shadow);
+
+	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
+			    KGSL_CACHE_OP_FLUSH);
+
+	return 0;
+}
+
+/* create a new drawing context */
+
+int adreno_drawctxt_create(struct kgsl_device *device,
+			struct kgsl_pagetable *pagetable,
+			struct kgsl_context *context, uint32_t flags)
+{
+	struct adreno_context *drawctxt;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct tmp_ctx ctx;
+	int ret;
+
+	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
+
+	if (drawctxt == NULL)
+		return -ENOMEM;
+
+	drawctxt->pagetable = pagetable;
+	drawctxt->bin_base_offset = 0;
+
+	ret = create_gpustate_shadow(device, drawctxt, &ctx);
+	if (ret)
+		goto err;
+
+	/* Save the shader instruction memory on context switching */
+	drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
+
+	if (!(flags & KGSL_CONTEXT_NO_GMEM_ALLOC)) {
+		/* create gmem shadow */
+		ret = create_gmem_shadow(adreno_dev, drawctxt, &ctx);
+		if (ret != 0)
+			goto err;
+	}
+
+	BUG_ON(ctx.cmd - ctx.start > CMD_BUFFER_LEN);
+
+	context->devctxt = drawctxt;
+	return 0;
+err:
+	kgsl_sharedmem_free(&drawctxt->gpustate);
+	kfree(drawctxt);
+	return ret;
+}
+
+/* destroy a drawing context */
+
+void adreno_drawctxt_destroy(struct kgsl_device *device,
+			  struct kgsl_context *context)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_context *drawctxt = context->devctxt;
+
+	if (drawctxt == NULL)
+		return;
+
+	/* deactivate context */
+	if (adreno_dev->drawctxt_active == drawctxt) {
+		/* no need to save GMEM or shader, the context is
+		 * being destroyed.
+		 */
+		drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
+				     CTXT_FLAGS_SHADER_SAVE |
+				     CTXT_FLAGS_GMEM_SHADOW |
+				     CTXT_FLAGS_STATE_SHADOW);
+
+		adreno_drawctxt_switch(adreno_dev, NULL, 0);
+	}
+
+	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+
+	kgsl_sharedmem_free(&drawctxt->gpustate);
+	kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
+
+	kfree(drawctxt);
+	context->devctxt = NULL;
+}
+
+/* set bin base offset */
+void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
+				      struct kgsl_context *context,
+				      unsigned int offset)
+{
+	struct adreno_context *drawctxt = context->devctxt;
+
+	if (drawctxt)
+		drawctxt->bin_base_offset = offset;
+}
+
+/* switch drawing contexts */
+void
+adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+			struct adreno_context *drawctxt,
+			unsigned int flags)
+{
+	struct adreno_context *active_ctxt =
+	  adreno_dev->drawctxt_active;
+	struct kgsl_device *device = &adreno_dev->dev;
+	unsigned int cmds[5];
+
+	if (drawctxt) {
+		if (flags & KGSL_CONTEXT_SAVE_GMEM)
+			/* Set the flag in context so that the save is done
+			 * when this context is switched out. */
+			drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
+		else
+			/* Remove GMEM saving flag from the context */
+			drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
+	}
+	/* already current? */
+	if (active_ctxt == drawctxt)
+		return;
+
+	KGSL_CTXT_INFO(device, "from %p to %p flags %d\n",
+			adreno_dev->drawctxt_active, drawctxt, flags);
+	/* save old context*/
+	if (active_ctxt && active_ctxt->flags & CTXT_FLAGS_GPU_HANG)
+		KGSL_CTXT_WARN(device,
+			"Current active context has caused gpu hang\n");
+
+	if (active_ctxt != NULL) {
+		KGSL_CTXT_INFO(device,
+			"active_ctxt flags %08x\n", active_ctxt->flags);
+		/* save registers and constants. */
+		adreno_ringbuffer_issuecmds(device, 0,
+				active_ctxt->reg_save, 3);
+
+		if (active_ctxt->flags & CTXT_FLAGS_SHADER_SAVE) {
+			/* save shader partitioning and instructions. */
+			adreno_ringbuffer_issuecmds(device,
+					KGSL_CMD_FLAGS_PMODE,
+					active_ctxt->shader_save, 3);
+
+			/* fixup shader partitioning parameter for
+			 *  SET_SHADER_BASES.
+			 */
+			adreno_ringbuffer_issuecmds(device, 0,
+					active_ctxt->shader_fixup, 3);
+
+			active_ctxt->flags |= CTXT_FLAGS_SHADER_RESTORE;
+		}
+
+		if (active_ctxt->flags & CTXT_FLAGS_GMEM_SAVE
+			&& active_ctxt->flags & CTXT_FLAGS_GMEM_SHADOW) {
+			/* save gmem.
+			 * (note: changes shader. shader must already be saved.)
+			 */
+			adreno_ringbuffer_issuecmds(device,
+				KGSL_CMD_FLAGS_PMODE,
+				active_ctxt->context_gmem_shadow.gmem_save, 3);
+
+			/* Restore TP0_CHICKEN */
+			adreno_ringbuffer_issuecmds(device, 0,
+				active_ctxt->chicken_restore, 3);
+
+			active_ctxt->flags |= CTXT_FLAGS_GMEM_RESTORE;
+		}
+	}
+
+	adreno_dev->drawctxt_active = drawctxt;
+
+	/* restore new context */
+	if (drawctxt != NULL) {
+
+		KGSL_CTXT_INFO(device,
+			"drawctxt flags %08x\n", drawctxt->flags);
+		cmds[0] = pm4_nop_packet(1);
+		cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+		cmds[2] = pm4_type3_packet(PM4_MEM_WRITE, 2);
+		cmds[3] = device->memstore.gpuaddr +
+				KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
+		cmds[4] = (unsigned int)adreno_dev->drawctxt_active;
+		adreno_ringbuffer_issuecmds(device, 0, cmds, 5);
+		kgsl_mmu_setstate(device, drawctxt->pagetable);
+
+#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
+		kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
+			drawctxt->gpustate.gpuaddr, LCC_SHADOW_SIZE +
+			REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE,
+			false);
+#endif
+
+		/* restore gmem.
+		 *  (note: changes shader. shader must not already be restored.)
+		 */
+		if (drawctxt->flags & CTXT_FLAGS_GMEM_RESTORE) {
+			adreno_ringbuffer_issuecmds(device,
+				KGSL_CMD_FLAGS_PMODE,
+				drawctxt->context_gmem_shadow.gmem_restore, 3);
+
+			/* Restore TP0_CHICKEN */
+			adreno_ringbuffer_issuecmds(device, 0,
+				drawctxt->chicken_restore, 3);
+
+			drawctxt->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
+		}
+
+		/* restore registers and constants. */
+		adreno_ringbuffer_issuecmds(device, 0,
+					  drawctxt->reg_restore, 3);
+
+		/* restore shader instructions & partitioning. */
+		if (drawctxt->flags & CTXT_FLAGS_SHADER_RESTORE) {
+			adreno_ringbuffer_issuecmds(device, 0,
+					  drawctxt->shader_restore, 3);
+		}
+
+		cmds[0] = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
+		cmds[1] = drawctxt->bin_base_offset;
+		if (!adreno_is_a220(adreno_dev))
+			adreno_ringbuffer_issuecmds(device, 0, cmds, 2);
+
+	} else
+		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
+}
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
new file mode 100644
index 0000000..049adf7
--- /dev/null
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -0,0 +1,97 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_DRAWCTXT_H
+#define __ADRENO_DRAWCTXT_H
+
+#include "a200_reg.h"
+#include "a220_reg.h"
+
+/* Flags */
+
+#define CTXT_FLAGS_NOT_IN_USE		0x00000000
+#define CTXT_FLAGS_IN_USE		0x00000001
+
+/* state shadow memory allocated */
+#define CTXT_FLAGS_STATE_SHADOW		0x00000010
+
+/* gmem shadow memory allocated */
+#define CTXT_FLAGS_GMEM_SHADOW		0x00000100
+/* gmem must be copied to shadow */
+#define CTXT_FLAGS_GMEM_SAVE		0x00000200
+/* gmem can be restored from shadow */
+#define CTXT_FLAGS_GMEM_RESTORE		0x00000400
+/* shader must be copied to shadow */
+#define CTXT_FLAGS_SHADER_SAVE		0x00002000
+/* shader can be restored from shadow */
+#define CTXT_FLAGS_SHADER_RESTORE	0x00004000
+/* Context has caused a GPU hang */
+#define CTXT_FLAGS_GPU_HANG		0x00008000
+
+struct kgsl_device;
+struct adreno_device;
+struct kgsl_device_private;
+struct kgsl_context;
+
+/* draw context */
+struct gmem_shadow_t {
+	struct kgsl_memdesc gmemshadow;	/* Shadow buffer address */
+
+	/* 256 KB GMEM surface = 4 bytes-per-pixel x 256 pixels/row x
+	 * 256 rows. */
+	/* width & height must be multiples of 32, in case tiled textures
+	 * are used. */
+	enum COLORFORMATX format;
+	unsigned int size;	/* Size of surface used to store GMEM */
+	unsigned int width;	/* Width of surface used to store GMEM */
+	unsigned int height;	/* Height of surface used to store GMEM */
+	unsigned int pitch;	/* Pitch of surface used to store GMEM */
+	unsigned int gmem_pitch;	/* Pitch value used for GMEM */
+	unsigned int *gmem_save_commands;
+	unsigned int *gmem_restore_commands;
+	unsigned int gmem_save[3];
+	unsigned int gmem_restore[3];
+	struct kgsl_memdesc quad_vertices;
+	struct kgsl_memdesc quad_texcoords;
+};
+
+struct adreno_context {
+	uint32_t flags;
+	struct kgsl_pagetable *pagetable;
+	struct kgsl_memdesc gpustate;
+	unsigned int reg_save[3];
+	unsigned int reg_restore[3];
+	unsigned int shader_save[3];
+	unsigned int shader_fixup[3];
+	unsigned int shader_restore[3];
+	unsigned int chicken_restore[3];
+	unsigned int bin_base_offset;
+	/* Information of the GMEM shadow that is created in context create */
+	struct gmem_shadow_t context_gmem_shadow;
+};
+
+int adreno_drawctxt_create(struct kgsl_device *device,
+			struct kgsl_pagetable *pagetable,
+			struct kgsl_context *context,
+			uint32_t flags);
+
+void adreno_drawctxt_destroy(struct kgsl_device *device,
+			  struct kgsl_context *context);
+
+void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+				struct adreno_context *drawctxt,
+				unsigned int flags);
+void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
+				      struct kgsl_context *context,
+					unsigned int offset);
+
+#endif  /* __ADRENO_DRAWCTXT_H */
diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h
new file mode 100644
index 0000000..4d6f70a
--- /dev/null
+++ b/drivers/gpu/msm/adreno_pm4types.h
@@ -0,0 +1,187 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_PM4TYPES_H
+#define __ADRENO_PM4TYPES_H
+
+
+#define PM4_PKT_MASK	0xc0000000
+
+#define PM4_TYPE0_PKT	((unsigned int)0 << 30)
+#define PM4_TYPE1_PKT	((unsigned int)1 << 30)
+#define PM4_TYPE2_PKT	((unsigned int)2 << 30)
+#define PM4_TYPE3_PKT	((unsigned int)3 << 30)
+
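+/*
+ * Bits 31:30 of every command dword select the packet type: type0 headers
+ * carry a register index (see pm4_type0_packet below) while type3 headers
+ * carry one of the opcodes listed below (see pm4_type3_packet).
+ */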
+
+/* type3 packets */
+/* initialize CP's micro-engine */
+#define PM4_ME_INIT		0x48
+
+/* skip N 32-bit words to get to the next packet */
+#define PM4_NOP			0x10
+
+/* indirect buffer dispatch.  prefetch parser uses this packet type to
+ * determine whether to pre-fetch the IB
+ */
+#define PM4_INDIRECT_BUFFER	0x3f
+
+/* indirect buffer dispatch.  same as IB, but init is pipelined */
+#define PM4_INDIRECT_BUFFER_PFD	0x37
+
+/* wait for the IDLE state of the engine */
+#define PM4_WAIT_FOR_IDLE	0x26
+
+/* wait until a register or memory location is a specific value */
+#define PM4_WAIT_REG_MEM	0x3c
+
+/* wait until a register location is equal to a specific value */
+#define PM4_WAIT_REG_EQ		0x52
+
+/* wait until a register location is >= a specific value */
+#define PM4_WAT_REG_GTE		0x53
+
+/* wait until a read completes */
+#define PM4_WAIT_UNTIL_READ	0x5c
+
+/* wait until all base/size writes from an IB_PFD packet have completed */
+#define PM4_WAIT_IB_PFD_COMPLETE 0x5d
+
+/* register read/modify/write */
+#define PM4_REG_RMW		0x21
+
+/* reads register in chip and writes to memory */
+#define PM4_REG_TO_MEM		0x3e
+
+/* write N 32-bit words to memory */
+#define PM4_MEM_WRITE		0x3d
+
+/* write CP_PROG_COUNTER value to memory */
+#define PM4_MEM_WRITE_CNTR	0x4f
+
+/* conditional execution of a sequence of packets */
+#define PM4_COND_EXEC		0x44
+
+/* conditional write to memory or register */
+#define PM4_COND_WRITE		0x45
+
+/* generate an event that creates a write to memory when completed */
+#define PM4_EVENT_WRITE		0x46
+
+/* generate a VS|PS_done event */
+#define PM4_EVENT_WRITE_SHD	0x58
+
+/* generate a cache flush done event */
+#define PM4_EVENT_WRITE_CFL	0x59
+
+/* generate a z_pass done event */
+#define PM4_EVENT_WRITE_ZPD	0x5b
+
+
+/* initiate fetch of index buffer and draw */
+#define PM4_DRAW_INDX		0x22
+
+/* draw using supplied indices in packet */
+#define PM4_DRAW_INDX_2		0x36
+
+/* initiate fetch of index buffer and binIDs and draw */
+#define PM4_DRAW_INDX_BIN	0x34
+
+/* initiate fetch of bin IDs and draw using supplied indices */
+#define PM4_DRAW_INDX_2_BIN	0x35
+
+
+/* begin/end initiator for viz query extent processing */
+#define PM4_VIZ_QUERY		0x23
+
+/* fetch state sub-blocks and initiate shader code DMAs */
+#define PM4_SET_STATE		0x25
+
+/* load constant into chip and to memory */
+#define PM4_SET_CONSTANT	0x2d
+
+/* load sequencer instruction memory (pointer-based) */
+#define PM4_IM_LOAD		0x27
+
+/* load sequencer instruction memory (code embedded in packet) */
+#define PM4_IM_LOAD_IMMEDIATE	0x2b
+
+/* load constants from a location in memory */
+#define PM4_LOAD_CONSTANT_CONTEXT 0x2e
+
+/* selective invalidation of state pointers */
+#define PM4_INVALIDATE_STATE	0x3b
+
+
+/* dynamically changes shader instruction memory partition */
+#define PM4_SET_SHADER_BASES	0x4A
+
+/* sets the 64-bit BIN_MASK register in the PFP */
+#define PM4_SET_BIN_MASK	0x50
+
+/* sets the 64-bit BIN_SELECT register in the PFP */
+#define PM4_SET_BIN_SELECT	0x51
+
+
+/* updates the current context, if needed */
+#define PM4_CONTEXT_UPDATE	0x5e
+
+/* generate interrupt from the command stream */
+#define PM4_INTERRUPT		0x40
+
+
+/* copy sequencer instruction memory to system memory */
+#define PM4_IM_STORE            0x2c
+
+/*
+ * for a20x
+ * program an offset that will be added to the BIN_BASE value of
+ * the 3D_DRAW_INDX_BIN packet
+ */
+#define PM4_SET_BIN_BASE_OFFSET     0x4B
+
+/*
+ * for a22x
+ * sets draw initiator flags register in PFP, gets bitwise-ORed into
+ * every draw initiator
+ */
+#define PM4_SET_DRAW_INIT_FLAGS      0x4B
+
+#define PM4_SET_PROTECTED_MODE  0x5f /* sets the register protection mode */
+
+
+/* packet header building macros */
+#define pm4_type0_packet(regindx, cnt) \
+	(PM4_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
+
+#define pm4_type0_packet_for_sameregister(regindx, cnt) \
+	(PM4_TYPE0_PKT | (((cnt)-1) << 16) | (1 << 15) | \
+		((regindx) & 0x7FFF))
+
+#define pm4_type1_packet(reg0, reg1) \
+	 (PM4_TYPE1_PKT | ((reg1) << 12) | (reg0))
+
+#define pm4_type3_packet(opcode, cnt) \
+	 (PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
+
+#define pm4_predicated_type3_packet(opcode, cnt) \
+	 (PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
+
+#define pm4_nop_packet(cnt) \
+	 (PM4_TYPE3_PKT | (((cnt)-1) << 16) | (PM4_NOP << 8))
+
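+/*
+ * For example, pm4_type3_packet(PM4_NOP, 1) builds the header 0xC0001000:
+ * the type-3 marker in bits 31:30, a count-minus-one of 0 in bits 29:16
+ * and the NOP opcode (0x10) in bits 15:8.
+ */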
+
+/* packet headers */
+#define PM4_HDR_ME_INIT	pm4_type3_packet(PM4_ME_INIT, 18)
+#define PM4_HDR_INDIRECT_BUFFER_PFD pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)
+#define PM4_HDR_INDIRECT_BUFFER	pm4_type3_packet(PM4_INDIRECT_BUFFER, 2)
+
+#endif	/* __ADRENO_PM4TYPES_H */
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
new file mode 100644
index 0000000..76db7fa
--- /dev/null
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -0,0 +1,846 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include "kgsl.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_ringbuffer.h"
+#include "adreno_postmortem.h"
+#include "adreno_debugfs.h"
+
+#include "a200_reg.h"
+
+#define INVALID_RB_CMD 0xaaaaaaaa
+
+struct pm_id_name {
+	uint32_t id;
+	char name[9];
+};
+
+static const struct pm_id_name pm0_types[] = {
+	{REG_PA_SC_AA_CONFIG,		"RPASCAAC"},
+	{REG_RBBM_PM_OVERRIDE2,		"RRBBPMO2"},
+	{REG_SCRATCH_REG2,		"RSCRTRG2"},
+	{REG_SQ_GPR_MANAGEMENT,		"RSQGPRMN"},
+	{REG_SQ_INST_STORE_MANAGMENT,	"RSQINSTS"},
+	{REG_TC_CNTL_STATUS,		"RTCCNTLS"},
+	{REG_TP0_CHICKEN,		"RTP0CHCK"},
+	{REG_CP_TIMESTAMP,		"CP_TM_ST"},
+};
+
+static const struct pm_id_name pm3_types[] = {
+	{PM4_COND_EXEC,			"CND_EXEC"},
+	{PM4_CONTEXT_UPDATE,		"CX__UPDT"},
+	{PM4_DRAW_INDX,			"DRW_NDX_"},
+	{PM4_DRAW_INDX_BIN,		"DRW_NDXB"},
+	{PM4_EVENT_WRITE,		"EVENT_WT"},
+	{PM4_IM_LOAD,			"IN__LOAD"},
+	{PM4_IM_LOAD_IMMEDIATE,		"IM_LOADI"},
+	{PM4_IM_STORE,			"IM_STORE"},
+	{PM4_INDIRECT_BUFFER,		"IND_BUF_"},
+	{PM4_INDIRECT_BUFFER_PFD,	"IND_BUFP"},
+	{PM4_INTERRUPT,			"PM4_INTR"},
+	{PM4_INVALIDATE_STATE,		"INV_STAT"},
+	{PM4_LOAD_CONSTANT_CONTEXT,	"LD_CN_CX"},
+	{PM4_ME_INIT,			"ME__INIT"},
+	{PM4_NOP,			"PM4__NOP"},
+	{PM4_REG_RMW,			"REG__RMW"},
+	{PM4_REG_TO_MEM,		"REG2_MEM"},
+	{PM4_SET_BIN_BASE_OFFSET,	"ST_BIN_O"},
+	{PM4_SET_CONSTANT,		"ST_CONST"},
+	{PM4_SET_PROTECTED_MODE,	"ST_PRT_M"},
+	{PM4_SET_SHADER_BASES,		"ST_SHD_B"},
+	{PM4_WAIT_FOR_IDLE,		"WAIT4IDL"},
+};
+
+/* Offset address pairs: start, end of range to dump (inclusive) */
+
+/* GPU < Z470 */
+
+static const int a200_registers[] = {
+	0x0000, 0x0008, 0x0010, 0x002c, 0x00ec, 0x00f4,
+	0x0100, 0x0110, 0x0118, 0x011c,
+	0x0700, 0x0704, 0x070c, 0x0720, 0x0754, 0x0764,
+	0x0770, 0x0774, 0x07a8, 0x07a8, 0x07b8, 0x07cc,
+	0x07d8, 0x07dc, 0x07f0, 0x07fc, 0x0e44, 0x0e48,
+	0x0e6c, 0x0e78, 0x0ec8, 0x0ed4, 0x0edc, 0x0edc,
+	0x0fe0, 0x0fec, 0x1100, 0x1100,
+
+	0x110c, 0x1110, 0x112c, 0x112c, 0x1134, 0x113c,
+	0x1148, 0x1148, 0x1150, 0x116c, 0x11fc, 0x11fc,
+	0x15e0, 0x161c, 0x1724, 0x1724, 0x1740, 0x1740,
+	0x1804, 0x1810, 0x1818, 0x1824, 0x182c, 0x1838,
+	0x184c, 0x1850, 0x28a4, 0x28ac, 0x28bc, 0x28c4,
+	0x2900, 0x290c, 0x2914, 0x2914, 0x2938, 0x293c,
+	0x30b0, 0x30b0, 0x30c0, 0x30c0, 0x30e0, 0x30f0,
+	0x3100, 0x3100, 0x3110, 0x3110, 0x3200, 0x3218,
+	0x3220, 0x3250, 0x3264, 0x3268, 0x3290, 0x3294,
+	0x3400, 0x340c, 0x3418, 0x3418, 0x3420, 0x342c,
+	0x34d0, 0x34d4, 0x36b8, 0x3704, 0x3720, 0x3750,
+	0x3760, 0x3764, 0x3800, 0x3800, 0x3808, 0x3810,
+	0x385c, 0x3878, 0x3b00, 0x3b24, 0x3b2c, 0x3b30,
+	0x3b40, 0x3b40, 0x3b50, 0x3b5c, 0x3b80, 0x3b88,
+	0x3c04, 0x3c08, 0x3c30, 0x3c30, 0x3c38, 0x3c48,
+	0x3c98, 0x3ca8, 0x3cb0, 0x3cb0,
+
+	0x8000, 0x8008, 0x8018, 0x803c, 0x8200, 0x8208,
+	0x8400, 0x8424, 0x8430, 0x8450, 0x8600, 0x8610,
+	0x87d4, 0x87dc, 0x8800, 0x8820, 0x8a00, 0x8a0c,
+	0x8a4c, 0x8a50, 0x8c00, 0x8c20, 0x8c48, 0x8c48,
+	0x8c58, 0x8c74, 0x8c90, 0x8c98, 0x8e00, 0x8e0c,
+
+	0x9000, 0x9008, 0x9018, 0x903c, 0x9200, 0x9208,
+	0x9400, 0x9424, 0x9430, 0x9450, 0x9600, 0x9610,
+	0x97d4, 0x97dc, 0x9800, 0x9820, 0x9a00, 0x9a0c,
+	0x9a4c, 0x9a50, 0x9c00, 0x9c20, 0x9c48, 0x9c48,
+	0x9c58, 0x9c74, 0x9c90, 0x9c98, 0x9e00, 0x9e0c,
+
+	0x10000, 0x1000c, 0x12000, 0x12014,
+	0x12400, 0x12400, 0x12420, 0x12420
+};
+
+/* GPU = Z470 */
+
+static const int a220_registers[] = {
+	0x0000, 0x0008, 0x0010, 0x002c, 0x00ec, 0x00f4,
+	0x0100, 0x0110, 0x0118, 0x011c,
+	0x0700, 0x0704, 0x070c, 0x0720, 0x0754, 0x0764,
+	0x0770, 0x0774, 0x07a8, 0x07a8, 0x07b8, 0x07cc,
+	0x07d8, 0x07dc, 0x07f0, 0x07fc, 0x0e44, 0x0e48,
+	0x0e6c, 0x0e78, 0x0ec8, 0x0ed4, 0x0edc, 0x0edc,
+	0x0fe0, 0x0fec, 0x1100, 0x1100,
+
+	0x110c, 0x1110, 0x112c, 0x112c, 0x1134, 0x113c,
+	0x1148, 0x1148, 0x1150, 0x116c, 0x11fc, 0x11fc,
+	0x15e0, 0x161c, 0x1724, 0x1724, 0x1740, 0x1740,
+	0x1804, 0x1810, 0x1818, 0x1824, 0x182c, 0x1838,
+	0x184c, 0x1850, 0x28a4, 0x28ac, 0x28bc, 0x28c4,
+	0x2900, 0x2900, 0x2908, 0x290c, 0x2914, 0x2914,
+	0x2938, 0x293c, 0x30c0, 0x30c0, 0x30e0, 0x30e4,
+	0x30f0, 0x30f0, 0x3200, 0x3204, 0x3220, 0x324c,
+	0x3400, 0x340c, 0x3414, 0x3418, 0x3420, 0x342c,
+	0x34d0, 0x34d4, 0x36b8, 0x3704, 0x3720, 0x3750,
+	0x3760, 0x3764, 0x3800, 0x3800, 0x3808, 0x3810,
+	0x385c, 0x3878, 0x3b00, 0x3b24, 0x3b2c, 0x3b30,
+	0x3b40, 0x3b40, 0x3b50, 0x3b5c, 0x3b80, 0x3b88,
+	0x3c04, 0x3c08, 0x8000, 0x8008, 0x8018, 0x803c,
+	0x8200, 0x8208, 0x8400, 0x8408, 0x8410, 0x8424,
+	0x8430, 0x8450, 0x8600, 0x8610, 0x87d4, 0x87dc,
+	0x8800, 0x8808, 0x8810, 0x8810, 0x8820, 0x8820,
+	0x8a00, 0x8a08, 0x8a50, 0x8a50,
+	0x8c00, 0x8c20, 0x8c24, 0x8c28, 0x8c48, 0x8c48,
+	0x8c58, 0x8c58, 0x8c60, 0x8c74, 0x8c90, 0x8c98,
+	0x8e00, 0x8e0c, 0x9000, 0x9008, 0x9018, 0x903c,
+	0x9200, 0x9208, 0x9400, 0x9408, 0x9410, 0x9424,
+	0x9430, 0x9450, 0x9600, 0x9610, 0x97d4, 0x97dc,
+	0x9800, 0x9808, 0x9810, 0x9818, 0x9820, 0x9820,
+	0x9a00, 0x9a08, 0x9a50, 0x9a50, 0x9c00, 0x9c20,
+	0x9c48, 0x9c48, 0x9c58, 0x9c58, 0x9c60, 0x9c74,
+	0x9c90, 0x9c98, 0x9e00, 0x9e0c,
+
+	0x10000, 0x1000c, 0x12000, 0x12014,
+	0x12400, 0x12400, 0x12420, 0x12420
+};
+
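+/* return the size field (bits 29:16) of a PM4 packet header */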
+static uint32_t adreno_is_pm4_len(uint32_t word)
+{
+	if (word == INVALID_RB_CMD)
+		return 0;
+
+	return (word >> 16) & 0x3FFF;
+}
+
+static bool adreno_is_pm4_type(uint32_t word)
+{
+	int i;
+
+	if (word == INVALID_RB_CMD)
+		return 1;
+
+	if (adreno_is_pm4_len(word) > 16)
+		return 0;
+
+	if ((word & (3<<30)) == PM4_TYPE0_PKT) {
+		for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
+			if ((word & 0x7FFF) == pm0_types[i].id)
+				return 1;
+		}
+		return 0;
+	}
+	if ((word & (3<<30)) == PM4_TYPE3_PKT) {
+		for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
+			if ((word & 0xFFFF) == (pm3_types[i].id << 8))
+				return 1;
+		}
+		return 0;
+	}
+	return 0;
+}
+
+static const char *adreno_pm4_name(uint32_t word)
+{
+	int i;
+
+	if (word == INVALID_RB_CMD)
+		return "--------";
+
+	if ((word & (3<<30)) == PM4_TYPE0_PKT) {
+		for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
+			if ((word & 0x7FFF) == pm0_types[i].id)
+				return pm0_types[i].name;
+		}
+		return "????????";
+	}
+	if ((word & (3<<30)) == PM4_TYPE3_PKT) {
+		for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
+			if ((word & 0xFFFF) == (pm3_types[i].id << 8))
+				return pm3_types[i].name;
+		}
+		return "????????";
+	}
+	return "????????";
+}
+
+static void adreno_dump_regs(struct kgsl_device *device,
+			   const int *registers, int size)
+{
+	int range = 0, offset = 0;
+
+	for (range = 0; range < size; range++) {
+		/* start and end are in dword offsets */
+		int start = registers[range * 2] / 4;
+		int end = registers[range * 2 + 1] / 4;
+
+		unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+		int linelen, i;
+
+		for (offset = start; offset <= end; offset += linelen) {
+			unsigned int regvals[32/4];
+			linelen = min(end+1-offset, 32/4);
+
+			for (i = 0; i < linelen; ++i)
+				kgsl_regread(device, offset+i, regvals+i);
+
+			hex_dump_to_buffer(regvals, linelen*4, 32, 4,
+				linebuf, sizeof(linebuf), 0);
+			KGSL_LOG_DUMP(device,
+				"REG: %5.5X: %s\n", offset<<2, linebuf);
+		}
+	}
+}
+
+static void dump_ib(struct kgsl_device *device, char* buffId, uint32_t pt_base,
+	uint32_t base_offset, uint32_t ib_base, uint32_t ib_size, bool dump)
+{
+	unsigned int memsize;
+	uint8_t *base_addr = kgsl_sharedmem_convertaddr(device, pt_base,
+		ib_base, &memsize);
+
+	if (base_addr && dump)
+		print_hex_dump(KERN_ERR, buffId, DUMP_PREFIX_OFFSET,
+				 32, 4, base_addr, ib_size*4, 0);
+	else
+		KGSL_LOG_DUMP(device, "%s base:%8.8X  ib_size:%d  "
+			"offset:%5.5X%s\n",
+			buffId, ib_base, ib_size*4, base_offset,
+			base_addr ? "" : " [Invalid]");
+}
+
+#define IB_LIST_SIZE	64
+struct ib_list {
+	int count;
+	uint32_t bases[IB_LIST_SIZE];
+	uint32_t sizes[IB_LIST_SIZE];
+	uint32_t offsets[IB_LIST_SIZE];
+};
+
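+/*
+ * Dump IB1 and scan it for PM4_INDIRECT_BUFFER_PFD packets, recording the
+ * base, size and offset of each referenced IB2 in ib_list so those buffers
+ * can be dumped afterwards.
+ */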
+static void dump_ib1(struct kgsl_device *device, uint32_t pt_base,
+			uint32_t base_offset,
+			uint32_t ib1_base, uint32_t ib1_size,
+			struct ib_list *ib_list, bool dump)
+{
+	int i, j;
+	uint32_t value;
+	uint32_t *ib1_addr;
+	unsigned int memsize;
+
+	dump_ib(device, "IB1:", pt_base, base_offset, ib1_base,
+		ib1_size, dump);
+
+	/* fetch virtual address for given IB base */
+	ib1_addr = (uint32_t *)kgsl_sharedmem_convertaddr(device, pt_base,
+		ib1_base, &memsize);
+	if (!ib1_addr)
+		return;
+
+	for (i = 0; i+3 < ib1_size; ) {
+		value = ib1_addr[i++];
+		if (value == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
+			uint32_t ib2_base = ib1_addr[i++];
+			uint32_t ib2_size = ib1_addr[i++];
+
+			/* find previous match */
+			for (j = 0; j < ib_list->count; ++j)
+				if (ib_list->sizes[j] == ib2_size
+					&& ib_list->bases[j] == ib2_base)
+					break;
+
+			if (j < ib_list->count || ib_list->count
+				>= IB_LIST_SIZE)
+				continue;
+
+			/* store match */
+			ib_list->sizes[ib_list->count] = ib2_size;
+			ib_list->bases[ib_list->count] = ib2_base;
+			ib_list->offsets[ib_list->count] = i<<2;
+			++ib_list->count;
+		}
+	}
+}
+
+static void adreno_dump_rb_buffer(const void *buf, size_t len,
+		char *linebuf, size_t linebuflen, int *argp)
+{
+	const u32 *ptr4 = buf;
+	const int ngroups = len;
+	int lx = 0, j;
+	bool nxsp = 1;
+
+	for (j = 0; j < ngroups; j++) {
+		if (*argp < 0) {
+			lx += scnprintf(linebuf + lx, linebuflen - lx, " <");
+			*argp = -*argp;
+		} else if (nxsp)
+			lx += scnprintf(linebuf + lx, linebuflen - lx, "  ");
+		else
+			nxsp = 1;
+		if (!*argp && adreno_is_pm4_type(ptr4[j])) {
+			lx += scnprintf(linebuf + lx, linebuflen - lx,
+				"%s", adreno_pm4_name(ptr4[j]));
+			*argp = -(adreno_is_pm4_len(ptr4[j])+1);
+		} else {
+			lx += scnprintf(linebuf + lx, linebuflen - lx,
+				"%8.8X", ptr4[j]);
+			if (*argp > 1)
+				--*argp;
+			else if (*argp == 1) {
+				*argp = 0;
+				nxsp = 0;
+				lx += scnprintf(linebuf + lx, linebuflen - lx,
+					"> ");
+			}
+		}
+	}
+	linebuf[lx] = '\0';
+}
+
+static bool adreno_rb_use_hex(void)
+{
+#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+static void adreno_dump_rb(struct kgsl_device *device, const void *buf,
+			 size_t len, int start, int size)
+{
+	const uint32_t *ptr = buf;
+	int i, remaining, args = 0;
+	unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+	const int rowsize = 8;
+
+	len >>= 2;
+	remaining = len;
+	for (i = 0; i < len; i += rowsize) {
+		int linelen = min(remaining, rowsize);
+		remaining -= rowsize;
+
+		if (adreno_rb_use_hex())
+			hex_dump_to_buffer(ptr+i, linelen*4, rowsize*4, 4,
+				linebuf, sizeof(linebuf), 0);
+		else
+			adreno_dump_rb_buffer(ptr+i, linelen, linebuf,
+				sizeof(linebuf), &args);
+		KGSL_LOG_DUMP(device,
+			"RB: %4.4X:%s\n", (start+i)%size, linebuf);
+	}
+}
+
+static bool adreno_ib_dump_enabled(void)
+{
+#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
+	return 0;
+#else
+	return 1;
+#endif
+}
+
+struct log_field {
+	bool show;
+	const char *display;
+};
+
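+/*
+ * Print as many of the remaining fields as fit on one log line, separated
+ * by " | ".  Returns the number of fields still left to print.
+ */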
+static int adreno_dump_fields_line(struct kgsl_device *device,
+				 const char *start, char *str, int slen,
+				 const struct log_field **lines,
+				 int num)
+{
+	const struct log_field *l = *lines;
+	int sptr, count  = 0;
+
+	sptr = snprintf(str, slen, "%s", start);
+
+	for (  ; num && sptr < slen; num--, l++) {
+		int ilen = strlen(l->display);
+
+		/* skip fields whose status bit was not set */
+		if (!l->show)
+			continue;
+
+		if (count)
+			ilen += strlen("  | ");
+
+		if (ilen > (slen - sptr))
+			break;
+
+		if (count++)
+			sptr += snprintf(str + sptr, slen - sptr, " | ");
+
+		sptr += snprintf(str + sptr, slen - sptr, "%s", l->display);
+	}
+
+	KGSL_LOG_DUMP(device, "%s\n", str);
+
+	*lines = l;
+	return num;
+}
+
+static void adreno_dump_fields(struct kgsl_device *device,
+			     const char *start, const struct log_field *lines,
+			     int num)
+{
+	char lb[90];
+	const char *sstr = start;
+
+	lb[sizeof(lb)  - 1] = '\0';
+
+	while (num) {
+		int ret = adreno_dump_fields_line(device, sstr, lb,
+			sizeof(lb) - 1, &lines, num);
+
+		if (ret == num)
+			break;
+
+		num = ret;
+		sstr = "        ";
+	}
+}
+
+static int adreno_dump(struct kgsl_device *device)
+{
+	unsigned int r1, r2, r3, rbbm_status;
+	unsigned int cp_ib1_base, cp_ib1_bufsz, cp_stat;
+	unsigned int cp_ib2_base, cp_ib2_bufsz;
+	unsigned int pt_base;
+	unsigned int cp_rb_base, rb_count;
+	unsigned int cp_rb_wptr, cp_rb_rptr;
+	unsigned int i;
+	int result = 0;
+	uint32_t *rb_copy;
+	const uint32_t *rb_vaddr;
+	int num_item = 0;
+	int read_idx, write_idx;
+	unsigned int ts_processed, rb_memsize;
+
+	static struct ib_list ib_list;
+
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	mb();
+
+	KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
+			pwr->power_flags, pwr->active_pwrlevel);
+
+	KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
+		pwr->interval_timeout);
+
+	KGSL_LOG_DUMP(device, "GRP_CLK = %lu ",
+				  kgsl_get_clkrate(pwr->grp_clks[0]));
+
+	KGSL_LOG_DUMP(device, "BUS CLK = %lu ",
+		kgsl_get_clkrate(pwr->ebi1_clk));
+
+
+	kgsl_regread(device, REG_RBBM_STATUS, &rbbm_status);
+	kgsl_regread(device, REG_RBBM_PM_OVERRIDE1, &r2);
+	kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &r3);
+	KGSL_LOG_DUMP(device, "RBBM:   STATUS   = %08X | PM_OVERRIDE1 = %08X | "
+		"PM_OVERRIDE2 = %08X\n", rbbm_status, r2, r3);
+
+	kgsl_regread(device, REG_RBBM_INT_CNTL, &r1);
+	kgsl_regread(device, REG_RBBM_INT_STATUS, &r2);
+	kgsl_regread(device, REG_RBBM_READ_ERROR, &r3);
+	KGSL_LOG_DUMP(device, "        INT_CNTL = %08X | INT_STATUS   = %08X | "
+		"READ_ERROR   = %08X\n", r1, r2, r3);
+
+	{
+		char cmdFifo[16];
+		struct log_field lines[] = {
+			{rbbm_status &  0x000F, cmdFifo},
+			{rbbm_status &  BIT(5), "TC busy     "},
+			{rbbm_status &  BIT(8), "HIRQ pending"},
+			{rbbm_status &  BIT(9), "CPRQ pending"},
+			{rbbm_status & BIT(10), "CFRQ pending"},
+			{rbbm_status & BIT(11), "PFRQ pending"},
+			{rbbm_status & BIT(12), "VGT 0DMA bsy"},
+			{rbbm_status & BIT(14), "RBBM WU busy"},
+			{rbbm_status & BIT(16), "CP NRT busy "},
+			{rbbm_status & BIT(18), "MH busy     "},
+			{rbbm_status & BIT(19), "MH chncy bsy"},
+			{rbbm_status & BIT(21), "SX busy     "},
+			{rbbm_status & BIT(22), "TPC busy    "},
+			{rbbm_status & BIT(24), "SC CNTX busy"},
+			{rbbm_status & BIT(25), "PA busy     "},
+			{rbbm_status & BIT(26), "VGT busy    "},
+			{rbbm_status & BIT(27), "SQ cntx1 bsy"},
+			{rbbm_status & BIT(28), "SQ cntx0 bsy"},
+			{rbbm_status & BIT(30), "RB busy     "},
+			{rbbm_status & BIT(31), "Grphs pp bsy"},
+		};
+		snprintf(cmdFifo, sizeof(cmdFifo), "CMD FIFO=%01X  ",
+			rbbm_status & 0xf);
+		adreno_dump_fields(device, " STATUS=", lines,
+				ARRAY_SIZE(lines));
+	}
+
+	kgsl_regread(device, REG_CP_RB_BASE, &cp_rb_base);
+	kgsl_regread(device, REG_CP_RB_CNTL, &r2);
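+	/* ring buffer size in dwords, from the low six bits of CP_RB_CNTL */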
+	rb_count = 2 << (r2 & (BIT(6)-1));
+	kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
+	KGSL_LOG_DUMP(device,
+		"CP_RB:  BASE = %08X | CNTL   = %08X | RPTR_ADDR = %08X"
+		"\n", cp_rb_base, r2, r3);
+
+	kgsl_regread(device, REG_CP_RB_RPTR, &cp_rb_rptr);
+	kgsl_regread(device, REG_CP_RB_WPTR, &cp_rb_wptr);
+	kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3);
+	KGSL_LOG_DUMP(device,
+		"        RPTR = %08X | WPTR   = %08X | RPTR_WR   = %08X"
+		"\n", cp_rb_rptr, cp_rb_wptr, r3);
+
+	kgsl_regread(device, REG_CP_IB1_BASE, &cp_ib1_base);
+	kgsl_regread(device, REG_CP_IB1_BUFSZ, &cp_ib1_bufsz);
+	KGSL_LOG_DUMP(device,
+		"CP_IB1: BASE = %08X | BUFSZ  = %d\n", cp_ib1_base,
+		cp_ib1_bufsz);
+
+	kgsl_regread(device, REG_CP_IB2_BASE, &cp_ib2_base);
+	kgsl_regread(device, REG_CP_IB2_BUFSZ, &cp_ib2_bufsz);
+	KGSL_LOG_DUMP(device,
+		"CP_IB2: BASE = %08X | BUFSZ  = %d\n", cp_ib2_base,
+		cp_ib2_bufsz);
+
+	kgsl_regread(device, REG_CP_INT_CNTL, &r1);
+	kgsl_regread(device, REG_CP_INT_STATUS, &r2);
+	KGSL_LOG_DUMP(device, "CP_INT: CNTL = %08X | STATUS = %08X\n", r1, r2);
+
+	kgsl_regread(device, REG_CP_ME_CNTL, &r1);
+	kgsl_regread(device, REG_CP_ME_STATUS, &r2);
+	kgsl_regread(device, REG_MASTER_INT_SIGNAL, &r3);
+	KGSL_LOG_DUMP(device,
+		"CP_ME:  CNTL = %08X | STATUS = %08X | MSTR_INT_SGNL = "
+		"%08X\n", r1, r2, r3);
+
+	kgsl_regread(device, REG_CP_STAT, &cp_stat);
+	KGSL_LOG_DUMP(device, "CP_STAT      = %08X\n", cp_stat);
+#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+	{
+		struct log_field lns[] = {
+			{cp_stat &  BIT(0), "WR_BSY     0"},
+			{cp_stat &  BIT(1), "RD_RQ_BSY  1"},
+			{cp_stat &  BIT(2), "RD_RTN_BSY 2"},
+		};
+		adreno_dump_fields(device, "    MIU=", lns, ARRAY_SIZE(lns));
+	}
+	{
+		struct log_field lns[] = {
+			{cp_stat &  BIT(5), "RING_BUSY  5"},
+			{cp_stat &  BIT(6), "NDRCTS_BSY 6"},
+			{cp_stat &  BIT(7), "NDRCT2_BSY 7"},
+			{cp_stat &  BIT(9), "ST_BUSY    9"},
+			{cp_stat & BIT(10), "BUSY      10"},
+		};
+		adreno_dump_fields(device, "    CSF=", lns, ARRAY_SIZE(lns));
+	}
+	{
+		struct log_field lns[] = {
+			{cp_stat & BIT(11), "RNG_Q_BSY 11"},
+			{cp_stat & BIT(12), "NDRCTS_Q_B12"},
+			{cp_stat & BIT(13), "NDRCT2_Q_B13"},
+			{cp_stat & BIT(16), "ST_QUEUE_B16"},
+			{cp_stat & BIT(17), "PFP_BUSY  17"},
+		};
+		adreno_dump_fields(device, "   RING=", lns, ARRAY_SIZE(lns));
+	}
+	{
+		struct log_field lns[] = {
+			{cp_stat &  BIT(3), "RBIU_BUSY  3"},
+			{cp_stat &  BIT(4), "RCIU_BUSY  4"},
+			{cp_stat & BIT(18), "MQ_RG_BSY 18"},
+			{cp_stat & BIT(19), "MQ_NDRS_BS19"},
+			{cp_stat & BIT(20), "MQ_NDR2_BS20"},
+			{cp_stat & BIT(21), "MIU_WC_STL21"},
+			{cp_stat & BIT(22), "CP_NRT_BSY22"},
+			{cp_stat & BIT(23), "3D_BUSY   23"},
+			{cp_stat & BIT(26), "ME_BUSY   26"},
+			{cp_stat & BIT(29), "ME_WC_BSY 29"},
+			{cp_stat & BIT(30), "MIU_FF EM 30"},
+			{cp_stat & BIT(31), "CP_BUSY   31"},
+		};
+		adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns));
+	}
+#endif
+
+	kgsl_regread(device, REG_SCRATCH_REG0, &r1);
+	KGSL_LOG_DUMP(device, "SCRATCH_REG0       = %08X\n", r1);
+
+	kgsl_regread(device, REG_COHER_SIZE_PM4, &r1);
+	kgsl_regread(device, REG_COHER_BASE_PM4, &r2);
+	kgsl_regread(device, REG_COHER_STATUS_PM4, &r3);
+	KGSL_LOG_DUMP(device,
+		"COHER:  SIZE_PM4   = %08X | BASE_PM4 = %08X | STATUS_PM4"
+		" = %08X\n", r1, r2, r3);
+
+	kgsl_regread(device, MH_AXI_ERROR, &r1);
+	KGSL_LOG_DUMP(device, "MH:     AXI_ERROR  = %08X\n", r1);
+
+	kgsl_regread(device, MH_MMU_PAGE_FAULT, &r1);
+	kgsl_regread(device, MH_MMU_CONFIG, &r2);
+	kgsl_regread(device, MH_MMU_MPU_BASE, &r3);
+	KGSL_LOG_DUMP(device,
+		"MH_MMU: PAGE_FAULT = %08X | CONFIG   = %08X | MPU_BASE ="
+		" %08X\n", r1, r2, r3);
+
+	kgsl_regread(device, MH_MMU_MPU_END, &r1);
+	kgsl_regread(device, MH_MMU_VA_RANGE, &r2);
+	kgsl_regread(device, MH_MMU_PT_BASE, &pt_base);
+	KGSL_LOG_DUMP(device,
+		"        MPU_END    = %08X | VA_RANGE = %08X | PT_BASE  ="
+		" %08X\n", r1, r2, pt_base);
+
+	KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ", KGSL_PAGETABLE_SIZE);
+
+	kgsl_regread(device, MH_MMU_TRAN_ERROR, &r1);
+	KGSL_LOG_DUMP(device, "        TRAN_ERROR = %08X\n", r1);
+
+	kgsl_regread(device, MH_INTERRUPT_MASK, &r1);
+	kgsl_regread(device, MH_INTERRUPT_STATUS, &r2);
+	KGSL_LOG_DUMP(device,
+		"MH_INTERRUPT: MASK = %08X | STATUS   = %08X\n", r1, r2);
+
+	ts_processed = device->ftbl->readtimestamp(device,
+		KGSL_TIMESTAMP_RETIRED);
+	KGSL_LOG_DUMP(device, "TIMESTM RTRD: %08X\n", ts_processed);
+
+	num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
+						cp_rb_rptr);
+	if (num_item <= 0)
+		KGSL_LOG_POSTMORTEM_WRITE(device, "Ringbuffer is Empty.\n");
+
+	rb_copy = vmalloc(rb_count<<2);
+	if (!rb_copy) {
+		KGSL_LOG_POSTMORTEM_WRITE(device,
+			"vmalloc(%d) failed\n", rb_count << 2);
+		result = -ENOMEM;
+		goto end;
+	}
+
+	KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x  rb_size:%d  num_item:%d\n",
+		cp_rb_base, rb_count<<2, num_item);
+	rb_vaddr = (const uint32_t *)kgsl_sharedmem_convertaddr(device, pt_base,
+					cp_rb_base, &rb_memsize);
+	if (!rb_vaddr) {
+		KGSL_LOG_POSTMORTEM_WRITE(device,
+			"Can't fetch vaddr for CP_RB_BASE\n");
+		goto error_vfree;
+	}
+
+	read_idx = (int)cp_rb_rptr - 64;
+	if (read_idx < 0)
+		read_idx += rb_count;
+	write_idx = (int)cp_rb_wptr + 16;
+	if (write_idx > rb_count)
+		write_idx -= rb_count;
+	num_item += 64+16;
+	if (num_item > rb_count)
+		num_item = rb_count;
+	if (write_idx >= read_idx)
+		memcpy(rb_copy, rb_vaddr+read_idx, num_item<<2);
+	else {
+		int part1_c = rb_count-read_idx;
+		memcpy(rb_copy, rb_vaddr+read_idx, part1_c<<2);
+		memcpy(rb_copy+part1_c, rb_vaddr, (num_item-part1_c)<<2);
+	}
+
+	/* extract the latest ib commands from the buffer */
+	ib_list.count = 0;
+	i = 0;
+	for (read_idx = 0; read_idx < num_item; ) {
+		uint32_t this_cmd = rb_copy[read_idx++];
+		if (this_cmd == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
+			uint32_t ib_addr = rb_copy[read_idx++];
+			uint32_t ib_size = rb_copy[read_idx++];
+			dump_ib1(device, pt_base, (read_idx-3)<<2, ib_addr,
+				ib_size, &ib_list, 0);
+			for (; i < ib_list.count; ++i)
+				dump_ib(device, "IB2:", pt_base,
+					ib_list.offsets[i],
+					ib_list.bases[i],
+					ib_list.sizes[i], 0);
+		}
+	}
+
+	read_idx = (int)cp_rb_rptr - 64;
+	if (read_idx < 0)
+		read_idx += rb_count;
+	KGSL_LOG_DUMP(device,
+		"RB: addr=%8.8x  window:%4.4x-%4.4x, start:%4.4x\n",
+		cp_rb_base, cp_rb_rptr, cp_rb_wptr, read_idx);
+	adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count);
+
+	if (adreno_ib_dump_enabled()) {
+		for (read_idx = 64; read_idx >= 0; --read_idx) {
+			uint32_t this_cmd = rb_copy[read_idx];
+			if (this_cmd == pm4_type3_packet(
+				PM4_INDIRECT_BUFFER_PFD, 2)) {
+				uint32_t ib_addr = rb_copy[read_idx+1];
+				uint32_t ib_size = rb_copy[read_idx+2];
+				if (cp_ib1_bufsz && cp_ib1_base == ib_addr) {
+					KGSL_LOG_DUMP(device,
+						"IB1: base:%8.8X  "
+						"count:%d\n", ib_addr, ib_size);
+					dump_ib(device, "IB1: ", pt_base,
+						read_idx<<2, ib_addr, ib_size,
+						1);
+				}
+			}
+		}
+		for (i = 0; i < ib_list.count; ++i) {
+			if (cp_ib2_bufsz && cp_ib2_base == ib_list.bases[i]) {
+				uint32_t ib_size = ib_list.sizes[i];
+				uint32_t ib_offset = ib_list.offsets[i];
+				KGSL_LOG_DUMP(device,
+					"IB2: base:%8.8X  count:%d\n",
+					cp_ib2_base, ib_size);
+				dump_ib(device, "IB2: ", pt_base, ib_offset,
+					ib_list.bases[i], ib_size, 1);
+			}
+		}
+	}
+
+	/* Dump the registers if the user asked for it */
+
+	if (adreno_is_a20x(adreno_dev))
+		adreno_dump_regs(device, a200_registers,
+			ARRAY_SIZE(a200_registers) / 2);
+	else if (adreno_is_a22x(adreno_dev))
+		adreno_dump_regs(device, a220_registers,
+			ARRAY_SIZE(a220_registers) / 2);
+
+error_vfree:
+	vfree(rb_copy);
+end:
+	return result;
+}
+
+/**
+ * adreno_postmortem_dump - Dump the current GPU state
+ * @device: A pointer to the KGSL device to dump
+ * @manual: A flag that indicates if this was a manually triggered
+ *          dump (from debugfs).  If zero, then this is assumed to be a
+ *          dump automatically triggered from a hang
+ */
+
+int adreno_postmortem_dump(struct kgsl_device *device, int manual)
+{
+	bool saved_nap;
+
+	BUG_ON(device == NULL);
+
+	/* For a manual dump, make sure that the system is idle */
+
+	if (manual) {
+		if (device->active_cnt != 0) {
+			mutex_unlock(&device->mutex);
+			wait_for_completion(&device->suspend_gate);
+			mutex_lock(&device->mutex);
+		}
+
+		if (device->state == KGSL_STATE_ACTIVE)
+			kgsl_idle(device,  KGSL_TIMEOUT_DEFAULT);
+
+	}
+	/* Disable the idle timer so we don't get interrupted */
+	del_timer(&device->idle_timer);
+
+	/* Turn off napping to make sure we have the clocks' full
+	   attention through the following process */
+	saved_nap = device->pwrctrl.nap_allowed;
+	device->pwrctrl.nap_allowed = false;
+
+	/* Force on the clocks */
+	kgsl_pwrctrl_wake(device);
+
+	/* Disable the irq */
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+	/* If this is not a manual trigger, then set up the
+	   state to try to recover */
+
+	if (!manual) {
+		device->state = KGSL_STATE_DUMP_AND_RECOVER;
+		KGSL_PWR_WARN(device,
+				"state -> DUMP_AND_RECOVER, device %d\n",
+				device->id);
+	}
+
+	KGSL_DRV_ERR(device,
+			"wait for work in workqueue to complete\n");
+	mutex_unlock(&device->mutex);
+	flush_workqueue(device->work_queue);
+	mutex_lock(&device->mutex);
+	adreno_dump(device);
+
+	/* Restore nap mode */
+	device->pwrctrl.nap_allowed = saved_nap;
+
+	/* On a manual trigger, turn on the interrupts and put
+	   the clocks to sleep.  They will recover themselves
+	   on the next event.  For a hang, leave things as they
+	   are until recovery kicks in. */
+
+	if (manual) {
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+
+		/* try to go into a sleep mode until the next event */
+		device->requested_state = KGSL_STATE_SLEEP;
+		kgsl_pwrctrl_sleep(device);
+	}
+
+	KGSL_DRV_ERR(device, "Dump Finished\n");
+
+	return 0;
+}
diff --git a/drivers/gpu/msm/adreno_postmortem.h b/drivers/gpu/msm/adreno_postmortem.h
new file mode 100644
index 0000000..b677800
--- /dev/null
+++ b/drivers/gpu/msm/adreno_postmortem.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ADRENO_POSTMORTEM_H
+#define __ADRENO_POSTMORTEM_H
+
+struct kgsl_device;
+
+int adreno_postmortem_dump(struct kgsl_device *device, int manual);
+
+#endif /* __ADRENO_POSTMORTEM_H */
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
new file mode 100644
index 0000000..e7aaa14
--- /dev/null
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -0,0 +1,929 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/log2.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_ringbuffer.h"
+
+#include "a200_reg.h"
+
+#define VALID_STATUS_COUNT_MAX	10
+#define GSL_RB_NOP_SIZEDWORDS				2
+/* protected mode error checking below register address 0x800
+ * note: if the CP_INTERRUPT packet is used then the check needs
+ * to change to below register address 0x7C8
+ */
+#define GSL_RB_PROTECTED_MODE_CONTROL		0x200001F2
+
+/* Firmware file names
+ * The legacy file names must remain, but the macro names are updated to
+ * match the current kgsl naming model:
+ * a200 is yamato
+ * a220 is leia
+ */
+#define A200_PFP_FW "yamato_pfp.fw"
+#define A200_PM4_FW "yamato_pm4.fw"
+#define A220_PFP_470_FW "leia_pfp_470.fw"
+#define A220_PM4_470_FW "leia_pm4_470.fw"
+#define A225_PFP_FW "a225_pfp.fw"
+#define A225_PM4_FW "a225_pm4.fw"
+
+/* functions */
+void kgsl_cp_intrcallback(struct kgsl_device *device)
+{
+	unsigned int status = 0, num_reads = 0, master_status = 0;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
+	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
+		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
+		adreno_regread(device, REG_CP_INT_STATUS, &status);
+		adreno_regread(device, REG_MASTER_INT_SIGNAL,
+					&master_status);
+		num_reads++;
+	}
+	if (num_reads > 1)
+		KGSL_DRV_WARN(device,
+			"Looped %d times to read REG_CP_INT_STATUS\n",
+			num_reads);
+	if (!status) {
+		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
+			/* This indicates that we could not read CP_INT_STAT.
+			 * As a precaution just wake up processes so
+			 * they can check their timestamps. Since we
+			 * did not ack any interrupts, this interrupt will
+			 * be generated again */
+			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
+			wake_up_interruptible_all(&device->wait_queue);
+		} else
+			KGSL_DRV_WARN(device, "Spurious interrput detected\n");
+		return;
+	}
+
+	if (status & CP_INT_CNTL__RB_INT_MASK) {
+		/* signal intr completion event */
+		unsigned int enableflag = 0;
+		kgsl_sharedmem_writel(&rb->device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
+			enableflag);
+		wmb();
+		KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
+	}
+
+	if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer TO packet in IB interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer opcode error interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer protected mode error interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer reserved bit error interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__IB_ERROR_MASK) {
+		KGSL_CMD_CRIT(rb->device,
+			"ringbuffer IB error interrupt\n");
+		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	}
+	if (status & CP_INT_CNTL__SW_INT_MASK)
+		KGSL_CMD_INFO(rb->device, "ringbuffer software interrupt\n");
+
+	if (status & CP_INT_CNTL__IB2_INT_MASK)
+		KGSL_CMD_INFO(rb->device, "ringbuffer ib2 interrupt\n");
+
+	if (status & (~KGSL_CP_INT_MASK))
+		KGSL_CMD_WARN(rb->device,
+			"bad bits in REG_CP_INT_STATUS %08x\n", status);
+
+	/* only ack bits we understand */
+	status &= KGSL_CP_INT_MASK;
+	adreno_regwrite(device, REG_CP_INT_ACK, status);
+
+	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
+		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
+		wake_up_interruptible_all(&device->wait_queue);
+		atomic_notifier_call_chain(&(device->ts_notifier_list),
+					   device->id,
+					   NULL);
+	}
+}
+
+static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
+{
+	BUG_ON(rb->wptr == 0);
+
+	/* synchronize memory before informing the hardware of the
+	 * new commands.
+	 */
+	mb();
+
+	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
+}
+
+static int
+adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
+			  int wptr_ahead)
+{
+	int nopcount;
+	unsigned int freecmds;
+	unsigned int *cmds;
+	uint cmds_gpu;
+
+	/* if wptr ahead, fill the remaining with NOPs */
+	if (wptr_ahead) {
+		/* -1 for header */
+		nopcount = rb->sizedwords - rb->wptr - 1;
+
+		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;
+
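+		/* a single NOP packet whose payload spans the remaining
+		 * dwords lets the CP skip the tail of the buffer and wrap
+		 * back to offset 0 */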
+		GSL_RB_WRITE(cmds, cmds_gpu, pm4_nop_packet(nopcount));
+
+		/* Make sure that rptr is not 0 before submitting
+		 * commands at the end of ringbuffer. We do not
+		 * want the rptr and wptr to become equal when
+		 * the ringbuffer is not empty */
+		do {
+			GSL_RB_GET_READPTR(rb, &rb->rptr);
+		} while (!rb->rptr);
+
+		rb->wptr++;
+
+		adreno_ringbuffer_submit(rb);
+
+		rb->wptr = 0;
+	}
+
+	/* wait for space in ringbuffer */
+	do {
+		GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+		freecmds = rb->rptr - rb->wptr;
+
+	} while ((freecmds != 0) && (freecmds <= numcmds));
+
+	return 0;
+}
+
+
+static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+					     unsigned int numcmds)
+{
+	unsigned int *ptr = NULL;
+	int status = 0;
+
+	BUG_ON(numcmds >= rb->sizedwords);
+
+	GSL_RB_GET_READPTR(rb, &rb->rptr);
+	/* check for available space */
+	if (rb->wptr >= rb->rptr) {
+		/* wptr ahead or equal to rptr */
+		/* reserve dwords for nop packet */
+		if ((rb->wptr + numcmds) > (rb->sizedwords -
+				GSL_RB_NOP_SIZEDWORDS))
+			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
+	} else {
+		/* wptr behind rptr */
+		if ((rb->wptr + numcmds) >= rb->rptr)
+			status  = adreno_ringbuffer_waitspace(rb, numcmds, 0);
+		/* check for remaining space */
+		/* reserve dwords for nop packet */
+		if ((rb->wptr + numcmds) > (rb->sizedwords -
+				GSL_RB_NOP_SIZEDWORDS))
+			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
+	}
+
+	if (status == 0) {
+		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+		rb->wptr += numcmds;
+	}
+
+	return ptr;
+}
+
+static int _load_firmware(struct kgsl_device *device, const char *fwfile,
+			  void **data, int *len)
+{
+	const struct firmware *fw = NULL;
+	int ret;
+
+	ret = request_firmware(&fw, fwfile, device->dev);
+
+	if (ret) {
+		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
+			     fwfile, ret);
+		return ret;
+	}
+
+	*data = kmalloc(fw->size, GFP_KERNEL);
+
+	if (*data) {
+		memcpy(*data, fw->data, fw->size);
+		*len = fw->size;
+	} else
+		KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);
+
+	release_firmware(fw);
+	return (*data != NULL) ? 0 : -ENOMEM;
+}
+
+static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	const char *fwfile;
+	int i, ret = 0;
+
+	if (adreno_is_a220(adreno_dev)) {
+		fwfile =  A220_PM4_470_FW;
+	} else if (adreno_is_a225(adreno_dev)) {
+		fwfile =  A225_PM4_FW;
+	} else if (adreno_is_a20x(adreno_dev)) {
+		fwfile =  A200_PM4_FW;
+	} else {
+		KGSL_DRV_ERR(device, "Could not load PM4 file\n");
+		return -EINVAL;
+	}
+
+	if (adreno_dev->pm4_fw == NULL) {
+		int len;
+		unsigned int *ptr;
+
+		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
+		if (ret)
+			goto err;
+
+		/* PM4 size is 3 dword aligned plus 1 dword of version */
+		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
+			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
+		adreno_dev->pm4_fw = ptr;
+	}
+
+	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
+		adreno_dev->pm4_fw[0]);
+
+	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
+	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
+	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
+		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
+				     adreno_dev->pm4_fw[i]);
+err:
+	return ret;
+}
+
+static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	const char *fwfile;
+	int i, ret = 0;
+
+	if (adreno_is_a220(adreno_dev)) {
+		fwfile =  A220_PFP_470_FW;
+	} else if (adreno_is_a225(adreno_dev)) {
+		fwfile =  A225_PFP_FW;
+	} else if (adreno_is_a20x(adreno_dev)) {
+		fwfile = A200_PFP_FW;
+	} else {
+		KGSL_DRV_ERR(device, "Could not load PFP firmware\n");
+		return -EINVAL;
+	}
+
+	if (adreno_dev->pfp_fw == NULL) {
+		int len;
+		unsigned int *ptr;
+
+		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
+		if (ret)
+			goto err;
+
+		/* PFP size should be dword aligned */
+		if (len % sizeof(uint32_t) != 0) {
+			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
+		adreno_dev->pfp_fw = ptr;
+	}
+
+	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
+		adreno_dev->pfp_fw[0]);
+
+	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
+	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
+		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
+				     adreno_dev->pfp_fw[i]);
+err:
+	return ret;
+}
+
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
+{
+	int status;
+	/*cp_rb_cntl_u cp_rb_cntl; */
+	union reg_cp_rb_cntl cp_rb_cntl;
+	unsigned int *cmds, rb_cntl;
+	struct kgsl_device *device = rb->device;
+	uint cmds_gpu;
+
+	if (rb->flags & KGSL_FLAGS_STARTED)
+		return 0;
+
+	if (init_ram) {
+		rb->timestamp = 0;
+		GSL_RB_INIT_TIMESTAMP(rb);
+	}
+
+	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
+			   sizeof(struct kgsl_rbmemptrs));
+
+	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
+			   (rb->sizedwords << 2));
+
+	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
+			     (rb->memptrs_desc.gpuaddr
+			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
+
+	/* setup WPTR delay */
+	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);
+
+	/*setup REG_CP_RB_CNTL */
+	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
+	cp_rb_cntl.val = rb_cntl;
+
+	/*
+	 * The size of the ringbuffer in the hardware is the log2
+	 * representation of the size in quadwords (sizedwords / 2)
+	 */
+	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
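+	/* e.g. with the 32KB ring defined in adreno_ringbuffer.h:
+	 * sizedwords = 8192, quadwords = 4096, rb_bufsz = ilog2(4096) = 12 */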
+
+	/*
+	 * Specify the quadwords to read before updating mem RPTR.
+	 * Like above, pass the log2 representation of the blocksize
+	 * in quadwords.
+	 */
+	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
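+	/* e.g. KGSL_RB_BLKSIZE of 16 bytes is 2 quadwords, so rb_blksz = 1 */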
+
+	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
+	/* mem RPTR writebacks */
+	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;
+
+	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);
+
+	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);
+
+	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
+			     rb->memptrs_desc.gpuaddr +
+			     GSL_RB_MEMPTRS_RPTR_OFFSET);
+
+	/* explicitly clear all cp interrupts */
+	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
+
+	/* setup scratch/timestamp */
+	adreno_regwrite(device, REG_SCRATCH_ADDR,
+			     device->memstore.gpuaddr +
+			     KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));
+
+	adreno_regwrite(device, REG_SCRATCH_UMSK,
+			     GSL_RB_MEMPTRS_SCRATCH_MASK);
+
+	/* load the CP ucode */
+
+	status = adreno_ringbuffer_load_pm4_ucode(device);
+	if (status != 0)
+		return status;
+
+	/* load the prefetch parser ucode */
+	status = adreno_ringbuffer_load_pfp_ucode(device);
+	if (status != 0)
+		return status;
+
+	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+	rb->rptr = 0;
+	rb->wptr = 0;
+
+	/* clear ME_HALT to start micro engine */
+	adreno_regwrite(device, REG_CP_ME_CNTL, 0);
+
+	/* ME_INIT */
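+	/* the ME_INIT packet is 19 dwords: one type-3 header plus the 18
+	 * payload dwords written below */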
+	cmds = adreno_ringbuffer_allocspace(rb, 19);
+	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
+
+	GSL_RB_WRITE(cmds, cmds_gpu, PM4_HDR_ME_INIT);
+	/* All fields present (bits 9:0) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
+	/* Disable/Enable Real-Time Stream processing (present but ignored) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
+
+	/* Vertex and Pixel Shader Start Addresses in instructions
+	* (3 DWORDS per instruction) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
+	/* Maximum Contexts */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
+	/* Write Confirm Interval: the CP will wait wait_interval * 16
+	 * clocks between polls */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	/* NQ and External Memory Swap */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Protected mode error checking */
+	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
+	/* Disable header dumping and Header dump address */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Header dump size */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	adreno_ringbuffer_submit(rb);
+
+	/* idle device to validate ME INIT */
+	status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+
+	if (status == 0)
+		rb->flags |= KGSL_FLAGS_STARTED;
+
+	return status;
+}
+
+int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
+{
+	if (rb->flags & KGSL_FLAGS_STARTED) {
+		/* ME_HALT */
+		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
+
+		rb->flags &= ~KGSL_FLAGS_STARTED;
+	}
+
+	return 0;
+}
+
+int adreno_ringbuffer_init(struct kgsl_device *device)
+{
+	int status;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	rb->device = device;
+	/*
+	 * It is silly to convert this to words and then back to bytes
+	 * immediately below, but most of the rest of the code deals
+	 * in words, so we might as well only do the math once
+	 */
+	rb->sizedwords = KGSL_RB_SIZE >> 2;
+
+	/* allocate memory for ringbuffer */
+	status = kgsl_allocate_contiguous(&rb->buffer_desc,
+		(rb->sizedwords << 2));
+
+	if (status != 0) {
+		adreno_ringbuffer_close(rb);
+		return status;
+	}
+
+	/* allocate memory for polling and timestamps */
+	/* This really can be at 4 byte alignment boundry but for using MMU
+	 * we need to make it at page boundary */
+	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
+		sizeof(struct kgsl_rbmemptrs));
+
+	if (status != 0) {
+		adreno_ringbuffer_close(rb);
+		return status;
+	}
+
+	/* overlay structure on memptrs memory */
+	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;
+
+	return 0;
+}
+
+int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+
+	kgsl_sharedmem_free(&rb->buffer_desc);
+	kgsl_sharedmem_free(&rb->memptrs_desc);
+
+	kfree(adreno_dev->pfp_fw);
+	kfree(adreno_dev->pm4_fw);
+
+	adreno_dev->pfp_fw = NULL;
+	adreno_dev->pm4_fw = NULL;
+
+	memset(rb, 0, sizeof(struct adreno_ringbuffer));
+
+	return 0;
+}
+
+static uint32_t
+adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
+				unsigned int flags, unsigned int *cmds,
+				int sizedwords)
+{
+	unsigned int *ringcmds;
+	unsigned int timestamp;
+	unsigned int total_sizedwords = sizedwords + 6;
+	unsigned int i;
+	unsigned int rcmd_gpu;
+
+	/* reserve space to temporarily turn off protected mode
+	 * error checking if needed
+	 */
+	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
+	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
+	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;
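+	/*
+	 * The base 6 extra dwords cover the CP_TIMESTAMP write and the
+	 * CACHE_FLUSH_TS event below; each protected mode toggle is 2
+	 * dwords, the timestamp compare adds a 5-dword COND_EXEC plus a
+	 * 2-dword interrupt, and kernel commands add a 2-dword identifier.
+	 */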
+
+	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+	rcmd_gpu = rb->buffer_desc.gpuaddr
+		+ sizeof(uint)*(rb->wptr-total_sizedwords);
+
+	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
+	}
+	if (flags & KGSL_CMD_FLAGS_PMODE) {
+		/* disable protected mode error checking */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
+	}
+
+	for (i = 0; i < sizedwords; i++) {
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
+		cmds++;
+	}
+
+	if (flags & KGSL_CMD_FLAGS_PMODE) {
+		/* re-enable protected mode error checking */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
+	}
+
+	rb->timestamp++;
+	timestamp = rb->timestamp;
+
+	/* start-of-pipeline and end-of-pipeline timestamps */
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
+	GSL_RB_WRITE(ringcmds, rcmd_gpu,
+		     (rb->device->memstore.gpuaddr +
+		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
+	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
+
+	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
+		/* Conditional execution based on memory values */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			pm4_type3_packet(PM4_COND_EXEC, 4));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
+			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
+			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
+		/* # of conditional command DWORDs */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			pm4_type3_packet(PM4_INTERRUPT, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
+	}
+
+	adreno_ringbuffer_submit(rb);
+
+	/* return timestamp of issued commands */
+	return timestamp;
+}
+
+void
+adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+						unsigned int flags,
+						unsigned int *cmds,
+						int sizedwords)
+{
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	if (device->state & KGSL_STATE_HUNG)
+		return;
+	adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
+}
+
+int
+adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+				struct kgsl_context *context,
+				struct kgsl_ibdesc *ibdesc,
+				unsigned int numibs,
+				uint32_t *timestamp,
+				unsigned int flags)
+{
+	struct kgsl_device *device = dev_priv->device;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+	unsigned int *link;
+	unsigned int *cmds;
+	unsigned int i;
+	struct adreno_context *drawctxt = context->devctxt;
+
+	if (device->state & KGSL_STATE_HUNG)
+		return -EBUSY;
+	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
+	      context == NULL)
+		return -EINVAL;
+
+	BUG_ON(ibdesc == 0);
+	BUG_ON(numibs == 0);
+
+	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
+		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
+			" will not accept commands for this context\n",
+			drawctxt);
+		return -EDEADLK;
+	}
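+	/* each IB consumes 3 dwords in the ringbuffer: an
+	 * INDIRECT_BUFFER_PFD header, the IB gpuaddr and its size in
+	 * dwords */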
+	link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL);
+	cmds = link;
+	if (!link) {
+		KGSL_MEM_ERR(device, "Failed to allocate memory for command"
+			" submission, size %x\n", numibs * 3);
+		return -ENOMEM;
+	}
+	for (i = 0; i < numibs; i++) {
+		(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
+			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);
+
+		*cmds++ = PM4_HDR_INDIRECT_BUFFER_PFD;
+		*cmds++ = ibdesc[i].gpuaddr;
+		*cmds++ = ibdesc[i].sizedwords;
+	}
+
+	kgsl_setstate(device,
+		      kgsl_pt_get_flags(device->mmu.hwpagetable,
+					device->id));
+
+	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
+
+	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
+					KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
+					&link[0], (cmds - link));
+
+	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
+		context->id, (unsigned int)ibdesc, numibs, *timestamp);
+
+	kfree(link);
+
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+	/*
+	 * insert wait for idle after every IB1
+	 * this is conservative but works reliably and is ok
+	 * even for performance simulations
+	 */
+	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
+#endif
+
+	return 0;
+}
+
+int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+				unsigned int *temp_rb_buffer,
+				int *rb_size)
+{
+	struct kgsl_device *device = rb->device;
+	unsigned int rb_rptr;
+	unsigned int retired_timestamp;
+	unsigned int temp_idx = 0;
+	unsigned int value;
+	unsigned int val1;
+	unsigned int val2;
+	unsigned int val3;
+	unsigned int copy_rb_contents = 0;
+	unsigned int cur_context;
+	unsigned int j;
+
+	GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+	retired_timestamp = device->ftbl->readtimestamp(device,
+		KGSL_TIMESTAMP_RETIRED);
+	KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
+			retired_timestamp);
+	/*
+	 * We need to go back in history by 4 dwords from the current location
+	 * of read pointer as 4 dwords are read to match the end of a command.
+	 * Also, take care of wrap around when moving back
+	 */
+	if (rb->rptr >= 4)
+		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
+	else
+		rb_rptr = rb->buffer_desc.size -
+			((4 - rb->rptr) * sizeof(unsigned int));
+	/* Read the rb contents going backwards to locate end of last
+	 * successfully executed command */
+	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
+		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
+		if (value == retired_timestamp) {
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
+			/* match the trailer adreno_ringbuffer_addcmds() emits:
+			 * either the conditional RB interrupt or the
+			 * CACHE_FLUSH_TS event write to eoptimestamp */
+			if ((val1 == 2 &&
+				val2 == pm4_type3_packet(PM4_INTERRUPT, 1)
+				&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
+				(val1 == pm4_type3_packet(PM4_EVENT_WRITE, 3)
+				&& val2 == CACHE_FLUSH_TS &&
+				val3 == (rb->device->memstore.gpuaddr +
+				KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
+				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+				KGSL_DRV_ERR(device,
+					"Found end of last executed "
+					"command at offset: %x\n",
+					rb_rptr / sizeof(unsigned int));
+				break;
+			} else {
+				if (rb_rptr < (3 * sizeof(unsigned int)))
+					rb_rptr = rb->buffer_desc.size -
+						(3 * sizeof(unsigned int))
+							+ rb_rptr;
+				else
+					rb_rptr -= (3 * sizeof(unsigned int));
+			}
+		}
+
+		if (rb_rptr == 0)
+			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
+		else
+			rb_rptr -= sizeof(unsigned int);
+	}
+
+	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
+		KGSL_DRV_ERR(device,
+			"GPU recovery from hang not possible because last"
+			" successful timestamp is overwritten\n");
+		return -EINVAL;
+	}
+	/* rb_rptr is now pointing to the first dword of the command following
+	 * the last successfully executed command sequence. Assumption is that
+	 * the GPU is hung in the command sequence pointed to by rb_rptr */
+	/* make sure the GPU is not hung in a command submitted by kgsl
+	 * itself */
+	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
+				adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size));
+	if (val1 == pm4_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
+		KGSL_DRV_ERR(device,
+			"GPU recovery from hang not possible because "
+			"of hang in kgsl command\n");
+		return -EINVAL;
+	}
+
+	/* current_context is the context that is presently active in the
+	 * GPU, i.e. the context in which the hang occurred */
+	kgsl_sharedmem_readl(&device->memstore, &cur_context,
+		KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
+	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
+		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
+		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+						rb->buffer_desc.size);
+		/* check for context switch indicator */
+		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			BUG_ON(value != pm4_type3_packet(PM4_MEM_WRITE, 2));
+			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			BUG_ON(val1 != (device->memstore.gpuaddr +
+				KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
+			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
+			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
+							rb->buffer_desc.size);
+			BUG_ON((copy_rb_contents == 0) &&
+				(value == cur_context));
+			/*
+			 * If we were copying the commands and got to this point
+			 * then we need to remove the 3 commands that appear
+			 * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
+			 */
+			if (temp_idx)
+				temp_idx -= 3;
+			/* if context switches to a context that did not cause
+			 * hang then start saving the rb contents as those
+			 * commands can be executed */
+			if (value != cur_context) {
+				copy_rb_contents = 1;
+				temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
+				temp_rb_buffer[temp_idx++] =
+						KGSL_CMD_IDENTIFIER;
+				temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
+				temp_rb_buffer[temp_idx++] =
+						KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+				temp_rb_buffer[temp_idx++] =
+					pm4_type3_packet(PM4_MEM_WRITE, 2);
+				temp_rb_buffer[temp_idx++] = val1;
+				temp_rb_buffer[temp_idx++] = value;
+			} else {
+				copy_rb_contents = 0;
+			}
+		} else if (copy_rb_contents)
+			temp_rb_buffer[temp_idx++] = value;
+	}
+
+	*rb_size = temp_idx;
+	KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
+	for (temp_idx = 0; temp_idx < *rb_size;) {
+		char str[80];
+		int idx = 0;
+		if ((temp_idx + 8) <= *rb_size)
+			j = 8;
+		else
+			j = *rb_size - temp_idx;
+		for (; j != 0; j--)
+			idx += scnprintf(str + idx, 80 - idx,
+				"%8.8X ", temp_rb_buffer[temp_idx++]);
+		printk(KERN_ALERT "%s", str);
+	}
+	return 0;
+}
+
+void
+adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
+			int num_rb_contents)
+{
+	int i;
+	unsigned int *ringcmds;
+	unsigned int rcmd_gpu;
+
+	if (!num_rb_contents)
+		return;
+
+	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
+		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
+		rb->rptr = 0;
+		BUG_ON(num_rb_contents > rb->buffer_desc.size);
+	}
+	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
+	for (i = 0; i < num_rb_contents; i++)
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
+	rb->wptr += num_rb_contents;
+	adreno_ringbuffer_submit(rb);
+}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
new file mode 100644
index 0000000..04432fe
--- /dev/null
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_RINGBUFFER_H
+#define __ADRENO_RINGBUFFER_H
+
+#define GSL_RB_USE_MEM_RPTR
+#define GSL_RB_USE_MEM_TIMESTAMP
+#define GSL_DEVICE_SHADOW_MEMSTORE_TO_USER
+
+/*
+ * Adreno ringbuffer sizes in bytes - these are converted to
+ * the appropriate log2 values in the code
+ */
+
+#define KGSL_RB_SIZE (32 * 1024)
+#define KGSL_RB_BLKSIZE 16
+
+/* CP timestamp register */
+#define	REG_CP_TIMESTAMP		 REG_SCRATCH_REG0
+
+
+struct kgsl_device;
+struct kgsl_device_private;
+
+#define GSL_RB_MEMPTRS_SCRATCH_COUNT	 8
+struct kgsl_rbmemptrs {
+	int  rptr;
+	int  wptr_poll;
+};
+
+#define GSL_RB_MEMPTRS_RPTR_OFFSET \
+	(offsetof(struct kgsl_rbmemptrs, rptr))
+
+#define GSL_RB_MEMPTRS_WPTRPOLL_OFFSET \
+	(offsetof(struct kgsl_rbmemptrs, wptr_poll))
+
+struct adreno_ringbuffer {
+	struct kgsl_device *device;
+	uint32_t flags;
+
+	struct kgsl_memdesc buffer_desc;
+
+	struct kgsl_memdesc memptrs_desc;
+	struct kgsl_rbmemptrs *memptrs;
+
+	/*ringbuffer size */
+	unsigned int sizedwords;
+
+	unsigned int wptr; /* write pointer offset in dwords from baseaddr */
+	unsigned int rptr; /* read pointer offset in dwords from baseaddr */
+	uint32_t timestamp;
+};
+
+/* dword base address of the GFX decode space */
+#define GSL_HAL_SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))
+
+#define GSL_RB_WRITE(ring, gpuaddr, data) \
+	do { \
+		writel_relaxed(data, ring); \
+		wmb(); \
+		kgsl_cffdump_setmem(gpuaddr, data, 4); \
+		ring++; \
+		gpuaddr += sizeof(uint); \
+	} while (0)
+
+/* timestamp */
+#ifdef GSL_DEVICE_SHADOW_MEMSTORE_TO_USER
+#define GSL_RB_USE_MEM_TIMESTAMP
+#endif /* GSL_DEVICE_SHADOW_MEMSTORE_TO_USER */
+
+#ifdef GSL_RB_USE_MEM_TIMESTAMP
+/* enable timestamp (...scratch0) memory shadowing */
+#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x1
+#define GSL_RB_INIT_TIMESTAMP(rb)
+
+#else
+#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x0
+#define GSL_RB_INIT_TIMESTAMP(rb) \
+		adreno_regwrite((rb)->device->id, REG_CP_TIMESTAMP, 0)
+
+#endif /* GSL_RB_USE_MEM_TIMESTAMP */
+
+/* mem rptr */
+#ifdef GSL_RB_USE_MEM_RPTR
+#define GSL_RB_CNTL_NO_UPDATE 0x0 /* enable */
+#define GSL_RB_GET_READPTR(rb, data) \
+	do { \
+		*(data) = readl_relaxed(&(rb)->memptrs->rptr); \
+	} while (0)
+#else
+#define GSL_RB_CNTL_NO_UPDATE 0x1 /* disable */
+#define GSL_RB_GET_READPTR(rb, data) \
+	do { \
+		adreno_regread((rb)->device->id, REG_CP_RB_RPTR, (data)); \
+	} while (0)
+#endif /* GSL_RB_USE_MEM_RPTR */
+
+#define GSL_RB_CNTL_POLL_EN 0x0 /* disable */
+
+int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+				struct kgsl_context *context,
+				struct kgsl_ibdesc *ibdesc,
+				unsigned int numibs,
+				uint32_t *timestamp,
+				unsigned int flags);
+
+int adreno_ringbuffer_init(struct kgsl_device *device);
+
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb,
+				unsigned int init_ram);
+
+int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
+
+int adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
+
+void adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+					unsigned int flags,
+					unsigned int *cmdaddr,
+					int sizedwords);
+
+void kgsl_cp_intrcallback(struct kgsl_device *device);
+
+int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+				unsigned int *temp_rb_buffer,
+				int *rb_size);
+
+void
+adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
+			int num_rb_contents);
+
+static inline int adreno_ringbuffer_count(struct adreno_ringbuffer *rb,
+	unsigned int rptr)
+{
+	if (rb->wptr >= rptr)
+		return rb->wptr - rptr;
+	return rb->wptr + rb->sizedwords - rptr;
+}
+
+/* Increment a value by 4 bytes with wrap-around based on size */
+static inline unsigned int adreno_ringbuffer_inc_wrapped(unsigned int val,
+							unsigned int size)
+{
+	return (val + sizeof(unsigned int)) % size;
+}
+
+#endif  /* __ADRENO_RINGBUFFER_H */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
new file mode 100644
index 0000000..fef664b
--- /dev/null
+++ b/drivers/gpu/msm/kgsl.c
@@ -0,0 +1,2119 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/fb.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/android_pmem.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/ashmem.h>
+#include <linux/major.h>
+
+#include "kgsl.h"
+#include "kgsl_debugfs.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_log.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_device.h"
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "kgsl."
+
+static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
+module_param_named(ptcount, kgsl_pagetable_count, int, 0);
+MODULE_PARM_DESC(kgsl_pagetable_count,
+"Minimum number of pagetables for KGSL to allocate at initialization time");
+
+static inline struct kgsl_mem_entry *
+kgsl_mem_entry_create(void)
+{
+	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+	if (!entry)
+		KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
+	else
+		kref_init(&entry->refcount);
+
+	return entry;
+}
+
+void
+kgsl_mem_entry_destroy(struct kref *kref)
+{
+	struct kgsl_mem_entry *entry = container_of(kref,
+						    struct kgsl_mem_entry,
+						    refcount);
+	size_t size = entry->memdesc.size;
+
+	kgsl_sharedmem_free(&entry->memdesc);
+
+	if (entry->memtype == KGSL_USER_MEMORY)
+		entry->priv->stats.user -= size;
+	else if (entry->memtype == KGSL_MAPPED_MEMORY) {
+		if (entry->file_ptr)
+			fput(entry->file_ptr);
+
+		kgsl_driver.stats.mapped -= size;
+		entry->priv->stats.mapped -= size;
+	}
+
+	kfree(entry);
+}
+EXPORT_SYMBOL(kgsl_mem_entry_destroy);
+
+static
+void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
+				   struct kgsl_process_private *process)
+{
+	spin_lock(&process->mem_lock);
+	list_add(&entry->list, &process->mem_list);
+	spin_unlock(&process->mem_lock);
+
+	entry->priv = process;
+}
+
+/* Allocate a new context id */
+
+static struct kgsl_context *
+kgsl_create_context(struct kgsl_device_private *dev_priv)
+{
+	struct kgsl_context *context;
+	int ret, id;
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+
+	if (context == NULL)
+		return NULL;
+
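+	/*
+	 * idr_get_new() returns -EAGAIN when the layer preallocated by
+	 * idr_pre_get() has been consumed, so keep preallocating and
+	 * retrying until an id is assigned or allocation fails outright.
+	 */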
+	while (1) {
+		if (idr_pre_get(&dev_priv->device->context_idr,
+				GFP_KERNEL) == 0) {
+			kfree(context);
+			return NULL;
+		}
+
+		ret = idr_get_new(&dev_priv->device->context_idr,
+				  context, &id);
+
+		if (ret != -EAGAIN)
+			break;
+	}
+
+	if (ret) {
+		kfree(context);
+		return NULL;
+	}
+
+	context->id = id;
+	context->dev_priv = dev_priv;
+
+	return context;
+}
+
+static void
+kgsl_destroy_context(struct kgsl_device_private *dev_priv,
+		     struct kgsl_context *context)
+{
+	int id;
+
+	if (context == NULL)
+		return;
+
+	/* Fire a bug if the devctxt hasn't been freed */
+	BUG_ON(context->devctxt);
+
+	id = context->id;
+	kfree(context);
+
+	idr_remove(&dev_priv->device->context_idr, id);
+}
+
+/* to be called when a process is destroyed, this walks the memqueue and
+ * frees any entries that belong to the dying process
+ */
+static void kgsl_memqueue_cleanup(struct kgsl_device *device,
+				     struct kgsl_process_private *private)
+{
+	struct kgsl_mem_entry *entry, *entry_tmp;
+
+	if (!private)
+		return;
+
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	list_for_each_entry_safe(entry, entry_tmp, &device->memqueue, list) {
+		if (entry->priv == private) {
+			list_del(&entry->list);
+			kgsl_mem_entry_put(entry);
+		}
+	}
+}
+
+static void kgsl_memqueue_freememontimestamp(struct kgsl_device *device,
+				  struct kgsl_mem_entry *entry,
+				  uint32_t timestamp,
+				  enum kgsl_timestamp_type type)
+{
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	entry->free_timestamp = timestamp;
+
+	list_add_tail(&entry->list, &device->memqueue);
+}
+
+static void kgsl_memqueue_drain(struct kgsl_device *device)
+{
+	struct kgsl_mem_entry *entry, *entry_tmp;
+	uint32_t ts_processed;
+
+	BUG_ON(!mutex_is_locked(&device->mutex));
+
+	/* get current EOP timestamp */
+	ts_processed = device->ftbl->readtimestamp(device,
+		KGSL_TIMESTAMP_RETIRED);
+
+	list_for_each_entry_safe(entry, entry_tmp, &device->memqueue, list) {
+		KGSL_MEM_INFO(device,
+			"ts_processed %d ts_free %d gpuaddr %x\n",
+			ts_processed, entry->free_timestamp,
+			entry->memdesc.gpuaddr);
+		if (!timestamp_cmp(ts_processed, entry->free_timestamp))
+			break;
+
+		list_del(&entry->list);
+		kgsl_mem_entry_put(entry);
+	}
+}
+
+static void kgsl_memqueue_drain_unlocked(struct kgsl_device *device)
+{
+	mutex_lock(&device->mutex);
+	kgsl_check_suspended(device);
+	kgsl_memqueue_drain(device);
+	mutex_unlock(&device->mutex);
+}
+
+static void kgsl_check_idle_locked(struct kgsl_device *device)
+{
+	if (device->pwrctrl.nap_allowed == true &&
+	    device->state == KGSL_STATE_ACTIVE &&
+		device->requested_state == KGSL_STATE_NONE) {
+		device->requested_state = KGSL_STATE_NAP;
+		if (kgsl_pwrctrl_sleep(device) != 0)
+			mod_timer(&device->idle_timer,
+				  jiffies +
+				  device->pwrctrl.interval_timeout);
+	}
+}
+
+static void kgsl_check_idle(struct kgsl_device *device)
+{
+	mutex_lock(&device->mutex);
+	kgsl_check_idle_locked(device);
+	mutex_unlock(&device->mutex);
+}
+
+struct kgsl_device *kgsl_get_device(int dev_idx)
+{
+	int i;
+	struct kgsl_device *ret = NULL;
+
+	mutex_lock(&kgsl_driver.devlock);
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
+			ret = kgsl_driver.devp[i];
+			break;
+		}
+	}
+
+	mutex_unlock(&kgsl_driver.devlock);
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_get_device);
+
+static struct kgsl_device *kgsl_get_minor(int minor)
+{
+	struct kgsl_device *ret = NULL;
+
+	if (minor < 0 || minor >= KGSL_DEVICE_MAX)
+		return NULL;
+
+	mutex_lock(&kgsl_driver.devlock);
+	ret = kgsl_driver.devp[minor];
+	mutex_unlock(&kgsl_driver.devlock);
+
+	return ret;
+}
+
+int kgsl_register_ts_notifier(struct kgsl_device *device,
+			      struct notifier_block *nb)
+{
+	BUG_ON(device == NULL);
+	return atomic_notifier_chain_register(&device->ts_notifier_list,
+					      nb);
+}
+EXPORT_SYMBOL(kgsl_register_ts_notifier);
+
+int kgsl_unregister_ts_notifier(struct kgsl_device *device,
+				struct notifier_block *nb)
+{
+	BUG_ON(device == NULL);
+	return atomic_notifier_chain_unregister(&device->ts_notifier_list,
+						nb);
+}
+EXPORT_SYMBOL(kgsl_unregister_ts_notifier);
+
+int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp)
+{
+	unsigned int ts_processed;
+
+	ts_processed = device->ftbl->readtimestamp(device,
+		KGSL_TIMESTAMP_RETIRED);
+
+	return timestamp_cmp(ts_processed, timestamp);
+}
+EXPORT_SYMBOL(kgsl_check_timestamp);
+
+static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
+{
+	int status = -EINVAL;
+	unsigned int nap_allowed_saved;
+	struct kgsl_pwrscale_policy *policy_saved;
+
+	if (!device)
+		return -EINVAL;
+
+	KGSL_PWR_WARN(device, "suspend start\n");
+
+	mutex_lock(&device->mutex);
+	nap_allowed_saved = device->pwrctrl.nap_allowed;
+	device->pwrctrl.nap_allowed = false;
+	policy_saved = device->pwrscale.policy;
+	device->pwrscale.policy = NULL;
+	device->requested_state = KGSL_STATE_SUSPEND;
+	/* Make sure no user process is waiting for a timestamp
+	 * before suspending */
+	if (device->active_cnt != 0) {
+		mutex_unlock(&device->mutex);
+		wait_for_completion(&device->suspend_gate);
+		mutex_lock(&device->mutex);
+	}
+	/* Don't let the timer wake us during suspended sleep. */
+	del_timer(&device->idle_timer);
+	switch (device->state) {
+		case KGSL_STATE_INIT:
+			break;
+		case KGSL_STATE_ACTIVE:
+			/* Wait for the device to become idle */
+			device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
+			/* fall through and stop the device */
+		case KGSL_STATE_NAP:
+		case KGSL_STATE_SLEEP:
+			/* Get the completion ready to be waited upon. */
+			INIT_COMPLETION(device->hwaccess_gate);
+			device->ftbl->suspend_context(device);
+			device->ftbl->stop(device);
+			device->state = KGSL_STATE_SUSPEND;
+			KGSL_PWR_WARN(device, "state -> SUSPEND, device %d\n",
+				device->id);
+			break;
+		default:
+			KGSL_PWR_ERR(device, "suspend fail, device %d\n",
+					device->id);
+			goto end;
+	}
+	device->requested_state = KGSL_STATE_NONE;
+	device->pwrctrl.nap_allowed = nap_allowed_saved;
+	device->pwrscale.policy = policy_saved;
+	status = 0;
+
+end:
+	mutex_unlock(&device->mutex);
+	KGSL_PWR_WARN(device, "suspend end\n");
+	return status;
+}
+
+static int kgsl_resume_device(struct kgsl_device *device)
+{
+	int status = -EINVAL;
+
+	if (!device)
+		return -EINVAL;
+
+	KGSL_PWR_WARN(device, "resume start\n");
+	mutex_lock(&device->mutex);
+	if (device->state == KGSL_STATE_SUSPEND) {
+		device->requested_state = KGSL_STATE_ACTIVE;
+		kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
+		status = device->ftbl->start(device, 0);
+		if (status == 0) {
+			device->state = KGSL_STATE_ACTIVE;
+			KGSL_PWR_WARN(device,
+					"state -> ACTIVE, device %d\n",
+					device->id);
+		} else {
+			KGSL_PWR_ERR(device,
+					"resume failed, device %d\n",
+					device->id);
+			device->state = KGSL_STATE_INIT;
+			goto end;
+		}
+		complete_all(&device->hwaccess_gate);
+	}
+	device->requested_state = KGSL_STATE_NONE;
+
+end:
+	mutex_unlock(&device->mutex);
+	kgsl_check_idle(device);
+	KGSL_PWR_WARN(device, "resume end\n");
+	return status;
+}
+
+static int kgsl_suspend(struct device *dev)
+{
+
+	pm_message_t arg = {0};
+	struct kgsl_device *device = dev_get_drvdata(dev);
+	return kgsl_suspend_device(device, arg);
+}
+
+static int kgsl_resume(struct device *dev)
+{
+	struct kgsl_device *device = dev_get_drvdata(dev);
+	return kgsl_resume_device(device);
+}
+
+static int kgsl_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int kgsl_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+const struct dev_pm_ops kgsl_pm_ops = {
+	.suspend = kgsl_suspend,
+	.resume = kgsl_resume,
+	.runtime_suspend = kgsl_runtime_suspend,
+	.runtime_resume = kgsl_runtime_resume,
+};
+EXPORT_SYMBOL(kgsl_pm_ops);
+
+void kgsl_early_suspend_driver(struct early_suspend *h)
+{
+	struct kgsl_device *device = container_of(h,
+					struct kgsl_device, display_off);
+	kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
+}
+EXPORT_SYMBOL(kgsl_early_suspend_driver);
+
+int kgsl_suspend_driver(struct platform_device *pdev,
+					pm_message_t state)
+{
+	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
+	return kgsl_suspend_device(device, state);
+}
+EXPORT_SYMBOL(kgsl_suspend_driver);
+
+int kgsl_resume_driver(struct platform_device *pdev)
+{
+	struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
+	return kgsl_resume_device(device);
+}
+EXPORT_SYMBOL(kgsl_resume_driver);
+
+void kgsl_late_resume_driver(struct early_suspend *h)
+{
+	struct kgsl_device *device = container_of(h,
+					struct kgsl_device, display_off);
+	kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
+}
+EXPORT_SYMBOL(kgsl_late_resume_driver);
+
+/* file operations */
+static struct kgsl_process_private *
+kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
+{
+	struct kgsl_process_private *private;
+
+	mutex_lock(&kgsl_driver.process_mutex);
+	list_for_each_entry(private, &kgsl_driver.process_list, list) {
+		if (private->pid == task_tgid_nr(current)) {
+			private->refcnt++;
+			goto out;
+		}
+	}
+
+	/* no existing process private found for this dev_priv, create one */
+	private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
+	if (private == NULL) {
+		KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
+			sizeof(struct kgsl_process_private));
+		goto out;
+	}
+
+	spin_lock_init(&private->mem_lock);
+	private->refcnt = 1;
+	private->pid = task_tgid_nr(current);
+
+	INIT_LIST_HEAD(&private->mem_list);
+
+#ifdef CONFIG_MSM_KGSL_MMU
+	{
+		unsigned long pt_name;
+
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+		pt_name = task_tgid_nr(current);
+#else
+		pt_name = KGSL_MMU_GLOBAL_PT;
+#endif
+		private->pagetable = kgsl_mmu_getpagetable(pt_name);
+		if (private->pagetable == NULL) {
+			kfree(private);
+			private = NULL;
+			goto out;
+		}
+	}
+#endif
+
+	list_add(&private->list, &kgsl_driver.process_list);
+
+	kgsl_process_init_sysfs(private);
+
+out:
+	mutex_unlock(&kgsl_driver.process_mutex);
+	return private;
+}
+
+static void
+kgsl_put_process_private(struct kgsl_device *device,
+			 struct kgsl_process_private *private)
+{
+	struct kgsl_mem_entry *entry = NULL;
+	struct kgsl_mem_entry *entry_tmp = NULL;
+
+	if (!private)
+		return;
+
+	mutex_lock(&kgsl_driver.process_mutex);
+
+	if (--private->refcnt)
+		goto unlock;
+
+	KGSL_MEM_INFO(device,
+			"Memory usage: user (%d/%d) mapped (%d/%d)\n",
+			private->stats.user, private->stats.user_max,
+			private->stats.mapped, private->stats.mapped_max);
+
+	kgsl_process_uninit_sysfs(private);
+
+	list_del(&private->list);
+
+	list_for_each_entry_safe(entry, entry_tmp, &private->mem_list, list) {
+		list_del(&entry->list);
+		kgsl_mem_entry_put(entry);
+	}
+
+	kgsl_mmu_putpagetable(private->pagetable);
+	kfree(private);
+unlock:
+	mutex_unlock(&kgsl_driver.process_mutex);
+}
+
+static int kgsl_release(struct inode *inodep, struct file *filep)
+{
+	int result = 0;
+	struct kgsl_device_private *dev_priv = NULL;
+	struct kgsl_process_private *private = NULL;
+	struct kgsl_device *device;
+	struct kgsl_context *context;
+	int next = 0;
+
+	device = kgsl_driver.devp[iminor(inodep)];
+	BUG_ON(device == NULL);
+
+	dev_priv = (struct kgsl_device_private *) filep->private_data;
+	BUG_ON(dev_priv == NULL);
+	BUG_ON(device != dev_priv->device);
+	/* private could be null if kgsl_open is not successful */
+	private = dev_priv->process_priv;
+	filep->private_data = NULL;
+
+	mutex_lock(&device->mutex);
+	kgsl_check_suspended(device);
+
+	while (1) {
+		context = idr_get_next(&dev_priv->device->context_idr, &next);
+		if (context == NULL)
+			break;
+
+		if (context->dev_priv == dev_priv) {
+			device->ftbl->drawctxt_destroy(device, context);
+			kgsl_destroy_context(dev_priv, context);
+		}
+
+		next = next + 1;
+	}
+
+	device->open_count--;
+	if (device->open_count == 0) {
+		result = device->ftbl->stop(device);
+		device->state = KGSL_STATE_INIT;
+		KGSL_PWR_WARN(device, "state -> INIT, device %d\n", device->id);
+	}
+	/* clean up any to-be-freed entries that belong to this
+	 * process and this device
+	 */
+	kgsl_memqueue_cleanup(device, private);
+
+	mutex_unlock(&device->mutex);
+	kfree(dev_priv);
+
+	kgsl_put_process_private(device, private);
+
+	pm_runtime_put(device->parentdev);
+	return result;
+}
+
+static int kgsl_open(struct inode *inodep, struct file *filep)
+{
+	int result;
+	struct kgsl_device_private *dev_priv;
+	struct kgsl_device *device;
+	unsigned int minor = iminor(inodep);
+
+	device = kgsl_get_minor(minor);
+	BUG_ON(device == NULL);
+
+	if (filep->f_flags & O_EXCL) {
+		KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
+		return -EBUSY;
+	}
+
+	result = pm_runtime_get_sync(device->parentdev);
+	if (result < 0) {
+		KGSL_DRV_ERR(device,
+			"Runtime PM: Unable to wake up the device, rc = %d\n",
+			result);
+		return result;
+	}
+	result = 0;
+
+	dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
+	if (dev_priv == NULL) {
+		KGSL_DRV_ERR(device, "kzalloc(%d) failed\n",
+			sizeof(struct kgsl_device_private));
+		result = -ENOMEM;
+		goto err_pmruntime;
+	}
+
+	dev_priv->device = device;
+	filep->private_data = dev_priv;
+
+	/* Get file (per process) private struct */
+	dev_priv->process_priv = kgsl_get_process_private(dev_priv);
+	if (dev_priv->process_priv ==  NULL) {
+		result = -ENOMEM;
+		goto err_freedevpriv;
+	}
+
+	mutex_lock(&device->mutex);
+	kgsl_check_suspended(device);
+
+	if (device->open_count == 0) {
+		result = device->ftbl->start(device, true);
+
+		if (result) {
+			mutex_unlock(&device->mutex);
+			goto err_putprocess;
+		}
+		device->state = KGSL_STATE_ACTIVE;
+		KGSL_PWR_WARN(device,
+				"state -> ACTIVE, device %d\n", minor);
+	}
+	device->open_count++;
+	mutex_unlock(&device->mutex);
+
+	KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
+		device->name, kgsl_mmu_enabled() ? "on" : "off",
+		kgsl_pagetable_count);
+
+	return result;
+
+err_putprocess:
+	kgsl_put_process_private(device, dev_priv->process_priv);
+err_freedevpriv:
+	filep->private_data = NULL;
+	kfree(dev_priv);
+err_pmruntime:
+	pm_runtime_put(device->parentdev);
+	return result;
+}
+
+
+/* call with private->mem_lock locked */
+static struct kgsl_mem_entry *
+kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
+{
+	struct kgsl_mem_entry *entry = NULL, *result = NULL;
+
+	BUG_ON(private == NULL);
+
+	gpuaddr &= PAGE_MASK;
+
+	list_for_each_entry(entry, &private->mem_list, list) {
+		if (entry->memdesc.gpuaddr == gpuaddr) {
+			result = entry;
+			break;
+		}
+	}
+	return result;
+}
+
+/* call with private->mem_lock locked */
+struct kgsl_mem_entry *
+kgsl_sharedmem_find_region(struct kgsl_process_private *private,
+				unsigned int gpuaddr,
+				size_t size)
+{
+	struct kgsl_mem_entry *entry = NULL, *result = NULL;
+
+	BUG_ON(private == NULL);
+
+	list_for_each_entry(entry, &private->mem_list, list) {
+		if (gpuaddr >= entry->memdesc.gpuaddr &&
+		    ((gpuaddr + size) <=
+			(entry->memdesc.gpuaddr + entry->memdesc.size))) {
+			result = entry;
+			break;
+		}
+	}
+
+	return result;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_find_region);
+
+uint8_t *kgsl_gpuaddr_to_vaddr(const struct kgsl_memdesc *memdesc,
+	unsigned int gpuaddr, unsigned int *size)
+{
+	BUG_ON(memdesc->hostptr == NULL);
+
+	if (memdesc->gpuaddr == 0 || (gpuaddr < memdesc->gpuaddr ||
+		gpuaddr >= memdesc->gpuaddr + memdesc->size))
+		return NULL;
+
+	*size = memdesc->size - (gpuaddr - memdesc->gpuaddr);
+	return memdesc->hostptr + (gpuaddr - memdesc->gpuaddr);
+}
+EXPORT_SYMBOL(kgsl_gpuaddr_to_vaddr);
+
+/* call all ioctl sub functions with driver locked */
+static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
+					  unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_device_getproperty *param = data;
+
+	switch (param->type) {
+	case KGSL_PROP_VERSION:
+	{
+		struct kgsl_version version;
+		if (param->sizebytes != sizeof(version)) {
+			result = -EINVAL;
+			break;
+		}
+
+		version.drv_major = KGSL_VERSION_MAJOR;
+		version.drv_minor = KGSL_VERSION_MINOR;
+		version.dev_major = dev_priv->device->ver_major;
+		version.dev_minor = dev_priv->device->ver_minor;
+
+		if (copy_to_user(param->value, &version, sizeof(version)))
+			result = -EFAULT;
+
+		break;
+	}
+	default:
+		result = dev_priv->device->ftbl->getproperty(
+					dev_priv->device, param->type,
+					param->value, param->sizebytes);
+	}
+
+
+	return result;
+}
+
+static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
+						*dev_priv, unsigned int cmd,
+						void *data)
+{
+	int result = 0;
+	struct kgsl_device_waittimestamp *param = data;
+
+	/* Set the active count so that suspend doesn't do the
+	   wrong thing */
+
+	dev_priv->device->active_cnt++;
+
+	/* Don't wait forever, set a max value for now */
+	if (param->timeout == -1)
+		param->timeout = 10 * MSEC_PER_SEC;
+
+	result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
+					param->timestamp,
+					param->timeout);
+
+	kgsl_memqueue_drain(dev_priv->device);
+
+	/* Fire off any pending suspend operations that are in flight */
+
+	INIT_COMPLETION(dev_priv->device->suspend_gate);
+	dev_priv->device->active_cnt--;
+	complete(&dev_priv->device->suspend_gate);
+
+	return result;
+}
+static bool check_ibdesc(struct kgsl_device_private *dev_priv,
+			 struct kgsl_ibdesc *ibdesc, unsigned int numibs,
+			 bool parse)
+{
+	bool result = true;
+	unsigned int i;
+	for (i = 0; i < numibs; i++) {
+		struct kgsl_mem_entry *entry;
+		spin_lock(&dev_priv->process_priv->mem_lock);
+		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
+			ibdesc[i].gpuaddr, ibdesc[i].sizedwords * sizeof(uint));
+		spin_unlock(&dev_priv->process_priv->mem_lock);
+		if (entry == NULL) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"invalid cmd buffer gpuaddr %08x " \
+				"sizedwords %d\n", ibdesc[i].gpuaddr,
+				ibdesc[i].sizedwords);
+			result = false;
+			break;
+		}
+
+		if (parse && !kgsl_cffdump_parse_ibs(dev_priv, &entry->memdesc,
+			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, true)) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"invalid cmd buffer gpuaddr %08x " \
+				"sizedwords %d numibs %d/%d\n",
+				ibdesc[i].gpuaddr,
+				ibdesc[i].sizedwords, i+1, numibs);
+			result = false;
+			break;
+		}
+	}
+	return result;
+}
+
+static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
+				      unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_ringbuffer_issueibcmds *param = data;
+	struct kgsl_ibdesc *ibdesc;
+	struct kgsl_context *context;
+
+#ifdef CONFIG_MSM_KGSL_DRM
+	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_TO_DEV);
+#endif
+
+	context = kgsl_find_context(dev_priv, param->drawctxt_id);
+	if (context == NULL) {
+		result = -EINVAL;
+		KGSL_DRV_ERR(dev_priv->device,
+			"invalid drawctxt drawctxt_id %d\n",
+			param->drawctxt_id);
+		goto done;
+	}
+
+	if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
+		KGSL_DRV_INFO(dev_priv->device,
+			"Using IB list mode for ib submission, numibs: %d\n",
+			param->numibs);
+		if (!param->numibs) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"Invalid numibs as parameter: %d\n",
+				 param->numibs);
+			result = -EINVAL;
+			goto done;
+		}
+
+		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
+					GFP_KERNEL);
+		if (!ibdesc) {
+			KGSL_MEM_ERR(dev_priv->device,
+				"kzalloc(%d) failed\n",
+				sizeof(struct kgsl_ibdesc) * param->numibs);
+			result = -ENOMEM;
+			goto done;
+		}
+
+		if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
+				sizeof(struct kgsl_ibdesc) * param->numibs)) {
+			result = -EFAULT;
+			KGSL_DRV_ERR(dev_priv->device,
+				"copy_from_user failed\n");
+			goto free_ibdesc;
+		}
+	} else {
+		KGSL_DRV_INFO(dev_priv->device,
+			"Using single IB submission mode for ib submission\n");
+		/* If user space driver is still using the old mode of
+		 * submitting single ib then we need to support that as well */
+		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
+		if (!ibdesc) {
+			KGSL_MEM_ERR(dev_priv->device,
+				"kzalloc(%d) failed\n",
+				sizeof(struct kgsl_ibdesc));
+			result = -ENOMEM;
+			goto done;
+		}
+		ibdesc[0].gpuaddr = param->ibdesc_addr;
+		ibdesc[0].sizedwords = param->numibs;
+		param->numibs = 1;
+	}
+
+	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, true)) {
+		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc");
+		result = -EINVAL;
+		goto free_ibdesc;
+	}
+
+	/* Let the pwrscale policy know that a new command buffer
+	   is being issued */
+
+	kgsl_pwrscale_busy(dev_priv->device);
+
+	result = dev_priv->device->ftbl->issueibcmds(dev_priv,
+					     context,
+					     ibdesc,
+					     param->numibs,
+					     &param->timestamp,
+					     param->flags);
+
+	if (result != 0)
+		goto free_ibdesc;
+
+	/* this is a check to try to detect if a command buffer was freed
+	 * during issueibcmds().
+	 */
+	if (!check_ibdesc(dev_priv, ibdesc, param->numibs, false)) {
+		KGSL_DRV_ERR(dev_priv->device, "bad ibdesc AFTER issue");
+		result = -EINVAL;
+		goto free_ibdesc;
+	}
+
+free_ibdesc:
+	kfree(ibdesc);
+done:
+
+#ifdef CONFIG_MSM_KGSL_DRM
+	kgsl_gpu_mem_flush(DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
+#endif
+
+	return result;
+}
+
+static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
+						*dev_priv, unsigned int cmd,
+						void *data)
+{
+	struct kgsl_cmdstream_readtimestamp *param = data;
+
+	param->timestamp =
+		dev_priv->device->ftbl->readtimestamp(dev_priv->device,
+		param->type);
+
+	return 0;
+}
+
+static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
+						    *dev_priv, unsigned int cmd,
+						    void *data)
+{
+	int result = 0;
+	struct kgsl_cmdstream_freememontimestamp *param = data;
+	struct kgsl_mem_entry *entry = NULL;
+
+	spin_lock(&dev_priv->process_priv->mem_lock);
+	entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr);
+	if (entry)
+		list_del(&entry->list);
+	spin_unlock(&dev_priv->process_priv->mem_lock);
+
+	if (entry) {
+		kgsl_memqueue_freememontimestamp(dev_priv->device, entry,
+					param->timestamp, param->type);
+		kgsl_memqueue_drain(dev_priv->device);
+	} else {
+		KGSL_DRV_ERR(dev_priv->device,
+			"invalid gpuaddr %08x\n", param->gpuaddr);
+		result = -EINVAL;
+	}
+
+	return result;
+}
+
+static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
+					unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_drawctxt_create *param = data;
+	struct kgsl_context *context = NULL;
+
+	context = kgsl_create_context(dev_priv);
+
+	if (context == NULL) {
+		result = -ENOMEM;
+		goto done;
+	}
+
+	if (dev_priv->device->ftbl->drawctxt_create)
+		result = dev_priv->device->ftbl->drawctxt_create(
+			dev_priv->device, dev_priv->process_priv->pagetable,
+			context, param->flags);
+
+	param->drawctxt_id = context->id;
+
+done:
+	if (result && context)
+		kgsl_destroy_context(dev_priv, context);
+
+	return result;
+}
+
+static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
+					unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_drawctxt_destroy *param = data;
+	struct kgsl_context *context;
+
+	context = kgsl_find_context(dev_priv, param->drawctxt_id);
+
+	if (context == NULL) {
+		result = -EINVAL;
+		goto done;
+	}
+
+	if (dev_priv->device->ftbl->drawctxt_destroy)
+		dev_priv->device->ftbl->drawctxt_destroy(dev_priv->device,
+			context);
+
+	kgsl_destroy_context(dev_priv, context);
+
+done:
+	return result;
+}
+
+static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
+					unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_sharedmem_free *param = data;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_mem_entry *entry = NULL;
+
+	spin_lock(&private->mem_lock);
+	entry = kgsl_sharedmem_find(private, param->gpuaddr);
+	if (entry)
+		list_del(&entry->list);
+	spin_unlock(&private->mem_lock);
+
+	if (entry) {
+		kgsl_mem_entry_put(entry);
+	} else {
+		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
+		result = -EINVAL;
+	}
+
+	return result;
+}
+
+static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
+{
+	struct vm_area_struct *vma;
+	int len;
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, addr);
+	up_read(&current->mm->mmap_sem);
+	if (!vma) {
+		KGSL_CORE_ERR("find_vma(%x) failed\n", addr);
+		return NULL;
+	}
+	len = vma->vm_end - vma->vm_start;
+	if (vma->vm_pgoff || !KGSL_IS_PAGE_ALIGNED(len) ||
+	  !KGSL_IS_PAGE_ALIGNED(vma->vm_start)) {
+		KGSL_CORE_ERR("address %x is not aligned\n", addr);
+		return NULL;
+	}
+	if (vma->vm_start != addr) {
+		KGSL_CORE_ERR("vma address does not match mmap address\n");
+		return NULL;
+	}
+	return vma;
+}
+
+static long
+kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
+				unsigned int cmd, void *data)
+{
+	int result = 0, len = 0;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_sharedmem_from_vmalloc *param = data;
+	struct kgsl_mem_entry *entry = NULL;
+	struct vm_area_struct *vma;
+
+	if (!kgsl_mmu_enabled())
+		return -ENODEV;
+
+	/* Make sure all pending freed memory is collected */
+	kgsl_memqueue_drain_unlocked(dev_priv->device);
+
+	if (!param->hostptr) {
+		KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr);
+		result = -EINVAL;
+		goto error;
+	}
+
+	vma = kgsl_get_vma_from_start_addr(param->hostptr);
+	if (!vma) {
+		result = -EINVAL;
+		goto error;
+	}
+	len = vma->vm_end - vma->vm_start;
+	if (len == 0) {
+		KGSL_CORE_ERR("Invalid vma region length %d\n", len);
+		result = -EINVAL;
+		goto error;
+	}
+
+	entry = kgsl_mem_entry_create();
+	if (entry == NULL) {
+		result = -ENOMEM;
+		goto error;
+	}
+
+	result = kgsl_sharedmem_vmalloc_user(&entry->memdesc,
+					     private->pagetable, len,
+					     param->flags);
+	if (result != 0)
+		goto error_free_entry;
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	result = remap_vmalloc_range(vma, (void *) entry->memdesc.hostptr, 0);
+	if (result) {
+		KGSL_CORE_ERR("remap_vmalloc_range failed: %d\n", result);
+		goto error_free_vmalloc;
+	}
+
+	param->gpuaddr = entry->memdesc.gpuaddr;
+
+	entry->memtype = KGSL_USER_MEMORY;
+
+	kgsl_mem_entry_attach_process(entry, private);
+
+	/* Process specific statistics */
+	KGSL_STATS_ADD(len, private->stats.user,
+		       private->stats.user_max);
+
+	kgsl_check_idle(dev_priv->device);
+	return 0;
+
+error_free_vmalloc:
+	kgsl_sharedmem_free(&entry->memdesc);
+
+error_free_entry:
+	kfree(entry);
+
+error:
+	kgsl_check_idle(dev_priv->device);
+	return result;
+}
+
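+/*
+ * Returns non-zero when [start, start + size) runs past a region of length
+ * 'len'.  The sum is computed in 64 bits so a large 'start' cannot wrap.
+ */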
+static inline int _check_region(unsigned long start, unsigned long size,
+				uint64_t len)
+{
+	uint64_t end = ((uint64_t) start) + size;
+	return (end > len);
+}
+
+#ifdef CONFIG_ANDROID_PMEM
+static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
+			      unsigned long *vstart, struct file **filep)
+{
+	struct file *fbfile;
+	int ret = 0;
+	dev_t rdev;
+	struct fb_info *info;
+
+	*filep = NULL;
+	if (!get_pmem_file(fd, start, vstart, len, filep))
+		return 0;
+
+	fbfile = fget(fd);
+	if (fbfile == NULL) {
+		KGSL_CORE_ERR("fget failed\n");
+		return -1;
+	}
+
+	rdev = fbfile->f_dentry->d_inode->i_rdev;
+	info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
+	if (info) {
+		*start = info->fix.smem_start;
+		*len = info->fix.smem_len;
+		*vstart = (unsigned long)__va(info->fix.smem_start);
+		ret = 0;
+	} else {
+		KGSL_CORE_ERR("framebuffer minor %d not found\n",
+			      MINOR(rdev));
+		ret = -1;
+	}
+
+	fput(fbfile);
+
+	return ret;
+}
+
+static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
+				struct kgsl_pagetable *pagetable,
+				unsigned int fd, unsigned int offset,
+				size_t size)
+{
+	int ret;
+	unsigned long phys, virt, len;
+	struct file *filep;
+
+	ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
+	if (ret)
+		return ret;
+
+	if (offset >= len) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (size == 0)
+		size = len;
+
+	/* Adjust the size of the region to account for the offset */
+	size += offset & ~PAGE_MASK;
+
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (_check_region(offset & PAGE_MASK, size, len)) {
+		KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger "
+			      "than pmem region length %ld\n",
+			      offset & PAGE_MASK, size, len);
+		ret = -EINVAL;
+		goto err;
+
+	}
+
+	entry->file_ptr = filep;
+
+	entry->memdesc.pagetable = pagetable;
+	entry->memdesc.size = size;
+	entry->memdesc.physaddr = phys + (offset & PAGE_MASK);
+	entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK));
+	entry->memdesc.ops = &kgsl_contiguous_ops;
+
+	return 0;
+err:
+	put_pmem_file(filep);
+	return ret;
+}
+#else
+static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
+				struct kgsl_pagetable *pagetable,
+				unsigned int fd, unsigned int offset,
+				size_t size)
+{
+	return -EINVAL;
+}
+#endif
+
+static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
+			      struct kgsl_pagetable *pagetable,
+			      void *hostptr, unsigned int offset,
+			      size_t size)
+{
+	struct vm_area_struct *vma;
+	unsigned int len;
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, (unsigned int) hostptr);
+	up_read(&current->mm->mmap_sem);
+
+	if (!vma) {
+		KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
+		return -EINVAL;
+	}
+
+	/* We don't necessarily start at vma->vm_start */
+	len = vma->vm_end - (unsigned long) hostptr;
+
+	if (offset >= len)
+		return -EINVAL;
+
+	if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
+	    !KGSL_IS_PAGE_ALIGNED(len)) {
+		KGSL_CORE_ERR("user address len(%u) "
+			      "and start(%p) must be page "
+			      "aligned\n", len, hostptr);
+		return -EINVAL;
+	}
+
+	if (size == 0)
+		size = len;
+
+	/* Adjust the size of the region to account for the offset */
+	size += offset & ~PAGE_MASK;
+
+	size = ALIGN(size, PAGE_SIZE);
+
+	if (_check_region(offset & PAGE_MASK, size, len)) {
+		KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger "
+			      "than region length %d\n",
+			      offset & PAGE_MASK, size, len);
+		return -EINVAL;
+	}
+
+	entry->memdesc.pagetable = pagetable;
+	entry->memdesc.size = size;
+	entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
+	entry->memdesc.ops = &kgsl_userptr_ops;
+
+	return 0;
+}
+
+#ifdef CONFIG_ASHMEM
+static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
+			     struct kgsl_pagetable *pagetable,
+			     int fd, void *hostptr, size_t size)
+{
+	int ret;
+	struct vm_area_struct *vma;
+	struct file *filep, *vmfile;
+	unsigned long len;
+
+	vma = kgsl_get_vma_from_start_addr((unsigned long) hostptr);
+	if (vma == NULL)
+		return -EINVAL;
+
+	len = vma->vm_end - vma->vm_start;
+
+	if (size == 0)
+		size = len;
+
+	if (size != len) {
+		KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
+			      size, hostptr);
+		return -EINVAL;
+	}
+
+	ret = get_ashmem_file(fd, &filep, &vmfile, &len);
+
+	if (ret) {
+		KGSL_CORE_ERR("get_ashmem_file failed\n");
+		return ret;
+	}
+
+	if (vmfile != vma->vm_file) {
+		KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	entry->file_ptr = filep;
+
+	entry->memdesc.pagetable = pagetable;
+	entry->memdesc.size = ALIGN(size, PAGE_SIZE);
+	entry->memdesc.hostptr = hostptr;
+	entry->memdesc.ops = &kgsl_userptr_ops;
+
+	return 0;
+
+err:
+	put_ashmem_file(filep);
+	return ret;
+}
+#else
+static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
+			     struct kgsl_pagetable *pagetable,
+			     int fd, void *hostptr, size_t size)
+{
+	return -EINVAL;
+}
+#endif
+
+static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
+				     unsigned int cmd, void *data)
+{
+	int result = -EINVAL;
+	struct kgsl_map_user_mem *param = data;
+	struct kgsl_mem_entry *entry = NULL;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+
+	entry = kgsl_mem_entry_create();
+
+	if (entry == NULL)
+		return -ENOMEM;
+
+	kgsl_memqueue_drain_unlocked(dev_priv->device);
+
+	switch (param->memtype) {
+	case KGSL_USER_MEM_TYPE_PMEM:
+		if (param->fd == 0 || param->len == 0)
+			break;
+
+		result = kgsl_setup_phys_file(entry, private->pagetable,
+					      param->fd, param->offset,
+					      param->len);
+		break;
+
+	case KGSL_USER_MEM_TYPE_ADDR:
+		if (!kgsl_mmu_enabled()) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"Cannot map paged memory with the "
+				"MMU disabled\n");
+			break;
+		}
+
+		if (param->hostptr == 0)
+			break;
+
+		result = kgsl_setup_hostptr(entry, private->pagetable,
+					    (void *) param->hostptr,
+					    param->offset, param->len);
+		break;
+
+	case KGSL_USER_MEM_TYPE_ASHMEM:
+		if (!kgsl_mmu_enabled()) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"Cannot map paged memory with the "
+				"MMU disabled\n");
+			break;
+		}
+
+		if (param->hostptr == 0)
+			break;
+
+		result = kgsl_setup_ashmem(entry, private->pagetable,
+					   param->fd, (void *) param->hostptr,
+					   param->len);
+		break;
+	default:
+		KGSL_CORE_ERR("Invalid memory type: %x\n", param->memtype);
+		break;
+	}
+
+	if (result)
+		goto error;
+
+	result = kgsl_mmu_map(private->pagetable,
+			      &entry->memdesc,
+			      GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+	if (result)
+		goto error_put_file_ptr;
+
+	/* Adjust the returned value for a non 4k aligned offset */
+	param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
+
+	entry->memtype = KGSL_MAPPED_MEMORY;
+
+	KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
+		       kgsl_driver.stats.mapped_max);
+
+	/* Statistics */
+	KGSL_STATS_ADD(param->len, private->stats.mapped,
+		       private->stats.mapped_max);
+
+	kgsl_mem_entry_attach_process(entry, private);
+
+	kgsl_check_idle(dev_priv->device);
+	return result;
+
+error_put_file_ptr:
+	if (entry->file_ptr)
+		fput(entry->file_ptr);
+
+error:
+	kfree(entry);
+	kgsl_check_idle(dev_priv->device);
+	return result;
+}
+
+/* This function flushes a graphics memory allocation from the CPU cache
+ * when caching is enabled with the MMU */
+static long
+kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
+				 unsigned int cmd, void *data)
+{
+	int result = 0;
+	struct kgsl_mem_entry *entry;
+	struct kgsl_sharedmem_free *param = data;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+
+	spin_lock(&private->mem_lock);
+	entry = kgsl_sharedmem_find(private, param->gpuaddr);
+	if (!entry) {
+		KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
+		result = -EINVAL;
+	} else {
+		if (!entry->memdesc.hostptr)
+			entry->memdesc.hostptr =
+				kgsl_gpuaddr_to_vaddr(&entry->memdesc,
+					param->gpuaddr, &entry->memdesc.size);
+
+		if (!entry->memdesc.hostptr) {
+			KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
+				param->gpuaddr);
+			result = -EINVAL;
+		} else {
+			kgsl_cache_range_op(&entry->memdesc,
+					    KGSL_CACHE_OP_CLEAN);
+
+			/* Statistics - keep track of how many flushes
+			   each process does */
+			private->stats.flushes++;
+		}
+	}
+	spin_unlock(&private->mem_lock);
+
+	return result;
+}
+
+static long
+kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
+			unsigned int cmd, void *data)
+{
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_gpumem_alloc *param = data;
+	struct kgsl_mem_entry *entry;
+	int result;
+
+	entry = kgsl_mem_entry_create();
+	if (entry == NULL)
+		return -ENOMEM;
+
+	/* Make sure all pending freed memory is collected */
+	kgsl_memqueue_drain_unlocked(dev_priv->device);
+
+	result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
+		param->size, param->flags);
+
+	if (result == 0) {
+		entry->memtype = KGSL_USER_MEMORY;
+		kgsl_mem_entry_attach_process(entry, private);
+		param->gpuaddr = entry->memdesc.gpuaddr;
+
+		KGSL_STATS_ADD(entry->memdesc.size, private->stats.user,
+		       private->stats.user_max);
+	} else
+		kfree(entry);
+
+	kgsl_check_idle(dev_priv->device);
+	return result;
+}
+
+typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
+	unsigned int, void *);
+
+#define KGSL_IOCTL_FUNC(_cmd, _func, _lock) \
+	[_IOC_NR(_cmd)] = { .cmd = _cmd, .func = _func, .lock = _lock }
+
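+/*
+ * Dispatch table for the core ioctls, indexed by _IOC_NR(cmd).  Entries
+ * with .lock set run under the device mutex with kgsl_check_suspended()
+ * called first; entries with .lock clear do their own locking.
+ */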
+static const struct {
+	unsigned int cmd;
+	kgsl_ioctl_func_t func;
+	int lock;
+} kgsl_ioctl_funcs[] = {
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
+			kgsl_ioctl_device_getproperty, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
+			kgsl_ioctl_device_waittimestamp, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
+			kgsl_ioctl_rb_issueibcmds, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
+			kgsl_ioctl_cmdstream_readtimestamp, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
+			kgsl_ioctl_cmdstream_freememontimestamp, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
+			kgsl_ioctl_drawctxt_create, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
+			kgsl_ioctl_drawctxt_destroy, 1),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
+			kgsl_ioctl_map_user_mem, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
+			kgsl_ioctl_map_user_mem, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
+			kgsl_ioctl_sharedmem_free, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC,
+			kgsl_ioctl_sharedmem_from_vmalloc, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
+			kgsl_ioctl_sharedmem_flush_cache, 0),
+	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
+			kgsl_ioctl_gpumem_alloc, 0),
+};
+
+static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+	struct kgsl_device_private *dev_priv = filep->private_data;
+	unsigned int nr = _IOC_NR(cmd);
+	kgsl_ioctl_func_t func;
+	int lock, ret;
+	char ustack[64];
+	void *uptr = NULL;
+
+	BUG_ON(dev_priv == NULL);
+
+	/* Workaround for a previously incorrectly defined ioctl code.
+	   This helps ensure binary compatibility */
+
+	if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
+		cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
+
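+	/*
+	 * Copy in the ioctl payload.  Small arguments fit in the on-stack
+	 * buffer; larger ones fall back to a heap allocation that is
+	 * released at the 'done' label.
+	 */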
+	if (cmd & (IOC_IN | IOC_OUT)) {
+		if (_IOC_SIZE(cmd) < sizeof(ustack))
+			uptr = ustack;
+		else {
+			uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+			if (uptr == NULL) {
+				KGSL_MEM_ERR(dev_priv->device,
+					"kzalloc(%d) failed\n", _IOC_SIZE(cmd));
+				ret = -ENOMEM;
+				goto done;
+			}
+		}
+
+		if (cmd & IOC_IN) {
+			if (copy_from_user(uptr, (void __user *) arg,
+				_IOC_SIZE(cmd))) {
+				ret = -EFAULT;
+				goto done;
+			}
+		} else
+			memset(uptr, 0, _IOC_SIZE(cmd));
+	}
+
+	if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
+	    kgsl_ioctl_funcs[nr].func != NULL) {
+		func = kgsl_ioctl_funcs[nr].func;
+		lock = kgsl_ioctl_funcs[nr].lock;
+	} else {
+		func = dev_priv->device->ftbl->ioctl;
+		if (!func) {
+			KGSL_DRV_INFO(dev_priv->device,
+				      "invalid ioctl code %08x\n", cmd);
+			ret = -EINVAL;
+			goto done;
+		}
+		lock = 1;
+	}
+
+	if (lock) {
+		mutex_lock(&dev_priv->device->mutex);
+		kgsl_check_suspended(dev_priv->device);
+	}
+
+	ret = func(dev_priv, cmd, uptr);
+
+	if (lock) {
+		kgsl_check_idle_locked(dev_priv->device);
+		mutex_unlock(&dev_priv->device->mutex);
+	}
+
+	if (ret == 0 && (cmd & IOC_OUT)) {
+		if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
+			ret = -EFAULT;
+	}
+
+done:
+	if (_IOC_SIZE(cmd) >= sizeof(ustack))
+		kfree(uptr);
+
+	return ret;
+}
+
+static int
+kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
+{
+	struct kgsl_memdesc *memdesc = &device->memstore;
+	int result;
+	unsigned int vma_size = vma->vm_end - vma->vm_start;
+
+	/* The memstore can only be mapped as read-only */
+
+	if (vma->vm_flags & VM_WRITE)
+		return -EPERM;
+
+	if (memdesc->size  !=  vma_size) {
+		KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
+			     vma_size, memdesc->size);
+		return -EINVAL;
+	}
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+				 vma_size, vma->vm_page_prot);
+	if (result != 0)
+		KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
+			     result);
+
+	return result;
+}
+
+static int
+kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct kgsl_mem_entry *entry = vma->vm_private_data;
+
+	if (!entry->memdesc.ops->vmfault)
+		return VM_FAULT_SIGBUS;
+
+	return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
+}
+
+static void
+kgsl_gpumem_vm_close(struct vm_area_struct *vma)
+{
+	struct kgsl_mem_entry *entry  = vma->vm_private_data;
+	kgsl_mem_entry_put(entry);
+}
+
+static struct vm_operations_struct kgsl_gpumem_vm_ops = {
+	.fault = kgsl_gpumem_vm_fault,
+	.close = kgsl_gpumem_vm_close,
+};
+
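+/*
+ * mmap() entry point.  Userspace passes the GPU address of an allocation
+ * as the mmap offset; it is matched against the entries on the process
+ * mem_list and the pages are faulted in on demand through the memdesc ops.
+ * A mapping at the memstore physical address is special-cased as a
+ * read-only view of the shared memstore page.
+ */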
+static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
+	struct inode *inodep = file->f_path.dentry->d_inode;
+	struct kgsl_device_private *dev_priv = file->private_data;
+	struct kgsl_process_private *private = dev_priv->process_priv;
+	struct kgsl_mem_entry *entry = NULL, *tmp;
+	struct kgsl_device *device;
+
+	device = kgsl_driver.devp[iminor(inodep)];
+	BUG_ON(device == NULL);
+
+	/* Handle legacy behavior for memstore */
+
+	if (vma_offset == device->memstore.physaddr)
+		return kgsl_mmap_memstore(device, vma);
+
+	/* Find a chunk of GPU memory */
+
+	spin_lock(&private->mem_lock);
+	list_for_each_entry(tmp, &private->mem_list, list) {
+		if (vma_offset == tmp->memdesc.gpuaddr) {
+			kgsl_mem_entry_get(tmp);
+			entry = tmp;
+			break;
+		}
+	}
+	spin_unlock(&private->mem_lock);
+
+	if (entry == NULL)
+		return -EINVAL;
+
+	if (!entry->memdesc.ops->vmflags || !entry->memdesc.ops->vmfault)
+		return -EINVAL;
+
+	vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
+
+	vma->vm_private_data = entry;
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	vma->vm_ops = &kgsl_gpumem_vm_ops;
+	vma->vm_file = file;
+
+	return 0;
+}
+
+static const struct file_operations kgsl_fops = {
+	.owner = THIS_MODULE,
+	.release = kgsl_release,
+	.open = kgsl_open,
+	.mmap = kgsl_mmap,
+	.unlocked_ioctl = kgsl_ioctl,
+};
+
+struct kgsl_driver kgsl_driver  = {
+	.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
+	.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
+	.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
+};
+EXPORT_SYMBOL(kgsl_driver);
+
+void kgsl_unregister_device(struct kgsl_device *device)
+{
+	int minor;
+
+	mutex_lock(&kgsl_driver.devlock);
+	for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
+		if (device == kgsl_driver.devp[minor])
+			break;
+	}
+
+	mutex_unlock(&kgsl_driver.devlock);
+
+	if (minor == KGSL_DEVICE_MAX)
+		return;
+
+	kgsl_cffdump_close(device->id);
+	kgsl_pwrctrl_uninit_sysfs(device);
+
+	wake_lock_destroy(&device->idle_wakelock);
+	pm_qos_remove_request(&device->pm_qos_req_dma);
+
+	idr_destroy(&device->context_idr);
+
+	if (device->memstore.hostptr)
+		kgsl_sharedmem_free(&device->memstore);
+
+	kgsl_mmu_close(device);
+
+	if (device->work_queue) {
+		destroy_workqueue(device->work_queue);
+		device->work_queue = NULL;
+	}
+
+	device_destroy(kgsl_driver.class,
+		       MKDEV(MAJOR(kgsl_driver.major), minor));
+
+	mutex_lock(&kgsl_driver.devlock);
+	kgsl_driver.devp[minor] = NULL;
+	mutex_unlock(&kgsl_driver.devlock);
+}
+EXPORT_SYMBOL(kgsl_unregister_device);
+
+int
+kgsl_register_device(struct kgsl_device *device)
+{
+	int minor, ret;
+	dev_t dev;
+
+	/* Find a minor for the device */
+
+	mutex_lock(&kgsl_driver.devlock);
+	for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
+		if (kgsl_driver.devp[minor] == NULL) {
+			kgsl_driver.devp[minor] = device;
+			break;
+		}
+	}
+
+	mutex_unlock(&kgsl_driver.devlock);
+
+	if (minor == KGSL_DEVICE_MAX) {
+		KGSL_CORE_ERR("minor devices exhausted\n");
+		return -ENODEV;
+	}
+
+	/* Create the device */
+	dev = MKDEV(MAJOR(kgsl_driver.major), minor);
+	device->dev = device_create(kgsl_driver.class,
+				    device->parentdev,
+				    dev, device,
+				    device->name);
+
+	if (IS_ERR(device->dev)) {
+		ret = PTR_ERR(device->dev);
+		KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
+		goto err_devlist;
+	}
+
+	dev_set_drvdata(device->parentdev, device);
+
+	/* Generic device initialization */
+	init_waitqueue_head(&device->wait_queue);
+
+	kgsl_cffdump_open(device->id);
+
+	init_completion(&device->hwaccess_gate);
+	init_completion(&device->suspend_gate);
+
+	ATOMIC_INIT_NOTIFIER_HEAD(&device->ts_notifier_list);
+
+	setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
+	ret = kgsl_create_device_workqueue(device);
+	if (ret)
+		goto err_devlist;
+
+	INIT_WORK(&device->idle_check_ws, kgsl_idle_check);
+
+	INIT_LIST_HEAD(&device->memqueue);
+
+	ret = kgsl_mmu_init(device);
+	if (ret != 0)
+		goto err_dest_work_q;
+
+	ret = kgsl_allocate_contiguous(&device->memstore,
+		sizeof(struct kgsl_devmemstore));
+
+	if (ret != 0)
+		goto err_close_mmu;
+
+	kgsl_sharedmem_set(&device->memstore, 0, 0, device->memstore.size);
+
+	wake_lock_init(&device->idle_wakelock, WAKE_LOCK_IDLE, device->name);
+	pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+				PM_QOS_DEFAULT_VALUE);
+
+	idr_init(&device->context_idr);
+
+	/* sysfs and debugfs initialization - failure here is non-fatal */
+
+	/* Initialize logging */
+	kgsl_device_debugfs_init(device);
+
+	/* Initialize common sysfs entries */
+	kgsl_pwrctrl_init_sysfs(device);
+
+	return 0;
+
+err_close_mmu:
+	kgsl_mmu_close(device);
+err_dest_work_q:
+	destroy_workqueue(device->work_queue);
+	device->work_queue = NULL;
+err_devlist:
+	mutex_lock(&kgsl_driver.devlock);
+	kgsl_driver.devp[minor] = NULL;
+	mutex_unlock(&kgsl_driver.devlock);
+
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_register_device);
+
+int kgsl_device_platform_probe(struct kgsl_device *device,
+			       irqreturn_t (*dev_isr) (int, void*))
+{
+	int status = -EINVAL;
+	struct kgsl_memregion *regspace = NULL;
+	struct resource *res;
+	struct platform_device *pdev =
+		container_of(device->parentdev, struct platform_device, dev);
+
+	pm_runtime_enable(device->parentdev);
+
+	status = kgsl_pwrctrl_init(device);
+	if (status)
+		goto error;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   device->iomemname);
+	if (res == NULL) {
+		KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
+		status = -EINVAL;
+		goto error_pwrctrl_close;
+	}
+	if (res->start == 0 || resource_size(res) == 0) {
+		KGSL_DRV_ERR(device, "dev %d invalid regspace\n", device->id);
+		status = -EINVAL;
+		goto error_pwrctrl_close;
+	}
+
+	regspace = &device->regspace;
+	regspace->mmio_phys_base = res->start;
+	regspace->sizebytes = resource_size(res);
+
+	if (!request_mem_region(regspace->mmio_phys_base,
+				regspace->sizebytes, device->name)) {
+		KGSL_DRV_ERR(device, "request_mem_region failed\n");
+		status = -ENODEV;
+		goto error_pwrctrl_close;
+	}
+
+	regspace->mmio_virt_base = ioremap(regspace->mmio_phys_base,
+					   regspace->sizebytes);
+
+	if (regspace->mmio_virt_base == NULL) {
+		KGSL_DRV_ERR(device, "ioremap failed\n");
+		status = -ENODEV;
+		goto error_release_mem;
+	}
+
+	status = request_irq(device->pwrctrl.interrupt_num, dev_isr,
+			     IRQF_TRIGGER_HIGH, device->name, device);
+	if (status) {
+		KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
+			      device->pwrctrl.interrupt_num, status);
+		goto error_iounmap;
+	}
+	device->pwrctrl.have_irq = 1;
+	disable_irq(device->pwrctrl.interrupt_num);
+
+	KGSL_DRV_INFO(device,
+		"dev_id %d regs phys 0x%08x size 0x%08x virt %p\n",
+		device->id, regspace->mmio_phys_base,
+		regspace->sizebytes, regspace->mmio_virt_base);
+
+
+	status = kgsl_register_device(device);
+	if (!status)
+		return status;
+
+	free_irq(device->pwrctrl.interrupt_num, device);
+	device->pwrctrl.have_irq = 0;
+error_iounmap:
+	iounmap(regspace->mmio_virt_base);
+	regspace->mmio_virt_base = NULL;
+error_release_mem:
+	release_mem_region(regspace->mmio_phys_base, regspace->sizebytes);
+error_pwrctrl_close:
+	kgsl_pwrctrl_close(device);
+error:
+	return status;
+}
+EXPORT_SYMBOL(kgsl_device_platform_probe);
+
+void kgsl_device_platform_remove(struct kgsl_device *device)
+{
+	struct kgsl_memregion *regspace = &device->regspace;
+
+	kgsl_unregister_device(device);
+
+	if (regspace->mmio_virt_base != NULL) {
+		iounmap(regspace->mmio_virt_base);
+		regspace->mmio_virt_base = NULL;
+		release_mem_region(regspace->mmio_phys_base,
+					regspace->sizebytes);
+	}
+	kgsl_pwrctrl_close(device);
+
+	pm_runtime_disable(device->parentdev);
+}
+EXPORT_SYMBOL(kgsl_device_platform_remove);
+
+static int __devinit
+kgsl_ptdata_init(void)
+{
+	INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
+
+	return kgsl_ptpool_init(&kgsl_driver.ptpool, KGSL_PAGETABLE_SIZE,
+		kgsl_pagetable_count);
+}
+
+static void kgsl_core_exit(void)
+{
+	unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
+
+	kgsl_ptpool_destroy(&kgsl_driver.ptpool);
+
+	device_unregister(&kgsl_driver.virtdev);
+
+	if (kgsl_driver.class) {
+		class_destroy(kgsl_driver.class);
+		kgsl_driver.class = NULL;
+	}
+
+	kgsl_drm_exit();
+	kgsl_cffdump_destroy();
+}
+
+static int __init kgsl_core_init(void)
+{
+	int result = 0;
+
+	/* alloc major and minor device numbers */
+	result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
+				  KGSL_NAME);
+	if (result < 0) {
+		KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
+		goto err;
+	}
+
+	cdev_init(&kgsl_driver.cdev, &kgsl_fops);
+	kgsl_driver.cdev.owner = THIS_MODULE;
+	kgsl_driver.cdev.ops = &kgsl_fops;
+	result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
+		       KGSL_DEVICE_MAX);
+
+	if (result) {
+		KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
+			     " result= %d\n", kgsl_driver.major, result);
+		goto err;
+	}
+
+	kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);
+
+	if (IS_ERR(kgsl_driver.class)) {
+		result = PTR_ERR(kgsl_driver.class);
+		KGSL_CORE_ERR("failed to create class %s", KGSL_NAME);
+		goto err;
+	}
+
+	/* Make a virtual device for managing core-related things
+	   in sysfs */
+	kgsl_driver.virtdev.class = kgsl_driver.class;
+	dev_set_name(&kgsl_driver.virtdev, "kgsl");
+	result = device_register(&kgsl_driver.virtdev);
+	if (result) {
+		KGSL_CORE_ERR("device_register failed\n");
+		goto err;
+	}
+
+	/* Make kobjects in the virtual device for storing statistics */
+
+	kgsl_driver.ptkobj =
+	  kobject_create_and_add("pagetables",
+				 &kgsl_driver.virtdev.kobj);
+
+	kgsl_driver.prockobj =
+		kobject_create_and_add("proc",
+				       &kgsl_driver.virtdev.kobj);
+
+	kgsl_core_debugfs_init();
+
+	kgsl_sharedmem_init_sysfs();
+	kgsl_cffdump_init();
+
+	/* Generic device initialization */
+	INIT_LIST_HEAD(&kgsl_driver.process_list);
+
+	result = kgsl_ptdata_init();
+	if (result)
+		goto err;
+
+	result = kgsl_drm_init(NULL);
+
+	if (result)
+		goto err;
+
+	return 0;
+
+err:
+	kgsl_core_exit();
+	return result;
+}
+
+module_init(kgsl_core_init);
+module_exit(kgsl_core_exit);
+
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("MSM GPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
new file mode 100644
index 0000000..cd11bd9
--- /dev/null
+++ b/drivers/gpu/msm/kgsl.h
@@ -0,0 +1,220 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_H
+#define __KGSL_H
+
+#include <linux/types.h>
+#include <linux/msm_kgsl.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/regulator/consumer.h>
+
+#define KGSL_NAME "kgsl"
+
+/* Flags to control whether to flush or invalidate a cached memory range */
+#define KGSL_CACHE_INV		0x00000000
+#define KGSL_CACHE_CLEAN	0x00000001
+#define KGSL_CACHE_FLUSH	0x00000002
+
+#define KGSL_CACHE_USER_ADDR	0x00000010
+#define KGSL_CACHE_VMALLOC_ADDR	0x00000020
+
+/*cache coherency ops */
+#define DRM_KGSL_GEM_CACHE_OP_TO_DEV	0x0001
+#define DRM_KGSL_GEM_CACHE_OP_FROM_DEV	0x0002
+
+/* The size of each entry in a page table */
+#define KGSL_PAGETABLE_ENTRY_SIZE  4
+
+/* Pagetable Virtual Address base */
+#define KGSL_PAGETABLE_BASE	0x66000000
+
+/* Extra accounting entries needed in the pagetable */
+#define KGSL_PT_EXTRA_ENTRIES      16
+
+#define KGSL_PAGETABLE_ENTRIES(_sz) (((_sz) >> PAGE_SHIFT) + \
+				     KGSL_PT_EXTRA_ENTRIES)
+
+#ifdef CONFIG_MSM_KGSL_MMU
+#define KGSL_PAGETABLE_SIZE \
+ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
+KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
+#else
+#define KGSL_PAGETABLE_SIZE 0
+#endif
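+
+/*
+ * Worked example (illustrative): with the default
+ * CONFIG_MSM_KGSL_PAGE_TABLE_SIZE of 0xFFF0000 (256M - 64K) and 4 KB pages,
+ * KGSL_PAGETABLE_ENTRIES() yields 65520 + 16 = 65536 entries, so each
+ * pagetable occupies 65536 * 4 = 256 KB after page alignment.
+ */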
+
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+#define KGSL_PAGETABLE_COUNT (CONFIG_MSM_KGSL_PAGE_TABLE_COUNT)
+#else
+#define KGSL_PAGETABLE_COUNT 1
+#endif
+
+/* Casting using container_of() for structures that kgsl owns. */
+#define KGSL_CONTAINER_OF(ptr, type, member) \
+		container_of(ptr, type, member)
+
+/* A macro for memory statistics - add the new size to the stat and if
+   the statistic is greater than _max, set _max
+*/
+
+#define KGSL_STATS_ADD(_size, _stat, _max) \
+	do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
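+
+/*
+ * Example (illustrative): after mapping 'param->len' bytes of user memory,
+ *   KGSL_STATS_ADD(param->len, private->stats.mapped,
+ *                  private->stats.mapped_max);
+ * bumps the per-process running total and updates its high-water mark.
+ */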
+
+struct kgsl_device;
+
+struct kgsl_ptpool {
+	size_t ptsize;
+	struct mutex lock;
+	struct list_head list;
+	int entries;
+	int static_entries;
+	int chunks;
+};
+
+struct kgsl_driver {
+	struct cdev cdev;
+	dev_t major;
+	struct class *class;
+	/* Virtual device for managing the core */
+	struct device virtdev;
+	/* Kobjects for storing pagetable and process statistics */
+	struct kobject *ptkobj;
+	struct kobject *prockobj;
+	struct kgsl_device *devp[KGSL_DEVICE_MAX];
+
+	uint32_t flags_debug;
+
+	/* Global list of open processes */
+	struct list_head process_list;
+	/* Global list of pagetables */
+	struct list_head pagetable_list;
+	/* Spinlock for accessing the pagetable list */
+	spinlock_t ptlock;
+	/* Mutex for accessing the process list */
+	struct mutex process_mutex;
+
+	/* Mutex for protecting the device list */
+	struct mutex devlock;
+
+	struct kgsl_ptpool ptpool;
+
+	struct {
+		unsigned int vmalloc;
+		unsigned int vmalloc_max;
+		unsigned int coherent;
+		unsigned int coherent_max;
+		unsigned int mapped;
+		unsigned int mapped_max;
+		unsigned int histogram[16];
+	} stats;
+};
+
+extern struct kgsl_driver kgsl_driver;
+
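+/*
+ * memtype values for struct kgsl_mem_entry: USER memory is allocated by
+ * the driver on behalf of the process (vmalloc/gpumem_alloc), MAPPED
+ * memory is imported from an existing buffer (pmem, ashmem or a user
+ * pointer).
+ */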
+#define KGSL_USER_MEMORY 1
+#define KGSL_MAPPED_MEMORY 2
+
+struct kgsl_pagetable;
+struct kgsl_memdesc_ops;
+
+/* shared memory allocation */
+struct kgsl_memdesc {
+	struct kgsl_pagetable *pagetable;
+	void *hostptr;
+	unsigned int gpuaddr;
+	unsigned int physaddr;
+	unsigned int size;
+	unsigned int priv;
+	struct kgsl_memdesc_ops *ops;
+};
+
+struct kgsl_mem_entry {
+	struct kref refcount;
+	struct kgsl_memdesc memdesc;
+	int memtype;
+	struct file *file_ptr;
+	struct list_head list;
+	uint32_t free_timestamp;
+	/* back pointer to private structure under whose context this
+	 * allocation is made */
+	struct kgsl_process_private *priv;
+};
+
+#ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT
+#define MMU_CONFIG 2
+#else
+#define MMU_CONFIG 1
+#endif
+
+void kgsl_mem_entry_destroy(struct kref *kref);
+uint8_t *kgsl_gpuaddr_to_vaddr(const struct kgsl_memdesc *memdesc,
+	unsigned int gpuaddr, unsigned int *size);
+struct kgsl_mem_entry *kgsl_sharedmem_find_region(
+	struct kgsl_process_private *private, unsigned int gpuaddr,
+	size_t size);
+
+extern const struct dev_pm_ops kgsl_pm_ops;
+
+struct early_suspend;
+int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
+int kgsl_resume_driver(struct platform_device *pdev);
+void kgsl_early_suspend_driver(struct early_suspend *h);
+void kgsl_late_resume_driver(struct early_suspend *h);
+
+#ifdef CONFIG_MSM_KGSL_DRM
+extern int kgsl_drm_init(struct platform_device *dev);
+extern void kgsl_drm_exit(void);
+extern void kgsl_gpu_mem_flush(int op);
+#else
+static inline int kgsl_drm_init(struct platform_device *dev)
+{
+	return 0;
+}
+
+static inline void kgsl_drm_exit(void)
+{
+}
+#endif
+
+static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
+				unsigned int gpuaddr)
+{
+	if (gpuaddr >= memdesc->gpuaddr && (gpuaddr + sizeof(unsigned int)) <=
+		(memdesc->gpuaddr + memdesc->size)) {
+		return 1;
+	}
+	return 0;
+}
+
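+/*
+ * Compare two 32-bit hardware timestamps: returns true when 'new' is at or
+ * ahead of 'old'.  The signed subtraction keeps the result correct across
+ * counter wrap-around; a difference more negative than -20000 is also
+ * treated as "ahead", so a very large apparent step backwards is not
+ * mistaken for an old timestamp.
+ */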
+static inline bool timestamp_cmp(unsigned int new, unsigned int old)
+{
+	int ts_diff = new - old;
+	return (ts_diff >= 0) || (ts_diff < -20000);
+}
+
+static inline void
+kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
+{
+	kref_get(&entry->refcount);
+}
+
+static inline void
+kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
+{
+	kref_put(&entry->refcount, kgsl_mem_entry_destroy);
+}
+
+#endif /* __KGSL_H */
diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c
new file mode 100644
index 0000000..4349316
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_cffdump.c
@@ -0,0 +1,711 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+#define ALIGN_CPU
+
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_debugfs.h"
+
+static struct rchan	*chan;
+static struct dentry	*dir;
+static int		suspended;
+static size_t		dropped;
+static size_t		subbuf_size = 256*1024;
+static size_t		n_subbufs = 64;
+
+/* forward declarations */
+static void destroy_channel(void);
+static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs);
+
+static spinlock_t cffdump_lock;
+static ulong serial_nr;
+static ulong total_bytes;
+static ulong total_syncmem;
+static long last_sec;
+
+#define MEMBUF_SIZE	64
+
+#define CFF_OP_WRITE_REG        0x00000002
+struct cff_op_write_reg {
+	unsigned char op;
+	uint addr;
+	uint value;
+} __attribute__((packed));
+
+#define CFF_OP_POLL_REG         0x00000004
+struct cff_op_poll_reg {
+	unsigned char op;
+	uint addr;
+	uint value;
+	uint mask;
+} __attribute__((packed));
+
+#define CFF_OP_WAIT_IRQ         0x00000005
+struct cff_op_wait_irq {
+	unsigned char op;
+} __attribute__((packed));
+
+#define CFF_OP_VERIFY_MEM_FILE  0x00000007
+#define CFF_OP_RMW              0x0000000a
+
+#define CFF_OP_WRITE_MEM        0x0000000b
+struct cff_op_write_mem {
+	unsigned char op;
+	uint addr;
+	uint value;
+} __attribute__((packed));
+
+#define CFF_OP_WRITE_MEMBUF     0x0000000c
+struct cff_op_write_membuf {
+	unsigned char op;
+	uint addr;
+	ushort count;
+	uint buffer[MEMBUF_SIZE];
+} __attribute__((packed));
+
+#define CFF_OP_EOF              0xffffffff
+struct cff_op_eof {
+	unsigned char op;
+} __attribute__((packed));
+
+
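+/*
+ * Minimal base64 helpers for the CFF stream.  Three input bytes map to
+ * four output characters (e.g. "Man" encodes to "TWFu"); a short final
+ * group is padded with '='.
+ */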
+static void b64_encodeblock(unsigned char in[3], unsigned char out[4], int len)
+{
+	static const char tob64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno"
+		"pqrstuvwxyz0123456789+/";
+
+	out[0] = tob64[in[0] >> 2];
+	out[1] = tob64[((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4)];
+	out[2] = (unsigned char) (len > 1 ? tob64[((in[1] & 0x0f) << 2)
+		| ((in[2] & 0xc0) >> 6)] : '=');
+	out[3] = (unsigned char) (len > 2 ? tob64[in[2] & 0x3f] : '=');
+}
+
+static void b64_encode(const unsigned char *in_buf, int in_size,
+	unsigned char *out_buf, int out_bufsize, int *out_size)
+{
+	unsigned char in[3], out[4];
+	int i, len;
+
+	*out_size = 0;
+	while (in_size > 0) {
+		len = 0;
+		for (i = 0; i < 3; ++i) {
+			if (in_size-- > 0) {
+				in[i] = *in_buf++;
+				++len;
+			} else
+				in[i] = 0;
+		}
+		if (len) {
+			b64_encodeblock(in, out, len);
+			if (out_bufsize < 4) {
+				pr_warn("kgsl: cffdump: %s: out of buffer\n",
+					__func__);
+				return;
+			}
+			for (i = 0; i < 4; ++i)
+				*out_buf++ = out[i];
+			*out_size += 4;
+			out_bufsize -= 4;
+		}
+	}
+}
+
+#define KLOG_TMPBUF_SIZE (1024)
+static void klog_printk(const char *fmt, ...)
+{
+	/* per-cpu klog formatting temporary buffer */
+	static char klog_buf[NR_CPUS][KLOG_TMPBUF_SIZE];
+
+	va_list args;
+	int len;
+	char *cbuf;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	cbuf = klog_buf[smp_processor_id()];
+	va_start(args, fmt);
+	len = vsnprintf(cbuf, KLOG_TMPBUF_SIZE, fmt, args);
+	total_bytes += len;
+	va_end(args);
+	relay_write(chan, cbuf, len);
+	local_irq_restore(flags);
+}
+
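+/*
+ * Sequential CFF_OP_WRITE_MEM ops to consecutive addresses are batched in
+ * cff_op_write_membuf and flushed here: as a single WRITE_MEM record when
+ * only one value is pending, or as one WRITE_MEMBUF record otherwise.  The
+ * record is base64 encoded and emitted through the relay channel.
+ */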
+static struct cff_op_write_membuf cff_op_write_membuf;
+static void cffdump_membuf(int id, unsigned char *out_buf, int out_bufsize)
+{
+	void *data;
+	int len, out_size;
+	struct cff_op_write_mem cff_op_write_mem;
+
+	uint addr = cff_op_write_membuf.addr
+		- sizeof(uint)*cff_op_write_membuf.count;
+
+	if (!cff_op_write_membuf.count) {
+		pr_warn("kgsl: cffdump: membuf: count == 0, skipping\n");
+		return;
+	}
+
+	if (cff_op_write_membuf.count != 1) {
+		cff_op_write_membuf.op = CFF_OP_WRITE_MEMBUF;
+		cff_op_write_membuf.addr = addr;
+		len = sizeof(cff_op_write_membuf) -
+			sizeof(uint)*(MEMBUF_SIZE - cff_op_write_membuf.count);
+		data = &cff_op_write_membuf;
+	} else {
+		cff_op_write_mem.op = CFF_OP_WRITE_MEM;
+		cff_op_write_mem.addr = addr;
+		cff_op_write_mem.value = cff_op_write_membuf.buffer[0];
+		data = &cff_op_write_mem;
+		len = sizeof(cff_op_write_mem);
+	}
+	b64_encode(data, len, out_buf, out_bufsize, &out_size);
+	out_buf[out_size] = 0;
+	klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
+	cff_op_write_membuf.count = 0;
+	cff_op_write_membuf.addr = 0;
+}
+
+static void cffdump_printline(int id, uint opcode, uint op1, uint op2,
+	uint op3)
+{
+	struct cff_op_write_reg cff_op_write_reg;
+	struct cff_op_poll_reg cff_op_poll_reg;
+	struct cff_op_wait_irq cff_op_wait_irq;
+	struct cff_op_eof cff_op_eof;
+	unsigned char out_buf[sizeof(cff_op_write_membuf)/3*4 + 16];
+	void *data;
+	int len = 0, out_size;
+	long cur_secs;
+
+	spin_lock(&cffdump_lock);
+	if (opcode == CFF_OP_WRITE_MEM) {
+		if (op1 < 0x40000000 || op1 >= 0x60000000)
+			KGSL_CORE_ERR("addr out-of-range: op1=%08x", op1);
+		if ((cff_op_write_membuf.addr != op1 &&
+			cff_op_write_membuf.count)
+			|| (cff_op_write_membuf.count == MEMBUF_SIZE))
+			cffdump_membuf(id, out_buf, sizeof(out_buf));
+
+		cff_op_write_membuf.buffer[cff_op_write_membuf.count++] = op2;
+		cff_op_write_membuf.addr = op1 + sizeof(uint);
+		spin_unlock(&cffdump_lock);
+		return;
+	} else if (cff_op_write_membuf.count)
+		cffdump_membuf(id, out_buf, sizeof(out_buf));
+	spin_unlock(&cffdump_lock);
+
+	switch (opcode) {
+	case CFF_OP_WRITE_REG:
+		cff_op_write_reg.op = opcode;
+		cff_op_write_reg.addr = op1;
+		cff_op_write_reg.value = op2;
+		data = &cff_op_write_reg;
+		len = sizeof(cff_op_write_reg);
+		break;
+
+	case CFF_OP_POLL_REG:
+		cff_op_poll_reg.op = opcode;
+		cff_op_poll_reg.addr = op1;
+		cff_op_poll_reg.value = op2;
+		cff_op_poll_reg.mask = op3;
+		data = &cff_op_poll_reg;
+		len = sizeof(cff_op_poll_reg);
+		break;
+
+	case CFF_OP_WAIT_IRQ:
+		cff_op_wait_irq.op = opcode;
+		data = &cff_op_wait_irq;
+		len = sizeof(cff_op_wait_irq);
+		break;
+
+	case CFF_OP_EOF:
+		cff_op_eof.op = opcode;
+		data = &cff_op_eof;
+		len = sizeof(cff_op_eof);
+		break;
+	}
+
+	if (len) {
+		b64_encode(data, len, out_buf, sizeof(out_buf), &out_size);
+		out_buf[out_size] = 0;
+		klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
+	} else
+		pr_warn("kgsl: cffdump: unhandled opcode: %d\n", opcode);
+
+	cur_secs = get_seconds();
+	if ((cur_secs - last_sec) > 10 || (last_sec - cur_secs) > 10) {
+		pr_info("kgsl: cffdump: total [bytes:%lu kB, syncmem:%lu kB], "
+			"seq#: %lu\n", total_bytes/1024, total_syncmem/1024,
+			serial_nr);
+		last_sec = cur_secs;
+	}
+}
+
+void kgsl_cffdump_init(void)
+{
+	struct dentry *debugfs_dir = kgsl_get_debugfs_dir();
+
+#ifdef ALIGN_CPU
+	cpumask_t mask;
+
+	cpumask_clear(&mask);
+	cpumask_set_cpu(1, &mask);
+	sched_setaffinity(0, &mask);
+#endif
+	if (!debugfs_dir || IS_ERR(debugfs_dir)) {
+		KGSL_CORE_ERR("Debugfs directory is bad\n");
+		return;
+	}
+
+	kgsl_cff_dump_enable = 1;
+
+	spin_lock_init(&cffdump_lock);
+
+	dir = debugfs_create_dir("cff", debugfs_dir);
+	if (!dir) {
+		KGSL_CORE_ERR("debugfs_create_dir failed\n");
+		return;
+	}
+
+	chan = create_channel(subbuf_size, n_subbufs);
+}
+
+void kgsl_cffdump_destroy(void)
+{
+	if (chan)
+		relay_flush(chan);
+	destroy_channel();
+	if (dir)
+		debugfs_remove(dir);
+}
+
+void kgsl_cffdump_open(enum kgsl_deviceid device_id)
+{
+}
+
+void kgsl_cffdump_close(enum kgsl_deviceid device_id)
+{
+	cffdump_printline(device_id, CFF_OP_EOF, 0, 0, 0);
+}
+
+void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
+	const struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes,
+	bool clean_cache)
+{
+	const void *src;
+	uint host_size;
+	uint physaddr;
+
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	total_syncmem += sizebytes;
+
+	if (memdesc == NULL) {
+		struct kgsl_mem_entry *entry;
+		spin_lock(&dev_priv->process_priv->mem_lock);
+		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
+			gpuaddr, sizebytes);
+		spin_unlock(&dev_priv->process_priv->mem_lock);
+		if (entry == NULL) {
+			KGSL_CORE_ERR("did not find mapping "
+				"for gpuaddr: 0x%08x\n", gpuaddr);
+			return;
+		}
+		memdesc = &entry->memdesc;
+	}
+	BUG_ON(memdesc->gpuaddr == 0);
+	BUG_ON(gpuaddr == 0);
+	physaddr = kgsl_get_realaddr(memdesc) + (gpuaddr - memdesc->gpuaddr);
+
+	src = kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr, &host_size);
+	if (src == NULL || host_size < sizebytes) {
+		KGSL_CORE_ERR("did not find mapping for "
+			"gpuaddr: 0x%08x, m->host: 0x%p, phys: 0x%08x\n",
+			gpuaddr, memdesc->hostptr, memdesc->physaddr);
+		return;
+	}
+
+	if (clean_cache) {
+		/* Ensure that this memory region is not read from the
+		 * cache but fetched fresh */
+
+		mb();
+
+		kgsl_cache_range_op(memdesc->hostptr, memdesc->size,
+				    memdesc->type, KGSL_CACHE_OP_INV);
+	}
+
+	BUG_ON(physaddr > 0x66000000 && physaddr < 0x66ffffff);
+	while (sizebytes > 3) {
+		cffdump_printline(-1, CFF_OP_WRITE_MEM, physaddr, *(uint *)src,
+			0);
+		physaddr += 4;
+		src += 4;
+		sizebytes -= 4;
+	}
+	if (sizebytes > 0)
+		cffdump_printline(-1, CFF_OP_WRITE_MEM, physaddr, *(uint *)src,
+			0);
+}
+
+void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes)
+{
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	BUG_ON(addr > 0x66000000 && addr < 0x66ffffff);
+	while (sizebytes > 3) {
+		/* Use 32bit memory writes as long as there's at least
+		 * 4 bytes left */
+		cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value, 0);
+		addr += 4;
+		sizebytes -= 4;
+	}
+	if (sizebytes > 0)
+		cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value, 0);
+}
+
+void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
+	uint value)
+{
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	cffdump_printline(device_id, CFF_OP_WRITE_REG, addr, value, 0);
+}
+
+void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
+	uint value, uint mask)
+{
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	cffdump_printline(device_id, CFF_OP_POLL_REG, addr, value, mask);
+}
+
+void kgsl_cffdump_slavewrite(uint addr, uint value)
+{
+	if (!kgsl_cff_dump_enable)
+		return;
+
+	cffdump_printline(-1, CFF_OP_WRITE_REG, addr, value, 0);
+}
+
+int kgsl_cffdump_waitirq(void)
+{
+	if (!kgsl_cff_dump_enable)
+		return 0;
+
+	cffdump_printline(-1, CFF_OP_WAIT_IRQ, 0, 0, 0);
+
+	return 1;
+}
+EXPORT_SYMBOL(kgsl_cffdump_waitirq);
+
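+/*
+ * PM4 command packets, as parsed below: bits [31:30] of the header give
+ * the packet type.  Type-3 packets carry (dword count - 2) in bits [29:16]
+ * and the opcode in bits [15:8]; for the indirect-buffer opcodes the next
+ * two dwords are the IB GPU address and its size in dwords.
+ */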
+#define ADDRESS_STACK_SIZE 256
+#define GET_PM4_TYPE3_OPCODE(x) ((*(x) >> 8) & 0xFF)
+static unsigned int kgsl_cffdump_addr_count;
+
+static bool kgsl_cffdump_handle_type3(struct kgsl_device_private *dev_priv,
+	uint *hostaddr, bool check_only)
+{
+	static uint addr_stack[ADDRESS_STACK_SIZE];
+	static uint size_stack[ADDRESS_STACK_SIZE];
+
+	switch (GET_PM4_TYPE3_OPCODE(hostaddr)) {
+	case PM4_INDIRECT_BUFFER_PFD:
+	case PM4_INDIRECT_BUFFER:
+	{
+		/* traverse indirect buffers */
+		int i;
+		uint ibaddr = hostaddr[1];
+		uint ibsize = hostaddr[2];
+
+		/* has this address already been encountered? */
+		for (i = 0;
+			i < kgsl_cffdump_addr_count && addr_stack[i] != ibaddr;
+			++i)
+			;
+
+		if (kgsl_cffdump_addr_count == i) {
+			addr_stack[kgsl_cffdump_addr_count] = ibaddr;
+			size_stack[kgsl_cffdump_addr_count++] = ibsize;
+
+			if (kgsl_cffdump_addr_count >= ADDRESS_STACK_SIZE) {
+				KGSL_CORE_ERR("stack overflow\n");
+				return false;
+			}
+
+			return kgsl_cffdump_parse_ibs(dev_priv, NULL,
+				ibaddr, ibsize, check_only);
+		} else if (size_stack[i] != ibsize) {
+			KGSL_CORE_ERR("gpuaddr: 0x%08x, "
+				"wc: %u, with size wc: %u already on the "
+				"stack\n", ibaddr, ibsize, size_stack[i]);
+			return false;
+		}
+	}
+	break;
+	}
+
+	return true;
+}
+
+/*
+ * Traverse IBs and dump them to test vector. Detect swap by inspecting
+ * register writes, keeping note of the current state, and dump
+ * framebuffer config to test vector
+ */
+bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
+	const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
+	bool check_only)
+{
+	static uint level; /* recursion level */
+	bool ret = true;
+	uint host_size;
+	uint *hostaddr, *hoststart;
+	int dwords_left = sizedwords; /* dwords left in the current command
+					 buffer */
+
+	if (level == 0)
+		kgsl_cffdump_addr_count = 0;
+
+	if (memdesc == NULL) {
+		struct kgsl_mem_entry *entry;
+		spin_lock(&dev_priv->process_priv->mem_lock);
+		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
+			gpuaddr, sizedwords * sizeof(uint));
+		spin_unlock(&dev_priv->process_priv->mem_lock);
+		if (entry == NULL) {
+			KGSL_CORE_ERR("did not find mapping "
+				"for gpuaddr: 0x%08x\n", gpuaddr);
+			return true;
+		}
+		memdesc = &entry->memdesc;
+	}
+
+	hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr, &host_size);
+	if (hostaddr == NULL) {
+		KGSL_CORE_ERR("did not find mapping for "
+			"gpuaddr: 0x%08x\n", gpuaddr);
+		return true;
+	}
+
+	hoststart = hostaddr;
+
+	level++;
+
+	if (!memdesc->physaddr) {
+		KGSL_CORE_ERR("no physaddr");
+		return true;
+	} else {
+		mb();
+		kgsl_cache_range_op(memdesc->hostptr, memdesc->size,
+				    memdesc->type, KGSL_CACHE_OP_INV);
+	}
+
+#ifdef DEBUG
+	pr_info("kgsl: cffdump: ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
+		gpuaddr, sizedwords, hostaddr);
+#endif
+
+	while (dwords_left > 0) {
+		int count = 0; /* dword count including packet header */
+		bool cur_ret = true;
+
+		switch (*hostaddr >> 30) {
+		case 0x0: /* type-0 */
+			count = (*hostaddr >> 16)+2;
+			break;
+		case 0x1: /* type-1 */
+			count = 2;
+			break;
+		case 0x3: /* type-3 */
+			count = ((*hostaddr >> 16) & 0x3fff) + 2;
+			cur_ret = kgsl_cffdump_handle_type3(dev_priv,
+				hostaddr, check_only);
+			break;
+		default:
+			pr_warn("kgsl: cffdump: parse-ib: unexpected type: "
+				"type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
+				*hostaddr >> 30, *hostaddr, hostaddr,
+				gpuaddr+4*(sizedwords-dwords_left));
+			cur_ret = false;
+			count = dwords_left;
+			break;
+		}
+
+#ifdef DEBUG
+		if (!cur_ret) {
+			pr_info("kgsl: cffdump: bad sub-type: #:%d/%d, v:0x%08x"
+				" @ 0x%p[gb:0x%08x], level:%d\n",
+				sizedwords-dwords_left, sizedwords, *hostaddr,
+				hostaddr, gpuaddr+4*(sizedwords-dwords_left),
+				level);
+
+			print_hex_dump(KERN_ERR, level == 1 ? "IB1:" : "IB2:",
+				DUMP_PREFIX_OFFSET, 32, 4, hoststart,
+				sizedwords*4, 0);
+		}
+#endif
+		ret = ret && cur_ret;
+
+		/* jump to next packet */
+		dwords_left -= count;
+		hostaddr += count;
+		cur_ret = dwords_left >= 0;
+
+#ifdef DEBUG
+		if (!cur_ret) {
+			pr_info("kgsl: cffdump: bad count: c:%d, #:%d/%d, "
+				"v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
+				count, sizedwords-(dwords_left+count),
+				sizedwords, *(hostaddr-count), hostaddr-count,
+				gpuaddr+4*(sizedwords-(dwords_left+count)),
+				level);
+
+			print_hex_dump(KERN_ERR, level == 1 ? "IB1:" : "IB2:",
+				DUMP_PREFIX_OFFSET, 32, 4, hoststart,
+				sizedwords*4, 0);
+		}
+#endif
+
+		ret = ret && cur_ret;
+	}
+
+	if (!ret)
+		pr_info("kgsl: cffdump: parsing failed: gpuaddr:0x%08x, "
+			"host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);
+
+	if (!check_only) {
+#ifdef DEBUG
+		uint offset = gpuaddr - memdesc->gpuaddr;
+		pr_info("kgsl: cffdump: ib-dump: hostptr:%p, gpuaddr:%08x, "
+			"physaddr:%08x, offset:%d, size:%d", hoststart,
+			gpuaddr, memdesc->physaddr + offset, offset,
+			sizedwords*4);
+#endif
+		kgsl_cffdump_syncmem(dev_priv, memdesc, gpuaddr, sizedwords*4,
+			false);
+	}
+
+	level--;
+
+	return ret;
+}
+
+static int subbuf_start_handler(struct rchan_buf *buf,
+	void *subbuf, void *prev_subbuf, uint prev_padding)
+{
+	pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf"
+		"=%p, prev_padding=%08x)\n", subbuf, prev_subbuf, prev_padding);
+
+	if (relay_buf_full(buf)) {
+		if (!suspended) {
+			suspended = 1;
+			pr_warn("kgsl: cffdump: relay: cpu %d buffer full!!!\n",
+				smp_processor_id());
+		}
+		dropped++;
+		return 0;
+	} else if (suspended) {
+		suspended = 0;
+		pr_warn("kgsl: cffdump: relay: cpu %d buffer no longer full.\n",
+			smp_processor_id());
+	}
+
+	subbuf_start_reserve(buf, 0);
+	return 1;
+}
+
+static struct dentry *create_buf_file_handler(const char *filename,
+	struct dentry *parent, int mode, struct rchan_buf *buf,
+	int *is_global)
+{
+	return debugfs_create_file(filename, mode, parent, buf,
+				       &relay_file_operations);
+}
+
+/*
+ * remove_buf_file() default callback.  Removes the relay file from debugfs.
+ */
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+	pr_info("kgsl: cffdump: %s()\n", __func__);
+	debugfs_remove(dentry);
+	return 0;
+}
+
+/*
+ * relay callbacks
+ */
+static struct rchan_callbacks relay_callbacks = {
+	.subbuf_start = subbuf_start_handler,
+	.create_buf_file = create_buf_file_handler,
+	.remove_buf_file = remove_buf_file_handler,
+};
+
+/**
+ *	create_channel - creates channel /debug/kgsl/cff/cpuXXX
+ *
+ *	Creates channel along with associated produced/consumed control files
+ *
+ *	Returns channel on success, NULL otherwise
+ */
+static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs)
+{
+	struct rchan *chan;
+
+	pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, "
+		"n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir);
+
+	chan = relay_open("cpu", dir, subbuf_size,
+			  n_subbufs, &relay_callbacks, NULL);
+	if (!chan) {
+		KGSL_CORE_ERR("relay_open failed\n");
+		return NULL;
+	}
+
+	suspended = 0;
+	dropped = 0;
+
+	return chan;
+}
+
+/**
+ *	destroy_channel - destroys channel /debug/kgsl/cff/cpuXXX
+ *
+ *	Destroys channel along with associated produced/consumed control files
+ */
+static void destroy_channel(void)
+{
+	pr_info("kgsl: cffdump: relay: destroy_channel\n");
+	if (chan) {
+		relay_close(chan);
+		chan = NULL;
+	}
+}
+
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
new file mode 100644
index 0000000..aca7a7c
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_CFFDUMP_H
+#define __KGSL_CFFDUMP_H
+
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+
+#include <linux/types.h>
+
+#include "kgsl_device.h"
+
+void kgsl_cffdump_init(void);
+void kgsl_cffdump_destroy(void);
+void kgsl_cffdump_open(enum kgsl_deviceid device_id);
+void kgsl_cffdump_close(enum kgsl_deviceid device_id);
+void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
+	const struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes,
+	bool clean_cache);
+void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes);
+void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
+	uint value);
+void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
+	uint value, uint mask);
+bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
+	const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
+	bool check_only);
+static inline bool kgsl_cffdump_flags_no_memzero(void) { return true; }
+
+#else
+
+#define kgsl_cffdump_init()					(void)0
+#define kgsl_cffdump_destroy()					(void)0
+#define kgsl_cffdump_open(device_id)				(void)0
+#define kgsl_cffdump_close(device_id)				(void)0
+#define kgsl_cffdump_syncmem(dev_priv, memdesc, addr, sizebytes, clean_cache) \
+	(void) 0
+#define kgsl_cffdump_setmem(addr, value, sizebytes)		(void)0
+#define kgsl_cffdump_regwrite(device_id, addr, value)		(void)0
+#define kgsl_cffdump_regpoll(device_id, addr, value, mask)	(void)0
+#define kgsl_cffdump_parse_ibs(dev_priv, memdesc, gpuaddr, \
+	sizedwords, check_only)					true
+#define kgsl_cffdump_flags_no_memzero()				true
+
+#endif /* CONFIG_MSM_KGSL_CFF_DUMP */
+
+#endif /* __KGSL_CFFDUMP_H */
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
new file mode 100644
index 0000000..9da3096
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -0,0 +1,82 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+
+/* default log level is error for everything */
+#define KGSL_LOG_LEVEL_DEFAULT 3
+#define KGSL_LOG_LEVEL_MAX     7
+
+struct dentry *kgsl_debugfs_dir;
+
+static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
+{
+	*log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
+	return 0;
+}
+
+#define KGSL_DEBUGFS_LOG(__log)                         \
+static int __log ## _set(void *data, u64 val)           \
+{                                                       \
+	struct kgsl_device *device = data;              \
+	return kgsl_log_set(&device->__log, data, val); \
+}                                                       \
+static int __log ## _get(void *data, u64 *val)	        \
+{                                                       \
+	struct kgsl_device *device = data;              \
+	*val = device->__log;                           \
+	return 0;                                       \
+}                                                       \
+DEFINE_SIMPLE_ATTRIBUTE(__log ## _fops,                 \
+__log ## _get, __log ## _set, "%llu\n");
+
+KGSL_DEBUGFS_LOG(drv_log);
+KGSL_DEBUGFS_LOG(cmd_log);
+KGSL_DEBUGFS_LOG(ctxt_log);
+KGSL_DEBUGFS_LOG(mem_log);
+KGSL_DEBUGFS_LOG(pwr_log);
+
+void kgsl_device_debugfs_init(struct kgsl_device *device)
+{
+	if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
+		device->d_debugfs = debugfs_create_dir(device->name,
+						       kgsl_debugfs_dir);
+
+	if (!device->d_debugfs || IS_ERR(device->d_debugfs))
+		return;
+
+	device->cmd_log = KGSL_LOG_LEVEL_DEFAULT;
+	device->ctxt_log = KGSL_LOG_LEVEL_DEFAULT;
+	device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
+	device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
+	device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
+
+	debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
+			    &cmd_log_fops);
+	debugfs_create_file("log_level_ctxt", 0644, device->d_debugfs, device,
+			    &ctxt_log_fops);
+	debugfs_create_file("log_level_drv", 0644, device->d_debugfs, device,
+			    &drv_log_fops);
+	debugfs_create_file("log_level_mem", 0644, device->d_debugfs, device,
+				&mem_log_fops);
+	debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
+				&pwr_log_fops);
+}
+
+void kgsl_core_debugfs_init(void)
+{
+	kgsl_debugfs_dir = debugfs_create_dir("kgsl", 0);
+}
diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h
new file mode 100644
index 0000000..1e36fab
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_debugfs.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _KGSL_DEBUGFS_H
+#define _KGSL_DEBUGFS_H
+
+struct kgsl_device;
+
+#ifdef CONFIG_DEBUG_FS
+void kgsl_core_debugfs_init(void);
+void kgsl_device_debugfs_init(struct kgsl_device *device);
+
+extern struct dentry *kgsl_debugfs_dir;
+static inline struct dentry *kgsl_get_debugfs_dir(void)
+{
+	return kgsl_debugfs_dir;
+}
+
+#else
+static inline void kgsl_core_debugfs_init(void) { }
+static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { }
+
+static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; }
+
+#endif
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
new file mode 100644
index 0000000..692a9ec
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -0,0 +1,289 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_DEVICE_H
+#define __KGSL_DEVICE_H
+
+#include <linux/idr.h>
+#include <linux/wakelock.h>
+#include <linux/pm_qos_params.h>
+#include <linux/earlysuspend.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_pwrctrl.h"
+#include "kgsl_log.h"
+#include "kgsl_pwrscale.h"
+
+#define KGSL_TIMEOUT_NONE       0
+#define KGSL_TIMEOUT_DEFAULT    0xFFFFFFFF
+
+#define FIRST_TIMEOUT (HZ / 2)
+
+/*
+ * KGSL device state is initialized to INIT when platform_probe
+ * successfully initializes the device.  Once a device has been opened
+ * (started) it becomes ACTIVE.  NAP implies that only low latency
+ * resources (for now, clocks on some platforms) are off.  SLEEP implies
+ * that the KGSL module believes a device is idle (it has been inactive
+ * past its timer) and all system resources are released.  SUSPEND is
+ * requested by the kernel and will be enforced upon all open devices.
+ */
+
+#define KGSL_STATE_NONE		0x00000000
+#define KGSL_STATE_INIT		0x00000001
+#define KGSL_STATE_ACTIVE	0x00000002
+#define KGSL_STATE_NAP		0x00000004
+#define KGSL_STATE_SLEEP	0x00000008
+#define KGSL_STATE_SUSPEND	0x00000010
+#define KGSL_STATE_HUNG		0x00000020
+#define KGSL_STATE_DUMP_AND_RECOVER	0x00000040
+
+#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK  0x1000000
+
+#define KGSL_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+
+struct kgsl_device;
+struct platform_device;
+struct kgsl_device_private;
+struct kgsl_context;
+struct kgsl_power_stats;
+
+struct kgsl_functable {
+	/* Mandatory functions - these functions must be implemented
+	   by the client device.  The driver will not check for a NULL
+	   pointer before calling the hook.
+	 */
+	void (*regread) (struct kgsl_device *device,
+		unsigned int offsetwords, unsigned int *value);
+	void (*regwrite) (struct kgsl_device *device,
+		unsigned int offsetwords, unsigned int value);
+	int (*idle) (struct kgsl_device *device, unsigned int timeout);
+	unsigned int (*isidle) (struct kgsl_device *device);
+	int (*suspend_context) (struct kgsl_device *device);
+	int (*start) (struct kgsl_device *device, unsigned int init_ram);
+	int (*stop) (struct kgsl_device *device);
+	int (*getproperty) (struct kgsl_device *device,
+		enum kgsl_property_type type, void *value,
+		unsigned int sizebytes);
+	int (*waittimestamp) (struct kgsl_device *device,
+		unsigned int timestamp, unsigned int msecs);
+	unsigned int (*readtimestamp) (struct kgsl_device *device,
+		enum kgsl_timestamp_type type);
+	int (*issueibcmds) (struct kgsl_device_private *dev_priv,
+		struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
+		unsigned int sizedwords, uint32_t *timestamp,
+		unsigned int flags);
+	int (*setup_pt)(struct kgsl_device *device,
+		struct kgsl_pagetable *pagetable);
+	int (*cleanup_pt)(struct kgsl_device *device,
+		struct kgsl_pagetable *pagetable);
+	void (*power_stats)(struct kgsl_device *device,
+		struct kgsl_power_stats *stats);
+	void (*irqctrl)(struct kgsl_device *device, int state);
+	/* Optional functions - these functions are not mandatory.  The
+	   driver will check that the function pointer is not NULL before
+	   calling the hook */
+	void (*setstate) (struct kgsl_device *device, uint32_t flags);
+	int (*drawctxt_create) (struct kgsl_device *device,
+		struct kgsl_pagetable *pagetable, struct kgsl_context *context,
+		uint32_t flags);
+	void (*drawctxt_destroy) (struct kgsl_device *device,
+		struct kgsl_context *context);
+	long (*ioctl) (struct kgsl_device_private *dev_priv,
+		unsigned int cmd, void *data);
+};
+
+struct kgsl_memregion {
+	unsigned char *mmio_virt_base;
+	unsigned int mmio_phys_base;
+	uint32_t gpu_base;
+	unsigned int sizebytes;
+};
+
+struct kgsl_device {
+	struct device *dev;
+	const char *name;
+	unsigned int ver_major;
+	unsigned int ver_minor;
+	uint32_t flags;
+	enum kgsl_deviceid id;
+	struct kgsl_memregion regspace;
+	struct kgsl_memdesc memstore;
+	const char *iomemname;
+
+	struct kgsl_mmu mmu;
+	struct completion hwaccess_gate;
+	const struct kgsl_functable *ftbl;
+	struct work_struct idle_check_ws;
+	struct timer_list idle_timer;
+	struct kgsl_pwrctrl pwrctrl;
+	int open_count;
+
+	struct atomic_notifier_head ts_notifier_list;
+	struct mutex mutex;
+	uint32_t state;
+	uint32_t requested_state;
+
+	struct list_head memqueue;
+	unsigned int active_cnt;
+	struct completion suspend_gate;
+
+	wait_queue_head_t wait_queue;
+	struct workqueue_struct *work_queue;
+	struct device *parentdev;
+	struct completion recovery_gate;
+	struct dentry *d_debugfs;
+	struct idr context_idr;
+	struct early_suspend display_off;
+
+	/* Logging levels */
+	int cmd_log;
+	int ctxt_log;
+	int drv_log;
+	int mem_log;
+	int pwr_log;
+	struct wake_lock idle_wakelock;
+	struct kgsl_pwrscale pwrscale;
+	struct kobject pwrscale_kobj;
+	struct pm_qos_request_list pm_qos_req_dma;
+};
+
+struct kgsl_context {
+	uint32_t id;
+
+	/* Pointer to the owning device instance */
+	struct kgsl_device_private *dev_priv;
+
+	/* Pointer to the device specific context information */
+	void *devctxt;
+};
+
+struct kgsl_process_private {
+	unsigned int refcnt;
+	pid_t pid;
+	spinlock_t mem_lock;
+	struct list_head mem_list;
+	struct kgsl_pagetable *pagetable;
+	struct list_head list;
+	struct kobject *kobj;
+
+	struct {
+		unsigned int user;
+		unsigned int user_max;
+		unsigned int mapped;
+		unsigned int mapped_max;
+		unsigned int flushes;
+	} stats;
+};
+
+struct kgsl_device_private {
+	struct kgsl_device *device;
+	struct kgsl_process_private *process_priv;
+};
+
+struct kgsl_power_stats {
+	s64 total_time;
+	s64 busy_time;
+};
+
+struct kgsl_device *kgsl_get_device(int dev_idx);
+
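+/*
+ * Thin wrappers around the device function table.  These hooks are part
+ * of the mandatory section of struct kgsl_functable, so no NULL check is
+ * performed before the call.
+ */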
+static inline void kgsl_regread(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int *value)
+{
+	device->ftbl->regread(device, offsetwords, value);
+}
+
+static inline void kgsl_regwrite(struct kgsl_device *device,
+				 unsigned int offsetwords,
+				 unsigned int value)
+{
+	device->ftbl->regwrite(device, offsetwords, value);
+}
+
+static inline int kgsl_idle(struct kgsl_device *device, unsigned int timeout)
+{
+	return device->ftbl->idle(device, timeout);
+}
+
+static inline int kgsl_create_device_sysfs_files(struct device *root,
+	const struct device_attribute **list)
+{
+	int ret = 0, i;
+	for (i = 0; list[i] != NULL; i++)
+		ret |= device_create_file(root, list[i]);
+	return ret;
+}
+
+static inline void kgsl_remove_device_sysfs_files(struct device *root,
+	const struct device_attribute **list)
+{
+	int i;
+	for (i = 0; list[i] != NULL; i++)
+		device_remove_file(root, list[i]);
+}
+
+static inline struct kgsl_mmu *
+kgsl_get_mmu(struct kgsl_device *device)
+{
+	return (struct kgsl_mmu *) (device ? &device->mmu : NULL);
+}
+
+static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
+			return kgsl_driver.devp[i];
+	}
+
+	return NULL;
+}
+
+static inline int kgsl_create_device_workqueue(struct kgsl_device *device)
+{
+	device->work_queue = create_workqueue(device->name);
+	if (!device->work_queue) {
+		KGSL_DRV_ERR(device, "create_workqueue(%s) failed\n",
+			device->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static inline struct kgsl_context *
+kgsl_find_context(struct kgsl_device_private *dev_priv, uint32_t id)
+{
+	struct kgsl_context *ctxt =
+		idr_find(&dev_priv->device->context_idr, id);
+
+	/* Make sure that the context belongs to the current instance so
+	   that other processes can't guess context IDs and mess things up */
+
+	return  (ctxt && ctxt->dev_priv == dev_priv) ? ctxt : NULL;
+}
+
+int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp);
+
+int kgsl_register_ts_notifier(struct kgsl_device *device,
+			      struct notifier_block *nb);
+
+int kgsl_unregister_ts_notifier(struct kgsl_device *device,
+				struct notifier_block *nb);
+
+int kgsl_device_platform_probe(struct kgsl_device *device,
+		irqreturn_t (*dev_isr) (int, void*));
+void kgsl_device_platform_remove(struct kgsl_device *device);
+
+#endif  /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm/kgsl_drm.c b/drivers/gpu/msm/kgsl_drm.c
new file mode 100644
index 0000000..202783b
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_drm.c
@@ -0,0 +1,1690 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Implements an interface between KGSL and the DRM subsystem.  For now this
+ * is pretty simple, but it will take on more of the workload as time goes
+ * on
+ */
+#include "drmP.h"
+#include "drm.h"
+#include <linux/android_pmem.h>
+#include <linux/notifier.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+#include "kgsl_drm.h"
+#include "kgsl_mmu.h"
+#include "kgsl_sharedmem.h"
+
+#define DRIVER_AUTHOR           "Qualcomm"
+#define DRIVER_NAME             "kgsl"
+#define DRIVER_DESC             "KGSL DRM"
+#define DRIVER_DATE             "20100127"
+
+#define DRIVER_MAJOR            2
+#define DRIVER_MINOR            1
+#define DRIVER_PATCHLEVEL       1
+
+#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)
+
+#define ENTRY_EMPTY -1
+#define ENTRY_NEEDS_CLEANUP -2
+
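+/*
+ * Each GEM object carries DRM_KGSL_HANDLE_WAIT_ENTRIES wait slots for
+ * processes blocked on its lock, and the global fence table holds four
+ * times that many fences; an object may be a member of only one fence at
+ * a time.
+ */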
+#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
+#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
+
+/* Returns true if the memory type is in PMEM */
+
+#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
+#define TYPE_IS_PMEM(_t) \
+  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
+   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
+   ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
+#else
+#define TYPE_IS_PMEM(_t) \
+  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
+   ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
+#endif
+
+/* Returns true if the memory type is regular */
+
+#define TYPE_IS_MEM(_t) \
+  (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
+   ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
+   ((_t) & DRM_KGSL_GEM_TYPE_MEM))
+
+#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)
+
+/* Returns true if KMEM region is uncached */
+
+#define IS_MEM_UNCACHED(_t) \
+  ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
+   (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
+   (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))
+
+struct drm_kgsl_gem_object_wait_list_entry {
+	struct list_head list;
+	int pid;
+	int in_use;
+	wait_queue_head_t process_wait_q;
+};
+
+struct drm_kgsl_gem_object_fence {
+	int32_t fence_id;
+	unsigned int num_buffers;
+	int ts_valid;
+	unsigned int timestamp;
+	int ts_device;
+	int lockpid;
+	struct list_head buffers_in_fence;
+};
+
+struct drm_kgsl_gem_object_fence_list_entry {
+	struct list_head list;
+	int in_use;
+	struct drm_gem_object *gem_obj;
+};
+
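+/*
+ * Fences group the set of GEM buffers locked together by a single
+ * DRM_KGSL_GEM_LOCK_HANDLE request.  A fence is torn down either
+ * explicitly via DRM_KGSL_GEM_UNLOCK_HANDLE or lazily after the GPU
+ * timestamp armed through DRM_KGSL_GEM_UNLOCK_ON_TS has been seen to
+ * expire by the timestamp notifier.
+ */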
+static int32_t fence_id = 0x1;
+
+static struct drm_kgsl_gem_object_fence
+			  gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];
+
+struct drm_kgsl_gem_object {
+	struct drm_gem_object *obj;
+	uint32_t type;
+	struct kgsl_memdesc memdesc;
+	struct kgsl_pagetable *pagetable;
+	uint64_t mmap_offset;
+	int bufcount;
+	int flags;
+	struct list_head list;
+	int active;
+
+	struct {
+		uint32_t offset;
+		uint32_t gpuaddr;
+	} bufs[DRM_KGSL_GEM_MAX_BUFFERS];
+
+	int bound;
+	int lockpid;
+	/* Put these here to avoid allocing all the time */
+	struct drm_kgsl_gem_object_wait_list_entry
+	wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
+	/* Each object can only appear in a single fence */
+	struct drm_kgsl_gem_object_fence_list_entry
+	fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];
+
+	struct list_head wait_list;
+};
+
+/* This is a global list of all the memory currently mapped in the MMU */
+static struct list_head kgsl_mem_list;
+
+static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
+{
+	int cacheop = 0;
+
+	switch (op) {
+	case DRM_KGSL_GEM_CACHE_OP_TO_DEV:
+		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
+			    DRM_KGSL_GEM_CACHE_WBACKWA))
+			cacheop = KGSL_CACHE_OP_CLEAN;
+
+		break;
+
+	case DRM_KGSL_GEM_CACHE_OP_FROM_DEV:
+		if (type & (DRM_KGSL_GEM_CACHE_WBACK |
+			    DRM_KGSL_GEM_CACHE_WBACKWA |
+			    DRM_KGSL_GEM_CACHE_WTHROUGH))
+			cacheop = KGSL_CACHE_OP_INV;
+	}
+
+	kgsl_cache_range_op(memdesc, cacheop);
+}
+
+/* Flush all the memory mapped in the MMU */
+
+void kgsl_gpu_mem_flush(int op)
+{
+	struct drm_kgsl_gem_object *entry;
+
+	list_for_each_entry(entry, &kgsl_mem_list, list) {
+		kgsl_gem_mem_flush(&entry->memdesc, entry->type, op);
+	}
+
+	/* Takes care of WT/WC case.
+	 * More useful when we go barrierless
+	 */
+	dmb();
+}
+
+/* TODO:
+ * Add vsync wait */
+
+static int kgsl_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	return 0;
+}
+
+static int kgsl_drm_unload(struct drm_device *dev)
+{
+	return 0;
+}
+
+struct kgsl_drm_device_priv {
+	struct kgsl_device *device[KGSL_DEVICE_MAX];
+	struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
+};
+
+static int kgsl_ts_notifier_cb(struct notifier_block *blk,
+			       unsigned long code, void *_param);
+
+static struct notifier_block kgsl_ts_nb[KGSL_DEVICE_MAX];
+
+static int kgsl_drm_firstopen(struct drm_device *dev)
+{
+	int i;
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		struct kgsl_device *device = kgsl_get_device(i);
+
+		if (device == NULL)
+			continue;
+
+		kgsl_ts_nb[i].notifier_call = kgsl_ts_notifier_cb;
+		kgsl_register_ts_notifier(device, &kgsl_ts_nb[i]);
+	}
+
+	return 0;
+}
+
+void kgsl_drm_lastclose(struct drm_device *dev)
+{
+	int i;
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		struct kgsl_device *device = kgsl_get_device(i);
+		if (device == NULL)
+			continue;
+
+		kgsl_unregister_ts_notifier(device, &kgsl_ts_nb[i]);
+	}
+}
+
+void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+}
+
+static int kgsl_drm_suspend(struct drm_device *dev, pm_message_t state)
+{
+	return 0;
+}
+
+static int kgsl_drm_resume(struct drm_device *dev)
+{
+	return 0;
+}
+
+static void
+kgsl_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	struct drm_map_list *list;
+
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	kfree(list->map);
+	list->map = NULL;
+
+	priv->mmap_offset = 0;
+}
+
+static int
+kgsl_gem_memory_allocated(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	return priv->memdesc.size ? 1 : 0;
+}
+
+static int
+kgsl_gem_alloc_memory(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	int index;
+
+	/* Return if the memory is already allocated */
+
+	if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
+		return 0;
+
+	if (TYPE_IS_PMEM(priv->type)) {
+		int type;
+
+		if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
+		    priv->type & DRM_KGSL_GEM_PMEM_EBI)
+			type = PMEM_MEMTYPE_EBI1;
+		else
+			type = PMEM_MEMTYPE_SMI;
+
+		priv->memdesc.physaddr =
+			pmem_kalloc(obj->size * priv->bufcount,
+				    type | PMEM_ALIGNMENT_4K);
+
+		if (IS_ERR((void *) priv->memdesc.physaddr)) {
+			DRM_ERROR("Unable to allocate PMEM memory\n");
+			return -ENOMEM;
+		}
+
+		priv->memdesc.size = obj->size * priv->bufcount;
+		priv->memdesc.ops = &kgsl_contiguous_ops;
+
+	} else if (TYPE_IS_MEM(priv->type)) {
+		priv->memdesc.hostptr =
+			vmalloc_user(obj->size * priv->bufcount);
+
+		if (priv->memdesc.hostptr == NULL) {
+			DRM_ERROR("Unable to allocate vmalloc memory\n");
+			return -ENOMEM;
+		}
+
+		priv->memdesc.size = obj->size * priv->bufcount;
+		priv->memdesc.ops = &kgsl_vmalloc_ops;
+	} else
+		return -EINVAL;
+
+	for (index = 0; index < priv->bufcount; index++)
+		priv->bufs[index].offset = index * obj->size;
+
+	return 0;
+}
+
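+/*
+ * With the GPU MMU enabled an object has to be unmapped from the global
+ * pagetable when its last GPU binding is dropped; without the MMU the
+ * buffers are addressed physically, so there is nothing to undo.
+ */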
+#ifdef CONFIG_MSM_KGSL_MMU
+static void
+kgsl_gem_unmap(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+
+	if (!(priv->flags & DRM_KGSL_GEM_FLAG_MAPPED))
+		return;
+
+	kgsl_mmu_unmap(priv->pagetable, &priv->memdesc);
+
+	kgsl_mmu_putpagetable(priv->pagetable);
+	priv->pagetable = NULL;
+
+	if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
+	    (priv->type & DRM_KGSL_GEM_CACHE_MASK))
+		list_del(&priv->list);
+
+	priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
+}
+#else
+static void
+kgsl_gem_unmap(struct drm_gem_object *obj)
+{
+}
+#endif
+
+static void
+kgsl_gem_free_memory(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+
+	if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
+		return;
+
+	kgsl_gem_mem_flush(&priv->memdesc,  priv->type,
+			   DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
+
+	kgsl_gem_unmap(obj);
+
+	if (TYPE_IS_PMEM(priv->type))
+		pmem_kfree(priv->memdesc.physaddr);
+
+	kgsl_sharedmem_free(&priv->memdesc);
+}
+
+int
+kgsl_gem_init_object(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv;
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (priv == NULL) {
+		DRM_ERROR("Unable to create GEM object\n");
+		return -ENOMEM;
+	}
+
+	obj->driver_private = priv;
+	priv->obj = obj;
+
+	return 0;
+}
+
+void
+kgsl_gem_free_object(struct drm_gem_object *obj)
+{
+	kgsl_gem_free_memory(obj);
+	kgsl_gem_free_mmap_offset(obj);
+	drm_gem_object_release(obj);
+	kfree(obj->driver_private);
+}
+
+static int
+kgsl_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	struct drm_map_list *list;
+	int msize;
+
+	list = &obj->map_list;
+	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+	if (list->map == NULL) {
+		DRM_ERROR("Unable to allocate drm_map_list\n");
+		return -ENOMEM;
+	}
+
+	msize = obj->size * priv->bufcount;
+
+	list->map->type = _DRM_GEM;
+	list->map->size = msize;
+	list->map->handle = obj;
+
+	/* Allocate a mmap offset */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+						    msize / PAGE_SIZE,
+						    0, 0);
+
+	if (!list->file_offset_node) {
+		DRM_ERROR("Failed to allocate offset for %d\n", obj->name);
+		kfree(list->map);
+		return -ENOMEM;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+						  msize / PAGE_SIZE, 0);
+
+	if (!list->file_offset_node) {
+		DRM_ERROR("Unable to create the file_offset_node\n");
+		kfree(list->map);
+		return -ENOMEM;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
+		DRM_ERROR("Failed to add to map hash\n");
+		drm_mm_put_block(list->file_offset_node);
+		kfree(list->map);
+		return -ENOMEM;
+	}
+
+	priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
+
+	return 0;
+}
+
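+/*
+ * Resolve a GEM handle in another process's DRM file descriptor to the
+ * physical address and length of its currently active buffer.  This is
+ * intended for other kernel users such as the MDP display path, and only
+ * PMEM backed objects are supported.
+ */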
+int
+kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
+			unsigned long *len)
+{
+	struct file *filp;
+	struct drm_device *dev;
+	struct drm_file *file_priv;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = 0;
+
+	filp = fget(drm_fd);
+	if (unlikely(filp == NULL)) {
+		DRM_ERROR("Unable to ghet the DRM file descriptor\n");
+		return -EINVAL;
+	}
+	file_priv = filp->private_data;
+	if (unlikely(file_priv == NULL)) {
+		DRM_ERROR("Unable to get the file private data\n");
+		fput(filp);
+		return -EINVAL;
+	}
+	dev = file_priv->minor->dev;
+	if (unlikely(dev == NULL)) {
+		DRM_ERROR("Unable to get the minor device\n");
+		fput(filp);
+		return -EINVAL;
+	}
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (unlikely(obj == NULL)) {
+		DRM_ERROR("Invalid GEM handle %x\n", handle);
+		fput(filp);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	/* We can only use the MDP for PMEM regions */
+
+	if (TYPE_IS_PMEM(priv->type)) {
+		*start = priv->memdesc.physaddr +
+			priv->bufs[priv->active].offset;
+
+		*len = priv->memdesc.size;
+
+		kgsl_gem_mem_flush(&priv->memdesc,
+				   priv->type, DRM_KGSL_GEM_CACHE_OP_TO_DEV);
+	} else {
+		*start = 0;
+		*len = 0;
+		ret = -EINVAL;
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	fput(filp);
+	return ret;
+}
+
+static int
+kgsl_gem_init_obj(struct drm_device *dev,
+		  struct drm_file *file_priv,
+		  struct drm_gem_object *obj,
+		  int *handle)
+{
+	struct drm_kgsl_gem_object *priv;
+	int ret, i;
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	memset(&priv->memdesc, 0, sizeof(priv->memdesc));
+	priv->bufcount = 1;
+	priv->active = 0;
+	priv->bound = 0;
+
+	/* To preserve backwards compatibility, the default memory source
+	   is EBI */
+
+	priv->type = DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI;
+
+	ret = drm_gem_handle_create(file_priv, obj, handle);
+
+	drm_gem_object_handle_unreference(obj);
+	INIT_LIST_HEAD(&priv->wait_list);
+
+	for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
+		INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
+		priv->wait_entries[i].pid = 0;
+		init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
+	}
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
+		priv->fence_entries[i].in_use = 0;
+		priv->fence_entries[i].gem_obj = obj;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_create *create = data;
+	struct drm_gem_object *obj;
+	int ret, handle;
+
+	/* Page align the size so we can allocate multiple buffers */
+	create->size = ALIGN(create->size, 4096);
+
+	obj = drm_gem_object_alloc(dev, create->size);
+
+	if (obj == NULL) {
+		DRM_ERROR("Unable to allocate the GEM object\n");
+		return -ENOMEM;
+	}
+
+	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
+	if (ret)
+		return ret;
+
+	create->handle = handle;
+	return 0;
+}
+
+int
+kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_create_fd *args = data;
+	struct file *file;
+	dev_t rdev;
+	struct fb_info *info;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret, put_needed, handle;
+
+	file = fget_light(args->fd, &put_needed);
+
+	if (file == NULL) {
+		DRM_ERROR("Unable to get the file object\n");
+		return -EBADF;
+	}
+
+	rdev = file->f_dentry->d_inode->i_rdev;
+
+	/* Only framebuffer objects are supported ATM */
+
+	if (MAJOR(rdev) != FB_MAJOR) {
+		DRM_ERROR("File descriptor is not a framebuffer\n");
+		ret = -EBADF;
+		goto error_fput;
+	}
+
+	info = registered_fb[MINOR(rdev)];
+
+	if (info == NULL) {
+		DRM_ERROR("Framebuffer minor %d is not registered\n",
+			  MINOR(rdev));
+		ret = -EBADF;
+		goto error_fput;
+	}
+
+	obj = drm_gem_object_alloc(dev, info->fix.smem_len);
+
+	if (obj == NULL) {
+		DRM_ERROR("Unable to allocate GEM object\n");
+		ret = -ENOMEM;
+		goto error_fput;
+	}
+
+	ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
+
+	if (ret)
+		goto error_fput;
+
+	mutex_lock(&dev->struct_mutex);
+
+	priv = obj->driver_private;
+	priv->memdesc.physaddr = info->fix.smem_start;
+	priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;
+
+	mutex_unlock(&dev->struct_mutex);
+	args->handle = handle;
+
+error_fput:
+	fput_light(file, put_needed);
+
+	return ret;
+}
+
+int
+kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_memtype *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = 0;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (TYPE_IS_FD(priv->type))
+		ret = -EINVAL;
+	else {
+		if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
+			priv->type = args->type;
+		else
+			ret = -EINVAL;
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_memtype *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	args->type = priv->type;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int
+kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_bind_gpu *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (--priv->bound == 0)
+		kgsl_gem_unmap(obj);
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+#ifdef CONFIG_MSM_KGSL_MMU
+static int
+kgsl_gem_map(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	int index;
+	int ret = -EINVAL;
+
+	if (priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
+		return 0;
+
+	/* Get the global page table */
+
+	if (priv->pagetable == NULL) {
+		priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+
+		if (priv->pagetable == NULL) {
+			DRM_ERROR("Unable to get the GPU MMU pagetable\n");
+			return -EINVAL;
+		}
+	}
+
+	priv->memdesc.pagetable = priv->pagetable;
+
+	ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
+			   GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+	if (!ret) {
+		for (index = 0; index < priv->bufcount; index++) {
+			priv->bufs[index].gpuaddr =
+				priv->memdesc.gpuaddr +
+				priv->bufs[index].offset;
+		}
+	}
+
+	/* Add cached memory to the list to be cached */
+
+	if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
+	    priv->type & DRM_KGSL_GEM_CACHE_MASK)
+		list_add(&priv->list, &kgsl_mem_list);
+
+	priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
+
+	return ret;
+}
+#else
+static int
+kgsl_gem_map(struct drm_gem_object *obj)
+{
+	struct drm_kgsl_gem_object *priv = obj->driver_private;
+	int index;
+
+	if (TYPE_IS_PMEM(priv->type)) {
+		for (index = 0; index < priv->bufcount; index++)
+			priv->bufs[index].gpuaddr =
+			priv->memdesc.physaddr + priv->bufs[index].offset;
+
+		return 0;
+	}
+
+	return -EINVAL;
+}
+#endif
+
+int
+kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_bind_gpu *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = 0;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (priv->bound++ == 0) {
+
+		if (!kgsl_gem_memory_allocated(obj)) {
+			DRM_ERROR("Memory not allocated for this object\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = kgsl_gem_map(obj);
+
+		/* This is legacy behavior - use GET_BUFFERINFO instead */
+		args->gpuptr = priv->bufs[0].gpuaddr;
+	}
+out:
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/* Allocate the memory and prepare it for CPU mapping */
+
+int
+kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_alloc *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	ret = kgsl_gem_alloc_memory(obj);
+
+	if (ret) {
+		DRM_ERROR("Unable to allocate object memory\n");
+	} else if (!priv->mmap_offset) {
+		ret = kgsl_gem_create_mmap_offset(obj);
+		if (ret)
+			DRM_ERROR("Unable to create a mmap offset\n");
+	}
+
+	args->offset = priv->mmap_offset;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_mmap *args = data;
+	struct drm_gem_object *obj;
+	unsigned long addr;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	down_write(&current->mm->mmap_sem);
+
+	addr = do_mmap(obj->filp, 0, args->size,
+		       PROT_READ | PROT_WRITE, MAP_SHARED,
+		       args->offset);
+
+	up_write(&current->mm->mmap_sem);
+
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (IS_ERR((void *) addr))
+		return addr;
+
+	args->hostptr = (uint32_t) addr;
+	return 0;
+}
+
+/* This function is deprecated */
+
+int
+kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_prep *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	ret = kgsl_gem_alloc_memory(obj);
+	if (ret) {
+		DRM_ERROR("Unable to allocate object memory\n");
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	if (priv->mmap_offset == 0) {
+		ret = kgsl_gem_create_mmap_offset(obj);
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
+	}
+
+	args->offset = priv->mmap_offset;
+	args->phys = priv->memdesc.physaddr;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int
+kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_bufinfo *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = -EINVAL;
+	int index;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (!kgsl_gem_memory_allocated(obj)) {
+		DRM_ERROR("Memory not allocated for this object\n");
+		goto out;
+	}
+
+	for (index = 0; index < priv->bufcount; index++) {
+		args->offset[index] = priv->bufs[index].offset;
+		args->gpuaddr[index] = priv->bufs[index].gpuaddr;
+	}
+
+	args->count = priv->bufcount;
+	args->active = priv->active;
+
+	ret = 0;
+
+out:
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_bufcount *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = -EINVAL;
+
+	if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
+		return -EINVAL;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	/* It is too much math to worry about what happens if we are already
+	   allocated, so just bail if we are */
+
+	if (kgsl_gem_memory_allocated(obj)) {
+		DRM_ERROR("Memory already allocated - cannot change"
+			  "number of buffers\n");
+		goto out;
+	}
+
+	priv->bufcount = args->bufcount;
+	ret = 0;
+
+out:
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+int
+kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_active *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	int ret = -EINVAL;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+	if (obj == NULL) {
+		DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+		return -EBADF;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	priv = obj->driver_private;
+
+	if (args->active < 0 || args->active >= priv->bufcount) {
+		DRM_ERROR("Invalid active buffer %d\n", args->active);
+		goto out;
+	}
+
+	priv->active = args->active;
+	ret = 0;
+
+out:
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
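+/*
+ * Two fault handlers are used: KMEM (vmalloc backed) objects resolve
+ * faults to the underlying struct page, while PMEM/contiguous objects are
+ * mapped by inserting the raw pfn (their VMAs are marked VM_PFNMAP in
+ * msm_drm_gem_mmap() below).
+ */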
+int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	struct drm_kgsl_gem_object *priv;
+	unsigned long offset, pg;
+	struct page *page;
+
+	mutex_lock(&dev->struct_mutex);
+
+	priv = obj->driver_private;
+
+	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
+	pg = (unsigned long) priv->memdesc.hostptr + offset;
+
+	page = vmalloc_to_page((void *) pg);
+	if (!page) {
+		mutex_unlock(&dev->struct_mutex);
+		return VM_FAULT_SIGBUS;
+	}
+
+	get_page(page);
+	vmf->page = page;
+
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
+	struct drm_kgsl_gem_object *priv;
+	unsigned long offset, pfn;
+	int ret = 0;
+
+	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	mutex_lock(&dev->struct_mutex);
+
+	priv = obj->driver_private;
+
+	pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
+	ret = vm_insert_pfn(vma,
+			    (unsigned long) vmf->virtual_address, pfn);
+	mutex_unlock(&dev->struct_mutex);
+
+	switch (ret) {
+	case -ENOMEM:
+	case -EAGAIN:
+		return VM_FAULT_OOM;
+	case -EFAULT:
+		return VM_FAULT_SIGBUS;
+	default:
+		return VM_FAULT_NOPAGE;
+	}
+}
+
+static struct vm_operations_struct kgsl_gem_kmem_vm_ops = {
+	.fault = kgsl_gem_kmem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static struct vm_operations_struct kgsl_gem_phys_vm_ops = {
+	.fault = kgsl_gem_phys_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+/* This is a clone of the standard drm_gem_mmap function modified to allow
+   us to properly map KMEM regions as well as the PMEM regions */
+
+int msm_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_local_map *map = NULL;
+	struct drm_gem_object *obj;
+	struct drm_hash_item *hash;
+	struct drm_kgsl_gem_object *gpriv;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+		mutex_unlock(&dev->struct_mutex);
+		return drm_mmap(filp, vma);
+	}
+
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	if (!map ||
+	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+		ret =  -EPERM;
+		goto out_unlock;
+	}
+
+	/* Check for valid size. */
+	if (map->size < vma->vm_end - vma->vm_start) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	obj = map->handle;
+
+	gpriv = obj->driver_private;
+
+	/* VM_PFNMAP is only for memory that doesn't use struct page
+	 * in other words, not "normal" memory.  If you try to use it
+	 * with "normal" memory then the mappings don't get flushed. */
+
+	if (TYPE_IS_MEM(gpriv->type)) {
+		vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
+		vma->vm_ops = &kgsl_gem_kmem_vm_ops;
+	} else {
+		vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP |
+			VM_DONTEXPAND;
+		vma->vm_ops = &kgsl_gem_phys_vm_ops;
+	}
+
+	vma->vm_private_data = map->handle;
+
+	/* Take care of requested caching policy */
+	if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM ||
+	    gpriv->type & DRM_KGSL_GEM_CACHE_MASK) {
+		if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACKWA)
+			vma->vm_page_prot =
+			pgprot_writebackwacache(vma->vm_page_prot);
+		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WBACK)
+				vma->vm_page_prot =
+				pgprot_writebackcache(vma->vm_page_prot);
+		else if (gpriv->type & DRM_KGSL_GEM_CACHE_WTHROUGH)
+				vma->vm_page_prot =
+				pgprot_writethroughcache(vma->vm_page_prot);
+		else
+			vma->vm_page_prot =
+			pgprot_writecombine(vma->vm_page_prot);
+	} else {
+		if (gpriv->type == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE)
+			vma->vm_page_prot =
+			pgprot_noncached(vma->vm_page_prot);
+		else
+			/* default pmem is WC */
+			vma->vm_page_prot =
+			pgprot_writecombine(vma->vm_page_prot);
+	}
+
+	/* flush out existing KMEM cached mappings if new ones are
+	 * of uncached type */
+	if (IS_MEM_UNCACHED(gpriv->type))
+		kgsl_cache_range_op(&gpriv->memdesc,
+				    KGSL_CACHE_OP_FLUSH);
+
+	/* Add the other memory types here */
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+
+	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+	drm_vm_open_locked(vma);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
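+/*
+ * Release every buffer attached to a fence and mark the fence entry as
+ * empty.  When check_waiting is set, the next process (if any) sleeping
+ * on each buffer's wait list is woken so it can take the lock.
+ */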
+void
+cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
+{
+	int j;
+	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
+	struct drm_kgsl_gem_object *unlock_obj;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
+
+	fence->ts_valid = 0;
+	fence->timestamp = -1;
+	fence->ts_device = -1;
+
+	/* Walk the list of buffers in this fence and clean up the */
+	/* references. Note that this can cause memory allocations */
+	/* to be freed */
+	for (j = fence->num_buffers; j > 0; j--) {
+		this_fence_entry =
+				(struct drm_kgsl_gem_object_fence_list_entry *)
+				fence->buffers_in_fence.prev;
+
+		this_fence_entry->in_use = 0;
+		obj = this_fence_entry->gem_obj;
+		unlock_obj = obj->driver_private;
+
+		/* Delete it from the list */
+
+		list_del(&this_fence_entry->list);
+
+		/* we are unlocking - see if there are other pids waiting */
+		if (check_waiting) {
+			if (!list_empty(&unlock_obj->wait_list)) {
+				lock_next =
+				(struct drm_kgsl_gem_object_wait_list_entry *)
+					unlock_obj->wait_list.prev;
+
+				list_del((struct list_head *)&lock_next->list);
+
+				unlock_obj->lockpid = 0;
+				wake_up_interruptible(
+						&lock_next->process_wait_q);
+				lock_next->pid = 0;
+
+			} else {
+				/* List is empty so set pid to 0 */
+				unlock_obj->lockpid = 0;
+			}
+		}
+
+		drm_gem_object_unreference(obj);
+	}
+	/* here all the buffers in the fence are released */
+	/* clear the fence entry */
+	fence->fence_id = ENTRY_EMPTY;
+}
+
+int
+find_empty_fence(void)
+{
+	int i;
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
+			gem_buf_fence[i].fence_id = fence_id++;
+			gem_buf_fence[i].ts_valid = 0;
+			INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
+			if (fence_id == 0xFFFFFFF0)
+				fence_id = 1;
+			return i;
+		} else {
+
+			/* Look for entries to be cleaned up */
+			if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
+				cleanup_fence(&gem_buf_fence[i], 0);
+		}
+	}
+
+	return ENTRY_EMPTY;
+}
+
+int
+find_fence(int index)
+{
+	int i;
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		if (gem_buf_fence[i].fence_id == index)
+			return i;
+	}
+
+	return ENTRY_EMPTY;
+}
+
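+/*
+ * Called once the timestamp armed for a fence has expired: wake any
+ * processes waiting on the fence's buffers and mark the fence as needing
+ * cleanup so that find_empty_fence() can reclaim it later.
+ */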
+void
+wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
+{
+	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
+	struct drm_kgsl_gem_object_wait_list_entry *lock_next;
+	struct drm_kgsl_gem_object *unlock_obj;
+	struct drm_gem_object *obj;
+
+	/* TS has expired when we get here */
+	fence->ts_valid = 0;
+	fence->timestamp = -1;
+	fence->ts_device = -1;
+
+	list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
+		obj = this_fence_entry->gem_obj;
+		unlock_obj = obj->driver_private;
+
+		if (!list_empty(&unlock_obj->wait_list)) {
+			lock_next =
+				(struct drm_kgsl_gem_object_wait_list_entry *)
+					unlock_obj->wait_list.prev;
+
+			/* Unblock the pid */
+			lock_next->pid = 0;
+
+			/* Delete it from the list */
+			list_del((struct list_head *)&lock_next->list);
+
+			unlock_obj->lockpid = 0;
+			wake_up_interruptible(&lock_next->process_wait_q);
+
+		} else {
+			/* List is empty so set pid to 0 */
+			unlock_obj->lockpid = 0;
+		}
+	}
+	fence->fence_id = ENTRY_NEEDS_CLEANUP;  /* Mark it as needing cleanup */
+}
+
+static int kgsl_ts_notifier_cb(struct notifier_block *blk,
+			       unsigned long code, void *_param)
+{
+	struct drm_kgsl_gem_object_fence *fence;
+	struct kgsl_device *device = kgsl_get_device(code);
+	int i;
+
+	/* loop through the fences to see what things can be processed */
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		fence = &gem_buf_fence[i];
+		if (!fence->ts_valid || fence->ts_device != code)
+			continue;
+
+		if (kgsl_check_timestamp(device, fence->timestamp))
+			wakeup_fence_entries(fence);
+	}
+
+	return 0;
+}
+
+int
+kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
+						   struct drm_file *file_priv)
+{
+	/* The purpose of this function is to lock a given set of handles. */
+	/* The driver will maintain a list of locked handles. */
+	/* If a request comes in for a handle that's locked the thread will */
+	/* block until it's no longer in use. */
+
+	struct drm_kgsl_gem_lock_handles *args = data;
+	struct drm_gem_object *obj;
+	struct drm_kgsl_gem_object *priv;
+	struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
+	struct drm_kgsl_gem_object_fence *fence;
+	struct drm_kgsl_gem_object_wait_list_entry *lock_item;
+	int i, j;
+	int result = 0;
+	uint32_t *lock_list;
+	uint32_t *work_list = NULL;
+	int32_t fence_index;
+
+	/* copy in the data from user space */
+	lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
+	if (!lock_list) {
+		DRM_ERROR("Unable allocate memory for lock list\n");
+		result = -ENOMEM;
+		goto error;
+	}
+
+	if (copy_from_user(lock_list, args->handle_list,
+			   sizeof(uint32_t) * args->num_handles)) {
+		DRM_ERROR("Unable to copy the lock list from the user\n");
+		result = -EFAULT;
+		goto free_handle_list;
+	}
+
+	work_list = lock_list;
+	mutex_lock(&dev->struct_mutex);
+
+	/* build the fence for this group of handles */
+	fence_index = find_empty_fence();
+	if (fence_index == ENTRY_EMPTY) {
+		DRM_ERROR("Unable to find a empty fence\n");
+		args->lock_id = 0xDEADBEEF;
+		result = -EFAULT;
+		goto out_unlock;
+	}
+
+	fence = &gem_buf_fence[fence_index];
+	gem_buf_fence[fence_index].num_buffers = args->num_handles;
+	args->lock_id = gem_buf_fence[fence_index].fence_id;
+
+	for (j = args->num_handles; j > 0; j--, lock_list++) {
+		obj = drm_gem_object_lookup(dev, file_priv, *lock_list);
+
+		if (obj == NULL) {
+			DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
+			result = -EBADF;
+			goto out_unlock;
+		}
+
+		priv = obj->driver_private;
+		this_fence_entry = NULL;
+
+		/* get a fence entry to hook into the fence */
+		for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+			if (!priv->fence_entries[i].in_use) {
+				this_fence_entry = &priv->fence_entries[i];
+				this_fence_entry->in_use = 1;
+				break;
+			}
+		}
+
+		if (this_fence_entry == NULL) {
+			fence->num_buffers = 0;
+			fence->fence_id = ENTRY_EMPTY;
+			args->lock_id = 0xDEADBEAD;
+			result = -EFAULT;
+			drm_gem_object_unreference(obj);
+			goto out_unlock;
+		}
+
+		/* We're trying to lock - add to a fence */
+		list_add((struct list_head *)this_fence_entry,
+				 &gem_buf_fence[fence_index].buffers_in_fence);
+		if (priv->lockpid) {
+			if (priv->lockpid == args->pid) {
+				/*
+				 * Now that operations run asynchronously this
+				 * happens when an earlier op hasn't completed,
+				 * so the handle is already locked by the
+				 * calling pid - just skip it.
+				 */
+				continue;
+			}
+
+			/* if a pid already had it locked */
+			/* create and add to wait list */
+			for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
+				if (priv->wait_entries[i].in_use == 0) {
+					/* this one is empty */
+					lock_item = &priv->wait_entries[i];
+					lock_item->in_use = 1;
+					lock_item->pid = args->pid;
+					INIT_LIST_HEAD((struct list_head *)
+						&priv->wait_entries[i]);
+					break;
+				}
+			}
+
+			if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
+				result = -EFAULT;
+				drm_gem_object_unreference(obj);
+				goto out_unlock;
+			}
+
+			list_add_tail((struct list_head *)&lock_item->list,
+							&priv->wait_list);
+			mutex_unlock(&dev->struct_mutex);
+			/* here we need to block */
+			wait_event_interruptible_timeout(
+					priv->wait_entries[i].process_wait_q,
+					(priv->lockpid == 0),
+					msecs_to_jiffies(64));
+			mutex_lock(&dev->struct_mutex);
+			lock_item->in_use = 0;
+		}
+
+		/* Getting here means no one currently holds the lock */
+		priv->lockpid = args->pid;
+
+		args->lock_id = gem_buf_fence[fence_index].fence_id;
+	}
+	fence->lockpid = args->pid;
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+free_handle_list:
+	kfree(work_list);
+
+error:
+	return result;
+}
+
+int
+kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_unlock_handles *args = data;
+	int result = 0;
+	int32_t fence_index;
+
+	mutex_lock(&dev->struct_mutex);
+	fence_index = find_fence(args->lock_id);
+	if (fence_index == ENTRY_EMPTY) {
+		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
+		result = -EFAULT;
+		goto out_unlock;
+	}
+
+	cleanup_fence(&gem_buf_fence[fence_index], 1);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return result;
+}
+
+int
+kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_kgsl_gem_unlock_on_ts *args = data;
+	int result = 0;
+	int ts_done = 0;
+	int32_t fence_index, ts_device;
+	struct drm_kgsl_gem_object_fence *fence;
+	struct kgsl_device *device;
+
+	if (args->type == DRM_KGSL_GEM_TS_3D)
+		ts_device = KGSL_DEVICE_3D0;
+	else if (args->type == DRM_KGSL_GEM_TS_2D)
+		ts_device = KGSL_DEVICE_2D0;
+	else {
+		result = -EINVAL;
+		goto error;
+	}
+
+	device = kgsl_get_device(ts_device);
+	ts_done = kgsl_check_timestamp(device, args->timestamp);
+
+	mutex_lock(&dev->struct_mutex);
+
+	fence_index = find_fence(args->lock_id);
+	if (fence_index == ENTRY_EMPTY) {
+		DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
+		result = -EFAULT;
+		goto out_unlock;
+	}
+
+	fence = &gem_buf_fence[fence_index];
+	fence->ts_device = ts_device;
+
+	if (!ts_done)
+		fence->ts_valid = 1;
+	else
+		cleanup_fence(fence, 1);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+error:
+	return result;
+}
+
+struct drm_ioctl_desc kgsl_drm_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
+		      kgsl_gem_set_bufcount_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
+				  kgsl_gem_lock_handle_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
+				  kgsl_gem_unlock_handle_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
+				  kgsl_gem_unlock_on_ts_ioctl, 0),
+	DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
+		      DRM_MASTER),
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_GEM,
+	.load = kgsl_drm_load,
+	.unload = kgsl_drm_unload,
+	.firstopen = kgsl_drm_firstopen,
+	.lastclose = kgsl_drm_lastclose,
+	.preclose = kgsl_drm_preclose,
+	.suspend = kgsl_drm_suspend,
+	.resume = kgsl_drm_resume,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.gem_init_object = kgsl_gem_init_object,
+	.gem_free_object = kgsl_gem_free_object,
+	.ioctls = kgsl_drm_ioctls,
+
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .unlocked_ioctl = drm_ioctl,
+		 .mmap = msm_drm_gem_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+		 },
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int kgsl_drm_init(struct platform_device *dev)
+{
+	int i;
+
+	driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
+	driver.platform_device = dev;
+
+	INIT_LIST_HEAD(&kgsl_mem_list);
+
+	for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+		gem_buf_fence[i].num_buffers = 0;
+		gem_buf_fence[i].ts_valid = 0;
+		gem_buf_fence[i].fence_id = ENTRY_EMPTY;
+	}
+
+	return drm_init(&driver);
+}
+
+void kgsl_drm_exit(void)
+{
+	drm_exit(&driver);
+}
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
new file mode 100644
index 0000000..9fafcf4
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_LOG_H
+#define __KGSL_LOG_H
+
+extern unsigned int kgsl_cff_dump_enable;
+
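+/*
+ * Per-device log verbosity: a message is emitted only when the device's
+ * corresponding log level (drv_log, cmd_log, ...) is at least 6 (INFO),
+ * 4 (WARN), 3 (ERR) or 2 (CRIT), matching the printk severity numbering.
+ */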
+#define KGSL_LOG_INFO(dev, lvl, fmt, args...) \
+	do { \
+		if ((lvl) >= 6)  \
+			dev_info(dev, "|%s| " fmt, \
+					__func__, ##args);\
+	} while (0)
+
+#define KGSL_LOG_WARN(dev, lvl, fmt, args...) \
+	do { \
+		if ((lvl) >= 4)  \
+			dev_warn(dev, "|%s| " fmt, \
+					__func__, ##args);\
+	} while (0)
+
+#define KGSL_LOG_ERR(dev, lvl, fmt, args...) \
+	do { \
+		if ((lvl) >= 3)  \
+			dev_err(dev, "|%s| " fmt, \
+					__func__, ##args);\
+	} while (0)
+
+#define KGSL_LOG_CRIT(dev, lvl, fmt, args...) \
+	do { \
+		if ((lvl) >= 2) \
+			dev_crit(dev, "|%s| " fmt, \
+					__func__, ##args);\
+	} while (0)
+
+#define KGSL_LOG_POSTMORTEM_WRITE(_dev, fmt, args...) \
+	do { dev_crit(_dev->dev, fmt, ##args); } while (0)
+
+#define KGSL_LOG_DUMP(_dev, fmt, args...)	dev_err(_dev->dev, fmt, ##args)
+
+#define KGSL_DRV_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_ERR(_dev, fmt, args...)  \
+KGSL_LOG_ERR(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->drv_log, fmt, ##args)
+
+#define KGSL_CMD_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->cmd_log, fmt, ##args)
+
+#define KGSL_CTXT_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_ERR(_dev, fmt, args...)  \
+KGSL_LOG_ERR(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->ctxt_log, fmt, ##args)
+
+#define KGSL_MEM_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_ERR(_dev, fmt, args...)  \
+KGSL_LOG_ERR(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->mem_log, fmt, ##args)
+
+#define KGSL_PWR_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
+
+/* Core error messages - these are for core KGSL functions that have
+   no device associated with them (such as memory) */
+
+#define KGSL_CORE_ERR(fmt, args...) \
+pr_err("kgsl: %s: " fmt, __func__, ##args)
+
+#endif /* __KGSL_LOG_H */
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
new file mode 100644
index 0000000..15ec0ec
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -0,0 +1,1151 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
+
+#define KGSL_MMU_ALIGN_SHIFT    13
+#define KGSL_MMU_ALIGN_MASK     (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
+
+#define GSL_PT_PAGE_BITS_MASK	0x00000007
+#define GSL_PT_PAGE_ADDR_MASK	PAGE_MASK
+
+static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
+
+static ssize_t
+sysfs_show_ptpool_entries(struct kobject *kobj,
+			  struct kobj_attribute *attr,
+			  char *buf)
+{
+	return sprintf(buf, "%d\n", kgsl_driver.ptpool.entries);
+}
+
+static ssize_t
+sysfs_show_ptpool_min(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 char *buf)
+{
+	return sprintf(buf, "%d\n", kgsl_driver.ptpool.static_entries);
+}
+
+static ssize_t
+sysfs_show_ptpool_chunks(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 char *buf)
+{
+	return sprintf(buf, "%d\n", kgsl_driver.ptpool.chunks);
+}
+
+static ssize_t
+sysfs_show_ptpool_ptsize(struct kobject *kobj,
+			 struct kobj_attribute *attr,
+			 char *buf)
+{
+	return sprintf(buf, "%d\n", kgsl_driver.ptpool.ptsize);
+}
+
+static struct kobj_attribute attr_ptpool_entries = {
+	.attr = { .name = "ptpool_entries", .mode = 0444 },
+	.show = sysfs_show_ptpool_entries,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_min = {
+	.attr = { .name = "ptpool_min", .mode = 0444 },
+	.show = sysfs_show_ptpool_min,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_chunks = {
+	.attr = { .name = "ptpool_chunks", .mode = 0444 },
+	.show = sysfs_show_ptpool_chunks,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_ptsize = {
+	.attr = { .name = "ptpool_ptsize", .mode = 0444 },
+	.show = sysfs_show_ptpool_ptsize,
+	.store = NULL,
+};
+
+static struct attribute *ptpool_attrs[] = {
+	&attr_ptpool_entries.attr,
+	&attr_ptpool_min.attr,
+	&attr_ptpool_chunks.attr,
+	&attr_ptpool_ptsize.attr,
+	NULL,
+};
+
+static struct attribute_group ptpool_attr_group = {
+	.attrs = ptpool_attrs,
+};
+
+static int
+_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
+{
+	struct kgsl_ptpool_chunk *chunk;
+	size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);
+
+	BUG_ON(count == 0);
+
+	if (get_order(size) >= MAX_ORDER) {
+		KGSL_CORE_ERR("ptpool allocation is too big: %zu\n", size);
+		return -EINVAL;
+	}
+
+	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+	if (chunk == NULL) {
+		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*chunk));
+		return -ENOMEM;
+	}
+
+	chunk->size = size;
+	chunk->count = count;
+	chunk->dynamic = dynamic;
+
+	chunk->data = dma_alloc_coherent(NULL, size,
+					 &chunk->phys, GFP_KERNEL);
+
+	if (chunk->data == NULL) {
+		KGSL_CORE_ERR("dma_alloc_coherent(%zu) failed\n", size);
+		goto err;
+	}
+
+	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);
+
+	if (chunk->bitmap == NULL) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			BITS_TO_LONGS(count) * 4);
+		goto err_dma;
+	}
+
+	list_add_tail(&chunk->list, &pool->list);
+
+	pool->chunks++;
+	pool->entries += count;
+
+	if (!dynamic)
+		pool->static_entries += count;
+
+	return 0;
+
+err_dma:
+	dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
+err:
+	kfree(chunk);
+	return -ENOMEM;
+}
+
+static void *
+_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
+{
+	struct kgsl_ptpool_chunk *chunk;
+
+	list_for_each_entry(chunk, &pool->list, list) {
+		int bit = find_first_zero_bit(chunk->bitmap, chunk->count);
+
+		if (bit >= chunk->count)
+			continue;
+
+		set_bit(bit, chunk->bitmap);
+		*physaddr = chunk->phys + (bit * pool->ptsize);
+
+		return chunk->data + (bit * pool->ptsize);
+	}
+
+	return NULL;
+}
+
+/**
+ * kgsl_ptpool_add
+ * @pool:  A pointer to a ptpool structure
+ * @count: Number of entries to add
+ *
+ * Add static entries to the pagetable pool.
+ */
+
+int
+kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
+{
+	int ret = 0;
+	BUG_ON(count == 0);
+
+	mutex_lock(&pool->lock);
+
+	/* Only 4MB can be allocated in one chunk, so larger allocations
+	   need to be split into multiple sections */
+
+	while (count) {
+		int entries = ((count * pool->ptsize) > SZ_4M) ?
+			SZ_4M / pool->ptsize : count;
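+		/*
+		 * e.g. with a 32K pagetable size, a request for 384 entries
+		 * (12M) is split into three 4M chunks of 128 entries each.
+		 */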
+
+		/* Add the entries as static, i.e. they don't ever stand
+		   a chance of being removed */
+
+		ret =  _kgsl_ptpool_add_entries(pool, entries, 0);
+		if (ret)
+			break;
+
+		count -= entries;
+	}
+
+	mutex_unlock(&pool->lock);
+	return ret;
+}
+
+/**
+ * kgsl_ptpool_alloc
+ * @pool:  A pointer to a ptpool structure
+ * @physaddr: A pointer to store the physical address of the chunk
+ *
+ * Allocate a pagetable from the pool.  Returns the virtual address
+ * of the pagetable, the physical address is returned in physaddr
+ */
+
+void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool, unsigned int *physaddr)
+{
+	void *addr = NULL;
+	int ret;
+
+	mutex_lock(&pool->lock);
+	addr = _kgsl_ptpool_get_entry(pool, physaddr);
+	if (addr)
+		goto done;
+
+	/* Add a chunk for 1 more pagetable and mark it as dynamic */
+	ret = _kgsl_ptpool_add_entries(pool, 1, 1);
+
+	if (ret)
+		goto done;
+
+	addr = _kgsl_ptpool_get_entry(pool, physaddr);
+done:
+	mutex_unlock(&pool->lock);
+	return addr;
+}
+
+static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
+{
+	list_del(&chunk->list);
+
+	if (chunk->data)
+		dma_free_coherent(NULL, chunk->size, chunk->data,
+			chunk->phys);
+	kfree(chunk->bitmap);
+	kfree(chunk);
+}
+
+/**
+ * kgsl_ptpool_free
+ * @pool:  A pointer to a ptpool structure
+ * @addr: A pointer to the virtual address to free
+ *
+ * Free a pagetable allocated from the pool
+ */
+
+void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
+{
+	struct kgsl_ptpool_chunk *chunk, *tmp;
+
+	if (pool == NULL || addr == NULL)
+		return;
+
+	mutex_lock(&pool->lock);
+	list_for_each_entry_safe(chunk, tmp, &pool->list, list)  {
+		if (addr >=  chunk->data &&
+		    addr < chunk->data + chunk->size) {
+			int bit = ((unsigned long) (addr - chunk->data)) /
+				pool->ptsize;
+
+			clear_bit(bit, chunk->bitmap);
+			memset(addr, 0, pool->ptsize);
+
+			if (chunk->dynamic &&
+				bitmap_empty(chunk->bitmap, chunk->count))
+				_kgsl_ptpool_rm_chunk(chunk);
+
+			break;
+		}
+	}
+
+	mutex_unlock(&pool->lock);
+}
+
+void kgsl_ptpool_destroy(struct kgsl_ptpool *pool)
+{
+	struct kgsl_ptpool_chunk *chunk, *tmp;
+
+	if (pool == NULL)
+		return;
+
+	mutex_lock(&pool->lock);
+	list_for_each_entry_safe(chunk, tmp, &pool->list, list)
+		_kgsl_ptpool_rm_chunk(chunk);
+	mutex_unlock(&pool->lock);
+
+	memset(pool, 0, sizeof(*pool));
+}
+
+/**
+ * kgsl_ptpool_init
+ * @pool:  A pointer to a ptpool structure to initialize
+ * @ptsize: The size of each pagetable in the pool
+ * @entries:  The number of initial entries to add to the pool
+ *
+ * Initialize a pool and allocate an initial chunk of entries.
+ */
+
+int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries)
+{
+	int ret = 0;
+	BUG_ON(ptsize == 0);
+
+	pool->ptsize = ptsize;
+	mutex_init(&pool->lock);
+	INIT_LIST_HEAD(&pool->list);
+
+	if (entries) {
+		ret = kgsl_ptpool_add(pool, entries);
+		if (ret)
+			return ret;
+	}
+
+	return sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
+}
+
+static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
+{
+	int i;
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		struct kgsl_device *device = kgsl_driver.devp[i];
+		if (device)
+			device->ftbl->cleanup_pt(device, pt);
+	}
+	return 0;
+}
+
+static void kgsl_destroy_pagetable(struct kref *kref)
+{
+	struct kgsl_pagetable *pagetable = container_of(kref,
+		struct kgsl_pagetable, refcount);
+	unsigned long flags;
+
+	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+	list_del(&pagetable->list);
+	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
+	pagetable_remove_sysfs_objects(pagetable);
+
+	kgsl_cleanup_pt(pagetable);
+
+	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
+
+	kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;
+
+	if (pagetable->pool)
+		gen_pool_destroy(pagetable->pool);
+
+	kfree(pagetable->tlbflushfilter.base);
+	kfree(pagetable);
+}
+
+static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
+{
+	if (pagetable)
+		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
+}
+
+static struct kgsl_pagetable *
+kgsl_get_pagetable(unsigned long name)
+{
+	struct kgsl_pagetable *pt, *ret = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+		if (pt->name == name) {
+			ret = pt;
+			kref_get(&ret->refcount);
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+	return ret;
+}
+
+static struct kgsl_pagetable *
+_get_pt_from_kobj(struct kobject *kobj)
+{
+	unsigned long ptname;
+
+	if (!kobj)
+		return NULL;
+
+	if (sscanf(kobj->name, "%lu", &ptname) != 1)
+		return NULL;
+
+	return kgsl_get_pagetable(ptname);
+}
+
+static ssize_t
+sysfs_show_entries(struct kobject *kobj,
+		   struct kobj_attribute *attr,
+		   char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "%d\n", pt->stats.entries);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static ssize_t
+sysfs_show_mapped(struct kobject *kobj,
+		  struct kobj_attribute *attr,
+		  char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "%d\n", pt->stats.mapped);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static ssize_t
+sysfs_show_va_range(struct kobject *kobj,
+		    struct kobj_attribute *attr,
+		    char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "0x%x\n", pt->va_range);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static ssize_t
+sysfs_show_max_mapped(struct kobject *kobj,
+		      struct kobj_attribute *attr,
+		      char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "%d\n", pt->stats.max_mapped);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static ssize_t
+sysfs_show_max_entries(struct kobject *kobj,
+		       struct kobj_attribute *attr,
+		       char *buf)
+{
+	struct kgsl_pagetable *pt;
+	int ret = 0;
+
+	pt = _get_pt_from_kobj(kobj);
+
+	if (pt)
+		ret += sprintf(buf, "%d\n", pt->stats.max_entries);
+
+	kgsl_put_pagetable(pt);
+	return ret;
+}
+
+static struct kobj_attribute attr_entries = {
+	.attr = { .name = "entries", .mode = 0444 },
+	.show = sysfs_show_entries,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_mapped = {
+	.attr = { .name = "mapped", .mode = 0444 },
+	.show = sysfs_show_mapped,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_va_range = {
+	.attr = { .name = "va_range", .mode = 0444 },
+	.show = sysfs_show_va_range,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_max_mapped = {
+	.attr = { .name = "max_mapped", .mode = 0444 },
+	.show = sysfs_show_max_mapped,
+	.store = NULL,
+};
+
+static struct kobj_attribute attr_max_entries = {
+	.attr = { .name = "max_entries", .mode = 0444 },
+	.show = sysfs_show_max_entries,
+	.store = NULL,
+};
+
+static struct attribute *pagetable_attrs[] = {
+	&attr_entries.attr,
+	&attr_mapped.attr,
+	&attr_va_range.attr,
+	&attr_max_mapped.attr,
+	&attr_max_entries.attr,
+	NULL,
+};
+
+static struct attribute_group pagetable_attr_group = {
+	.attrs = pagetable_attrs,
+};
+
+static void
+pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
+{
+	if (pagetable->kobj)
+		sysfs_remove_group(pagetable->kobj,
+				   &pagetable_attr_group);
+
+	kobject_put(pagetable->kobj);
+}
+
+static int
+pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
+{
+	char ptname[16];
+	int ret = -ENOMEM;
+
+	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
+	pagetable->kobj = kobject_create_and_add(ptname,
+						 kgsl_driver.ptkobj);
+	if (pagetable->kobj == NULL)
+		goto err;
+
+	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
+
+err:
+	if (ret) {
+		if (pagetable->kobj)
+			kobject_put(pagetable->kobj);
+
+		pagetable->kobj = NULL;
+	}
+
+	return ret;
+}
+
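+/*
+ * Each PTE maps one page, so the index of the PTE for a given GPU virtual
+ * address is simply its page offset from the start of the pagetable's
+ * address range (e.g. va_base + 0x6000 is PTE 6 with 4K pages).
+ */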
+static inline uint32_t
+kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
+{
+	return (va - pt->va_base) >> PAGE_SHIFT;
+}
+
+static inline void
+kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
+{
+	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
+
+	writel_relaxed(val, &baseptr[pte]);
+}
+
+static inline uint32_t
+kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
+{
+	uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
+	uint32_t ret = readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
+	return ret;
+}
+
+void kgsl_mh_intrcallback(struct kgsl_device *device)
+{
+	unsigned int status = 0;
+	unsigned int reg;
+
+	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
+	kgsl_regread(device, MH_AXI_ERROR, &reg);
+
+	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
+		KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
+	else if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
+		KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
+	else if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT) {
+		unsigned int ptbase;
+		struct kgsl_pagetable *pt;
+		int ptid = -1;
+
+		kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
+		kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
+
+		spin_lock(&kgsl_driver.ptlock);
+		list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+			if (ptbase == pt->base.gpuaddr) {
+				ptid = (int) pt->name;
+				break;
+			}
+		}
+		spin_unlock(&kgsl_driver.ptlock);
+
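+		/* The fault register (as decoded below) packs the faulting
+		 * page address, a read/write bit and the AXI client id. */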
+		KGSL_MEM_CRIT(device,
+			"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
+			reg & ~(PAGE_SIZE - 1), ptid,
+			reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
+	} else
+		KGSL_MEM_WARN(device,
+			"bad bits in REG_MH_INTERRUPT_STATUS %08x\n", status);
+
+	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
+
+	/* TODO: figure out how to handle error interrupts.
+	 * Specifically, page faults should probably nuke the client that
+	 * caused them, but we don't have enough info to figure that out yet.
+	 */
+}
+EXPORT_SYMBOL(kgsl_mh_intrcallback);
+
+static int kgsl_setup_pt(struct kgsl_pagetable *pt)
+{
+	int i = 0;
+	int status = 0;
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		struct kgsl_device *device = kgsl_driver.devp[i];
+		if (device) {
+			status = device->ftbl->setup_pt(device, pt);
+			if (status)
+				goto error_pt;
+		}
+	}
+	return status;
+error_pt:
+	while (i >= 0) {
+		struct kgsl_device *device = kgsl_driver.devp[i];
+		if (device)
+			device->ftbl->cleanup_pt(device, pt);
+		i--;
+	}
+	return status;
+}
+
+static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
+				unsigned int name)
+{
+	int status = 0;
+	struct kgsl_pagetable *pagetable = NULL;
+	unsigned long flags;
+
+	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
+	if (pagetable == NULL) {
+		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
+			sizeof(struct kgsl_pagetable));
+		return NULL;
+	}
+
+	kref_init(&pagetable->refcount);
+
+	spin_lock_init(&pagetable->lock);
+	pagetable->tlb_flags = 0;
+	pagetable->name = name;
+	pagetable->va_base = KGSL_PAGETABLE_BASE;
+	pagetable->va_range = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
+	pagetable->last_superpte = 0;
+	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(pagetable->va_range);
+
+	pagetable->tlbflushfilter.size = (pagetable->va_range /
+				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
+	pagetable->tlbflushfilter.base = (unsigned int *)
+			kzalloc(pagetable->tlbflushfilter.size, GFP_KERNEL);
+	if (!pagetable->tlbflushfilter.base) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+			pagetable->tlbflushfilter.size);
+		goto err_alloc;
+	}
+	GSL_TLBFLUSH_FILTER_RESET();
+
+	pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (pagetable->pool == NULL) {
+		KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
+		goto err_flushfilter;
+	}
+
+	if (gen_pool_add(pagetable->pool, pagetable->va_base,
+				pagetable->va_range, -1)) {
+		KGSL_CORE_ERR("gen_pool_add failed\n");
+		goto err_pool;
+	}
+
+	pagetable->base.hostptr = kgsl_ptpool_alloc(&kgsl_driver.ptpool,
+		&pagetable->base.physaddr);
+
+	if (pagetable->base.hostptr == NULL)
+		goto err_pool;
+
+	/* ptpool allocations are from coherent memory, so update the
+	   device statistics accordingly */
+
+	KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
+		       kgsl_driver.stats.coherent_max);
+
+	pagetable->base.gpuaddr = pagetable->base.physaddr;
+	pagetable->base.size = KGSL_PAGETABLE_SIZE;
+
+	status = kgsl_setup_pt(pagetable);
+	if (status)
+		goto err_free_sharedmem;
+
+	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
+	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
+	/* Create the sysfs entries */
+	pagetable_add_sysfs_objects(pagetable);
+
+	return pagetable;
+
+err_free_sharedmem:
+	kgsl_ptpool_free(&kgsl_driver.ptpool, pagetable->base.hostptr);
+err_pool:
+	gen_pool_destroy(pagetable->pool);
+err_flushfilter:
+	kfree(pagetable->tlbflushfilter.base);
+err_alloc:
+	kfree(pagetable);
+
+	return NULL;
+}
+
+struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
+{
+	struct kgsl_pagetable *pt;
+
+	pt = kgsl_get_pagetable(name);
+
+	if (pt == NULL)
+		pt = kgsl_mmu_createpagetableobject(name);
+
+	return pt;
+}
+
+void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
+{
+	kgsl_put_pagetable(pagetable);
+}
+
+void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags)
+{
+	if (!kgsl_mmu_enabled())
+		return;
+
+	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+		kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
+		kgsl_regwrite(device, MH_MMU_PT_BASE,
+			device->mmu.hwpagetable->base.gpuaddr);
+	}
+
+	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+		/* Invalidate all and tc */
+		kgsl_regwrite(device, MH_MMU_INVALIDATE,  0x00000003);
+	}
+}
+EXPORT_SYMBOL(kgsl_default_setstate);
+
+void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
+{
+	if (device->ftbl->setstate)
+		device->ftbl->setstate(device, flags);
+}
+EXPORT_SYMBOL(kgsl_setstate);
+
+void kgsl_mmu_setstate(struct kgsl_device *device,
+				struct kgsl_pagetable *pagetable)
+{
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (mmu->flags & KGSL_FLAGS_STARTED) {
+		/* If the specified pagetable is not already current, set up
+		 * the MMU to use it
+		 */
+		if (mmu->hwpagetable != pagetable) {
+			mmu->hwpagetable = pagetable;
+			spin_lock(&mmu->hwpagetable->lock);
+			mmu->hwpagetable->tlb_flags &= ~(1<<device->id);
+			spin_unlock(&mmu->hwpagetable->lock);
+
+			/* call device specific set page table */
+			kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
+				KGSL_MMUFLAGS_PTUPDATE);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_mmu_setstate);
+
+int kgsl_mmu_init(struct kgsl_device *device)
+{
+	/*
+	 * initialize the device MMU
+	 *
+	 * call this with the global lock held
+	 */
+	int status = 0;
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	mmu->device = device;
+
+	/* make sure aligned to pagesize */
+	BUG_ON(mmu->mpu_base & (PAGE_SIZE - 1));
+	BUG_ON((mmu->mpu_base + mmu->mpu_range) & (PAGE_SIZE - 1));
+
+	/* sub-client MMU lookups require address translation */
+	if ((mmu->config & ~0x1) > 0) {
+		/* make sure the virtual address range is a multiple of 64KB */
+		BUG_ON(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1));
+
+		/* allocate memory used for completing r/w operations that
+		 * cannot be mapped by the MMU
+		 */
+		status = kgsl_allocate_contiguous(&mmu->dummyspace, 64);
+		if (!status)
+			kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
+					   mmu->dummyspace.size);
+	}
+
+	return status;
+}
+
+int kgsl_mmu_start(struct kgsl_device *device)
+{
+	/*
+	 * initialize the device MMU
+	 *
+	 * call this with the global lock held
+	 */
+
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (mmu->flags & KGSL_FLAGS_STARTED)
+		return 0;
+
+	/* MMU not enabled */
+	if ((mmu->config & 0x1) == 0)
+		return 0;
+
+	mmu->flags |= KGSL_FLAGS_STARTED;
+
+	/* setup MMU and sub-client behavior */
+	kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
+
+	/*
+	 * Interrupts are enabled on a per-device level when
+	 * kgsl_pwrctrl_irq() is called
+	*/
+
+	/* idle device */
+	kgsl_idle(device,  KGSL_TIMEOUT_DEFAULT);
+
+	/* define physical memory range accessible by the core */
+	kgsl_regwrite(device, MH_MMU_MPU_BASE, mmu->mpu_base);
+	kgsl_regwrite(device, MH_MMU_MPU_END,
+			mmu->mpu_base + mmu->mpu_range);
+
+	/* sub-client MMU lookups require address translation */
+	if ((mmu->config & ~0x1) > 0) {
+
+		kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
+				   mmu->dummyspace.size);
+
+		/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
+		 * to complete transactions in case of an MMU fault. Note that
+		 * we'll leave the bottom 32 bytes of the dummyspace for other
+		 * purposes (e.g. use it when dummy read cycles are needed
+		 * for other blocks) */
+		kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
+			mmu->dummyspace.physaddr + 32);
+
+		if (mmu->defaultpagetable == NULL)
+			mmu->defaultpagetable =
+				kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+
+		/* Return error if the default pagetable doesn't exist */
+		if (mmu->defaultpagetable == NULL)
+			return -ENOMEM;
+
+		mmu->hwpagetable = mmu->defaultpagetable;
+
+		kgsl_regwrite(device, MH_MMU_PT_BASE,
+			      mmu->hwpagetable->base.gpuaddr);
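+		/* MH_MMU_VA_RANGE packs the VA base in the upper bits and the
+		 * range (in 64KB units -- hence the >> 16 and the 64KB-multiple
+		 * requirement checked in kgsl_mmu_init) in the low bits. */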
+		kgsl_regwrite(device, MH_MMU_VA_RANGE,
+			      (mmu->hwpagetable->va_base |
+			      (mmu->hwpagetable->va_range >> 16)));
+		kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_start);
+
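+/*
+ * Walk the current process' page tables (pgd -> pmd -> pte) to resolve a
+ * virtual address to the physical address of its backing page.  Returns 0
+ * if any level of the walk is missing or invalid.
+ */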
+unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
+{
+	unsigned int physaddr = 0;
+	pgd_t *pgd_ptr = NULL;
+	pmd_t *pmd_ptr = NULL;
+	pte_t *pte_ptr = NULL, pte;
+
+	pgd_ptr = pgd_offset(current->mm, (unsigned long) virtaddr);
+	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
+		KGSL_CORE_ERR("Invalid pgd entry\n");
+		return 0;
+	}
+
+	pmd_ptr = pmd_offset(pgd_ptr, (unsigned long) virtaddr);
+	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
+		KGSL_CORE_ERR("Invalid pmd entry\n");
+		return 0;
+	}
+
+	pte_ptr = pte_offset_map(pmd_ptr, (unsigned long) virtaddr);
+	if (!pte_ptr) {
+		KGSL_CORE_ERR("pte_offset_map failed\n");
+		return 0;
+	}
+	pte = *pte_ptr;
+	physaddr = pte_pfn(pte);
+	pte_unmap(pte_ptr);
+	physaddr <<= PAGE_SHIFT;
+	return physaddr;
+}
+
+int
+kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+				struct kgsl_memdesc *memdesc,
+				unsigned int protflags)
+{
+	int numpages;
+	unsigned int pte, ptefirst, ptelast, physaddr;
+	int flushtlb;
+	unsigned int offset = 0;
+
+	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
+	BUG_ON(protflags == 0);
+
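+	/* Carve a GPU virtual range for this allocation out of the
+	 * pagetable's gen_pool, aligned to 1 << KGSL_MMU_ALIGN_SHIFT
+	 * (8KB with the default shift of 13). */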
+	memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
+		memdesc->size, KGSL_MMU_ALIGN_SHIFT);
+
+	if (memdesc->gpuaddr == 0) {
+		KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
+		KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
+				pagetable->name, pagetable->stats.mapped,
+				pagetable->stats.entries);
+		return -ENOMEM;
+	}
+
+	numpages = (memdesc->size >> PAGE_SHIFT);
+
+	ptefirst = kgsl_pt_entry_get(pagetable, memdesc->gpuaddr);
+	ptelast = ptefirst + numpages;
+
+	pte = ptefirst;
+	flushtlb = 0;
+
+	/* tlb needs to be flushed when the first and last pte are not at
+	* superpte boundaries */
+	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
+		((ptelast + 1) & (GSL_PT_SUPER_PTE-1)) != 0)
+		flushtlb = 1;
+
+	spin_lock(&pagetable->lock);
+	for (pte = ptefirst; pte < ptelast; pte++, offset += PAGE_SIZE) {
+#ifdef VERBOSE_DEBUG
+		/* check if PTE exists */
+		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
+		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
+#endif
+		if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
+			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
+				flushtlb = 1;
+		/* mark pte as in use */
+
+		physaddr = memdesc->ops->physaddr(memdesc, offset);
+		BUG_ON(physaddr == 0);
+		kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
+	}
+
+	/* Keep track of the statistics for the sysfs files */
+
+	KGSL_STATS_ADD(1, pagetable->stats.entries,
+		       pagetable->stats.max_entries);
+
+	KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
+		       pagetable->stats.max_mapped);
+
+	/* Post all writes to the pagetable */
+	wmb();
+
+	/* If a flush is needed, mark every device as needing to invalidate
+	 * its TLB the next time it uses this pagetable */
+	if (flushtlb) {
+		/* set all devices as needing flushing */
+		pagetable->tlb_flags = UINT_MAX;
+		GSL_TLBFLUSH_FILTER_RESET();
+	}
+	spin_unlock(&pagetable->lock);
+
+	return 0;
+}
+
+int
+kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+		struct kgsl_memdesc *memdesc)
+{
+	unsigned int numpages;
+	unsigned int pte, ptefirst, ptelast, superpte;
+	unsigned int range = memdesc->size;
+
+	/* All GPU addresses as assigned are page aligned, but some
+	   functions perturb the gpuaddr with an offset, so apply the
+	   mask here to make sure we have the right address */
+
+	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
+
+	if (range == 0 || gpuaddr == 0)
+		return 0;
+
+	numpages = (range >> PAGE_SHIFT);
+	if (range & (PAGE_SIZE - 1))
+		numpages++;
+
+	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
+	ptelast = ptefirst + numpages;
+
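+	/* Mark every superpte (group of GSL_PT_SUPER_PTE PTEs) touched by
+	 * this range as dirty in the flush filter, so a later map that
+	 * reuses it forces a TLB flush. */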
+	spin_lock(&pagetable->lock);
+	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
+	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
+	for (pte = ptefirst; pte < ptelast; pte++) {
+#ifdef VERBOSE_DEBUG
+		/* check if PTE exists */
+		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
+#endif
+		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
+		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
+		if (pte == superpte)
+			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
+				GSL_PT_SUPER_PTE);
+	}
+
+	/* Remove the statistics */
+	pagetable->stats.entries--;
+	pagetable->stats.mapped -= range;
+
+	/* Post all writes to the pagetable */
+	wmb();
+
+	spin_unlock(&pagetable->lock);
+
+	gen_pool_free(pagetable->pool, gpuaddr, range);
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_unmap);
+
+int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+			struct kgsl_memdesc *memdesc, unsigned int protflags)
+{
+	int result = -EINVAL;
+	unsigned int gpuaddr = 0;
+
+	if (memdesc == NULL) {
+		KGSL_CORE_ERR("invalid memdesc\n");
+		goto error;
+	}
+
+	gpuaddr = memdesc->gpuaddr;
+
+	result = kgsl_mmu_map(pagetable, memdesc, protflags);
+	if (result)
+		goto error;
+
+	/* global mappings must have the same gpu address in all pagetables */
+	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
+		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
+			"gpu 0x%08x 0x%08x\n", pagetable, memdesc->physaddr,
+			gpuaddr, memdesc->gpuaddr);
+		goto error_unmap;
+	}
+	return result;
+error_unmap:
+	kgsl_mmu_unmap(pagetable, memdesc);
+error:
+	return result;
+}
+EXPORT_SYMBOL(kgsl_mmu_map_global);
+
+int kgsl_mmu_stop(struct kgsl_device *device)
+{
+	/*
+	 *  stop device mmu
+	 *
+	 *  call this with the global lock held
+	 */
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (mmu->flags & KGSL_FLAGS_STARTED) {
+		/* disable MMU */
+		kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
+
+		mmu->flags &= ~KGSL_FLAGS_STARTED;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_stop);
+
+int kgsl_mmu_close(struct kgsl_device *device)
+{
+	/*
+	 *  close device mmu
+	 *
+	 *  call this with the global lock held
+	 */
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (mmu->dummyspace.gpuaddr)
+		kgsl_sharedmem_free(&mmu->dummyspace);
+
+	if (mmu->defaultpagetable)
+		kgsl_mmu_putpagetable(mmu->defaultpagetable);
+
+	return 0;
+}
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
new file mode 100644
index 0000000..3425277
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -0,0 +1,273 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_MMU_H
+#define __KGSL_MMU_H
+
+/* Identifier for the global page table */
+/* Per process page tables will probably pass in the thread group
+   as an identifier */
+
+#define KGSL_MMU_GLOBAL_PT 0
+
+#define GSL_PT_SUPER_PTE 8
+#define GSL_PT_PAGE_WV		0x00000001
+#define GSL_PT_PAGE_RV		0x00000002
+#define GSL_PT_PAGE_DIRTY	0x00000004
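+
+/* A hardware PTE holds the page physical address in the upper bits
+   (GSL_PT_PAGE_ADDR_MASK) and the RV/WV/DIRTY state bits in the low
+   three bits (GSL_PT_PAGE_BITS_MASK in kgsl_mmu.c) */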
+
+/* MMU registers - the register locations for all cores are the
+   same.  The method for getting to those locations differs between
+   2D and 3D, but the 2D and 3D register functions do that magic
+   for us */
+
+#define MH_MMU_CONFIG                0x0040
+#define MH_MMU_VA_RANGE              0x0041
+#define MH_MMU_PT_BASE               0x0042
+#define MH_MMU_PAGE_FAULT            0x0043
+#define MH_MMU_TRAN_ERROR            0x0044
+#define MH_MMU_INVALIDATE            0x0045
+#define MH_MMU_MPU_BASE              0x0046
+#define MH_MMU_MPU_END               0x0047
+
+#define MH_INTERRUPT_MASK            0x0A42
+#define MH_INTERRUPT_STATUS          0x0A43
+#define MH_INTERRUPT_CLEAR           0x0A44
+#define MH_AXI_ERROR                 0x0A45
+
+/* MH_MMU_CONFIG bit definitions */
+
+#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT           0x00000004
+#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT           0x00000006
+#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT          0x00000008
+#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT          0x0000000a
+#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT          0x0000000c
+#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT          0x0000000e
+#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT          0x00000010
+#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT         0x00000012
+#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT         0x00000014
+#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT           0x00000016
+#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT           0x00000018
+
+/* MMU Flags */
+#define KGSL_MMUFLAGS_TLBFLUSH         0x10000000
+#define KGSL_MMUFLAGS_PTUPDATE         0x20000000
+
+#define MH_INTERRUPT_MASK__AXI_READ_ERROR                  0x00000001L
+#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR                 0x00000002L
+#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT                  0x00000004L
+
+#ifdef CONFIG_MSM_KGSL_MMU
+#define KGSL_MMU_INT_MASK \
+	(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
+	 MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \
+	 MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
+#else
+#define KGSL_MMU_INT_MASK \
+	(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
+	 MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
+#endif
+
+/* Macros to manage TLB flushing */
+#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS     (sizeof(unsigned char) * 8)
+#define GSL_TLBFLUSH_FILTER_GET(superpte)			     \
+	      (*((unsigned char *)				    \
+	      (((unsigned int)pagetable->tlbflushfilter.base)    \
+	      + (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
+#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte)				\
+	      (GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 <<	    \
+	      (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
+#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte)			 \
+	      (GSL_TLBFLUSH_FILTER_GET((superpte)) &		  \
+	      (1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
+#define GSL_TLBFLUSH_FILTER_RESET() memset(pagetable->tlbflushfilter.base,\
+				      0, pagetable->tlbflushfilter.size)
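+
+/* The filter holds one bit per superpte (a group of GSL_PT_SUPER_PTE
+   contiguous PTEs); e.g. superpte 20 lives in byte 2, bit 4.  SETDIRTY
+   marks a superpte as reused, ISDIRTY tests it on map, and RESET clears
+   the whole filter after a full TLB invalidate. */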
+
+
+struct kgsl_device;
+
+struct kgsl_tlbflushfilter {
+	unsigned int *base;
+	unsigned int size;
+};
+
+struct kgsl_pagetable {
+	spinlock_t lock;
+	struct kref refcount;
+	struct kgsl_memdesc  base;
+	uint32_t      va_base;
+	unsigned int   va_range;
+	unsigned int   last_superpte;
+	unsigned int   max_entries;
+	struct gen_pool *pool;
+	struct list_head list;
+	unsigned int name;
+	/* Maintain filter to manage tlb flushing */
+	struct kgsl_tlbflushfilter tlbflushfilter;
+	unsigned int tlb_flags;
+	struct kobject *kobj;
+
+	struct {
+		unsigned int entries;
+		unsigned int mapped;
+		unsigned int max_mapped;
+		unsigned int max_entries;
+	} stats;
+};
+
+struct kgsl_mmu {
+	unsigned int     refcnt;
+	uint32_t      flags;
+	struct kgsl_device     *device;
+	unsigned int     config;
+	uint32_t        mpu_base;
+	int              mpu_range;
+	struct kgsl_memdesc    dummyspace;
+	/* current page table object being used by device mmu */
+	struct kgsl_pagetable  *defaultpagetable;
+	struct kgsl_pagetable  *hwpagetable;
+};
+
+struct kgsl_ptpool_chunk {
+	size_t size;
+	unsigned int count;
+	int dynamic;
+
+	void *data;
+	unsigned int phys;
+
+	unsigned long *bitmap;
+	struct list_head list;
+};
+
+struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);
+
+#ifdef CONFIG_MSM_KGSL_MMU
+
+int kgsl_mmu_init(struct kgsl_device *device);
+int kgsl_mmu_start(struct kgsl_device *device);
+int kgsl_mmu_stop(struct kgsl_device *device);
+int kgsl_mmu_close(struct kgsl_device *device);
+void kgsl_mmu_setstate(struct kgsl_device *device,
+		      struct kgsl_pagetable *pagetable);
+int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+		 struct kgsl_memdesc *memdesc,
+		 unsigned int protflags);
+int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+			struct kgsl_memdesc *memdesc, unsigned int protflags);
+int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+		    struct kgsl_memdesc *memdesc);
+void kgsl_ptpool_destroy(struct kgsl_ptpool *pool);
+int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries);
+void kgsl_mh_intrcallback(struct kgsl_device *device);
+void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
+unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
+void kgsl_setstate(struct kgsl_device *device, uint32_t flags);
+void kgsl_default_setstate(struct kgsl_device *device, uint32_t flags);
+
+static inline int kgsl_mmu_enabled(void)
+{
+	return 1;
+}
+
+#else
+
+static inline int kgsl_mmu_enabled(void)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_init(struct kgsl_device *device)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_start(struct kgsl_device *device)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_stop(struct kgsl_device *device)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_close(struct kgsl_device *device)
+{
+	return 0;
+}
+
+static inline void kgsl_mmu_setstate(struct kgsl_device *device,
+				    struct kgsl_pagetable *pagetable) { }
+
+static inline int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+		 struct kgsl_memdesc *memdesc,
+		 unsigned int protflags)
+{
+	memdesc->gpuaddr = memdesc->physaddr;
+	return 0;
+}
+
+static inline int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+				 struct kgsl_memdesc *memdesc)
+{
+	return 0;
+}
+
+static inline int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize,
+				    int entries)
+{
+	return 0;
+}
+
+static inline int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+	struct kgsl_memdesc *memdesc, unsigned int protflags)
+{
+	memdesc->gpuaddr = memdesc->physaddr;
+	return 0;
+}
+
+static inline void kgsl_ptpool_destroy(struct kgsl_ptpool *pool) { }
+
+static inline void kgsl_mh_intrcallback(struct kgsl_device *device) { }
+
+static inline void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable) { }
+
+static inline unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
+{
+	return 0;
+}
+
+static inline void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
+{ }
+
+static inline void kgsl_default_setstate(struct kgsl_device *device,
+	uint32_t flags) { }
+#endif
+
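+/*
+ * Test and clear the per-device TLB flush bit for this pagetable.
+ * Returns KGSL_MMUFLAGS_TLBFLUSH if the calling device still needs to
+ * invalidate its TLB for this pagetable, 0 otherwise.
+ */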
+static inline unsigned int kgsl_pt_get_flags(struct kgsl_pagetable *pt,
+					     enum kgsl_deviceid id)
+{
+	unsigned int result = 0;
+
+	if (pt == NULL)
+		return 0;
+
+	spin_lock(&pt->lock);
+	if (pt->tlb_flags & (1<<id)) {
+		result = KGSL_MMUFLAGS_TLBFLUSH;
+		pt->tlb_flags &= ~(1<<id);
+	}
+	spin_unlock(&pt->lock);
+	return result;
+}
+
+#endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
new file mode 100644
index 0000000..572e0e8
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -0,0 +1,643 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/interrupt.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_bus.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+#define GPU_SWFI_LATENCY	3
+
+void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
+				unsigned int new_level)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	if (new_level < (pwr->num_pwrlevels - 1) &&
+		new_level >= pwr->thermal_pwrlevel &&
+		new_level != pwr->active_pwrlevel) {
+		pwr->active_pwrlevel = new_level;
+		if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags))
+			clk_set_rate(pwr->grp_clks[0],
+					pwr->pwrlevels[pwr->active_pwrlevel].
+					gpu_freq);
+		if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
+			if (pwr->pcl)
+				msm_bus_scale_client_update_request(pwr->pcl,
+					pwr->pwrlevels[pwr->active_pwrlevel].
+					bus_freq);
+		KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
+					  pwr->active_pwrlevel);
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
+
+static int __gpuclk_store(int max, struct device *dev,
+						  struct device_attribute *attr,
+						  const char *buf, size_t count)
+{
+	int ret, i, delta = 5000000;
+	unsigned long val;
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	ret = sscanf(buf, "%lu", &val);
+	if (ret != 1)
+		return count;
+
+	mutex_lock(&device->mutex);
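+	/* Find the first power level whose gpu_freq is within delta (5 MHz)
+	 * of the requested value; for a max_gpuclk request that level also
+	 * becomes the new thermal ceiling. */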
+	for (i = 0; i < pwr->num_pwrlevels; i++) {
+		if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
+			if (max)
+				pwr->thermal_pwrlevel = i;
+			break;
+		}
+	}
+
+	if (i == pwr->num_pwrlevels)
+		goto done;
+
+	/*
+	 * If the current or requested clock speed is greater than the
+	 * thermal limit, bump down immediately.
+	 */
+
+	if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
+	    pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
+		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+	else if (!max)
+		kgsl_pwrctrl_pwrlevel_change(device, i);
+
+done:
+	mutex_unlock(&device->mutex);
+	return count;
+}
+
+static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t count)
+{
+	return __gpuclk_store(1, dev, attr, buf, count);
+}
+
+static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
+}
+
+static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	return __gpuclk_store(0, dev, attr, buf, count);
+}
+
+static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+}
+
+static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	char temp[20];
+	unsigned long val;
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int rc;
+
+	snprintf(temp, sizeof(temp), "%.*s",
+			 (int)min(count, sizeof(temp) - 1), buf);
+	rc = strict_strtoul(temp, 0, &val);
+	if (rc)
+		return rc;
+
+	mutex_lock(&device->mutex);
+
+	if (val == 1)
+		pwr->nap_allowed = true;
+	else if (val == 0)
+		pwr->nap_allowed = false;
+
+	mutex_unlock(&device->mutex);
+
+	return count;
+}
+
+static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	return sprintf(buf, "%d\n", pwr->nap_allowed);
+}
+
+
+static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	char temp[20];
+	unsigned long val;
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	const long div = 1000/HZ;
+	static unsigned int org_interval_timeout = 1;
+	int rc;
+
+	snprintf(temp, sizeof(temp), "%.*s",
+			 (int)min(count, sizeof(temp) - 1), buf);
+	rc = strict_strtoul(temp, 0, &val);
+	if (rc)
+		return rc;
+
+	if (org_interval_timeout == 1)
+		org_interval_timeout = pwr->interval_timeout;
+
+	mutex_lock(&device->mutex);
+
+	/* Let the timeout be requested in ms, but convert to jiffies. */
+	val /= div;
+	if (val >= org_interval_timeout)
+		pwr->interval_timeout = val;
+
+	mutex_unlock(&device->mutex);
+
+	return count;
+}
+
+static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct kgsl_device *device = kgsl_device_from_dev(dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	return sprintf(buf, "%d\n", pwr->interval_timeout);
+}
+
+DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
+DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
+	kgsl_pwrctrl_max_gpuclk_store);
+DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
+DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
+	kgsl_pwrctrl_idle_timer_store);
+
+static const struct device_attribute *pwrctrl_attr_list[] = {
+	&dev_attr_gpuclk,
+	&dev_attr_max_gpuclk,
+	&dev_attr_pwrnap,
+	&dev_attr_idle_timer,
+	NULL
+};
+
+int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
+{
+	return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
+}
+
+void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
+{
+	kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
+}
+
+void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i = 0;
+	if (state == KGSL_PWRFLAGS_OFF) {
+		if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"clocks off, device %d\n", device->id);
+			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+				if (pwr->grp_clks[i])
+					clk_disable(pwr->grp_clks[i]);
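+			/* Unless we are only napping, drop the core clock to
+			 * the lowest (last) power level while it is off. */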
+			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
+				(device->requested_state != KGSL_STATE_NAP))
+				clk_set_rate(pwr->grp_clks[0],
+					pwr->pwrlevels[pwr->num_pwrlevels - 1].
+					gpu_freq);
+		}
+	} else if (state == KGSL_PWRFLAGS_ON) {
+		if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"clocks on, device %d\n", device->id);
+
+			if ((pwr->pwrlevels[0].gpu_freq > 0) &&
+				(device->state != KGSL_STATE_NAP))
+				clk_set_rate(pwr->grp_clks[0],
+					pwr->pwrlevels[pwr->active_pwrlevel].
+						gpu_freq);
+
+			/* Enable the group clocks last so that GPU
+			   interrupts can come through */
+			for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+				if (pwr->grp_clks[i])
+					clk_enable(pwr->grp_clks[i]);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_clk);
+
+void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	if (state == KGSL_PWRFLAGS_OFF) {
+		if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"axi off, device %d\n", device->id);
+			if (pwr->ebi1_clk)
+				clk_disable(pwr->ebi1_clk);
+			if (pwr->pcl)
+				msm_bus_scale_client_update_request(pwr->pcl,
+								    0);
+		}
+	} else if (state == KGSL_PWRFLAGS_ON) {
+		if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"axi on, device %d\n", device->id);
+			if (pwr->ebi1_clk)
+				clk_enable(pwr->ebi1_clk);
+			if (pwr->pcl)
+				msm_bus_scale_client_update_request(pwr->pcl,
+					pwr->pwrlevels[pwr->active_pwrlevel].
+						bus_freq);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_axi);
+
+
+void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	if (state == KGSL_PWRFLAGS_OFF) {
+		if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"power off, device %d\n", device->id);
+			if (pwr->gpu_reg)
+				regulator_disable(pwr->gpu_reg);
+		}
+	} else if (state == KGSL_PWRFLAGS_ON) {
+		if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"power on, device %d\n", device->id);
+			if (pwr->gpu_reg)
+				regulator_enable(pwr->gpu_reg);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
+
+void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+	if (state == KGSL_PWRFLAGS_ON) {
+		if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"irq on, device %d\n", device->id);
+			enable_irq(pwr->interrupt_num);
+			device->ftbl->irqctrl(device, 1);
+		}
+	} else if (state == KGSL_PWRFLAGS_OFF) {
+		if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
+			&pwr->power_flags)) {
+			KGSL_PWR_INFO(device,
+				"irq off, device %d\n", device->id);
+			device->ftbl->irqctrl(device, 0);
+			disable_irq(pwr->interrupt_num);
+		}
+	}
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_irq);
+
+int kgsl_pwrctrl_init(struct kgsl_device *device)
+{
+	int i, result = 0;
+	struct clk *clk;
+	struct platform_device *pdev =
+		container_of(device->parentdev, struct platform_device, dev);
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
+	struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
+	const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
+						pdata_dev->clk.name.clk,
+						pdata_dev->clk.name.pclk,
+						pdata_dev->imem_clk_name.clk,
+						pdata_dev->imem_clk_name.pclk};
+
+	/* acquire clocks */
+	for (i = 1; i < KGSL_MAX_CLKS; i++) {
+		if (clk_names[i]) {
+			clk = clk_get(&pdev->dev, clk_names[i]);
+			if (IS_ERR(clk))
+				goto clk_err;
+			pwr->grp_clks[i] = clk;
+		}
+	}
+	/* Make sure we have a source clk for freq setting */
+	clk = clk_get(&pdev->dev, clk_names[0]);
+	pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;
+
+	/* put the AXI bus into asynchronous mode with the graphics cores */
+	if (pdata_pwr->set_grp_async != NULL)
+		pdata_pwr->set_grp_async();
+
+	if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
+		KGSL_PWR_ERR(device, "invalid power level count: %d\n",
+					 pdata_pwr->num_levels);
+		result = -EINVAL;
+		goto done;
+	}
+	pwr->num_pwrlevels = pdata_pwr->num_levels;
+	pwr->active_pwrlevel = pdata_pwr->init_level;
+	for (i = 0; i < pdata_pwr->num_levels; i++) {
+		pwr->pwrlevels[i].gpu_freq =
+		(pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
+		clk_round_rate(pwr->grp_clks[0],
+					   pdata_pwr->pwrlevel[i].
+					   gpu_freq) : 0;
+		pwr->pwrlevels[i].bus_freq =
+			pdata_pwr->pwrlevel[i].bus_freq;
+	}
+	/* Do not set_rate for targets in sync with AXI */
+	if (pwr->pwrlevels[0].gpu_freq > 0)
+		clk_set_rate(pwr->grp_clks[0], pwr->
+				pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
+
+	pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
+	if (IS_ERR(pwr->gpu_reg))
+		pwr->gpu_reg = NULL;
+
+	pwr->power_flags = 0;
+
+	pwr->nap_allowed = pdata_pwr->nap_allowed;
+	pwr->interval_timeout = pdata_pwr->idle_timeout;
+	pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
+	if (IS_ERR(pwr->ebi1_clk))
+		pwr->ebi1_clk = NULL;
+	else
+		clk_set_rate(pwr->ebi1_clk,
+					 pwr->pwrlevels[pwr->active_pwrlevel].
+						bus_freq);
+	if (pdata_dev->clk.bus_scale_table != NULL) {
+		pwr->pcl =
+			msm_bus_scale_register_client(pdata_dev->clk.
+							bus_scale_table);
+		if (!pwr->pcl) {
+			KGSL_PWR_ERR(device,
+					"msm_bus_scale_register_client failed: "
+					"id %d table %p", device->id,
+					pdata_dev->clk.bus_scale_table);
+			result = -EINVAL;
+			goto done;
+		}
+	}
+
+	/* acquire interrupt */
+	pwr->interrupt_num =
+		platform_get_irq_byname(pdev, pwr->irq_name);
+
+	if (pwr->interrupt_num <= 0) {
+		KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
+					 pwr->interrupt_num);
+		result = -EINVAL;
+		goto done;
+	}
+
+	register_early_suspend(&device->display_off);
+	return result;
+
+clk_err:
+	result = PTR_ERR(clk);
+	KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
+				 clk_names[i], result);
+
+done:
+	return result;
+}
+
+void kgsl_pwrctrl_close(struct kgsl_device *device)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int i;
+
+	KGSL_PWR_INFO(device, "close device %d\n", device->id);
+
+	unregister_early_suspend(&device->display_off);
+
+	if (pwr->interrupt_num > 0) {
+		if (pwr->have_irq) {
+			free_irq(pwr->interrupt_num, NULL);
+			pwr->have_irq = 0;
+		}
+		pwr->interrupt_num = 0;
+	}
+
+	clk_put(pwr->ebi1_clk);
+
+	if (pwr->pcl)
+		msm_bus_scale_unregister_client(pwr->pcl);
+
+	pwr->pcl = 0;
+
+	if (pwr->gpu_reg) {
+		regulator_put(pwr->gpu_reg);
+		pwr->gpu_reg = NULL;
+	}
+
+	for (i = 1; i < KGSL_MAX_CLKS; i++)
+		if (pwr->grp_clks[i]) {
+			clk_put(pwr->grp_clks[i]);
+			pwr->grp_clks[i] = NULL;
+		}
+
+	pwr->grp_clks[0] = NULL;
+	pwr->power_flags = 0;
+}
+
+void kgsl_idle_check(struct work_struct *work)
+{
+	struct kgsl_device *device = container_of(work, struct kgsl_device,
+							idle_check_ws);
+
+	mutex_lock(&device->mutex);
+	if (device->requested_state != KGSL_STATE_SLEEP)
+		kgsl_pwrscale_idle(device);
+
+	if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
+		if (kgsl_pwrctrl_sleep(device) != 0)
+			mod_timer(&device->idle_timer,
+					jiffies +
+					device->pwrctrl.interval_timeout);
+	} else if (device->state & (KGSL_STATE_HUNG |
+					KGSL_STATE_DUMP_AND_RECOVER)) {
+		device->requested_state = KGSL_STATE_NONE;
+	}
+
+	mutex_unlock(&device->mutex);
+}
+
+void kgsl_timer(unsigned long data)
+{
+	struct kgsl_device *device = (struct kgsl_device *) data;
+
+	KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
+	if (device->requested_state != KGSL_STATE_SUSPEND) {
+		device->requested_state = KGSL_STATE_SLEEP;
+		/* Have work run in a non-interrupt context. */
+		queue_work(device->work_queue, &device->idle_check_ws);
+	}
+}
+
+void kgsl_pre_hwaccess(struct kgsl_device *device)
+{
+	BUG_ON(!mutex_is_locked(&device->mutex));
+	if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
+		kgsl_pwrctrl_wake(device);
+}
+EXPORT_SYMBOL(kgsl_pre_hwaccess);
+
+void kgsl_check_suspended(struct kgsl_device *device)
+{
+	if (device->requested_state == KGSL_STATE_SUSPEND ||
+				device->state == KGSL_STATE_SUSPEND) {
+		mutex_unlock(&device->mutex);
+		wait_for_completion(&device->hwaccess_gate);
+		mutex_lock(&device->mutex);
+	}
+	if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
+		mutex_unlock(&device->mutex);
+		wait_for_completion(&device->recovery_gate);
+		mutex_lock(&device->mutex);
+	}
+}
+
+/******************************************************************/
+/* Caller must hold the device mutex. */
+int kgsl_pwrctrl_sleep(struct kgsl_device *device)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
+
+	/* Work through the legal state transitions */
+	if (device->requested_state == KGSL_STATE_NAP) {
+		if (device->ftbl->isidle(device))
+			goto nap;
+	} else if (device->requested_state == KGSL_STATE_SLEEP) {
+		if (device->state == KGSL_STATE_NAP ||
+			device->ftbl->isidle(device))
+			goto sleep;
+	}
+
+	device->requested_state = KGSL_STATE_NONE;
+	return -EBUSY;
+
+sleep:
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
+	if (pwr->pwrlevels[0].gpu_freq > 0)
+		clk_set_rate(pwr->grp_clks[0],
+				pwr->pwrlevels[pwr->num_pwrlevels - 1].
+				gpu_freq);
+	device->pwrctrl.time = 0;
+
+	kgsl_pwrscale_sleep(device);
+	goto clk_off;
+
+nap:
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+clk_off:
+	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
+
+	device->state = device->requested_state;
+	device->requested_state = KGSL_STATE_NONE;
+	wake_unlock(&device->idle_wakelock);
+	pm_qos_update_request(&device->pm_qos_req_dma,
+				PM_QOS_DEFAULT_VALUE);
+	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
+				  device->state, device->id);
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
+
+/******************************************************************/
+/* Caller must hold the device mutex. */
+void kgsl_pwrctrl_wake(struct kgsl_device *device)
+{
+	if (device->state == KGSL_STATE_SUSPEND)
+		return;
+
+	if (device->state != KGSL_STATE_NAP) {
+		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+		kgsl_pwrscale_wake(device);
+	}
+
+	/* Turn on the core clocks */
+	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
+
+	/* Enable state before turning on irq */
+	device->state = KGSL_STATE_ACTIVE;
+	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+
+	/* Re-enable HW access */
+	mod_timer(&device->idle_timer,
+				jiffies + device->pwrctrl.interval_timeout);
+
+	wake_lock(&device->idle_wakelock);
+	pm_qos_update_request(&device->pm_qos_req_dma, GPU_SWFI_LATENCY);
+	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_wake);
+
+void kgsl_pwrctrl_enable(struct kgsl_device *device)
+{
+	/* Order pwrrail/clk sequence based upon platform */
+	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
+	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
+	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_enable);
+
+void kgsl_pwrctrl_disable(struct kgsl_device *device)
+{
+	/* Order pwrrail/clk sequence based upon platform */
+	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
+	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
+	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_disable);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
new file mode 100644
index 0000000..fff5769
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_PWRCTRL_H
+#define __KGSL_PWRCTRL_H
+
+/*****************************************************************************
+** power flags
+*****************************************************************************/
+#define KGSL_PWRFLAGS_POWER_ON 0
+#define KGSL_PWRFLAGS_CLK_ON   1
+#define KGSL_PWRFLAGS_AXI_ON   2
+#define KGSL_PWRFLAGS_IRQ_ON   3
+
+#define KGSL_PWRFLAGS_ON   1
+#define KGSL_PWRFLAGS_OFF  0
+
+#define KGSL_PWRLEVEL_TURBO 0
+#define KGSL_PWRLEVEL_NOMINAL 1
+#define KGSL_MAX_CLKS 5
+
+struct platform_device;
+
+struct kgsl_pwrctrl {
+	int interrupt_num;
+	int have_irq;
+	struct clk *ebi1_clk;
+	struct clk *grp_clks[KGSL_MAX_CLKS];
+	unsigned long power_flags;
+	struct kgsl_pwrlevel pwrlevels[KGSL_MAX_PWRLEVELS];
+	unsigned int active_pwrlevel;
+	int thermal_pwrlevel;
+	unsigned int num_pwrlevels;
+	unsigned int interval_timeout;
+	struct regulator *gpu_reg;
+	uint32_t pcl;
+	unsigned int nap_allowed;
+	const char *regulator_name;
+	const char *irq_name;
+	const char *src_clk_name;
+	s64 time;
+};
+
+void kgsl_pwrctrl_clk(struct kgsl_device *device, int state);
+void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
+void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);
+void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
+int kgsl_pwrctrl_init(struct kgsl_device *device);
+void kgsl_pwrctrl_close(struct kgsl_device *device);
+void kgsl_timer(unsigned long data);
+void kgsl_idle_check(struct work_struct *work);
+void kgsl_pre_hwaccess(struct kgsl_device *device);
+void kgsl_check_suspended(struct kgsl_device *device);
+int kgsl_pwrctrl_sleep(struct kgsl_device *device);
+void kgsl_pwrctrl_wake(struct kgsl_device *device);
+void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
+	unsigned int level);
+int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device);
+void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device);
+void kgsl_pwrctrl_enable(struct kgsl_device *device);
+void kgsl_pwrctrl_disable(struct kgsl_device *device);
+static inline unsigned long kgsl_get_clkrate(struct clk *clk)
+{
+	return (clk != NULL) ? clk_get_rate(clk) : 0;
+}
+
+#endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
new file mode 100644
index 0000000..0d3120f
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -0,0 +1,327 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+struct kgsl_pwrscale_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct kgsl_device *device, char *buf);
+	ssize_t (*store)(struct kgsl_device *device, const char *buf,
+			 size_t count);
+};
+
+#define to_pwrscale(k) container_of(k, struct kgsl_pwrscale, kobj)
+#define pwrscale_to_device(p) container_of(p, struct kgsl_device, pwrscale)
+#define to_device(k) container_of(k, struct kgsl_device, pwrscale_kobj)
+#define to_pwrscale_attr(a) \
+container_of(a, struct kgsl_pwrscale_attribute, attr)
+#define to_policy_attr(a) \
+container_of(a, struct kgsl_pwrscale_policy_attribute, attr)
+
+#define PWRSCALE_ATTR(_name, _mode, _show, _store) \
+struct kgsl_pwrscale_attribute pwrscale_attr_##_name = \
+__ATTR(_name, _mode, _show, _store)
+
+/* Master list of available policies */
+
+static struct kgsl_pwrscale_policy *kgsl_pwrscale_policies[] = {
+#ifdef CONFIG_MSM_SCM
+	&kgsl_pwrscale_policy_tz,
+#endif
+	NULL
+};
+
+static ssize_t pwrscale_policy_store(struct kgsl_device *device,
+				     const char *buf, size_t count)
+{
+	int i;
+	struct kgsl_pwrscale_policy *policy = NULL;
+
+	/* The special keyword none allows the user to detach all
+	   policies */
+	if (!strncmp("none", buf, 4)) {
+		kgsl_pwrscale_detach_policy(device);
+		return count;
+	}
+
+	for (i = 0; kgsl_pwrscale_policies[i]; i++) {
+		if (!strncmp(kgsl_pwrscale_policies[i]->name, buf,
+			     strnlen(kgsl_pwrscale_policies[i]->name,
+				PAGE_SIZE))) {
+			policy = kgsl_pwrscale_policies[i];
+			break;
+		}
+	}
+
+	if (policy)
+		if (kgsl_pwrscale_attach_policy(device, policy))
+			return -EIO;
+
+	return count;
+}
+
+static ssize_t pwrscale_policy_show(struct kgsl_device *device, char *buf)
+{
+	int ret;
+
+	if (device->pwrscale.policy)
+		ret = snprintf(buf, PAGE_SIZE, "%s\n",
+			       device->pwrscale.policy->name);
+	else
+		ret = snprintf(buf, PAGE_SIZE, "none\n");
+
+	return ret;
+}
+
+PWRSCALE_ATTR(policy, 0644, pwrscale_policy_show, pwrscale_policy_store);
+
+static ssize_t pwrscale_avail_policies_show(struct kgsl_device *device,
+					    char *buf)
+{
+	int i, ret = 0;
+
+	for (i = 0; kgsl_pwrscale_policies[i]; i++) {
+		ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s ",
+				kgsl_pwrscale_policies[i]->name);
+	}
+
+	ret += snprintf(buf + ret, PAGE_SIZE - ret, "none\n");
+	return ret;
+}
+PWRSCALE_ATTR(avail_policies, 0444, pwrscale_avail_policies_show, NULL);
+
+static struct attribute *pwrscale_attrs[] = {
+	&pwrscale_attr_policy.attr,
+	&pwrscale_attr_avail_policies.attr,
+	NULL
+};
+
+static ssize_t policy_sysfs_show(struct kobject *kobj,
+				   struct attribute *attr, char *buf)
+{
+	struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
+	struct kgsl_device *device = pwrscale_to_device(pwrscale);
+	struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
+	ssize_t ret;
+
+	if (pattr->show)
+		ret = pattr->show(device, pwrscale, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static ssize_t policy_sysfs_store(struct kobject *kobj,
+				    struct attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
+	struct kgsl_device *device = pwrscale_to_device(pwrscale);
+	struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
+	ssize_t ret;
+
+	if (pattr->store)
+		ret = pattr->store(device, pwrscale, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static void policy_sysfs_release(struct kobject *kobj)
+{
+}
+
+static ssize_t pwrscale_sysfs_show(struct kobject *kobj,
+				   struct attribute *attr, char *buf)
+{
+	struct kgsl_device *device = to_device(kobj);
+	struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
+	ssize_t ret;
+
+	if (pattr->show)
+		ret = pattr->show(device, buf);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static ssize_t pwrscale_sysfs_store(struct kobject *kobj,
+				    struct attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct kgsl_device *device = to_device(kobj);
+	struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
+	ssize_t ret;
+
+	if (pattr->store)
+		ret = pattr->store(device, buf, count);
+	else
+		ret = -EIO;
+
+	return ret;
+}
+
+static void pwrscale_sysfs_release(struct kobject *kobj)
+{
+}
+
+static const struct sysfs_ops policy_sysfs_ops = {
+	.show = policy_sysfs_show,
+	.store = policy_sysfs_store
+};
+
+static const struct sysfs_ops pwrscale_sysfs_ops = {
+	.show = pwrscale_sysfs_show,
+	.store = pwrscale_sysfs_store
+};
+
+static struct kobj_type ktype_pwrscale_policy = {
+	.sysfs_ops = &policy_sysfs_ops,
+	.default_attrs = NULL,
+	.release = policy_sysfs_release
+};
+
+static struct kobj_type ktype_pwrscale = {
+	.sysfs_ops = &pwrscale_sysfs_ops,
+	.default_attrs = pwrscale_attrs,
+	.release = pwrscale_sysfs_release
+};
+
+void kgsl_pwrscale_sleep(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy && device->pwrscale.policy->sleep)
+		device->pwrscale.policy->sleep(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_sleep);
+
+void kgsl_pwrscale_wake(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy && device->pwrscale.policy->wake)
+		device->pwrscale.policy->wake(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_wake);
+
+void kgsl_pwrscale_busy(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy && device->pwrscale.policy->busy)
+		device->pwrscale.policy->busy(device, &device->pwrscale);
+}
+
+void kgsl_pwrscale_idle(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy && device->pwrscale.policy->idle)
+		device->pwrscale.policy->idle(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_idle);
+
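+/*
+ * Create a per-policy sysfs directory under the device's pwrscale kobject
+ * and populate it with the policy's attribute group.
+ */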
+int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
+				   struct kgsl_pwrscale *pwrscale,
+				   struct attribute_group *attr_group)
+{
+	int ret;
+
+	ret = kobject_add(&pwrscale->kobj, &device->pwrscale_kobj,
+		"%s", pwrscale->policy->name);
+
+	if (ret)
+		return ret;
+
+	ret = sysfs_create_group(&pwrscale->kobj, attr_group);
+
+	if (ret) {
+		kobject_del(&pwrscale->kobj);
+		kobject_put(&pwrscale->kobj);
+	}
+
+	return ret;
+}
+
+void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
+				       struct kgsl_pwrscale *pwrscale,
+				       struct attribute_group *attr_group)
+{
+	sysfs_remove_group(&pwrscale->kobj, attr_group);
+	kobject_del(&pwrscale->kobj);
+	kobject_put(&pwrscale->kobj);
+}
+
+static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device)
+{
+	if (device->pwrscale.policy != NULL)
+		device->pwrscale.policy->close(device, &device->pwrscale);
+	device->pwrscale.policy = NULL;
+}
+
+void kgsl_pwrscale_detach_policy(struct kgsl_device *device)
+{
+	mutex_lock(&device->mutex);
+	_kgsl_pwrscale_detach_policy(device);
+	mutex_unlock(&device->mutex);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_detach_policy);
+
+int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
+				struct kgsl_pwrscale_policy *policy)
+{
+	int ret = 0;
+
+	mutex_lock(&device->mutex);
+
+	if (device->pwrscale.policy == policy)
+		goto done;
+
+	if (device->pwrscale.policy != NULL)
+		_kgsl_pwrscale_detach_policy(device);
+
+	device->pwrscale.policy = policy;
+
+	if (policy) {
+		ret = device->pwrscale.policy->init(device, &device->pwrscale);
+		if (ret)
+			device->pwrscale.policy = NULL;
+	}
+
+done:
+	mutex_unlock(&device->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_attach_policy);
+
+int kgsl_pwrscale_init(struct kgsl_device *device)
+{
+	int ret;
+
+	ret = kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale,
+		&device->dev->kobj, "pwrscale");
+
+	if (ret)
+		return ret;
+
+	kobject_init(&device->pwrscale.kobj, &ktype_pwrscale_policy);
+	return ret;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_init);
+
+void kgsl_pwrscale_close(struct kgsl_device *device)
+{
+	kobject_put(&device->pwrscale_kobj);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_close);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
new file mode 100644
index 0000000..6904608
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_PWRSCALE_H
+#define __KGSL_PWRSCALE_H
+
+struct kgsl_pwrscale;
+
+struct kgsl_pwrscale_policy  {
+	const char *name;
+	int (*init)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*close)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*idle)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*busy)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*sleep)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+	void (*wake)(struct kgsl_device *device,
+		struct kgsl_pwrscale *pwrscale);
+};
+
+struct kgsl_pwrscale {
+	struct kgsl_pwrscale_policy *policy;
+	struct kobject kobj;
+	void *priv;
+};
+
+struct kgsl_pwrscale_policy_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct kgsl_device *device,
+			struct kgsl_pwrscale *pwrscale, char *buf);
+	ssize_t (*store)(struct kgsl_device *device,
+			 struct kgsl_pwrscale *pwrscale, const char *buf,
+			 size_t count);
+};
+
+#define PWRSCALE_POLICY_ATTR(_name, _mode, _show, _store)          \
+	struct kgsl_pwrscale_policy_attribute policy_attr_##_name = \
+		__ATTR(_name, _mode, _show, _store)
+
+extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz;
+
+int kgsl_pwrscale_init(struct kgsl_device *device);
+void kgsl_pwrscale_close(struct kgsl_device *device);
+
+int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
+	struct kgsl_pwrscale_policy *policy);
+void kgsl_pwrscale_detach_policy(struct kgsl_device *device);
+
+void kgsl_pwrscale_idle(struct kgsl_device *device);
+void kgsl_pwrscale_busy(struct kgsl_device *device);
+void kgsl_pwrscale_sleep(struct kgsl_device *device);
+void kgsl_pwrscale_wake(struct kgsl_device *device);
+
+int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
+				   struct kgsl_pwrscale *pwrscale,
+				   struct attribute_group *attr_group);
+
+void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
+				       struct kgsl_pwrscale *pwrscale,
+				       struct attribute_group *attr_group);
+#endif
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
new file mode 100644
index 0000000..a6fae30
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -0,0 +1,197 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+#define TZ_GOVERNOR_PERFORMANCE 0
+#define TZ_GOVERNOR_ONDEMAND    1
+
+struct tz_priv {
+	int governor;
+	unsigned int no_switch_cnt;
+};
+
+#define SWITCH_OFF		200
+#define TZ_UPDATE_ID		0x01404000
+#define TZ_RESET_ID		0x01403000
+
+#ifdef CONFIG_MSM_SECURE_IO
+/* Trap into the TrustZone, and call funcs there. */
+static int __secure_tz_entry(u32 cmd, u32 val)
+{
+	register u32 r0 asm("r0") = cmd;
+	register u32 r1 asm("r1") = 0x0;
+	register u32 r2 asm("r2") = val;
+
+	__iowmb();
+	asm(
+		__asmeq("%0", "r0")
+		__asmeq("%1", "r0")
+		__asmeq("%2", "r1")
+		__asmeq("%3", "r2")
+		"smc    #0      @ switch to secure world\n"
+		: "=r" (r0)
+		: "r" (r0), "r" (r1), "r" (r2)
+		);
+	return r0;
+}
+#else
+static int __secure_tz_entry(u32 cmd, u32 val)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_SECURE_IO */
+
+static ssize_t tz_governor_show(struct kgsl_device *device,
+				struct kgsl_pwrscale *pwrscale,
+				char *buf)
+{
+	struct tz_priv *priv = pwrscale->priv;
+	int ret;
+
+	if (priv->governor == TZ_GOVERNOR_ONDEMAND)
+		ret = snprintf(buf, 10, "ondemand\n");
+	else
+		ret = snprintf(buf, 13, "performance\n");
+
+	return ret;
+}
+
+static ssize_t tz_governor_store(struct kgsl_device *device,
+				struct kgsl_pwrscale *pwrscale,
+				 const char *buf, size_t count)
+{
+	char str[20];
+	struct tz_priv *priv = pwrscale->priv;
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	int ret;
+
+	ret = sscanf(buf, "%19s", str);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&device->mutex);
+
+	if (!strncmp(str, "ondemand", 8))
+		priv->governor = TZ_GOVERNOR_ONDEMAND;
+	else if (!strncmp(str, "performance", 11))
+		priv->governor = TZ_GOVERNOR_PERFORMANCE;
+
+	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
+		kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+
+	mutex_unlock(&device->mutex);
+	return count;
+}
+
+PWRSCALE_POLICY_ATTR(governor, 0644, tz_governor_show, tz_governor_store);
+
+static struct attribute *tz_attrs[] = {
+	&policy_attr_governor.attr,
+	NULL
+};
+
+static struct attribute_group tz_attr_group = {
+	.attrs = tz_attrs,
+};
+
+static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+	struct tz_priv *priv = pwrscale->priv;
+	if (device->state != KGSL_STATE_NAP &&
+		priv->governor == TZ_GOVERNOR_ONDEMAND)
+		kgsl_pwrctrl_pwrlevel_change(device,
+					     device->pwrctrl.thermal_pwrlevel);
+}
+
+static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct tz_priv *priv = pwrscale->priv;
+	struct kgsl_power_stats stats;
+	int val;
+
+	/* In "performance" mode the clock speed always stays
+	   the same */
+
+	if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
+		return;
+
+	device->ftbl->power_stats(device, &stats);
+	if (stats.total_time == 0)
+		return;
+
+	/* If the GPU has stayed in turbo mode for a while,
+	 * stop writing out values. */
+	if (pwr->active_pwrlevel)
+		priv->no_switch_cnt = 0;
+	else if (priv->no_switch_cnt > SWITCH_OFF)
+		return;
+	priv->no_switch_cnt++;
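+	/* Ask the secure side for a power level adjustment based on the
+	 * idle time in this sample; a non-zero return is added to the
+	 * current power level. */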
+	val = __secure_tz_entry(TZ_UPDATE_ID,
+				stats.total_time - stats.busy_time);
+	if (val)
+		kgsl_pwrctrl_pwrlevel_change(device,
+					     pwr->active_pwrlevel + val);
+}
+
+static void tz_sleep(struct kgsl_device *device,
+	struct kgsl_pwrscale *pwrscale)
+{
+	struct tz_priv *priv = pwrscale->priv;
+
+	__secure_tz_entry(TZ_RESET_ID, 0);
+	priv->no_switch_cnt = 0;
+}
+
+static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+	struct tz_priv *priv;
+
+	/* TrustZone is only supported on some SoCs */
+	if (!cpu_is_msm8x60())
+		return -EINVAL;
+
+	priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
+	if (pwrscale->priv == NULL)
+		return -ENOMEM;
+
+	priv->governor = TZ_GOVERNOR_ONDEMAND;
+	kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);
+
+	return 0;
+}
+
+static void tz_close(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+	kgsl_pwrscale_policy_remove_files(device, pwrscale, &tz_attr_group);
+	kfree(pwrscale->priv);
+	pwrscale->priv = NULL;
+}
+
+struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz = {
+	.name = "trustzone",
+	.init = tz_init,
+	.idle = tz_idle,
+	.sleep = tz_sleep,
+	.wake = tz_wake,
+	.close = tz_close
+};
+EXPORT_SYMBOL(kgsl_pwrscale_policy_tz);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
new file mode 100644
index 0000000..61e148c
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -0,0 +1,639 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/vmalloc.h>
+#include <linux/memory_alloc.h>
+#include <asm/cacheflush.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_device.h"
+
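+/*
+ * Per-process sysfs kobjects are named after the owning pid, so map the
+ * kobject name back to the matching kgsl_process_private.
+ */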
+static struct kgsl_process_private *
+_get_priv_from_kobj(struct kobject *kobj)
+{
+	struct kgsl_process_private *private;
+	unsigned long name;
+
+	if (!kobj)
+		return NULL;
+
+	if (sscanf(kobj->name, "%lu", &name) != 1)
+		return NULL;
+
+	list_for_each_entry(private, &kgsl_driver.process_list, list) {
+		if (private->pid == name)
+			return private;
+	}
+
+	return NULL;
+}
+
+/* sharedmem / memory sysfs files */
+
+static ssize_t
+process_show(struct kobject *kobj,
+	     struct kobj_attribute *attr,
+	     char *buf)
+{
+	struct kgsl_process_private *priv;
+	unsigned int val = 0;
+
+	mutex_lock(&kgsl_driver.process_mutex);
+	priv = _get_priv_from_kobj(kobj);
+
+	if (priv == NULL) {
+		mutex_unlock(&kgsl_driver.process_mutex);
+		return 0;
+	}
+
+	if (!strncmp(attr->attr.name, "user", 4))
+		val = priv->stats.user;
+	if (!strncmp(attr->attr.name, "user_max", 8))
+		val = priv->stats.user_max;
+	if (!strncmp(attr->attr.name, "mapped", 6))
+		val = priv->stats.mapped;
+	if (!strncmp(attr->attr.name, "mapped_max", 10))
+		val = priv->stats.mapped_max;
+	if (!strncmp(attr->attr.name, "flushes", 7))
+		val = priv->stats.flushes;
+
+	mutex_unlock(&kgsl_driver.process_mutex);
+	return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+#define KGSL_MEMSTAT_ATTR(_name, _show) \
+	static struct kobj_attribute attr_##_name = \
+	__ATTR(_name, 0444, _show, NULL)
+
+KGSL_MEMSTAT_ATTR(user, process_show);
+KGSL_MEMSTAT_ATTR(user_max, process_show);
+KGSL_MEMSTAT_ATTR(mapped, process_show);
+KGSL_MEMSTAT_ATTR(mapped_max, process_show);
+KGSL_MEMSTAT_ATTR(flushes, process_show);
+
+static struct attribute *process_attrs[] = {
+	&attr_user.attr,
+	&attr_user_max.attr,
+	&attr_mapped.attr,
+	&attr_mapped_max.attr,
+	&attr_flushes.attr,
+	NULL
+};
+
+static struct attribute_group process_attr_group = {
+	.attrs = process_attrs,
+};
+
+void
+kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
+{
+	/* Remove the sysfs entry */
+	if (private->kobj) {
+		sysfs_remove_group(private->kobj, &process_attr_group);
+		kobject_put(private->kobj);
+	}
+}
+
+void
+kgsl_process_init_sysfs(struct kgsl_process_private *private)
+{
+	unsigned char name[16];
+
+	/* Add an entry to the sysfs device */
+	snprintf(name, sizeof(name), "%d", private->pid);
+	private->kobj = kobject_create_and_add(name, kgsl_driver.prockobj);
+
+	/* sysfs failure isn't fatal, just annoying */
+	if (private->kobj != NULL) {
+		if (sysfs_create_group(private->kobj, &process_attr_group)) {
+			kobject_put(private->kobj);
+			private->kobj = NULL;
+		}
+	}
+}
+
+static int kgsl_drv_memstat_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	unsigned int val = 0;
+
+	if (!strncmp(attr->attr.name, "vmalloc", 7))
+		val = kgsl_driver.stats.vmalloc;
+	else if (!strncmp(attr->attr.name, "vmalloc_max", 11))
+		val = kgsl_driver.stats.vmalloc_max;
+	else if (!strncmp(attr->attr.name, "coherent", 8))
+		val = kgsl_driver.stats.coherent;
+	else if (!strncmp(attr->attr.name, "coherent_max", 12))
+		val = kgsl_driver.stats.coherent_max;
+	else if (!strncmp(attr->attr.name, "mapped", 6))
+		val = kgsl_driver.stats.mapped;
+	else if (!strncmp(attr->attr.name, "mapped_max", 10))
+		val = kgsl_driver.stats.mapped_max;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static int kgsl_drv_histogram_show(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	int len = 0;
+	int i;
+
+	for (i = 0; i < 16; i++)
+		len += sprintf(buf + len, "%d ",
+			kgsl_driver.stats.histogram[i]);
+
+	len += sprintf(buf + len, "\n");
+	return len;
+}
+
+DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);
+
+static const struct device_attribute *drv_attr_list[] = {
+	&dev_attr_vmalloc,
+	&dev_attr_vmalloc_max,
+	&dev_attr_coherent,
+	&dev_attr_coherent_max,
+	&dev_attr_mapped,
+	&dev_attr_mapped_max,
+	&dev_attr_histogram,
+	NULL
+};
+
+void
+kgsl_sharedmem_uninit_sysfs(void)
+{
+	kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
+}
+
+int
+kgsl_sharedmem_init_sysfs(void)
+{
+	return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
+		drv_attr_list);
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
+{
+	switch (op) {
+	case KGSL_CACHE_OP_FLUSH:
+		outer_flush_range(addr, addr + size);
+		break;
+	case KGSL_CACHE_OP_CLEAN:
+		outer_clean_range(addr, addr + size);
+		break;
+	case KGSL_CACHE_OP_INV:
+		outer_inv_range(addr, addr + size);
+		break;
+	}
+}
+#endif
+
+static unsigned long kgsl_vmalloc_physaddr(struct kgsl_memdesc *memdesc,
+					   unsigned int offset)
+{
+	unsigned int addr;
+
+	if (offset > memdesc->size)
+		return 0;
+
+	addr = vmalloc_to_pfn(memdesc->hostptr + offset);
+	return addr << PAGE_SHIFT;
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void kgsl_vmalloc_outer_cache(struct kgsl_memdesc *memdesc, int op)
+{
+	void *vaddr = memdesc->hostptr;
+	for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
+		unsigned long paddr = page_to_phys(vmalloc_to_page(vaddr));
+		_outer_cache_range_op(op, paddr, PAGE_SIZE);
+	}
+}
+#endif
+
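+/* Fault handler for vmalloc-backed allocations: hand back the vmalloc
+ * page that backs the faulting user address. */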
+static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
+				struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	unsigned long offset, pg;
+	struct page *page;
+
+	offset = (unsigned long) vmf->virtual_address - vma->vm_start;
+	pg = (unsigned long) memdesc->hostptr + offset;
+
+	page = vmalloc_to_page((void *) pg);
+	if (page == NULL)
+		return VM_FAULT_SIGBUS;
+
+	get_page(page);
+
+	vmf->page = page;
+	return 0;
+}
+
+static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc)
+{
+	return VM_RESERVED | VM_DONTEXPAND;
+}
+
+static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
+{
+	kgsl_driver.stats.vmalloc -= memdesc->size;
+	vfree(memdesc->hostptr);
+}
+
+static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
+{
+	return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+}
+
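+/* Fault handler for physically contiguous allocations: insert the pfn
+ * for the faulting offset directly into the user mapping. */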
+static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
+				struct vm_area_struct *vma,
+				struct vm_fault *vmf)
+{
+	unsigned long offset, pfn;
+	int ret;
+
+	offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
+		PAGE_SHIFT;
+
+	pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
+	ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);
+
+	if (ret == -ENOMEM || ret == -EAGAIN)
+		return VM_FAULT_OOM;
+	else if (ret == -EFAULT)
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
+
+{
+	kgsl_driver.stats.coherent -= memdesc->size;
+	if (memdesc->hostptr)
+		iounmap(memdesc->hostptr);
+
+	free_contiguous_memory_by_paddr(memdesc->physaddr);
+}
+
+static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
+{
+	kgsl_driver.stats.coherent -= memdesc->size;
+	dma_free_coherent(NULL, memdesc->size,
+			  memdesc->hostptr, memdesc->physaddr);
+}
+
+static unsigned long kgsl_contiguous_physaddr(struct kgsl_memdesc *memdesc,
+					unsigned int offset)
+{
+	if (offset > memdesc->size)
+		return 0;
+
+	return memdesc->physaddr + offset;
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void kgsl_contiguous_outer_cache(struct kgsl_memdesc *memdesc, int op)
+{
+	_outer_cache_range_op(op, memdesc->physaddr, memdesc->size);
+}
+#endif
+
+#ifdef CONFIG_OUTER_CACHE
+static void kgsl_userptr_outer_cache(struct kgsl_memdesc *memdesc, int op)
+{
+	void *vaddr = memdesc->hostptr;
+	for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
+		unsigned long paddr = kgsl_virtaddr_to_physaddr(vaddr);
+		if (paddr)
+			_outer_cache_range_op(op, paddr, PAGE_SIZE);
+	}
+}
+#endif
+
+static unsigned long kgsl_userptr_physaddr(struct kgsl_memdesc *memdesc,
+					   unsigned int offset)
+{
+	return kgsl_virtaddr_to_physaddr(memdesc->hostptr + offset);
+}
+
+/* Global - also used by kgsl_drm.c */
+struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
+	.physaddr = kgsl_vmalloc_physaddr,
+	.free = kgsl_vmalloc_free,
+	.vmflags = kgsl_vmalloc_vmflags,
+	.vmfault = kgsl_vmalloc_vmfault,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_vmalloc_outer_cache,
+#endif
+};
+EXPORT_SYMBOL(kgsl_vmalloc_ops);
+
+static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
+	.physaddr = kgsl_contiguous_physaddr,
+	.free = kgsl_ebimem_free,
+	.vmflags = kgsl_contiguous_vmflags,
+	.vmfault = kgsl_contiguous_vmfault,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_contiguous_outer_cache,
+#endif
+};
+
+static struct kgsl_memdesc_ops kgsl_coherent_ops = {
+	.physaddr = kgsl_contiguous_physaddr,
+	.free = kgsl_coherent_free,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_contiguous_outer_cache,
+#endif
+};
+
+/* Global - also used by kgsl.c and kgsl_drm.c */
+struct kgsl_memdesc_ops kgsl_contiguous_ops = {
+	.physaddr = kgsl_contiguous_physaddr,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_contiguous_outer_cache
+#endif
+};
+EXPORT_SYMBOL(kgsl_contiguous_ops);
+
+/* Global - also used by kgsl.c */
+struct kgsl_memdesc_ops kgsl_userptr_ops = {
+	.physaddr = kgsl_userptr_physaddr,
+#ifdef CONFIG_OUTER_CACHE
+	.outer_cache = kgsl_userptr_outer_cache,
+#endif
+};
+EXPORT_SYMBOL(kgsl_userptr_ops);
+
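+/* Perform the requested cache maintenance on the inner cache and, when
+ * the memdesc provides it, on the outer cache as well. */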
+void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
+{
+	void *addr = memdesc->hostptr;
+	int size = memdesc->size;
+
+	switch (op) {
+	case KGSL_CACHE_OP_FLUSH:
+		dmac_flush_range(addr, addr + size);
+		break;
+	case KGSL_CACHE_OP_CLEAN:
+		dmac_clean_range(addr, addr + size);
+		break;
+	case KGSL_CACHE_OP_INV:
+		dmac_inv_range(addr, addr + size);
+		break;
+	}
+
+	if (memdesc->ops->outer_cache)
+		memdesc->ops->outer_cache(memdesc, op);
+}
+EXPORT_SYMBOL(kgsl_cache_range_op);
+
+static int
+_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
+			struct kgsl_pagetable *pagetable,
+			void *ptr, size_t size, unsigned int protflags)
+{
+	int result;
+
+	memdesc->size = size;
+	memdesc->pagetable = pagetable;
+	memdesc->priv = KGSL_MEMFLAGS_CACHED;
+	memdesc->ops = &kgsl_vmalloc_ops;
+	memdesc->hostptr = (void *) ptr;
+
+	kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);
+
+	result = kgsl_mmu_map(pagetable, memdesc, protflags);
+
+	if (result) {
+		kgsl_sharedmem_free(memdesc);
+	} else {
+		int order;
+
+		KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
+			kgsl_driver.stats.vmalloc_max);
+
+		order = get_order(size);
+
+		if (order < 16)
+			kgsl_driver.stats.histogram[order]++;
+	}
+
+	return result;
+}
+
+int
+kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
+		       struct kgsl_pagetable *pagetable, size_t size)
+{
+	void *ptr;
+
+	BUG_ON(size == 0);
+
+	size = ALIGN(size, PAGE_SIZE * 2);
+	ptr = vmalloc(size);
+
+	if (ptr  == NULL) {
+		KGSL_CORE_ERR("vmalloc(%d) failed\n", size);
+		return -ENOMEM;
+	}
+
+	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
+		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+}
+EXPORT_SYMBOL(kgsl_sharedmem_vmalloc);
+
+int
+kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
+			    struct kgsl_pagetable *pagetable,
+			    size_t size, int flags)
+{
+	void *ptr;
+	unsigned int protflags;
+
+	BUG_ON(size == 0);
+	ptr = vmalloc_user(size);
+
+	if (ptr == NULL) {
+		KGSL_CORE_ERR("vmalloc_user(%d) failed: allocated=%d\n",
+			      size, kgsl_driver.stats.vmalloc);
+		return -ENOMEM;
+	}
+
+	protflags = GSL_PT_PAGE_RV;
+	if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
+		protflags |= GSL_PT_PAGE_WV;
+
+	return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
+		protflags);
+}
+EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);
+
+int
+kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
+{
+	size = ALIGN(size, PAGE_SIZE);
+
+	memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
+					      GFP_KERNEL);
+	if (memdesc->hostptr == NULL) {
+		KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
+		return -ENOMEM;
+	}
+
+	memdesc->size = size;
+	memdesc->ops = &kgsl_coherent_ops;
+
+	/* Record statistics */
+
+	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
+		       kgsl_driver.stats.coherent_max);
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);
+
+void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
+{
+	if (memdesc == NULL || memdesc->size == 0)
+		return;
+
+	if (memdesc->gpuaddr)
+		kgsl_mmu_unmap(memdesc->pagetable, memdesc);
+
+	if (memdesc->ops->free)
+		memdesc->ops->free(memdesc);
+
+	memset(memdesc, 0, sizeof(*memdesc));
+}
+EXPORT_SYMBOL(kgsl_sharedmem_free);
+
+static int
+_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+			struct kgsl_pagetable *pagetable, size_t size)
+{
+	int result;
+
+	memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);
+
+	if (memdesc->physaddr == 0) {
+		KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
+			size);
+		return -ENOMEM;
+	}
+
+	memdesc->size = size;
+	memdesc->pagetable = pagetable;
+	memdesc->ops = &kgsl_ebimem_ops;
+
+	result = kgsl_mmu_map(pagetable, memdesc,
+		GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+	if (result)
+		kgsl_sharedmem_free(memdesc);
+
+	KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
+		kgsl_driver.stats.coherent_max);
+
+	return result;
+}
+
+int
+kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
+			struct kgsl_pagetable *pagetable,
+			size_t size, int flags)
+{
+	size = ALIGN(size, PAGE_SIZE);
+	return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);
+
+int
+kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+		struct kgsl_pagetable *pagetable, size_t size)
+{
+	int result;
+	size = ALIGN(size, 8192);
+	result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+
+	if (result)
+		return result;
+
+	memdesc->hostptr = ioremap(memdesc->physaddr, size);
+
+	if (memdesc->hostptr == NULL) {
+		KGSL_CORE_ERR("ioremap failed\n");
+		kgsl_sharedmem_free(memdesc);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ebimem);
+
+int
+kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
+			uint32_t *dst,
+			unsigned int offsetbytes)
+{
+	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
+	WARN_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);
+
+	if (offsetbytes + sizeof(unsigned int) > memdesc->size)
+		return -ERANGE;
+
+	*dst = readl_relaxed(memdesc->hostptr + offsetbytes);
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_readl);
+
+int
+kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
+			unsigned int offsetbytes,
+			uint32_t src)
+{
+	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
+	BUG_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);
+
+	kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes,
+		src, sizeof(uint));
+	writel_relaxed(src, memdesc->hostptr + offsetbytes);
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_writel);
+
+int
+kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
+			unsigned int value, unsigned int sizebytes)
+{
+	BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
+	BUG_ON(offsetbytes + sizebytes > memdesc->size);
+
+	kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes, value,
+		sizebytes);
+	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
+	return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_set);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
new file mode 100644
index 0000000..9e57e78
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -0,0 +1,114 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_SHAREDMEM_H
+#define __KGSL_SHAREDMEM_H
+
+#include <linux/dma-mapping.h>
+
+struct kgsl_device;
+struct kgsl_process_private;
+
+#define KGSL_CACHE_OP_INV       0x01
+#define KGSL_CACHE_OP_FLUSH     0x02
+#define KGSL_CACHE_OP_CLEAN     0x03
+
+/** Set if the memdesc describes cached memory */
+#define KGSL_MEMFLAGS_CACHED    0x00000001
+
+struct kgsl_memdesc_ops {
+	unsigned long (*physaddr)(struct kgsl_memdesc *, unsigned int);
+	void (*outer_cache)(struct kgsl_memdesc *, int);
+	int (*vmflags)(struct kgsl_memdesc *);
+	int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
+		       struct vm_fault *);
+	void (*free)(struct kgsl_memdesc *memdesc);
+};
+
+extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;
+extern struct kgsl_memdesc_ops kgsl_contiguous_ops;
+extern struct kgsl_memdesc_ops kgsl_userptr_ops;
+
+int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
+			   struct kgsl_pagetable *pagetable, size_t size);
+
+int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
+				struct kgsl_pagetable *pagetable,
+				size_t size, int flags);
+
+int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);
+
+int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
+			     struct kgsl_pagetable *pagetable,
+			     size_t size, int flags);
+
+int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+			struct kgsl_pagetable *pagetable,
+			size_t size);
+
+void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
+
+int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
+			uint32_t *dst,
+			unsigned int offsetbytes);
+
+int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
+			unsigned int offsetbytes,
+			uint32_t src);
+
+int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
+			unsigned int offsetbytes, unsigned int value,
+			unsigned int sizebytes);
+
+void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);
+
+void kgsl_process_init_sysfs(struct kgsl_process_private *private);
+void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
+
+int kgsl_sharedmem_init_sysfs(void);
+void kgsl_sharedmem_uninit_sysfs(void);
+
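+/* Use vmalloc-backed memory when the GPU MMU is enabled; otherwise fall
+ * back to contiguous EBI memory. */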
+static inline int
+kgsl_allocate(struct kgsl_memdesc *memdesc,
+		struct kgsl_pagetable *pagetable, size_t size)
+{
+#ifdef CONFIG_MSM_KGSL_MMU
+	return kgsl_sharedmem_vmalloc(memdesc, pagetable, size);
+#else
+	return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+#endif
+}
+
+static inline int
+kgsl_allocate_user(struct kgsl_memdesc *memdesc,
+		struct kgsl_pagetable *pagetable,
+		size_t size, unsigned int flags)
+{
+#ifdef CONFIG_MSM_KGSL_MMU
+	return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags);
+#else
+	return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size, flags);
+#endif
+}
+
+static inline int
+kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
+{
+	int ret  = kgsl_sharedmem_alloc_coherent(memdesc, size);
+#ifndef CONFIG_MSM_KGSL_MMU
+	if (!ret)
+		memdesc->gpuaddr = memdesc->physaddr;
+#endif
+	return ret;
+}
+
+#endif /* __KGSL_SHAREDMEM_H */
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
new file mode 100644
index 0000000..2d3f5bd
--- /dev/null
+++ b/drivers/gpu/msm/z180.c
@@ -0,0 +1,938 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/uaccess.h>
+
+#include "kgsl.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_sharedmem.h"
+
+#include "z180.h"
+#include "z180_reg.h"
+
+#define DRIVER_VERSION_MAJOR   3
+#define DRIVER_VERSION_MINOR   1
+
+#define Z180_DEVICE(device) \
+		KGSL_CONTAINER_OF(device, struct z180_device, dev)
+
+#define GSL_VGC_INT_MASK \
+	 (REG_VGC_IRQSTATUS__MH_MASK | \
+	  REG_VGC_IRQSTATUS__G2D_MASK | \
+	  REG_VGC_IRQSTATUS__FIFO_MASK)
+
+#define VGV3_NEXTCMD_JUMP        0x01
+
+#define VGV3_NEXTCMD_NEXTCMD_FSHIFT 12
+#define VGV3_NEXTCMD_NEXTCMD_FMASK 0x7
+
+#define VGV3_CONTROL_MARKADD_FSHIFT 0
+#define VGV3_CONTROL_MARKADD_FMASK 0xfff
+
+#define Z180_PACKET_SIZE 15
+#define Z180_MARKER_SIZE 10
+#define Z180_CALL_CMD     0x1000
+#define Z180_MARKER_CMD   0x8000
+#define Z180_STREAM_END_CMD 0x9000
+#define Z180_STREAM_PACKET 0x7C000176
+#define Z180_STREAM_PACKET_CALL 0x7C000275
+#define Z180_PACKET_COUNT 8
+#define Z180_RB_SIZE (Z180_PACKET_SIZE*Z180_PACKET_COUNT \
+			  *sizeof(uint32_t))
+
+#define NUMTEXUNITS             4
+#define TEXUNITREGCOUNT         25
+#define VG_REGCOUNT             0x39
+
+#define PACKETSIZE_BEGIN        3
+#define PACKETSIZE_G2DCOLOR     2
+#define PACKETSIZE_TEXUNIT      (TEXUNITREGCOUNT * 2)
+#define PACKETSIZE_REG          (VG_REGCOUNT * 2)
+#define PACKETSIZE_STATE        (PACKETSIZE_TEXUNIT * NUMTEXUNITS + \
+				 PACKETSIZE_REG + PACKETSIZE_BEGIN + \
+				 PACKETSIZE_G2DCOLOR)
+#define PACKETSIZE_STATESTREAM  (ALIGN((PACKETSIZE_STATE * \
+				 sizeof(unsigned int)), 32) / \
+				 sizeof(unsigned int))
+
+#define Z180_INVALID_CONTEXT UINT_MAX
+
+/* z180 MH arbiter config*/
+#define Z180_CFG_MHARB \
+	(0x10 \
+		| (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
+		| (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
+		| (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
+		| (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
+		| (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
+
+#define Z180_TIMESTAMP_EPSILON 20000
+#define Z180_IDLE_COUNT_MAX 1000000
+
+enum z180_cmdwindow_type {
+	Z180_CMDWINDOW_2D = 0x00000000,
+	Z180_CMDWINDOW_MMU = 0x00000002,
+};
+
+#define Z180_CMDWINDOW_TARGET_MASK		0x000000FF
+#define Z180_CMDWINDOW_ADDR_MASK		0x00FFFF00
+#define Z180_CMDWINDOW_TARGET_SHIFT		0
+#define Z180_CMDWINDOW_ADDR_SHIFT		8
+
+static int z180_start(struct kgsl_device *device, unsigned int init_ram);
+static int z180_stop(struct kgsl_device *device);
+static int z180_wait(struct kgsl_device *device,
+				unsigned int timestamp,
+				unsigned int msecs);
+static void z180_regread(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int *value);
+static void z180_regwrite(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int value);
+static void z180_cmdwindow_write(struct kgsl_device *device,
+				unsigned int addr,
+				unsigned int data);
+
+#define Z180_MMU_CONFIG					     \
+	(0x01							     \
+	| (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT)   \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT)   \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT)  \
+	| (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \
+	| (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \
+	| (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT)   \
+	| (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
+
+static const struct kgsl_functable z180_functable;
+
+static struct z180_device device_2d0 = {
+	.dev = {
+		.name = DEVICE_2D0_NAME,
+		.id = KGSL_DEVICE_2D0,
+		.ver_major = DRIVER_VERSION_MAJOR,
+		.ver_minor = DRIVER_VERSION_MINOR,
+		.mmu = {
+			.config = Z180_MMU_CONFIG,
+			/* turn off memory protection unit by setting
+			   acceptable physical address range to include
+			   all pages. */
+			.mpu_base = 0x00000000,
+			.mpu_range =  0xFFFFF000,
+		},
+		.pwrctrl = {
+			.regulator_name = "fs_gfx2d0",
+			.irq_name = KGSL_2D0_IRQ,
+		},
+		.mutex = __MUTEX_INITIALIZER(device_2d0.dev.mutex),
+		.state = KGSL_STATE_INIT,
+		.active_cnt = 0,
+		.iomemname = KGSL_2D0_REG_MEMORY,
+		.ftbl = &z180_functable,
+		.display_off = {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+			.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+			.suspend = kgsl_early_suspend_driver,
+			.resume = kgsl_late_resume_driver,
+#endif
+		},
+	},
+};
+
+static struct z180_device device_2d1 = {
+	.dev = {
+		.name = DEVICE_2D1_NAME,
+		.id = KGSL_DEVICE_2D1,
+		.ver_major = DRIVER_VERSION_MAJOR,
+		.ver_minor = DRIVER_VERSION_MINOR,
+		.mmu = {
+			.config = Z180_MMU_CONFIG,
+			/* turn off memory protection unit by setting
+			   acceptable physical address range to include
+			   all pages. */
+			.mpu_base = 0x00000000,
+			.mpu_range =  0xFFFFF000,
+		},
+		.pwrctrl = {
+			.regulator_name = "fs_gfx2d1",
+			.irq_name = KGSL_2D1_IRQ,
+		},
+		.mutex = __MUTEX_INITIALIZER(device_2d1.dev.mutex),
+		.state = KGSL_STATE_INIT,
+		.active_cnt = 0,
+		.iomemname = KGSL_2D1_REG_MEMORY,
+		.ftbl = &z180_functable,
+		.display_off = {
+#ifdef CONFIG_HAS_EARLYSUSPEND
+			.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+			.suspend = kgsl_early_suspend_driver,
+			.resume = kgsl_late_resume_driver,
+#endif
+		},
+	},
+};
+
+static irqreturn_t z180_isr(int irq, void *data)
+{
+	irqreturn_t result = IRQ_NONE;
+	unsigned int status;
+	struct kgsl_device *device = (struct kgsl_device *) data;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	z180_regread(device, ADDR_VGC_IRQSTATUS >> 2, &status);
+
+	if (status & GSL_VGC_INT_MASK) {
+		z180_regwrite(device,
+			ADDR_VGC_IRQSTATUS >> 2, status & GSL_VGC_INT_MASK);
+
+		result = IRQ_HANDLED;
+
+		if (status & REG_VGC_IRQSTATUS__FIFO_MASK)
+			KGSL_DRV_ERR(device, "z180 fifo interrupt\n");
+		if (status & REG_VGC_IRQSTATUS__MH_MASK)
+			kgsl_mh_intrcallback(device);
+		if (status & REG_VGC_IRQSTATUS__G2D_MASK) {
+			unsigned int count;
+
+			z180_regread(device,
+					 ADDR_VGC_IRQ_ACTIVE_CNT >> 2,
+					 &count);
+
+			count >>= 8;
+			count &= 255;
+			z180_dev->timestamp += count;
+
+			wake_up_interruptible(&device->wait_queue);
+
+			atomic_notifier_call_chain(
+				&(device->ts_notifier_list),
+				device->id, NULL);
+		}
+	}
+
+	if ((device->pwrctrl.nap_allowed == true) &&
+		(device->requested_state == KGSL_STATE_NONE)) {
+		device->requested_state = KGSL_STATE_NAP;
+		queue_work(device->work_queue, &device->idle_check_ws);
+	}
+	mod_timer(&device->idle_timer,
+			jiffies + device->pwrctrl.interval_timeout);
+
+	return result;
+}
+
+static int z180_cleanup_pt(struct kgsl_device *device,
+			       struct kgsl_pagetable *pagetable)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	kgsl_mmu_unmap(pagetable, &device->mmu.dummyspace);
+
+	kgsl_mmu_unmap(pagetable, &device->memstore);
+
+	kgsl_mmu_unmap(pagetable, &z180_dev->ringbuffer.cmdbufdesc);
+
+	return 0;
+}
+
+static int z180_setup_pt(struct kgsl_device *device,
+			     struct kgsl_pagetable *pagetable)
+{
+	int result = 0;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	result = kgsl_mmu_map_global(pagetable, &device->mmu.dummyspace,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+	if (result)
+		goto error;
+
+	result = kgsl_mmu_map_global(pagetable, &device->memstore,
+				     GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+	if (result)
+		goto error_unmap_dummy;
+
+	result = kgsl_mmu_map_global(pagetable,
+				     &z180_dev->ringbuffer.cmdbufdesc,
+				     GSL_PT_PAGE_RV);
+	if (result)
+		goto error_unmap_memstore;
+	return result;
+
+error_unmap_dummy:
+	kgsl_mmu_unmap(pagetable, &device->mmu.dummyspace);
+
+error_unmap_memstore:
+	kgsl_mmu_unmap(pagetable, &device->memstore);
+
+error:
+	return result;
+}
+
+static inline unsigned int rb_offset(unsigned int index)
+{
+	return index*sizeof(unsigned int)*(Z180_PACKET_SIZE);
+}
+
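+/* Write a pair of marker packets into the ringbuffer slot at 'index'. */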
+static void addmarker(struct z180_ringbuffer *rb, unsigned int index)
+{
+	char *ptr = (char *)(rb->cmdbufdesc.hostptr);
+	unsigned int *p = (unsigned int *)(ptr + rb_offset(index));
+
+	*p++ = Z180_STREAM_PACKET;
+	*p++ = (Z180_MARKER_CMD | 5);
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = Z180_STREAM_PACKET;
+	*p++ = 5;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+}
+
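+/* Write a stream packet after the marker in slot 'index' that calls into
+ * the client command buffer at 'cmd'. */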
+static void addcmd(struct z180_ringbuffer *rb, unsigned int index,
+			unsigned int cmd, unsigned int nextcnt)
+{
+	char *ptr = (char *)(rb->cmdbufdesc.hostptr);
+	unsigned int *p = (unsigned int *)(ptr + (rb_offset(index)
+			   + (Z180_MARKER_SIZE * sizeof(unsigned int))));
+
+	*p++ = Z180_STREAM_PACKET_CALL;
+	*p++ = cmd;
+	*p++ = Z180_CALL_CMD | nextcnt;
+	*p++ = ADDR_VGV3_LAST << 24;
+	*p++ = ADDR_VGV3_LAST << 24;
+}
+
+static void z180_cmdstream_start(struct kgsl_device *device)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	unsigned int cmd = VGV3_NEXTCMD_JUMP << VGV3_NEXTCMD_NEXTCMD_FSHIFT;
+
+	z180_dev->timestamp = 0;
+	z180_dev->current_timestamp = 0;
+
+	addmarker(&z180_dev->ringbuffer, 0);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_MODE, 4);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_NEXTADDR,
+			z180_dev->ringbuffer.cmdbufdesc.gpuaddr);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_NEXTCMD, cmd | 5);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_WRITEADDR,
+			device->memstore.gpuaddr);
+
+	cmd = (int)(((1) & VGV3_CONTROL_MARKADD_FMASK)
+			<< VGV3_CONTROL_MARKADD_FSHIFT);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
+}
+
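+/* There is room in the ringbuffer as long as fewer than Z180_PACKET_COUNT
+ * commands have been issued but not yet retired. */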
+static int room_in_rb(struct z180_device *device)
+{
+	int ts_diff;
+
+	ts_diff = device->current_timestamp - device->timestamp;
+
+	return ts_diff < Z180_PACKET_COUNT;
+}
+
+static int z180_idle(struct kgsl_device *device, unsigned int timeout)
+{
+	int status = 0;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	if (z180_dev->current_timestamp > z180_dev->timestamp)
+		status = z180_wait(device, z180_dev->current_timestamp,
+					timeout);
+
+	if (status)
+		KGSL_DRV_ERR(device, "z180_waittimestamp() timed out\n");
+
+	return status;
+}
+
+static void z180_setstate(struct kgsl_device *device, uint32_t flags)
+{
+	kgsl_default_setstate(device, flags);
+}
+
+int
+z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
+			struct kgsl_context *context,
+			struct kgsl_ibdesc *ibdesc,
+			unsigned int numibs,
+			uint32_t *timestamp,
+			unsigned int ctrl)
+{
+	unsigned int result = 0;
+	unsigned int ofs        = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
+	unsigned int cnt        = 5;
+	unsigned int nextaddr   = 0;
+	unsigned int index	= 0;
+	unsigned int nextindex;
+	unsigned int nextcnt    = Z180_STREAM_END_CMD | 5;
+	struct kgsl_memdesc tmp = {0};
+	unsigned int cmd;
+	struct kgsl_device *device = dev_priv->device;
+	struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	unsigned int sizedwords;
+
+	if (device->state & KGSL_STATE_HUNG) {
+		result = -EINVAL;
+		goto error;
+	}
+	if (numibs != 1) {
+		KGSL_DRV_ERR(device, "Invalid number of ibs: %d\n", numibs);
+		result = -EINVAL;
+		goto error;
+	}
+	cmd = ibdesc[0].gpuaddr;
+	sizedwords = ibdesc[0].sizedwords;
+
+	tmp.hostptr = (void *)*timestamp;
+
+	KGSL_CMD_INFO(device, "ctxt %d ibaddr 0x%08x sizedwords %d\n",
+		context->id, cmd, sizedwords);
+	/* context switch */
+	if ((context->id != (int)z180_dev->ringbuffer.prevctx) ||
+	    (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
+		KGSL_CMD_INFO(device, "context switch %d -> %d\n",
+			context->id, z180_dev->ringbuffer.prevctx);
+		kgsl_mmu_setstate(device, pagetable);
+		cnt = PACKETSIZE_STATESTREAM;
+		ofs = 0;
+	}
+	z180_setstate(device, kgsl_pt_get_flags(device->mmu.hwpagetable,
+						    device->id));
+
+	result = wait_event_interruptible_timeout(device->wait_queue,
+				  room_in_rb(z180_dev),
+				  msecs_to_jiffies(KGSL_TIMEOUT_DEFAULT));
+	if (result < 0) {
+		KGSL_CMD_ERR(device, "wait_event_interruptible_timeout "
+			"failed: %d\n", result);
+		goto error;
+	}
+	result = 0;
+
+	index = z180_dev->current_timestamp % Z180_PACKET_COUNT;
+	z180_dev->current_timestamp++;
+	nextindex = z180_dev->current_timestamp % Z180_PACKET_COUNT;
+	*timestamp = z180_dev->current_timestamp;
+
+	z180_dev->ringbuffer.prevctx = context->id;
+
+	addcmd(&z180_dev->ringbuffer, index, cmd + ofs, cnt);
+
+	/* Make sure the next ringbuffer entry has a marker */
+	addmarker(&z180_dev->ringbuffer, nextindex);
+
+	nextaddr = z180_dev->ringbuffer.cmdbufdesc.gpuaddr
+		+ rb_offset(nextindex);
+
+	tmp.hostptr = (void *)(tmp.hostptr +
+			(sizedwords * sizeof(unsigned int)));
+	tmp.size = 12;
+
+	kgsl_sharedmem_writel(&tmp, 4, nextaddr);
+	kgsl_sharedmem_writel(&tmp, 8, nextcnt);
+
+	/* sync memory before activating the hardware for the new command */
+	mb();
+
+	cmd = (int)(((2) & VGV3_CONTROL_MARKADD_FMASK)
+		<< VGV3_CONTROL_MARKADD_FSHIFT);
+
+	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
+	z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
+error:
+	return result;
+}
+
+static int z180_ringbuffer_init(struct kgsl_device *device)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
+	z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+	return kgsl_allocate_contiguous(&z180_dev->ringbuffer.cmdbufdesc,
+		Z180_RB_SIZE);
+}
+
+static void z180_ringbuffer_close(struct kgsl_device *device)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	kgsl_sharedmem_free(&z180_dev->ringbuffer.cmdbufdesc);
+	memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
+}
+
+static int __devinit z180_probe(struct platform_device *pdev)
+{
+	int status = -EINVAL;
+	struct kgsl_device *device = NULL;
+	struct z180_device *z180_dev;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+	device->parentdev = &pdev->dev;
+
+	z180_dev = Z180_DEVICE(device);
+	spin_lock_init(&z180_dev->cmdwin_lock);
+
+	status = z180_ringbuffer_init(device);
+	if (status != 0)
+		goto error;
+
+	status = kgsl_device_platform_probe(device, z180_isr);
+	if (status)
+		goto error_close_ringbuffer;
+
+	return status;
+
+error_close_ringbuffer:
+	z180_ringbuffer_close(device);
+error:
+	device->parentdev = NULL;
+	return status;
+}
+
+static int __devexit z180_remove(struct platform_device *pdev)
+{
+	struct kgsl_device *device = NULL;
+
+	device = (struct kgsl_device *)pdev->id_entry->driver_data;
+
+	kgsl_device_platform_remove(device);
+
+	z180_ringbuffer_close(device);
+
+	return 0;
+}
+
+static int z180_start(struct kgsl_device *device, unsigned int init_ram)
+{
+	int status = 0;
+
+	device->state = KGSL_STATE_INIT;
+	device->requested_state = KGSL_STATE_NONE;
+	KGSL_PWR_WARN(device, "state -> INIT, device %d\n", device->id);
+
+	kgsl_pwrctrl_enable(device);
+
+	/* Set up MH arbiter.  MH offsets are considered to be dword
+	 * based, therefore no down shift. */
+	z180_regwrite(device, ADDR_MH_ARBITER_CONFIG, Z180_CFG_MHARB);
+
+	z180_regwrite(device, ADDR_MH_CLNT_INTF_CTRL_CONFIG1, 0x00030F27);
+	z180_regwrite(device, ADDR_MH_CLNT_INTF_CTRL_CONFIG2, 0x004B274F);
+
+	/* Set interrupts to 0 to ensure a good state */
+	z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0x0);
+
+	status = kgsl_mmu_start(device);
+	if (status)
+		goto error_clk_off;
+
+	z180_cmdstream_start(device);
+
+	mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_IRQ_ON);
+	return 0;
+
+error_clk_off:
+	z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
+	kgsl_pwrctrl_disable(device);
+	return status;
+}
+
+static int z180_stop(struct kgsl_device *device)
+{
+	z180_idle(device, KGSL_TIMEOUT_DEFAULT);
+
+	del_timer(&device->idle_timer);
+
+	kgsl_mmu_stop(device);
+
+	/* Disable the clocks before the power rail. */
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+	kgsl_pwrctrl_disable(device);
+
+	return 0;
+}
+
+static int z180_getproperty(struct kgsl_device *device,
+				enum kgsl_property_type type,
+				void *value,
+				unsigned int sizebytes)
+{
+	int status = -EINVAL;
+
+	switch (type) {
+	case KGSL_PROP_DEVICE_INFO:
+	{
+		struct kgsl_devinfo devinfo;
+
+		if (sizebytes != sizeof(devinfo)) {
+			status = -EINVAL;
+			break;
+		}
+
+		memset(&devinfo, 0, sizeof(devinfo));
+		devinfo.device_id = device->id+1;
+		devinfo.chip_id = 0;
+		devinfo.mmu_enabled = kgsl_mmu_enabled();
+
+		if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
+				0) {
+			status = -EFAULT;
+			break;
+		}
+		status = 0;
+	}
+	break;
+	case KGSL_PROP_MMU_ENABLE:
+		{
+#ifdef CONFIG_MSM_KGSL_MMU
+			int mmuProp = 1;
+#else
+			int mmuProp = 0;
+#endif
+			if (sizebytes != sizeof(int)) {
+				status = -EINVAL;
+				break;
+			}
+			if (copy_to_user(value, &mmuProp, sizeof(mmuProp))) {
+				status = -EFAULT;
+				break;
+			}
+			status = 0;
+		}
+		break;
+
+	default:
+		KGSL_DRV_ERR(device, "invalid property: %d\n", type);
+		status = -EINVAL;
+	}
+	return status;
+}
+
+static unsigned int z180_isidle(struct kgsl_device *device)
+{
+	int status = false;
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	int timestamp = z180_dev->timestamp;
+
+	if (timestamp == z180_dev->current_timestamp)
+		status = true;
+
+	return status;
+}
+
+static int z180_suspend_context(struct kgsl_device *device)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+
+	return 0;
+}
+
+/* Not all Z180 registers are directly accessible.
+ * The _z180_(read|write)_simple functions below handle the ones that are.
+ */
+static void _z180_regread_simple(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int *value)
+{
+	unsigned int *reg;
+
+	BUG_ON(offsetwords * sizeof(uint32_t) >= device->regspace.sizebytes);
+
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+			+ (offsetwords << 2));
+
+	/* ensure this read finishes before the next one,
+	 * i.e. act like normal readl() */
+	*value = __raw_readl(reg);
+	rmb();
+
+}
+
+static void _z180_regwrite_simple(struct kgsl_device *device,
+				 unsigned int offsetwords,
+				 unsigned int value)
+{
+	unsigned int *reg;
+
+	BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes);
+
+	reg = (unsigned int *)(device->regspace.mmio_virt_base
+			+ (offsetwords << 2));
+	kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
+	/* ensure previous writes post before this one,
+	 * i.e. act like normal writel() */
+	wmb();
+	__raw_writel(value, reg);
+}
+
+
+/* The MH registers must be accessed via a two-step (read|write) process.
+ * These registers may be accessed from interrupt context during the
+ * handling of MH or MMU error interrupts, so a spin lock is used to
+ * ensure that the two-step sequence is not interrupted.
+ */
+static void _z180_regread_mmu(struct kgsl_device *device,
+			     unsigned int offsetwords,
+			     unsigned int *value)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&z180_dev->cmdwin_lock, flags);
+	_z180_regwrite_simple(device, (ADDR_VGC_MH_READ_ADDR >> 2),
+				offsetwords);
+	_z180_regread_simple(device, (ADDR_VGC_MH_DATA_ADDR >> 2), value);
+	spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags);
+}
+
+
+static void _z180_regwrite_mmu(struct kgsl_device *device,
+			      unsigned int offsetwords,
+			      unsigned int value)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	unsigned int cmdwinaddr;
+	unsigned long flags;
+
+	cmdwinaddr = ((Z180_CMDWINDOW_MMU << Z180_CMDWINDOW_TARGET_SHIFT) &
+			Z180_CMDWINDOW_TARGET_MASK);
+	cmdwinaddr |= ((offsetwords << Z180_CMDWINDOW_ADDR_SHIFT) &
+			Z180_CMDWINDOW_ADDR_MASK);
+
+	spin_lock_irqsave(&z180_dev->cmdwin_lock, flags);
+	_z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2,
+			     cmdwinaddr);
+	_z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2, value);
+	spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags);
+}
+
+/* The rest of the driver doesn't need to care whether it is accessing MMU
+ * registers or normal registers, so route the access appropriately here.
+ */
+static void z180_regread(struct kgsl_device *device,
+			unsigned int offsetwords,
+			unsigned int *value)
+{
+	if (!in_interrupt())
+		kgsl_pre_hwaccess(device);
+
+	if ((offsetwords >= ADDR_MH_ARBITER_CONFIG &&
+	     offsetwords <= ADDR_MH_AXI_HALT_CONTROL) ||
+	    (offsetwords >= MH_MMU_CONFIG &&
+	     offsetwords <= MH_MMU_MPU_END)) {
+		_z180_regread_mmu(device, offsetwords, value);
+	} else {
+		_z180_regread_simple(device, offsetwords, value);
+	}
+}
+
+static void z180_regwrite(struct kgsl_device *device,
+				unsigned int offsetwords,
+				unsigned int value)
+{
+	if (!in_interrupt())
+		kgsl_pre_hwaccess(device);
+
+	if ((offsetwords >= ADDR_MH_ARBITER_CONFIG &&
+	     offsetwords <= ADDR_MH_CLNT_INTF_CTRL_CONFIG2) ||
+	    (offsetwords >= MH_MMU_CONFIG &&
+	     offsetwords <= MH_MMU_MPU_END)) {
+		_z180_regwrite_mmu(device, offsetwords, value);
+	} else {
+		_z180_regwrite_simple(device, offsetwords, value);
+	}
+}
+
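+/* 2D core registers are programmed through the command window: the packed
+ * target address is written to ADDR_VGC_COMMANDSTREAM, followed by the data.
+ */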
+static void z180_cmdwindow_write(struct kgsl_device *device,
+		unsigned int addr, unsigned int data)
+{
+	unsigned int cmdwinaddr;
+
+	cmdwinaddr = ((Z180_CMDWINDOW_2D << Z180_CMDWINDOW_TARGET_SHIFT) &
+			Z180_CMDWINDOW_TARGET_MASK);
+	cmdwinaddr |= ((addr << Z180_CMDWINDOW_ADDR_SHIFT) &
+			Z180_CMDWINDOW_ADDR_MASK);
+
+	z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, cmdwinaddr);
+	z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, data);
+}
+
+static unsigned int z180_readtimestamp(struct kgsl_device *device,
+			     enum kgsl_timestamp_type type)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+	/* get current EOP timestamp */
+	return z180_dev->timestamp;
+}
+
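+/* Block until the given timestamp retires.  The device mutex is dropped
+ * while sleeping so other ioctls can make progress.
+ */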
+static int z180_waittimestamp(struct kgsl_device *device,
+				unsigned int timestamp,
+				unsigned int msecs)
+{
+	int status = -EINVAL;
+	mutex_unlock(&device->mutex);
+	status = z180_wait(device, timestamp, msecs);
+	mutex_lock(&device->mutex);
+
+	return status;
+}
+
+static int z180_wait(struct kgsl_device *device,
+				unsigned int timestamp,
+				unsigned int msecs)
+{
+	int status = -EINVAL;
+	long timeout = 0;
+
+	timeout = wait_io_event_interruptible_timeout(
+			device->wait_queue,
+			kgsl_check_timestamp(device, timestamp),
+			msecs_to_jiffies(msecs));
+
+	if (timeout > 0)
+		status = 0;
+	else if (timeout == 0) {
+		status = -ETIMEDOUT;
+		device->state = KGSL_STATE_HUNG;
+		KGSL_PWR_WARN(device, "state -> HUNG, device %d\n", device->id);
+	} else
+		status = timeout;
+
+	return status;
+}
+
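+/* Idle the hardware and, if the context being destroyed was the last one
+ * active on the ringbuffer, switch back to the default pagetable.
+ */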
+static void
+z180_drawctxt_destroy(struct kgsl_device *device,
+			  struct kgsl_context *context)
+{
+	struct z180_device *z180_dev = Z180_DEVICE(device);
+
+	z180_idle(device, KGSL_TIMEOUT_DEFAULT);
+
+	if (z180_dev->ringbuffer.prevctx == context->id) {
+		z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+		device->mmu.hwpagetable = device->mmu.defaultpagetable;
+		kgsl_setstate(device, KGSL_MMUFLAGS_PTUPDATE);
+	}
+}
+
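+/* Busy statistics are not tracked for the Z180, so report zeroed totals. */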
+static void z180_power_stats(struct kgsl_device *device,
+			    struct kgsl_power_stats *stats)
+{
+	stats->total_time = 0;
+	stats->busy_time = 0;
+}
+
+static void z180_irqctrl(struct kgsl_device *device, int state)
+{
+	/* Control interrupts for Z180 and the Z180 MMU */
+
+	if (state) {
+		z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 3);
+		z180_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
+	} else {
+		z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
+		z180_regwrite(device, MH_INTERRUPT_MASK, 0);
+	}
+}
+
+static const struct kgsl_functable z180_functable = {
+	/* Mandatory functions */
+	.regread = z180_regread,
+	.regwrite = z180_regwrite,
+	.idle = z180_idle,
+	.isidle = z180_isidle,
+	.suspend_context = z180_suspend_context,
+	.start = z180_start,
+	.stop = z180_stop,
+	.getproperty = z180_getproperty,
+	.waittimestamp = z180_waittimestamp,
+	.readtimestamp = z180_readtimestamp,
+	.issueibcmds = z180_cmdstream_issueibcmds,
+	.setup_pt = z180_setup_pt,
+	.cleanup_pt = z180_cleanup_pt,
+	.power_stats = z180_power_stats,
+	.irqctrl = z180_irqctrl,
+	/* Optional functions */
+	.setstate = z180_setstate,
+	.drawctxt_create = NULL,
+	.drawctxt_destroy = z180_drawctxt_destroy,
+	.ioctl = NULL,
+};
+
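+/* Each 2D core (kgsl-2d0, kgsl-2d1) is probed as its own platform device;
+ * the driver_data field carries the matching kgsl device instance.
+ */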
+static struct platform_device_id z180_id_table[] = {
+	{ DEVICE_2D0_NAME, (kernel_ulong_t)&device_2d0.dev, },
+	{ DEVICE_2D1_NAME, (kernel_ulong_t)&device_2d1.dev, },
+	{ },
+};
+MODULE_DEVICE_TABLE(platform, z180_id_table);
+
+static struct platform_driver z180_platform_driver = {
+	.probe = z180_probe,
+	.remove = __devexit_p(z180_remove),
+	.suspend = kgsl_suspend_driver,
+	.resume = kgsl_resume_driver,
+	.id_table = z180_id_table,
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = DEVICE_2D_NAME,
+		.pm = &kgsl_pm_ops,
+	}
+};
+
+static int __init kgsl_2d_init(void)
+{
+	return platform_driver_register(&z180_platform_driver);
+}
+
+static void __exit kgsl_2d_exit(void)
+{
+	platform_driver_unregister(&z180_platform_driver);
+}
+
+module_init(kgsl_2d_init);
+module_exit(kgsl_2d_exit);
+
+MODULE_DESCRIPTION("2D Graphics driver");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kgsl_2d");
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
new file mode 100644
index 0000000..28b1cc6
--- /dev/null
+++ b/drivers/gpu/msm/z180.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __Z180_H
+#define __Z180_H
+
+#include "kgsl_device.h"
+
+#define DEVICE_2D_NAME "kgsl-2d"
+#define DEVICE_2D0_NAME "kgsl-2d0"
+#define DEVICE_2D1_NAME "kgsl-2d1"
+
+struct z180_ringbuffer {
+	unsigned int prevctx;	/* context id of the last submission */
+	struct kgsl_memdesc      cmdbufdesc;
+};
+
+struct z180_device {
+	struct kgsl_device dev;    /* Must be first field in this struct */
+	int current_timestamp;	/* last timestamp issued to the hardware */
+	int timestamp;		/* last timestamp retired by the hardware (EOP) */
+	struct z180_ringbuffer ringbuffer;
+	spinlock_t cmdwin_lock;
+};
+
+#endif /* __Z180_H */
diff --git a/drivers/gpu/msm/z180_reg.h b/drivers/gpu/msm/z180_reg.h
new file mode 100644
index 0000000..a3b0412
--- /dev/null
+++ b/drivers/gpu/msm/z180_reg.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __Z180_REG_H
+#define __Z180_REG_H
+
+#define REG_VGC_IRQSTATUS__MH_MASK                         0x00000001L
+#define REG_VGC_IRQSTATUS__G2D_MASK                        0x00000002L
+#define REG_VGC_IRQSTATUS__FIFO_MASK                       0x00000004L
+
+#define	MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT    0x00000006
+#define	MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT            0x00000007
+#define	MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT       0x00000008
+#define	MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT           0x00000009
+#define	MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT                0x0000000a
+#define	MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT        0x0000000d
+#define	MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT       0x0000000e
+#define	MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT   0x0000000f
+#define	MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT          0x00000010
+#define	MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT           0x00000016
+#define	MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT          0x00000017
+#define	MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT           0x00000018
+#define	MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT           0x00000019
+#define	MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT           0x0000001a
+
+#define ADDR_MH_ARBITER_CONFIG           0x0A40
+#define ADDR_MH_AXI_HALT_CONTROL         0x0A50
+#define ADDR_MH_CLNT_INTF_CTRL_CONFIG1   0x0A54
+#define ADDR_MH_CLNT_INTF_CTRL_CONFIG2   0x0A55
+#define ADDR_VGC_MH_READ_ADDR            0x0510
+#define ADDR_VGC_MH_DATA_ADDR            0x0518
+#define ADDR_VGC_COMMANDSTREAM           0x0000
+#define ADDR_VGC_IRQENABLE               0x0438
+#define ADDR_VGC_IRQSTATUS               0x0418
+#define ADDR_VGC_IRQ_ACTIVE_CNT          0x04E0
+#define ADDR_VGC_MMUCOMMANDSTREAM        0x03FC
+#define ADDR_VGV3_CONTROL                0x0070
+#define ADDR_VGV3_LAST                   0x007F
+#define ADDR_VGV3_MODE                   0x0071
+#define ADDR_VGV3_NEXTADDR               0x0075
+#define ADDR_VGV3_NEXTCMD                0x0076
+#define ADDR_VGV3_WRITEADDR              0x0072
+
+#endif /* __Z180_REG_H */