msm: kgsl: Add support for the A3XX family of GPUs

Add support for the A320, the first of the new generation
of Adreno GPUs.

Change-Id: Ic0dedbadd29fbdff8733cd38824594e757eef42d
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 7b8f3e6..04d53c0 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -25,6 +25,7 @@
 	adreno_a2xx.o \
 	adreno_a2xx_trace.o \
 	adreno_a2xx_snapshot.o \
+	adreno_a3xx.o \
 	adreno.o
 
 msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
new file mode 100644
index 0000000..84c83b8
--- /dev/null
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -0,0 +1,453 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _A300_REG_H
+#define _A300_REG_H
+
+/* Interrupt bit positions within RBBM_INT_0 */
+
+#define A3XX_INT_RBBM_GPU_IDLE 0
+#define A3XX_INT_RBBM_AHB_ERROR 1
+#define A3XX_INT_RBBM_REG_TIMEOUT 2
+#define A3XX_INT_RBBM_ME_MS_TIMEOUT 3
+#define A3XX_INT_RBBM_PFP_MS_TIMEOUT 4
+#define A3XX_INT_RBBM_ATB_BUS_OVERFLOW 5
+#define A3XX_INT_VFD_ERROR 6
+#define A3XX_INT_CP_SW_INT 7
+#define A3XX_INT_CP_T0_PACKET_IN_IB 8
+#define A3XX_INT_CP_OPCODE_ERROR 9
+#define A3XX_INT_CP_RESERVED_BIT_ERROR 10
+#define A3XX_INT_CP_HW_FAULT 11
+#define A3XX_INT_CP_DMA 12
+#define A3XX_INT_CP_IB2_INT 13
+#define A3XX_INT_CP_IB1_INT 14
+#define A3XX_INT_CP_RB_INT 15
+#define A3XX_INT_CP_REG_PROTECT_FAULT 16
+#define A3XX_INT_CP_RB_DONE_TS 17
+#define A3XX_INT_CP_VS_DONE_TS 18
+#define A3XX_INT_CP_PS_DONE_TS 19
+#define A3XX_INT_CACHE_FLUSH_TS 20
+#define A3XX_INT_CP_AHB_ERROR_HALT 21
+#define A3XX_INT_MISC_HANG_DETECT 24
+#define A3XX_INT_UCHE_OOB_ACCESS 25
+
+/* Register definitions */
+
+#define A3XX_RBBM_HW_VERSION 0x000
+#define A3XX_RBBM_HW_RELEASE 0x001
+#define A3XX_RBBM_HW_CONFIGURATION 0x002
+#define A3XX_RBBM_SW_RESET_CMD 0x018
+#define A3XX_RBBM_AHB_CTL0 0x020
+#define A3XX_RBBM_AHB_CTL1 0x021
+#define A3XX_RBBM_AHB_CMD 0x022
+#define A3XX_RBBM_AHB_ERROR_STATUS 0x027
+#define A3XX_RBBM_GPR0_CTL 0x02E
+/* This is the same register as on A2XX, just in a different place */
+#define A3XX_RBBM_STATUS 0x030
+#define A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x50
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x51
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x54
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x57
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x5A
+#define A3XX_RBBM_INT_CLEAR_CMD 0x061
+#define A3XX_RBBM_INT_0_MASK 0x063
+#define A3XX_RBBM_INT_0_STATUS 0x064
+#define A3XX_RBBM_GPU_BUSY_MASKED 0x88
+#define A3XX_RBBM_RBBM_CTL 0x100
+#define A3XX_RBBM_PERFCTR_PWR_1_LO 0x0EC
+#define A3XX_RBBM_PERFCTR_PWR_1_HI 0x0ED
+/* Following two are same as on A2XX, just in a different place */
+#define A3XX_CP_PFP_UCODE_ADDR 0x1C9
+#define A3XX_CP_PFP_UCODE_DATA 0x1CA
+#define A3XX_CP_HW_FAULT  0x45C
+#define A3XX_CP_AHB_FAULT 0x54D
+#define A3XX_CP_PROTECT_CTRL 0x45E
+#define A3XX_CP_PROTECT_STATUS 0x45F
+#define A3XX_CP_PROTECT_REG_0 0x460
+#define A3XX_CP_PROTECT_REG_1 0x461
+#define A3XX_CP_PROTECT_REG_2 0x462
+#define A3XX_CP_PROTECT_REG_3 0x463
+#define A3XX_CP_PROTECT_REG_4 0x464
+#define A3XX_CP_PROTECT_REG_5 0x465
+#define A3XX_CP_PROTECT_REG_6 0x466
+#define A3XX_CP_PROTECT_REG_7 0x467
+#define A3XX_CP_PROTECT_REG_8 0x468
+#define A3XX_CP_PROTECT_REG_9 0x469
+#define A3XX_CP_PROTECT_REG_A 0x46A
+#define A3XX_CP_PROTECT_REG_B 0x46B
+#define A3XX_CP_PROTECT_REG_C 0x46C
+#define A3XX_CP_PROTECT_REG_D 0x46D
+#define A3XX_CP_PROTECT_REG_E 0x46E
+#define A3XX_CP_PROTECT_REG_F 0x46F
+#define A3XX_CP_SCRATCH_REG2 0x57A
+#define A3XX_CP_SCRATCH_REG3 0x57B
+#define A3XX_VSC_BIN_SIZE 0xC01
+#define A3XX_VSC_SIZE_ADDRESS 0xC02
+#define A3XX_VSC_PIPE_CONFIG_0 0xC06
+#define A3XX_VSC_PIPE_DATA_ADDRESS_0 0xC07
+#define A3XX_VSC_PIPE_DATA_LENGTH_0 0xC08
+#define A3XX_VSC_PIPE_CONFIG_1 0xC09
+#define A3XX_VSC_PIPE_DATA_ADDRESS_1 0xC0A
+#define A3XX_VSC_PIPE_DATA_LENGTH_1 0xC0B
+#define A3XX_VSC_PIPE_CONFIG_2 0xC0C
+#define A3XX_VSC_PIPE_DATA_ADDRESS_2 0xC0D
+#define A3XX_VSC_PIPE_DATA_LENGTH_2 0xC0E
+#define A3XX_VSC_PIPE_CONFIG_3 0xC0F
+#define A3XX_VSC_PIPE_DATA_ADDRESS_3 0xC10
+#define A3XX_VSC_PIPE_DATA_LENGTH_3 0xC11
+#define A3XX_VSC_PIPE_CONFIG_4 0xC12
+#define A3XX_VSC_PIPE_DATA_ADDRESS_4 0xC13
+#define A3XX_VSC_PIPE_DATA_LENGTH_4 0xC14
+#define A3XX_VSC_PIPE_CONFIG_5 0xC15
+#define A3XX_VSC_PIPE_DATA_ADDRESS_5 0xC16
+#define A3XX_VSC_PIPE_DATA_LENGTH_5 0xC17
+#define A3XX_VSC_PIPE_CONFIG_6 0xC18
+#define A3XX_VSC_PIPE_DATA_ADDRESS_6 0xC19
+#define A3XX_VSC_PIPE_DATA_LENGTH_6 0xC1A
+#define A3XX_VSC_PIPE_CONFIG_7 0xC1B
+#define A3XX_VSC_PIPE_DATA_ADDRESS_7 0xC1C
+#define A3XX_VSC_PIPE_DATA_LENGTH_7 0xC1D
+#define A3XX_GRAS_CL_USER_PLANE_X0 0xCA0
+#define A3XX_GRAS_CL_USER_PLANE_Y0 0xCA1
+#define A3XX_GRAS_CL_USER_PLANE_Z0 0xCA2
+#define A3XX_GRAS_CL_USER_PLANE_W0 0xCA3
+#define A3XX_GRAS_CL_USER_PLANE_X1 0xCA4
+#define A3XX_GRAS_CL_USER_PLANE_Y1 0xCA5
+#define A3XX_GRAS_CL_USER_PLANE_Z1 0xCA6
+#define A3XX_GRAS_CL_USER_PLANE_W1 0xCA7
+#define A3XX_GRAS_CL_USER_PLANE_X2 0xCA8
+#define A3XX_GRAS_CL_USER_PLANE_Y2 0xCA9
+#define A3XX_GRAS_CL_USER_PLANE_Z2 0xCAA
+#define A3XX_GRAS_CL_USER_PLANE_W2 0xCAB
+#define A3XX_GRAS_CL_USER_PLANE_X3 0xCAC
+#define A3XX_GRAS_CL_USER_PLANE_Y3 0xCAD
+#define A3XX_GRAS_CL_USER_PLANE_Z3 0xCAE
+#define A3XX_GRAS_CL_USER_PLANE_W3 0xCAF
+#define A3XX_GRAS_CL_USER_PLANE_X4 0xCB0
+#define A3XX_GRAS_CL_USER_PLANE_Y4 0xCB1
+#define A3XX_GRAS_CL_USER_PLANE_Z4 0xCB2
+#define A3XX_GRAS_CL_USER_PLANE_W4 0xCB3
+#define A3XX_GRAS_CL_USER_PLANE_X5 0xCB4
+#define A3XX_GRAS_CL_USER_PLANE_Y5 0xCB5
+#define A3XX_GRAS_CL_USER_PLANE_Z5 0xCB6
+#define A3XX_GRAS_CL_USER_PLANE_W5 0xCB7
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG 0xEA0
+#define A3XX_GRAS_CL_CLIP_CNTL 0x2040
+#define A3XX_GRAS_CL_GB_CLIP_ADJ 0x2044
+#define A3XX_GRAS_CL_VPORT_XOFFSET 0x2048
+#define A3XX_GRAS_CL_VPORT_ZOFFSET 0x204C
+#define A3XX_GRAS_CL_VPORT_ZSCALE 0x204D
+#define A3XX_GRAS_SU_POINT_MINMAX 0x2068
+#define A3XX_GRAS_SU_POINT_SIZE 0x2069
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x206C
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x206D
+#define A3XX_GRAS_SU_MODE_CONTROL 0x2070
+#define A3XX_GRAS_SC_CONTROL 0x2072
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x2074
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x2075
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x2079
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x207A
+#define A3XX_RB_MODE_CONTROL 0x20C0
+#define A3XX_RB_RENDER_CONTROL 0x20C1
+#define A3XX_RB_MSAA_CONTROL 0x20C2
+#define A3XX_RB_MRT_CONTROL0 0x20C4
+#define A3XX_RB_MRT_BUF_INFO0 0x20C5
+#define A3XX_RB_MRT_BLEND_CONTROL0 0x20C7
+#define A3XX_RB_MRT_BLEND_CONTROL1 0x20CB
+#define A3XX_RB_MRT_BLEND_CONTROL2 0x20CF
+#define A3XX_RB_MRT_BLEND_CONTROL3 0x20D3
+#define A3XX_RB_BLEND_RED 0x20E4
+#define A3XX_RB_COPY_CONTROL 0x20EC
+#define A3XX_RB_COPY_DEST_INFO 0x20EF
+#define A3XX_RB_DEPTH_CONTROL 0x2100
+#define A3XX_RB_STENCIL_CONTROL 0x2104
+#define A3XX_PC_VSTREAM_CONTROL 0x21E4
+#define A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x21EA
+#define A3XX_PC_PRIM_VTX_CNTL 0x21EC
+#define A3XX_PC_RESTART_INDEX 0x21ED
+#define A3XX_HLSQ_CONTROL_0_REG 0x2200
+#define A3XX_HLSQ_VS_CONTROL_REG 0x2204
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x2207
+#define A3XX_HLSQ_CL_NDRANGE_0_REG 0x220A
+#define A3XX_HLSQ_CL_NDRANGE_2_REG 0x220C
+#define A3XX_HLSQ_CL_CONTROL_0_REG 0x2211
+#define A3XX_HLSQ_CL_CONTROL_1_REG 0x2212
+#define A3XX_HLSQ_CL_KERNEL_CONST_REG 0x2214
+#define A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x2215
+#define A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x2217
+#define A3XX_HLSQ_CL_WG_OFFSET_REG 0x221A
+#define A3XX_VFD_CONTROL_0 0x2240
+#define A3XX_VFD_INDEX_MIN 0x2242
+#define A3XX_VFD_FETCH_INSTR_0_0 0x2246
+#define A3XX_VFD_FETCH_INSTR_0_4 0x224E
+#define A3XX_VFD_DECODE_INSTR_0 0x2266
+#define A3XX_VFD_VS_THREADING_THRESHOLD 0x227E
+#define A3XX_VPC_ATTR 0x2280
+#define A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x228B
+#define A3XX_SP_SP_CTRL_REG 0x22C0
+#define A3XX_SP_VS_CTRL_REG0 0x22C4
+#define A3XX_SP_VS_CTRL_REG1 0x22C5
+#define A3XX_SP_VS_PARAM_REG 0x22C6
+#define A3XX_SP_VS_OUT_REG_7 0x22CE
+#define A3XX_SP_VS_VPC_DST_REG_0 0x22D0
+#define A3XX_SP_VS_OBJ_OFFSET_REG 0x22D4
+#define A3XX_SP_VS_PVT_MEM_SIZE_REG 0x22D8
+#define A3XX_SP_VS_LENGTH_REG 0x22DF
+#define A3XX_SP_FS_CTRL_REG0 0x22E0
+#define A3XX_SP_FS_CTRL_REG1 0x22E1
+#define A3XX_SP_FS_OBJ_OFFSET_REG 0x22E2
+#define A3XX_SP_FS_PVT_MEM_SIZE_REG 0x22E6
+#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x22E8
+#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x22E9
+#define A3XX_SP_FS_OUTPUT_REG 0x22EC
+#define A3XX_SP_FS_MRT_REG_0 0x22F0
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_0 0x22F4
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_3 0x22F7
+#define A3XX_SP_FS_LENGTH_REG 0x22FF
+#define A3XX_TPL1_TP_VS_TEX_OFFSET 0x2340
+#define A3XX_TPL1_TP_FS_TEX_OFFSET 0x2342
+#define A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x2343
+#define A3XX_VBIF_FIXED_SORT_EN 0x300C
+#define A3XX_VBIF_FIXED_SORT_SEL0 0x300D
+#define A3XX_VBIF_FIXED_SORT_SEL1 0x300E
+
+/* Bit flags for RBBM_CTL */
+#define RBBM_RBBM_CTL_RESET_PWR_CTR1  (1 << 1)
+#define RBBM_RBBM_CTL_ENABLE_PWR_CTR1  (1 << 17)
+
+/* Various flags used by the context switch code */
+
+#define SP_MULTI 0
+#define SP_BUFFER_MODE 1
+#define SP_TWO_VTX_QUADS 0
+#define SP_PIXEL_BASED 0
+#define SP_R8G8B8A8_UNORM 8
+#define SP_FOUR_PIX_QUADS 1
+
+#define HLSQ_DIRECT 0
+#define HLSQ_BLOCK_ID_SP_VS 4
+#define HLSQ_SP_VS_INSTR 0
+#define HLSQ_SP_FS_INSTR 0
+#define HLSQ_BLOCK_ID_SP_FS 6
+#define HLSQ_TWO_PIX_QUADS 0
+#define HLSQ_TWO_VTX_QUADS 0
+#define HLSQ_BLOCK_ID_TP_TEX 2
+#define HLSQ_TP_TEX_SAMPLERS 0
+#define HLSQ_TP_TEX_MEMOBJ 1
+#define HLSQ_BLOCK_ID_TP_MIPMAP 3
+#define HLSQ_TP_MIPMAP_BASE 1
+#define HLSQ_FOUR_PIX_QUADS 1
+
+#define RB_FACTOR_ONE 1
+#define RB_BLEND_OP_ADD 0
+#define RB_FACTOR_ZERO 0
+#define RB_DITHER_DISABLE 0
+#define RB_DITHER_ALWAYS 1
+#define RB_FRAG_NEVER 0
+#define RB_ENDIAN_NONE 0
+#define RB_R8G8B8A8_UNORM 8
+#define RB_RESOLVE_PASS 2
+#define RB_CLEAR_MODE_RESOLVE 1
+#define RB_TILINGMODE_LINEAR 0
+#define RB_REF_NEVER 0
+#define RB_STENCIL_KEEP 0
+#define RB_RENDERING_PASS 0
+#define RB_TILINGMODE_32X32 2
+
+#define PC_DRAW_TRIANGLES 2
+#define PC_DI_PT_RECTLIST 8
+#define PC_DI_SRC_SEL_AUTO_INDEX 2
+#define PC_DI_INDEX_SIZE_16_BIT 0
+#define PC_DI_IGNORE_VISIBILITY 0
+#define PC_DI_PT_TRILIST 4
+#define PC_DI_SRC_SEL_IMMEDIATE 1
+#define PC_DI_INDEX_SIZE_32_BIT 1
+
+#define UCHE_ENTIRE_CACHE 1
+#define UCHE_OP_INVALIDATE 1
+
+/*
+ * The following are bit field shifts within some of the registers defined
+ * above. These are used in the context switch code in conjunction with the
+ * _SET macro
+ */
+
+#define GRAS_CL_CLIP_CNTL_CLIP_DISABLE 16
+#define GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 12
+#define GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 21
+#define GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 19
+#define GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 20
+#define GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 17
+#define GRAS_CL_VPORT_XSCALE_VPORT_XSCALE 0
+#define GRAS_CL_VPORT_YSCALE_VPORT_YSCALE 0
+#define GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE 0
+#define GRAS_SC_CONTROL_RASTER_MODE 12
+#define GRAS_SC_CONTROL_RENDER_MODE 4
+#define GRAS_SC_SCREEN_SCISSOR_BR_BR_X 0
+#define GRAS_SC_SCREEN_SCISSOR_BR_BR_Y 16
+#define GRAS_SC_WINDOW_SCISSOR_BR_BR_X 0
+#define GRAS_SC_WINDOW_SCISSOR_BR_BR_Y 16
+#define HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY 16
+#define HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY 0
+#define HLSQ_CTRL0REG_CHUNKDISABLE 26
+#define HLSQ_CTRL0REG_CONSTSWITCHMODE 27
+#define HLSQ_CTRL0REG_FSSUPERTHREADENABLE 6
+#define HLSQ_CTRL0REG_FSTHREADSIZE 4
+#define HLSQ_CTRL0REG_LAZYUPDATEDISABLE 28
+#define HLSQ_CTRL0REG_RESERVED2 10
+#define HLSQ_CTRL0REG_SPCONSTFULLUPDATE 29
+#define HLSQ_CTRL0REG_SPSHADERRESTART 9
+#define HLSQ_CTRL0REG_TPFULLUPDATE 30
+#define HLSQ_CTRL1REG_RESERVED1 9
+#define HLSQ_CTRL1REG_VSSUPERTHREADENABLE 8
+#define HLSQ_CTRL1REG_VSTHREADSIZE 6
+#define HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD 26
+#define HLSQ_FSCTRLREG_FSCONSTLENGTH 0
+#define HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET 12
+#define HLSQ_FSCTRLREG_FSINSTRLENGTH 24
+#define HLSQ_VSCTRLREG_VSINSTRLENGTH 24
+#define PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE 8
+#define PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE 5
+#define PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST 25
+#define PC_PRIM_VTX_CONTROL_STRIDE_IN_VPC 0
+#define PC_DRAW_INITIATOR_PRIM_TYPE 0
+#define PC_DRAW_INITIATOR_SOURCE_SELECT 6
+#define PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE 9
+#define PC_DRAW_INITIATOR_INDEX_SIZE 0x0B
+#define PC_DRAW_INITIATOR_SMALL_INDEX 0x0D
+#define PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x0E
+#define RB_COPYCONTROL_COPY_GMEM_BASE 14
+#define RB_COPYCONTROL_RESOLVE_CLEAR_MODE 4
+#define RB_COPYDESTBASE_COPY_DEST_BASE 4
+#define RB_COPYDESTINFO_COPY_COMPONENT_ENABLE 14
+#define RB_COPYDESTINFO_COPY_DEST_ENDIAN 18
+#define RB_COPYDESTINFO_COPY_DEST_FORMAT 2
+#define RB_COPYDESTINFO_COPY_DEST_TILE 0
+#define RB_COPYDESTPITCH_COPY_DEST_PITCH 0
+#define RB_DEPTHCONTROL_Z_TEST_FUNC 4
+#define RB_MODECONTROL_RENDER_MODE 8
+#define RB_MODECONTROL_MARB_CACHE_SPLIT_MODE 15
+#define RB_MODECONTROL_PACKER_TIMER_ENABLE 16
+#define RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE 21
+#define RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR 24
+#define RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR 16
+#define RB_MRTBLENDCONTROL_CLAMP_ENABLE 29
+#define RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE 5
+#define RB_MRTBLENDCONTROL_RGB_DEST_FACTOR 8
+#define RB_MRTBLENDCONTROL_RGB_SRC_FACTOR 0
+#define RB_MRTBUFBASE_COLOR_BUF_BASE 4
+#define RB_MRTBUFINFO_COLOR_BUF_PITCH 17
+#define RB_MRTBUFINFO_COLOR_FORMAT 0
+#define RB_MRTBUFINFO_COLOR_TILE_MODE 6
+#define RB_MRTCONTROL_COMPONENT_ENABLE 24
+#define RB_MRTCONTROL_DITHER_MODE 12
+#define RB_MRTCONTROL_READ_DEST_ENABLE 3
+#define RB_MRTCONTROL_ROP_CODE 8
+#define RB_MSAACONTROL_MSAA_DISABLE 10
+#define RB_MSAACONTROL_SAMPLE_MASK 16
+#define RB_RENDERCONTROL_ALPHA_TEST_FUNC 24
+#define RB_RENDERCONTROL_BIN_WIDTH 4
+#define RB_RENDERCONTROL_DISABLE_COLOR_PIPE 12
+#define RB_STENCILCONTROL_STENCIL_FAIL 11
+#define RB_STENCILCONTROL_STENCIL_FAIL_BF 23
+#define RB_STENCILCONTROL_STENCIL_FUNC 8
+#define RB_STENCILCONTROL_STENCIL_FUNC_BF 20
+#define RB_STENCILCONTROL_STENCIL_ZFAIL 17
+#define RB_STENCILCONTROL_STENCIL_ZFAIL_BF 29
+#define RB_STENCILCONTROL_STENCIL_ZPASS 14
+#define RB_STENCILCONTROL_STENCIL_ZPASS_BF 26
+#define SP_FSCTRLREG0_FSFULLREGFOOTPRINT 10
+#define SP_FSCTRLREG0_FSICACHEINVALID 2
+#define SP_FSCTRLREG0_FSINOUTREGOVERLAP 18
+#define SP_FSCTRLREG0_FSINSTRBUFFERMODE 1
+#define SP_FSCTRLREG0_FSLENGTH 24
+#define SP_FSCTRLREG0_FSSUPERTHREADMODE 21
+#define SP_FSCTRLREG0_FSTHREADMODE 0
+#define SP_FSCTRLREG0_FSTHREADSIZE 20
+#define SP_FSCTRLREG0_PIXLODENABLE 22
+#define SP_FSCTRLREG1_FSCONSTLENGTH 0
+#define SP_FSCTRLREG1_FSINITIALOUTSTANDING 20
+#define SP_FSCTRLREG1_HALFPRECVAROFFSET 24
+#define SP_FSMRTREG_REGID 0
+#define SP_FSOUTREG_PAD0 2
+#define SP_IMAGEOUTPUTREG_MRTFORMAT 0
+#define SP_IMAGEOUTPUTREG_PAD0 6
+#define SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET 16
+#define SP_OBJOFFSETREG_SHADEROBJOFFSETINIC 25
+#define SP_SHADERLENGTH_LEN 0
+#define SP_SPCTRLREG_CONSTMODE 18
+#define SP_SPCTRLREG_SLEEPMODE 20
+#define SP_VSCTRLREG0_VSFULLREGFOOTPRINT 10
+#define SP_VSCTRLREG0_VSICACHEINVALID 2
+#define SP_VSCTRLREG0_VSINSTRBUFFERMODE 1
+#define SP_VSCTRLREG0_VSLENGTH 24
+#define SP_VSCTRLREG0_VSSUPERTHREADMODE 21
+#define SP_VSCTRLREG0_VSTHREADMODE 0
+#define SP_VSCTRLREG0_VSTHREADSIZE 20
+#define SP_VSCTRLREG1_VSINITIALOUTSTANDING 24
+#define SP_VSOUTREG_COMPMASK0 9
+#define SP_VSPARAMREG_POSREGID 0
+#define SP_VSPARAMREG_PSIZEREGID 8
+#define SP_VSPARAMREG_TOTALVSOUTVAR 20
+#define SP_VSVPCDSTREG_OUTLOC0 0
+#define TPL1_TPTEXOFFSETREG_BASETABLEPTR 16
+#define TPL1_TPTEXOFFSETREG_MEMOBJOFFSET 8
+#define TPL1_TPTEXOFFSETREG_SAMPLEROFFSET 0
+#define UCHE_INVALIDATE1REG_OPCODE 0x1C
+#define UCHE_INVALIDATE1REG_ALLORPORTION 0x1F
+#define VFD_BASEADDR_BASEADDR 0
+#define VFD_CTRLREG0_PACKETSIZE 18
+#define VFD_CTRLREG0_STRMDECINSTRCNT 22
+#define VFD_CTRLREG0_STRMFETCHINSTRCNT 27
+#define VFD_CTRLREG0_TOTALATTRTOVS 0
+#define VFD_CTRLREG1_MAXSTORAGE 0
+#define VFD_CTRLREG1_REGID4INST 24
+#define VFD_CTRLREG1_REGID4VTX 16
+#define VFD_DECODEINSTRUCTIONS_CONSTFILL 4
+#define VFD_DECODEINSTRUCTIONS_FORMAT 6
+#define VFD_DECODEINSTRUCTIONS_LASTCOMPVALID 29
+#define VFD_DECODEINSTRUCTIONS_REGID 12
+#define VFD_DECODEINSTRUCTIONS_SHIFTCNT 24
+#define VFD_DECODEINSTRUCTIONS_SWITCHNEXT 30
+#define VFD_DECODEINSTRUCTIONS_WRITEMASK 0
+#define VFD_FETCHINSTRUCTIONS_BUFSTRIDE 7
+#define VFD_FETCHINSTRUCTIONS_FETCHSIZE 0
+#define VFD_FETCHINSTRUCTIONS_INDEXDECODE 18
+#define VFD_FETCHINSTRUCTIONS_STEPRATE 24
+#define VFD_FETCHINSTRUCTIONS_SWITCHNEXT 17
+#define VFD_THREADINGTHRESHOLD_REGID_VTXCNT 8
+#define VFD_THREADINGTHRESHOLD_RESERVED6 4
+#define VPC_VPCATTR_LMSIZE 28
+#define VPC_VPCATTR_THRHDASSIGN 12
+#define VPC_VPCATTR_TOTALATTR 0
+#define VPC_VPCPACK_NUMFPNONPOSVAR 8
+#define VPC_VPCPACK_NUMNONPOSVSVAR 16
+#define VPC_VPCVARPSREPLMODE_COMPONENT08 0
+#define VPC_VPCVARPSREPLMODE_COMPONENT09 2
+#define VPC_VPCVARPSREPLMODE_COMPONENT0A 4
+#define VPC_VPCVARPSREPLMODE_COMPONENT0B 6
+#define VPC_VPCVARPSREPLMODE_COMPONENT0C 8
+#define VPC_VPCVARPSREPLMODE_COMPONENT0D 10
+#define VPC_VPCVARPSREPLMODE_COMPONENT0E 12
+#define VPC_VPCVARPSREPLMODE_COMPONENT0F 14
+#define VPC_VPCVARPSREPLMODE_COMPONENT10 16
+#define VPC_VPCVARPSREPLMODE_COMPONENT11 18
+#define VPC_VPCVARPSREPLMODE_COMPONENT12 20
+#define VPC_VPCVARPSREPLMODE_COMPONENT13 22
+#define VPC_VPCVARPSREPLMODE_COMPONENT14 24
+#define VPC_VPCVARPSREPLMODE_COMPONENT15 26
+#define VPC_VPCVARPSREPLMODE_COMPONENT16 28
+#define VPC_VPCVARPSREPLMODE_COMPONENT17 30
+
+#endif
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index ce76a8f..b44dccc 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -10,7 +10,6 @@
  * GNU General Public License for more details.
  *
  */
-#include <linux/delay.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
 #include <linux/ioctl.h>
@@ -29,7 +28,7 @@
 #include "adreno_postmortem.h"
 
 #include "a2xx_reg.h"
-#include "kgsl_mmu.h"
+#include "a3xx_reg.h"
 
 #define DRIVER_VERSION_MAJOR   3
 #define DRIVER_VERSION_MINOR   1
@@ -155,35 +154,10 @@
 	{ ADRENO_REV_A225, 2, 2, ANY_ID, ANY_ID,
 		"a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
 		1536, 768 },
+	{ ADRENO_REV_A320, 3, 1, ANY_ID, ANY_ID,
+		"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev },
 };
 
-static void adreno_gmeminit(struct adreno_device *adreno_dev)
-{
-	struct kgsl_device *device = &adreno_dev->dev;
-	union reg_rb_edram_info rb_edram_info;
-	unsigned int gmem_size;
-	unsigned int edram_value = 0;
-
-	/* make sure edram range is aligned to size */
-	BUG_ON(adreno_dev->gmemspace.gpu_base &
-				(adreno_dev->gmemspace.sizebytes - 1));
-
-	/* get edram_size value equivalent */
-	gmem_size = (adreno_dev->gmemspace.sizebytes >> 14);
-	while (gmem_size >>= 1)
-		edram_value++;
-
-	rb_edram_info.val = 0;
-
-	rb_edram_info.f.edram_size = edram_value;
-	rb_edram_info.f.edram_mapping_mode = 0; /* EDRAM_MAP_UPPER */
-
-	/* must be aligned to size */
-	rb_edram_info.f.edram_range = (adreno_dev->gmemspace.gpu_base >> 14);
-
-	adreno_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val);
-}
-
 static irqreturn_t adreno_isr(int irq, void *data)
 {
 	irqreturn_t result;
@@ -360,7 +334,32 @@
 }
 
 static unsigned int
-adreno_getchipid(struct kgsl_device *device)
+a3xx_getchipid(struct kgsl_device *device)
+{
+	unsigned int chipid = 0;
+	unsigned int coreid, majorid, minorid, patchid;
+	unsigned int version;
+
+	adreno_regread(device, A3XX_RBBM_HW_VERSION, &version);
+
+	coreid = 0x03;
+
+	/* Version might not be set - if it isn't, assume this is 320 */
+	if (version)
+		majorid = version & 0x0F;
+	else
+		majorid = 1;
+
+	minorid = (version >> 4) & 0xFFF;
+	patchid = 0;
+
+	chipid = (coreid << 24) | (majorid << 16) | (minorid << 8) | patchid;
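+
+	/*
+	 * Worked example: an unprogrammed version register (0) gives
+	 * coreid 3, majorid 1, minorid 0 and patchid 0, i.e. chipid
+	 * 0x03010000, which matches the A320 entry (core 3, major 1)
+	 * in the gpulist table above.
+	 */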
+
+	return chipid;
+}
+
+static unsigned int
+a2xx_getchipid(struct kgsl_device *device)
 {
 	unsigned int chipid = 0;
 	unsigned int coreid, majorid, minorid, patchid, revid;
@@ -398,6 +397,15 @@
 	return chipid;
 }
 
+static unsigned int
+adreno_getchipid(struct kgsl_device *device)
+{
+	if (cpu_is_apq8064())
+		return a3xx_getchipid(device);
+	else
+		return a2xx_getchipid(device);
+}
+
 static inline bool _rev_match(unsigned int id, unsigned int entry)
 {
 	return (entry == ANY_ID || entry == id);
@@ -493,7 +501,6 @@
 {
 	int status = -EINVAL;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	int init_reftimestamp = 0x7fffffff;
 
 	kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
 
@@ -509,92 +516,36 @@
 		goto error_clk_off;
 	}
 
-	if (adreno_is_a20x(adreno_dev)) {
+	/* Set up the MMU */
+	if (adreno_is_a2xx(adreno_dev)) {
 		/*
 		 * the MH_CLNT_INTF_CTRL_CONFIG registers aren't present
 		 * on older gpus
 		 */
-		device->mh.mh_intf_cfg1 = 0;
-		device->mh.mh_intf_cfg2 = 0;
+		if (adreno_is_a20x(adreno_dev)) {
+			device->mh.mh_intf_cfg1 = 0;
+			device->mh.mh_intf_cfg2 = 0;
+		}
+
+		kgsl_mh_start(device);
 	}
 
-	kgsl_mh_start(device);
-
-	if (kgsl_mmu_start(device))
+	status = kgsl_mmu_start(device);
+	if (status)
 		goto error_clk_off;
 
-	/*We need to make sure all blocks are powered up and clocked before
-	*issuing a soft reset.  The overrides will then be turned off (set to 0)
-	*/
-	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0xfffffffe);
-	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff);
-
-	/* Only reset CP block if all blocks have previously been reset */
-	if (!(device->flags & KGSL_FLAGS_SOFT_RESET) ||
-		!adreno_is_a22x(adreno_dev)) {
-		adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0xFFFFFFFF);
-		device->flags |= KGSL_FLAGS_SOFT_RESET;
-	} else
-		adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000001);
-
-	/* The core is in an indeterminate state until the reset completes
-	 * after 30ms.
-	 */
-	msleep(30);
-
-	adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000);
-
-	adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
-
-	if (adreno_is_a225(adreno_dev)) {
-		/* Enable large instruction store for A225 */
-		adreno_regwrite(device, REG_SQ_FLOW_CONTROL, 0x18000000);
-	}
-
-	adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
-	adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
-
-	if (cpu_is_msm8960() || cpu_is_msm8930())
-		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0x200);
-	else
-		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0);
-
-	if (!adreno_is_a22x(adreno_dev))
-		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0);
-	else
-		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80);
-
-	kgsl_sharedmem_set(&device->memstore, 0, 0, device->memstore.size);
-
-	kgsl_sharedmem_writel(&device->memstore,
-			      KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
-			      init_reftimestamp);
-
-	adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000);
-
-	/* Make sure interrupts are disabled */
-
-	adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
-	adreno_regwrite(device, REG_CP_INT_CNTL, 0);
-	adreno_regwrite(device, REG_SQ_INT_CNTL, 0);
-
-	if (adreno_is_a22x(adreno_dev))
-		adreno_dev->gmemspace.sizebytes = SZ_512K;
-	else
-		adreno_dev->gmemspace.sizebytes = SZ_256K;
-	adreno_gmeminit(adreno_dev);
+	/* Start the GPU */
+	adreno_dev->gpudev->start(adreno_dev);
 
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
 	device->ftbl->irqctrl(device, 1);
 
 	status = adreno_ringbuffer_start(&adreno_dev->ringbuffer, init_ram);
-	if (status != 0)
-		goto error_irq_off;
+	if (status == 0) {
+		mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+		return 0;
+	}
 
-	mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
-	return status;
-
-error_irq_off:
 	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
 	kgsl_mmu_stop(device);
 error_clk_off:
@@ -869,7 +820,8 @@
 		msecs_to_jiffies(adreno_dev->wait_timeout);
 	unsigned long wait_time = jiffies + wait_timeout;
 
-	kgsl_cffdump_regpoll(device->id, REG_RBBM_STATUS << 2,
+	kgsl_cffdump_regpoll(device->id,
+		adreno_dev->gpudev->reg_rbbm_status << 2,
 		0x00000000, 0x80000000);
 	/* first, wait until the CP has consumed all the commands in
 	 * the ring buffer
@@ -890,9 +842,15 @@
 	/* now, wait for the GPU to finish its operations */
 	wait_time = jiffies + wait_timeout;
 	while (time_before(jiffies, wait_time)) {
-		adreno_regread(device, REG_RBBM_STATUS, &rbbm_status);
-		if (rbbm_status == 0x110)
-			return 0;
+		adreno_regread(device, adreno_dev->gpudev->reg_rbbm_status,
+			&rbbm_status);
+		if (adreno_is_a2xx(adreno_dev)) {
+			if (rbbm_status == 0x110)
+				return 0;
+		} else {
+			if (!(rbbm_status & 0x80000000))
+				return 0;
+		}
 	}
 
 err:
@@ -918,10 +876,17 @@
 		GSL_RB_GET_READPTR(rb, &rb->rptr);
 		if (!device->active_cnt && (rb->rptr == rb->wptr)) {
 			/* Is the core idle? */
-			adreno_regread(device, REG_RBBM_STATUS,
-					    &rbbm_status);
-			if (rbbm_status == 0x110)
-				status = true;
+			adreno_regread(device,
+				adreno_dev->gpudev->reg_rbbm_status,
+				&rbbm_status);
+
+			if (adreno_is_a2xx(adreno_dev)) {
+				if (rbbm_status == 0x110)
+					status = true;
+			} else {
+				if (!(rbbm_status & 0x80000000))
+					status = true;
+			}
 		}
 	} else {
 		status = true;
@@ -1278,44 +1243,29 @@
 static void adreno_power_stats(struct kgsl_device *device,
 				struct kgsl_power_stats *stats)
 {
-	unsigned int reg;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	unsigned int cycles;
+
+	/* Get the busy cycles counted since the counter was last reset */
+	/* Calling this function also resets and restarts the counter */
+
+	cycles = adreno_dev->gpudev->busy_cycles(adreno_dev);
 
 	/* In order to calculate idle you have to have run the algorithm *
 	 * at least once to get a start time. */
 	if (pwr->time != 0) {
-		s64 tmp;
-		/* Stop the performance moniter and read the current *
-		 * busy cycles. */
-		adreno_regwrite(device,
-			REG_CP_PERFMON_CNTL,
-			REG_PERF_MODE_CNT |
-			REG_PERF_STATE_FREEZE);
-		adreno_regread(device, REG_RBBM_PERFCOUNTER1_LO, &reg);
-		tmp = ktime_to_us(ktime_get());
+		s64 tmp = ktime_to_us(ktime_get());
 		stats->total_time = tmp - pwr->time;
 		pwr->time = tmp;
-		stats->busy_time = adreno_ticks_to_us(reg, device->pwrctrl.
+		stats->busy_time = adreno_ticks_to_us(cycles, device->pwrctrl.
 				pwrlevels[device->pwrctrl.active_pwrlevel].
 				gpu_freq);
-
-		adreno_regwrite(device,
-			REG_CP_PERFMON_CNTL,
-			REG_PERF_MODE_CNT |
-			REG_PERF_STATE_RESET);
 	} else {
 		stats->total_time = 0;
 		stats->busy_time = 0;
 		pwr->time = ktime_to_us(ktime_get());
 	}
-
-	/* re-enable the performance moniters */
-	adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &reg);
-	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, (reg | 0x40));
-	adreno_regwrite(device, REG_RBBM_PERFCOUNTER1_SELECT, 0x1);
-	adreno_regwrite(device,
-		REG_CP_PERFMON_CNTL,
-		REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
 }
 
 void adreno_irqctrl(struct kgsl_device *device, int state)
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index af5bf51..95b6253 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -52,6 +52,7 @@
 	ADRENO_REV_A205 = 205,
 	ADRENO_REV_A220 = 220,
 	ADRENO_REV_A225 = 225,
+	ADRENO_REV_A320 = 320,
 };
 
 struct adreno_gpudev;
@@ -77,15 +78,28 @@
 };
 
 struct adreno_gpudev {
+	/*
+	 * These registers are in a different location on A3XX, so define
+	 * them in the structure and use them as variables.
+	 */
+	unsigned int reg_rbbm_status;
+	unsigned int reg_cp_pfp_ucode_data;
+	unsigned int reg_cp_pfp_ucode_addr;
+
+	/* GPU specific function hooks */
 	int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
 	void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
 	void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
 	irqreturn_t (*irq_handler)(struct adreno_device *);
 	void (*irq_control)(struct adreno_device *, int);
 	void * (*snapshot)(struct adreno_device *, void *, int *, int);
+	void (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
+	void (*start)(struct adreno_device *);
+	unsigned int (*busy_cycles)(struct adreno_device *);
 };
 
 extern struct adreno_gpudev adreno_a2xx_gpudev;
+extern struct adreno_gpudev adreno_a3xx_gpudev;
 
 /* A2XX register sets defined in adreno_a2xx.c */
 extern const unsigned int a200_registers[];
@@ -144,7 +158,12 @@
 
 static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
 {
-	return (adreno_dev->gpurev <= ADRENO_REV_A225);
+	return (adreno_dev->gpurev <= 299);
+}
+
+static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
+{
+	return (adreno_dev->gpurev >= 300);
 }
 
 /**
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 5ce9cf8..e31b76b 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -11,6 +11,9 @@
  *
  */
 
+#include <linux/delay.h>
+#include <mach/socinfo.h>
+
 #include "kgsl.h"
 #include "kgsl_sharedmem.h"
 #include "kgsl_cffdump.h"
@@ -1749,15 +1752,207 @@
 	wmb();
 }
 
+static void a2xx_rb_init(struct adreno_device *adreno_dev,
+			struct adreno_ringbuffer *rb)
+{
+	unsigned int *cmds, cmds_gpu;
+
+	/* ME_INIT */
+	cmds = adreno_ringbuffer_allocspace(rb, 19);
+	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
+
+	GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 18));
+	/* All fields present (bits 9:0) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
+	/* Disable/Enable Real-Time Stream processing (present but ignored) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
+
+	/* Instruction memory size: */
+	GSL_RB_WRITE(cmds, cmds_gpu,
+		(adreno_encode_istore_size(adreno_dev)
+		| adreno_dev->pix_shader_start));
+	/* Maximum Contexts */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
+	/* Write Confirm Interval; the CP will wait
+	 * wait_interval * 16 clocks between polls */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	/* NQ and External Memory Swap */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Protected mode error checking */
+	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
+	/* Disable header dumping and Header dump address */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Header dump size */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	adreno_ringbuffer_submit(rb);
+}
+
+static unsigned int a2xx_busy_cycles(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	unsigned int reg, val;
+
+	/* Freeze the counter */
+	adreno_regwrite(device, REG_CP_PERFMON_CNTL,
+		REG_PERF_MODE_CNT | REG_PERF_STATE_FREEZE);
+
+	/* Get the value */
+	adreno_regread(device, REG_RBBM_PERFCOUNTER1_LO, &val);
+
+	/* Reset the counter */
+	adreno_regwrite(device, REG_CP_PERFMON_CNTL,
+		REG_PERF_MODE_CNT | REG_PERF_STATE_RESET);
+
+	/* Re-enable the performance monitors */
+	adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &reg);
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, (reg | 0x40));
+	adreno_regwrite(device, REG_RBBM_PERFCOUNTER1_SELECT, 0x1);
+	adreno_regwrite(device, REG_CP_PERFMON_CNTL,
+		REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
+
+	return val;
+}
+
+static void a2xx_gmeminit(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	union reg_rb_edram_info rb_edram_info;
+	unsigned int gmem_size;
+	unsigned int edram_value = 0;
+
+	/* make sure edram range is aligned to size */
+	BUG_ON(adreno_dev->gmemspace.gpu_base &
+				(adreno_dev->gmemspace.sizebytes - 1));
+
+	/* get edram_size value equivalent */
+	gmem_size = (adreno_dev->gmemspace.sizebytes >> 14);
+	while (gmem_size >>= 1)
+		edram_value++;
+
+	rb_edram_info.val = 0;
+
+	rb_edram_info.f.edram_size = edram_value;
+	rb_edram_info.f.edram_mapping_mode = 0; /* EDRAM_MAP_UPPER */
+
+	/* must be aligned to size */
+	rb_edram_info.f.edram_range = (adreno_dev->gmemspace.gpu_base >> 14);
+
+	adreno_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val);
+}
+
+static void a2xx_start(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	int init_reftimestamp = 0x7fffffff;
+
+	/*
+	 * We need to make sure all blocks are powered up and clocked
+	 * before issuing a soft reset.  The overrides will then be
+	 * turned off (set to 0)
+	 */
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0xfffffffe);
+	adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+	/*
+	 * Only reset CP block if all blocks have previously been
+	 * reset
+	 */
+	if (!(device->flags & KGSL_FLAGS_SOFT_RESET) ||
+		!adreno_is_a22x(adreno_dev)) {
+		adreno_regwrite(device, REG_RBBM_SOFT_RESET,
+			0xFFFFFFFF);
+		device->flags |= KGSL_FLAGS_SOFT_RESET;
+	} else {
+		adreno_regwrite(device, REG_RBBM_SOFT_RESET,
+			0x00000001);
+	}
+	/*
+	 * The core is in an indeterminate state until the reset
+	 * completes after 30ms.
+	 */
+	msleep(30);
+
+	adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000);
+
+	if (adreno_is_a225(adreno_dev)) {
+		/* Enable large instruction store for A225 */
+		adreno_regwrite(device, REG_SQ_FLOW_CONTROL,
+			0x18000000);
+	}
+
+	adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
+
+	adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
+	adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
+
+	if (cpu_is_msm8960() || cpu_is_msm8930())
+		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0x200);
+	else
+		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0);
+
+	if (!adreno_is_a22x(adreno_dev))
+		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0);
+	else
+		adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80);
+
+	kgsl_sharedmem_set(&device->memstore, 0, 0, device->memstore.size);
+
+	kgsl_sharedmem_writel(&device->memstore,
+			      KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
+			      init_reftimestamp);
+
+	adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000);
+
+	/* Make sure interrupts are disabled */
+	adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
+	adreno_regwrite(device, REG_CP_INT_CNTL, 0);
+	adreno_regwrite(device, REG_SQ_INT_CNTL, 0);
+
+	if (adreno_is_a22x(adreno_dev))
+		adreno_dev->gmemspace.sizebytes = SZ_512K;
+	else
+		adreno_dev->gmemspace.sizebytes = SZ_256K;
+
+	a2xx_gmeminit(adreno_dev);
+}
+
 /* Defined in adreno_a2xx_snapshot.c */
 void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
 	int *remain, int hang);
 
 struct adreno_gpudev adreno_a2xx_gpudev = {
+	.reg_rbbm_status = REG_RBBM_STATUS,
+	.reg_cp_pfp_ucode_addr = REG_CP_PFP_UCODE_ADDR,
+	.reg_cp_pfp_ucode_data = REG_CP_PFP_UCODE_DATA,
+
 	.ctxt_create = a2xx_drawctxt_create,
 	.ctxt_save = a2xx_drawctxt_save,
 	.ctxt_restore = a2xx_drawctxt_restore,
 	.irq_handler = a2xx_irq_handler,
 	.irq_control = a2xx_irq_control,
 	.snapshot = a2xx_snapshot,
+	.rb_init = a2xx_rb_init,
+	.busy_cycles = a2xx_busy_cycles,
+	.start = a2xx_start,
 };
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
new file mode 100644
index 0000000..cbc7bed
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -0,0 +1,2547 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "a3xx_reg.h"
+
+/* Simple macro to facilitate bit setting in the gmem2sys and sys2gmem
+ * functions.
+ */
+
+#define _SET(_shift, _val) ((_val) << (_shift))
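+
+/*
+ * For example, _SET(RB_MODECONTROL_RENDER_MODE, RB_RESOLVE_PASS) expands
+ * to (RB_RESOLVE_PASS << 8) = 0x200, placing the resolve-pass render mode
+ * value into the RENDER_MODE field of RB_MODE_CONTROL.
+ */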
+
+/*
+ ****************************************************************************
+ *
+ * Context state shadow structure:
+ *
+ * +---------------------+------------+-------------+---------------------+---+
+ * | ALU Constant Shadow | Reg Shadow | C&V Buffers | Shader Instr Shadow |Tex|
+ * +---------------------+------------+-------------+---------------------+---+
+ *
+ *		 8K - ALU Constant Shadow (8K aligned)
+ *		 4K - H/W Register Shadow (8K aligned)
+ *		 5K - Command and Vertex Buffers
+ *		 8K - Shader Instruction Shadow
+ *		 ~6K - Texture Constant Shadow
+ *
+ *
+ ***************************************************************************
+ */
+
+/* Sizes of all sections in state shadow memory */
+#define ALU_SHADOW_SIZE      (8*1024) /* 8KB */
+#define REG_SHADOW_SIZE      (4*1024) /* 4KB */
+#define CMD_BUFFER_SIZE      (5*1024) /* 5KB */
+#define TEX_SIZE_MEM_OBJECTS 896      /* bytes */
+#define TEX_SIZE_MIPMAP      1936     /* bytes */
+#define TEX_SIZE_SAMPLER_OBJ 256      /* bytes */
+#define TEX_SHADOW_SIZE                            \
+	((TEX_SIZE_MEM_OBJECTS + TEX_SIZE_MIPMAP + \
+	TEX_SIZE_SAMPLER_OBJ)*2) /* ~6KB */
+#define SHADER_SHADOW_SIZE   (8*1024) /* 8KB */
+
+/* Total context size, excluding GMEM shadow */
+#define CONTEXT_SIZE                         \
+	(ALU_SHADOW_SIZE+REG_SHADOW_SIZE +   \
+	CMD_BUFFER_SIZE+SHADER_SHADOW_SIZE + \
+	TEX_SHADOW_SIZE)
+
+/* Offsets to different sections in context shadow memory */
+#define REG_OFFSET ALU_SHADOW_SIZE
+#define CMD_OFFSET (REG_OFFSET+REG_SHADOW_SIZE)
+#define SHADER_OFFSET (CMD_OFFSET+CMD_BUFFER_SIZE)
+#define TEX_OFFSET (SHADER_OFFSET+SHADER_SHADOW_SIZE)
+#define VS_TEX_OFFSET_MEM_OBJECTS TEX_OFFSET
+#define VS_TEX_OFFSET_MIPMAP (VS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS)
+#define VS_TEX_OFFSET_SAMPLER_OBJ (VS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP)
+#define FS_TEX_OFFSET_MEM_OBJECTS \
+	(VS_TEX_OFFSET_SAMPLER_OBJ+TEX_SIZE_SAMPLER_OBJ)
+#define FS_TEX_OFFSET_MIPMAP (FS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS)
+#define FS_TEX_OFFSET_SAMPLER_OBJ (FS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP)
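+
+/*
+ * With the sizes above, the offsets work out to REG_OFFSET = 0x2000,
+ * CMD_OFFSET = 0x3000, SHADER_OFFSET = 0x4400 and TEX_OFFSET = 0x6400,
+ * for a total CONTEXT_SIZE of 31776 bytes.
+ */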
+
+/* The offset for fragment shader data in HLSQ context */
+#define SSIZE (16*1024)
+
+#define HLSQ_SAMPLER_OFFSET 0x000
+#define HLSQ_MEMOBJ_OFFSET  0x400
+#define HLSQ_MIPMAP_OFFSET  0x800
+
+#ifdef GSL_USE_A3XX_HLSQ_SHADOW_RAM
+/* Use shadow RAM */
+#define HLSQ_SHADOW_BASE		(0x10000+SSIZE*2)
+#else
+/* Use working RAM */
+#define HLSQ_SHADOW_BASE		0x10000
+#endif
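+
+/*
+ * With SSIZE = 0x4000, HLSQ_SHADOW_BASE resolves to 0x18000 (shadow RAM)
+ * or 0x10000 (working RAM); the fragment shader copy of each HLSQ block
+ * lives SSIZE bytes above the vertex shader copy, hence the "+ SSIZE"
+ * offsets in the save commands below.
+ */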
+
+#define REG_TO_MEM_LOOP_COUNT_SHIFT	15
+
+#define BUILD_PC_DRAW_INITIATOR(prim_type, source_select, index_size, \
+	vis_cull_mode) \
+	(((prim_type)      << PC_DRAW_INITIATOR_PRIM_TYPE) | \
+	((source_select)   << PC_DRAW_INITIATOR_SOURCE_SELECT) | \
+	((index_size & 1)  << PC_DRAW_INITIATOR_INDEX_SIZE) | \
+	((index_size >> 1) << PC_DRAW_INITIATOR_SMALL_INDEX) | \
+	((vis_cull_mode)   << PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE) | \
+	(1                 << PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE))
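+
+/*
+ * For example, BUILD_PC_DRAW_INITIATOR(PC_DI_PT_RECTLIST,
+ * PC_DI_SRC_SEL_AUTO_INDEX, PC_DI_INDEX_SIZE_16_BIT,
+ * PC_DI_IGNORE_VISIBILITY) evaluates to 0x00004088: prim type 8 at bit 0,
+ * source select 2 at bit 6 and the pre-draw initiator enable at bit 14.
+ */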
+
+/*
+ * List of context registers (starting from dword offset 0x2000).
+ * Each line contains start and end of a range of registers.
+ */
+static const unsigned int context_register_ranges[] = {
+	A3XX_GRAS_CL_CLIP_CNTL, A3XX_GRAS_CL_CLIP_CNTL,
+	A3XX_GRAS_CL_GB_CLIP_ADJ, A3XX_GRAS_CL_GB_CLIP_ADJ,
+	A3XX_GRAS_CL_VPORT_XOFFSET, A3XX_GRAS_CL_VPORT_ZSCALE,
+	A3XX_GRAS_SU_POINT_MINMAX, A3XX_GRAS_SU_POINT_SIZE,
+	A3XX_GRAS_SU_POLY_OFFSET_SCALE, A3XX_GRAS_SU_POLY_OFFSET_OFFSET,
+	A3XX_GRAS_SU_MODE_CONTROL, A3XX_GRAS_SU_MODE_CONTROL,
+	A3XX_GRAS_SC_CONTROL, A3XX_GRAS_SC_CONTROL,
+	A3XX_GRAS_SC_SCREEN_SCISSOR_TL, A3XX_GRAS_SC_SCREEN_SCISSOR_BR,
+	A3XX_GRAS_SC_WINDOW_SCISSOR_TL, A3XX_GRAS_SC_WINDOW_SCISSOR_BR,
+	A3XX_RB_MODE_CONTROL, A3XX_RB_MRT_BLEND_CONTROL3,
+	A3XX_RB_BLEND_RED, A3XX_RB_COPY_DEST_INFO,
+	A3XX_RB_DEPTH_CONTROL, A3XX_RB_DEPTH_CONTROL,
+	A3XX_PC_VSTREAM_CONTROL, A3XX_PC_VSTREAM_CONTROL,
+	A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, A3XX_PC_VERTEX_REUSE_BLOCK_CNTL,
+	A3XX_PC_PRIM_VTX_CNTL, A3XX_PC_RESTART_INDEX,
+	A3XX_HLSQ_CONTROL_0_REG, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG,
+	A3XX_HLSQ_CL_NDRANGE_0_REG, A3XX_HLSQ_CL_NDRANGE_0_REG,
+	A3XX_HLSQ_CL_NDRANGE_2_REG, A3XX_HLSQ_CL_CONTROL_1_REG,
+	A3XX_HLSQ_CL_KERNEL_CONST_REG, A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG,
+	A3XX_HLSQ_CL_WG_OFFSET_REG, A3XX_HLSQ_CL_WG_OFFSET_REG,
+	A3XX_VFD_CONTROL_0, A3XX_VFD_VS_THREADING_THRESHOLD,
+	A3XX_SP_SP_CTRL_REG, A3XX_SP_SP_CTRL_REG,
+	A3XX_SP_VS_CTRL_REG0, A3XX_SP_VS_OUT_REG_7,
+	A3XX_SP_VS_VPC_DST_REG_0, A3XX_SP_VS_PVT_MEM_SIZE_REG,
+	A3XX_SP_VS_LENGTH_REG, A3XX_SP_FS_PVT_MEM_SIZE_REG,
+	A3XX_SP_FS_FLAT_SHAD_MODE_REG_0, A3XX_SP_FS_FLAT_SHAD_MODE_REG_1,
+	A3XX_SP_FS_OUTPUT_REG, A3XX_SP_FS_OUTPUT_REG,
+	A3XX_SP_FS_MRT_REG_0, A3XX_SP_FS_IMAGE_OUTPUT_REG_3,
+	A3XX_SP_FS_LENGTH_REG, A3XX_SP_FS_LENGTH_REG,
+	A3XX_TPL1_TP_VS_TEX_OFFSET, A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
+	A3XX_VPC_ATTR, A3XX_VPC_VARY_CYLWRAP_ENABLE_1,
+};
+
+/* Global registers that need to be saved separately */
+static const unsigned int global_registers[] = {
+	A3XX_GRAS_CL_USER_PLANE_X0, A3XX_GRAS_CL_USER_PLANE_Y0,
+	A3XX_GRAS_CL_USER_PLANE_Z0, A3XX_GRAS_CL_USER_PLANE_W0,
+	A3XX_GRAS_CL_USER_PLANE_X1, A3XX_GRAS_CL_USER_PLANE_Y1,
+	A3XX_GRAS_CL_USER_PLANE_Z1, A3XX_GRAS_CL_USER_PLANE_W1,
+	A3XX_GRAS_CL_USER_PLANE_X2, A3XX_GRAS_CL_USER_PLANE_Y2,
+	A3XX_GRAS_CL_USER_PLANE_Z2, A3XX_GRAS_CL_USER_PLANE_W2,
+	A3XX_GRAS_CL_USER_PLANE_X3, A3XX_GRAS_CL_USER_PLANE_Y3,
+	A3XX_GRAS_CL_USER_PLANE_Z3, A3XX_GRAS_CL_USER_PLANE_W3,
+	A3XX_GRAS_CL_USER_PLANE_X4, A3XX_GRAS_CL_USER_PLANE_Y4,
+	A3XX_GRAS_CL_USER_PLANE_Z4, A3XX_GRAS_CL_USER_PLANE_W4,
+	A3XX_GRAS_CL_USER_PLANE_X5, A3XX_GRAS_CL_USER_PLANE_Y5,
+	A3XX_GRAS_CL_USER_PLANE_Z5, A3XX_GRAS_CL_USER_PLANE_W5,
+	A3XX_VSC_BIN_SIZE,
+	A3XX_VSC_PIPE_CONFIG_0, A3XX_VSC_PIPE_CONFIG_1,
+	A3XX_VSC_PIPE_CONFIG_2, A3XX_VSC_PIPE_CONFIG_3,
+	A3XX_VSC_PIPE_CONFIG_4, A3XX_VSC_PIPE_CONFIG_5,
+	A3XX_VSC_PIPE_CONFIG_6, A3XX_VSC_PIPE_CONFIG_7,
+	A3XX_VSC_PIPE_DATA_ADDRESS_0, A3XX_VSC_PIPE_DATA_ADDRESS_1,
+	A3XX_VSC_PIPE_DATA_ADDRESS_2, A3XX_VSC_PIPE_DATA_ADDRESS_3,
+	A3XX_VSC_PIPE_DATA_ADDRESS_4, A3XX_VSC_PIPE_DATA_ADDRESS_5,
+	A3XX_VSC_PIPE_DATA_ADDRESS_6, A3XX_VSC_PIPE_DATA_ADDRESS_7,
+	A3XX_VSC_PIPE_DATA_LENGTH_0, A3XX_VSC_PIPE_DATA_LENGTH_1,
+	A3XX_VSC_PIPE_DATA_LENGTH_2, A3XX_VSC_PIPE_DATA_LENGTH_3,
+	A3XX_VSC_PIPE_DATA_LENGTH_4, A3XX_VSC_PIPE_DATA_LENGTH_5,
+	A3XX_VSC_PIPE_DATA_LENGTH_6, A3XX_VSC_PIPE_DATA_LENGTH_7,
+	A3XX_VSC_SIZE_ADDRESS
+};
+
+#define GLOBAL_REGISTER_COUNT ARRAY_SIZE(global_registers)
+
+/* A scratchpad used to build commands during context create */
+static struct tmp_ctx {
+	unsigned int *cmd; /* Next available dword in C&V buffer */
+
+	/* Addresses in command buffer where registers are saved */
+	uint32_t reg_values[GLOBAL_REGISTER_COUNT];
+	uint32_t gmem_base; /* Base GPU address of GMEM */
+} tmp_ctx;
+
+#ifndef GSL_CONTEXT_SWITCH_CPU_SYNC
+/*
+ * Function for executing dest = ( (reg & and) ROL rol ) | or
+ */
+static unsigned int *rmw_regtomem(unsigned int *cmd,
+				  unsigned int reg, unsigned int and,
+				  unsigned int rol, unsigned int or,
+				  unsigned int dest)
+{
+	/* CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0x00000000) | reg */
+	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+	*cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
+	*cmd++ = 0x00000000;	/* AND value */
+	*cmd++ = reg;		/* OR address */
+
+	/* CP_SCRATCH_REG2 = ( (CP_SCRATCH_REG2 & and) ROL rol ) |  or */
+	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+	*cmd++ = (rol << 24) | A3XX_CP_SCRATCH_REG2;
+	*cmd++ = and;		/* AND value */
+	*cmd++ = or;		/* OR value */
+
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_CP_SCRATCH_REG2;
+	*cmd++ = dest;
+
+	return cmd;
+}
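+
+/*
+ * Illustrative use (hypothetical values - 'gpuaddr' is a placeholder, not
+ * a name used in this file): to shadow the low 16 bits of a register one
+ * could emit
+ *
+ *	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x0000ffff, 0, 0,
+ *			   gpuaddr);
+ *
+ * which routes the masked register value through CP_SCRATCH_REG2 and
+ * writes the result to gpuaddr.
+ */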
+#endif
+
+static void build_regconstantsave_cmds(struct adreno_device *adreno_dev,
+				       struct adreno_context *drawctxt)
+{
+	unsigned int *cmd = tmp_ctx.cmd;
+	unsigned int *start = cmd;
+	unsigned int i;
+
+	drawctxt->constant_save_commands[0].hostptr = cmd;
+	drawctxt->constant_save_commands[0].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	cmd++;
+
+	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	/*
+	 * Context registers are already shadowed; just need to
+	 * disable shadowing to prevent corruption.
+	 */
+
+	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+	*cmd++ = 4 << 16;	/* regs, start=0 */
+	*cmd++ = 0x0;		/* count = 0 */
+
+#else
+	/*
+	 * Make sure the HW context has the correct register values before
+	 * reading them.
+	 */
+
+	/* Write context registers into shadow */
+	for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) {
+		unsigned int start = context_register_ranges[i * 2];
+		unsigned int end = context_register_ranges[i * 2 + 1];
+		*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+		*cmd++ = ((end - start + 1) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+			start;
+		*cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET)
+			  & 0xFFFFE000) + (start - 0x2000) * 4;
+	}
+#endif
+
+	/* Need to handle some of the global registers separately */
+	for (i = 0; i < ARRAY_SIZE(global_registers); i++) {
+		*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+		*cmd++ = global_registers[i];
+		*cmd++ = tmp_ctx.reg_values[i];
+	}
+
+	/* Save vertex shader constants */
+	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+	*cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+	*cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+	*cmd++ = 0x0000FFFF;
+	*cmd++ = 3; /* EXEC_COUNT */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	drawctxt->constant_save_commands[1].hostptr = cmd;
+	drawctxt->constant_save_commands[1].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	/*
+	   From fixup:
+
+	   dwords = SP_VS_CTRL_REG1.VSCONSTLENGTH / 4
+	   src = (HLSQ_SHADOW_BASE + 0x2000) / 4
+
+	   From register spec:
+	   SP_VS_CTRL_REG1.VSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
+	 */
+	*cmd++ = 0;	/* (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src  */
+	/* ALU constant shadow base */
+	*cmd++ = drawctxt->gpustate.gpuaddr & 0xfffffffc;
+
+	/* Save fragment shader constants */
+	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+	*cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+	*cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+	*cmd++ = 0x0000FFFF;
+	*cmd++ = 3; /* EXEC_COUNT */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	drawctxt->constant_save_commands[2].hostptr = cmd;
+	drawctxt->constant_save_commands[2].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	/*
+	   From fixup:
+
+	   dwords = SP_FS_CTRL_REG1.FSCONSTLENGTH / 4
+	   src = (HLSQ_SHADOW_BASE + 0x2000 + SSIZE) / 4
+
+	   From register spec:
+	   SP_FS_CTRL_REG1.FSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
+	 */
+	*cmd++ = 0;	/* (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src  */
+
+	/*
+	   From fixup:
+
+	   base = drawctxt->gpustate.gpuaddr (ALU constant shadow base)
+	   offset = SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET
+
+	   From register spec:
+	   SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [24:16]: Constant object
+	   start offset in on chip RAM,
+	   128bit aligned
+
+	   dst = base + offset
+	   Because of the base alignment we can use
+	   dst = base | offset
+	 */
+	*cmd++ = 0;		/* dst */
+
+	/* Save VS texture memory objects */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ =
+	    ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+		((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET) / 4);
+	*cmd++ =
+	    (drawctxt->gpustate.gpuaddr +
+	     VS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc;
+
+	/* Save VS texture mipmap pointers */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ =
+	    ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+		((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET) / 4);
+	*cmd++ =
+	    (drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP) & 0xfffffffc;
+
+	/* Save VS texture sampler objects */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+		((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET) / 4);
+	*cmd++ =
+	    (drawctxt->gpustate.gpuaddr +
+	     VS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc;
+
+	/* Save FS texture memory objects */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ =
+	    ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+		((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET + SSIZE) / 4);
+	*cmd++ =
+	    (drawctxt->gpustate.gpuaddr +
+	     FS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc;
+
+	/* Save FS texture mipmap pointers */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ =
+	    ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+		((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET + SSIZE) / 4);
+	*cmd++ =
+	    (drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP) & 0xfffffffc;
+
+	/* Save FS texture sampler objects */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ =
+	    ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+		((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET + SSIZE) / 4);
+	*cmd++ =
+	    (drawctxt->gpustate.gpuaddr +
+	     FS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc;
+
+	/* Create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->regconstant_save, start, cmd);
+
+	tmp_ctx.cmd = cmd;
+}
+
+/* Copy GMEM contents to system memory shadow. */
+static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
+					 struct adreno_context *drawctxt,
+					 struct gmem_shadow_t *shadow)
+{
+	unsigned int *cmds = tmp_ctx.cmd;
+	unsigned int *start = cmds;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
+
+	/* RB_MODE_CONTROL */
+	*cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RESOLVE_PASS) |
+		_SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1) |
+		_SET(RB_MODECONTROL_PACKER_TIMER_ENABLE, 1);
+	/* RB_RENDER_CONTROL */
+	*cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
+		_SET(RB_RENDERCONTROL_DISABLE_COLOR_PIPE, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_RB_COPY_CONTROL);
+	/* RB_COPY_CONTROL */
+	*cmds++ = _SET(RB_COPYCONTROL_RESOLVE_CLEAR_MODE,
+		RB_CLEAR_MODE_RESOLVE) |
+		_SET(RB_COPYCONTROL_COPY_GMEM_BASE,
+		tmp_ctx.gmem_base >> 14);
+	/* RB_COPY_DEST_BASE */
+	*cmds++ = _SET(RB_COPYDESTBASE_COPY_DEST_BASE,
+		shadow->gmemshadow.gpuaddr >> 5);
+	/* RB_COPY_DEST_PITCH */
+	*cmds++ = _SET(RB_COPYDESTPITCH_COPY_DEST_PITCH,
+		(shadow->pitch * 4) / 32);
+	/* RB_COPY_DEST_INFO */
+	*cmds++ = _SET(RB_COPYDESTINFO_COPY_DEST_TILE,
+		RB_TILINGMODE_LINEAR) |
+		_SET(RB_COPYDESTINFO_COPY_DEST_FORMAT, RB_R8G8B8A8_UNORM) |
+		_SET(RB_COPYDESTINFO_COPY_COMPONENT_ENABLE, 0x0F) |
+		_SET(RB_COPYDESTINFO_COPY_DEST_ENDIAN, RB_ENDIAN_NONE);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
+	/* GRAS_SC_CONTROL */
+	*cmds++ = _SET(GRAS_SC_CONTROL_RENDER_MODE, 2);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
+	/* VFD_CONTROL_0 */
+	*cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 4) |
+		_SET(VFD_CTRLREG0_PACKETSIZE, 2) |
+		_SET(VFD_CTRLREG0_STRMDECINSTRCNT, 1) |
+		_SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 1);
+	/* VFD_CONTROL_1 */
+	*cmds++ = _SET(VFD_CTRLREG1_MAXSTORAGE, 1) |
+		_SET(VFD_CTRLREG1_REGID4VTX,  252) |
+		_SET(VFD_CTRLREG1_REGID4INST,  252);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
+	/* VFD_FETCH_INSTR_0_0 */
+	*cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
+		_SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
+		_SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
+	/* VFD_FETCH_INSTR_1_0 */
+	*cmds++ = _SET(VFD_BASEADDR_BASEADDR,
+		shadow->quad_vertices.gpuaddr);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
+	/* VFD_DECODE_INSTR_0 */
+	*cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
+		_SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
+		_SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
+		_SET(VFD_DECODEINSTRUCTIONS_REGID, 5) |
+		_SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
+		_SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+	/* HLSQ_CONTROL_0_REG */
+	*cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_TWO_PIX_QUADS) |
+		_SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
+		_SET(HLSQ_CTRL0REG_SPSHADERRESTART, 1) |
+		_SET(HLSQ_CTRL0REG_RESERVED2, 1) |
+		_SET(HLSQ_CTRL0REG_CHUNKDISABLE, 1) |
+		_SET(HLSQ_CTRL0REG_CONSTSWITCHMODE, 1) |
+		_SET(HLSQ_CTRL0REG_LAZYUPDATEDISABLE, 1) |
+		_SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1) |
+		_SET(HLSQ_CTRL0REG_TPFULLUPDATE, 1);
+	/* HLSQ_CONTROL_1_REG */
+	*cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
+		_SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1) |
+		_SET(HLSQ_CTRL1REG_RESERVED1, 4);
+	/* HLSQ_CONTROL_2_REG */
+	*cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
+	/* HLSQ_CONTROL_3_REG */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
+	/* HLSQ_VS_CONTROL_REG */
+	*cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
+	/* HLSQ_FS_CONTROL_REG */
+	*cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
+		_SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 272) |
+		_SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 1);
+	/* HLSQ_CONST_VSPRESV_RANGE_REG */
+	*cmds++ = 0x00000000;
+	/* HLSQ_CONST_FSPRESV_RANGE_REQ */
+	*cmds++ = _SET(HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY, 32) |
+		_SET(HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY, 32);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
+	/* SP_FS_LENGTH_REG */
+	*cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
+	/* SP_SP_CTRL_REG */
+	*cmds++ = _SET(SP_SPCTRLREG_CONSTMODE, 1) |
+		_SET(SP_SPCTRLREG_SLEEPMODE, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
+	*cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
+	/* SP_VS_CTRL_REG0 */
+	*cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+		_SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+		_SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
+		_SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 3) |
+		_SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+		_SET(SP_VSCTRLREG0_VSSUPERTHREADMODE, 1) |
+		_SET(SP_VSCTRLREG0_VSLENGTH, 1);
+	/* SP_VS_CTRL_REG1 */
+	*cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 4);
+	/* SP_VS_PARAM_REG */
+	*cmds++ = _SET(SP_VSPARAMREG_POSREGID, 1) |
+		_SET(SP_VSPARAMREG_PSIZEREGID, 252);
+	/* SP_VS_OUT_REG_0 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG_1 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG_2 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG_3 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG_4 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG_5 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG_6 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG_7 */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
+	*cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
+	/* SP_VS_VPC_DST_REG_0 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_VPC_DST_REG_1 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_VPC_DST_REG_2 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_VPC_DST_REG_3 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OBJ_OFFSET_REG */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OBJ_START_REG */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
+	*cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
+	/* SP_VS_LENGTH_REG */
+	*cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
+	/* SP_FS_CTRL_REG0 */
+	*cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+		_SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+		_SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
+		_SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 2) |
+		_SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+		_SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_TWO_VTX_QUADS) |
+		_SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+		_SET(SP_FSCTRLREG0_FSLENGTH, 1);
+	/* SP_FS_CTRL_REG1 */
+	*cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
+		_SET(SP_FSCTRLREG1_FSINITIALOUTSTANDING, 2) |
+		_SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
+	/* SP_FS_OBJ_OFFSET_REG */
+	*cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 272) |
+		_SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 1);
+	/* SP_FS_OBJ_START_REG */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
+	/* SP_FS_FLAT_SHAD_MODE_REG_0 */
+	*cmds++ = 0x00000000;
+	/* SP_FS_FLAT_SHAD_MODE_REG_1 */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
+	/* SP_FS_OUTPUT_REG */
+	*cmds++ = _SET(SP_IMAGEOUTPUTREG_PAD0, SP_PIXEL_BASED);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
+	/* SP_FS_MRT_REG_0 */
+	*cmds++ = _SET(SP_FSMRTREG_REGID, 1);
+	/* SP_FS_MRT_REG_1 */
+	*cmds++ = 0x00000000;
+	/* SP_FS_MRT_REG_2 */
+	*cmds++ = 0x00000000;
+	/* SP_FS_MRT_REG_3 */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
+	*cmds++ = CP_REG(A3XX_VPC_ATTR);
+	/* VPC_ATTR */
+	*cmds++ = _SET(VPC_VPCATTR_THRHDASSIGN, 1) |
+		_SET(VPC_VPCATTR_LMSIZE, 1);
+	/* VPC_PACK */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_INTERP_MODE_0 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_INTERP_MODE_1 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_INTERP_MODE_2 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_INTERP_MODE_3 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_PS_REPL_MODE_0 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_PS_REPL_MODE_1 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_PS_REPL_MODE_2 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_PS_REPL_MODE_3 */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
+	*cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+		| (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+	*cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+
+	/* (sy)(rpt3)mov.f32f32 r0.y, (r)r1.y; */
+	*cmds++ = 0x00000005; *cmds++ = 0x30044b01;
+	/* end; */
+	*cmds++ = 0x00000000; *cmds++ = 0x03000000;
+	/* nop; */
+	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
+	/* nop; */
+	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
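+	/*
+	 * Note (inferred from the values above): the four 64-bit
+	 * instructions fill exactly one 256-bit unit, matching
+	 * NUMOFUNITS = 1 in the CP_LOAD_STATE header and VSLENGTH = 1
+	 * programmed in SP_VS_CTRL_REG0 earlier.
+	 */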
+
+	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
+	*cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+		| (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+	*cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+
+	/* (sy)(rpt3)mov.f32f32 r0.y, (r)c0.x; */
+	*cmds++ = 0x00000000; *cmds++ = 0x30244b01;
+	/* end; */
+	*cmds++ = 0x00000000; *cmds++ = 0x03000000;
+	/* nop; */
+	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
+	/* nop; */
+	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
+	/* RB_MSAA_CONTROL */
+	*cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
+		_SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
+	/* RB_DEPTH_CONTROL */
+	*cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_NEVER);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
+	/* RB_MRT_CONTROL0 */
+	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+		_SET(RB_MRTCONTROL_ROP_CODE, 12) |
+		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
+	/* RB_MRT_BLEND_CONTROL0 */
+	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+	/* RB_MRT_CONTROL1 */
+	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
+	/* RB_MRT_BLEND_CONTROL1 */
+	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+	/* RB_MRT_CONTROL2 */
+	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
+	/* RB_MRT_BLEND_CONTROL2 */
+	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+	/* RB_MRT_CONTROL3 */
+	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
+	/* RB_MRT_BLEND_CONTROL3 */
+	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
+	/* VFD_INDEX_MIN */
+	*cmds++ = 0x00000000;
+	/* VFD_INDEX_MAX */
+	*cmds++ = 0xFFFFFFFF;
+	/* VFD_INSTANCEID_OFFSET */
+	*cmds++ = 0x00000000;
+	/* VFD_INDEX_OFFSET */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
+	/* VFD_VS_THREADING_THRESHOLD */
+	*cmds++ = _SET(VFD_THREADINGTHRESHOLD_RESERVED6, 12) |
+		_SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
+	/* TPL1_TP_VS_TEX_OFFSET */
+	*cmds++ = 0;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
+	/* TPL1_TP_FS_TEX_OFFSET */
+	*cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
+		_SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
+		_SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_PC_PRIM_VTX_CNTL);
+	/* PC_PRIM_VTX_CNTL */
+	*cmds++ = _SET(PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE,
+		PC_DRAW_TRIANGLES) |
+		_SET(PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE,
+		PC_DRAW_TRIANGLES) |
+		_SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
+	/* GRAS_SC_WINDOW_SCISSOR_TL */
+	*cmds++ = 0x00000000;
+	/* GRAS_SC_WINDOW_SCISSOR_BR */
+	*cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
+		_SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
+	/* GRAS_SC_SCREEN_SCISSOR_TL */
+	*cmds++ = 0x00000000;
+	/* GRAS_SC_SCREEN_SCISSOR_BR */
+	*cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
+		_SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
+	/* GRAS_CL_VPORT_XOFFSET */
+	*cmds++ = 0x00000000;
+	/* GRAS_CL_VPORT_XSCALE */
+	*cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3f800000);
+	/* GRAS_CL_VPORT_YOFFSET */
+	*cmds++ = 0x00000000;
+	/* GRAS_CL_VPORT_YSCALE */
+	*cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3f800000);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
+	/* GRAS_CL_VPORT_ZOFFSET */
+	*cmds++ = 0x00000000;
+	/* GRAS_CL_VPORT_ZSCALE */
+	*cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3f800000);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_GRAS_CL_CLIP_CNTL);
+	/* GRAS_CL_CLIP_CNTL */
+	*cmds++ = _SET(GRAS_CL_CLIP_CNTL_CLIP_DISABLE, 1) |
+		_SET(GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE, 1) |
+		_SET(GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE, 1) |
+		_SET(GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE, 1) |
+		_SET(GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_GRAS_CL_GB_CLIP_ADJ);
+	/* GRAS_CL_GB_CLIP_ADJ */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+	*cmds++ = 0x00000000;
+
+	/*
+	 * Resolve using two draw calls with a dummy register
+	 * write in between. This is an HLM workaround
+	 * that should be removed later.
+	 */
+	*cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
+	*cmds++ = 0x00000000; /* Viz query info */
+	*cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
+					  PC_DI_SRC_SEL_IMMEDIATE,
+					  PC_DI_INDEX_SIZE_32_BIT,
+					  PC_DI_IGNORE_VISIBILITY);
+	*cmds++ = 0x00000003; /* Num indices */
+	*cmds++ = 0x00000000; /* Index 0 */
+	*cmds++ = 0x00000001; /* Index 1 */
+	*cmds++ = 0x00000002; /* Index 2 */
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
+	*cmds++ = 0x00000000; /* Viz query info */
+	*cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
+					  PC_DI_SRC_SEL_IMMEDIATE,
+					  PC_DI_INDEX_SIZE_32_BIT,
+					  PC_DI_IGNORE_VISIBILITY);
+	*cmds++ = 0x00000003; /* Num indices */
+	*cmds++ = 0x00000002; /* Index 0 */
+	*cmds++ = 0x00000001; /* Index 1 */
+	*cmds++ = 0x00000003; /* Index 2 */
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+	*cmds++ = 0x00000000;
+
+	/* Create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, shadow->gmem_save, start, cmds);
+
+	return cmds;
+}
+
+static void build_shader_save_cmds(struct adreno_device *adreno_dev,
+				   struct adreno_context *drawctxt)
+{
+	unsigned int *cmd = tmp_ctx.cmd;
+	unsigned int *start;
+
+	/* Reserve space for boolean values used for COND_EXEC packet */
+	drawctxt->cond_execs[0].hostptr = cmd;
+	drawctxt->cond_execs[0].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+	*cmd++ = 0;
+	drawctxt->cond_execs[1].hostptr = cmd;
+	drawctxt->cond_execs[1].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+	*cmd++ = 0;
+
+	drawctxt->shader_save_commands[0].hostptr = cmd;
+	drawctxt->shader_save_commands[0].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	*cmd++ = 0;
+	drawctxt->shader_save_commands[1].hostptr = cmd;
+	drawctxt->shader_save_commands[1].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	*cmd++ = 0;
+
+	start = cmd;
+
+	/* Save vertex shader */
+
+	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+	*cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+	*cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+	*cmd++ = 0x0000FFFF;
+	*cmd++ = 3;		/* EXEC_COUNT */
+
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	drawctxt->shader_save_commands[2].hostptr = cmd;
+	drawctxt->shader_save_commands[2].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	/*
+	   From fixup:
+
+	   dwords = SP_VS_CTRL_REG0.VS_LENGTH * 8
+
+	   From regspec:
+	   SP_VS_CTRL_REG0.VS_LENGTH [31:24]: VS length, unit = 256 bits.
+	   If bit 31 is 1 it indicates an overflow, i.e. an overly
+	   long shader.
+
+	   src = (HLSQ_SHADOW_BASE + 0x1000)/4
+	 */
+	*cmd++ = 0;	/*(dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
+	*cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;
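+	/*
+	 * Worked example (hypothetical values): with VS_LENGTH = 2 the
+	 * fixup stores dwords = 16, i.e. two 256-bit instruction units
+	 * of eight dwords each are copied out of the HLSQ shadow.
+	 */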
+
+	/* Save fragment shader */
+	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+	*cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+	*cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+	*cmd++ = 0x0000FFFF;
+	*cmd++ = 3;		/* EXEC_COUNT */
+
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	drawctxt->shader_save_commands[3].hostptr = cmd;
+	drawctxt->shader_save_commands[3].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	/*
+	   From fixup:
+
+	   dwords = SP_FS_CTRL_REG0.FS_LENGTH * 8
+
+	   From regspec:
+	   SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256 bits.
+	   If bit 31 is 1 it indicates an overflow, i.e. an overly
+	   long shader.
+
+	   fs_offset = SP_FS_OBJ_OFFSET_REG.SHADEROBJOFFSETINIC * 32
+	   From regspec:
+
+	   SP_FS_OBJ_OFFSET_REG.SHADEROBJOFFSETINIC [31:25]:
+	   First instruction of the whole shader will be stored from
+	   the offset in instruction cache, unit = 256bits, a cache line.
+	   It can start from 0 if no VS available.
+
+	   src = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE + fs_offset)/4
+	 */
+	*cmd++ = 0;	/*(dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
+	*cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
+		  + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;
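+	/*
+	 * Worked example (hypothetical values): with FS_LENGTH = 2 and
+	 * SHADEROBJOFFSETINIC = 1, the fixup stores dwords = 16 and
+	 * src = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE + 32)/4.
+	 */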
+
+	/* Create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->shader_save, start, cmd);
+
+	tmp_ctx.cmd = cmd;
+}
+
+/*
+ * Make an IB to modify context save IBs with the correct shader instruction
+ * and constant sizes and offsets.
+ */
+
+static void build_save_fixup_cmds(struct adreno_device *adreno_dev,
+				  struct adreno_context *drawctxt)
+{
+	unsigned int *cmd = tmp_ctx.cmd;
+	unsigned int *start = cmd;
+
+	/* Flush HLSQ lazy updates */
+	*cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
+	*cmd++ = 0x7;		/* HLSQ_FLUSH */
+	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	*cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
+	*cmd++ = 0x00000000; /* No start addr for full invalidate */
+	*cmd++ = (unsigned int)
+		UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
+		UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
+		0; /* No end addr for full invalidate */
+
+	/* Make sure registers are flushed */
+	*cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
+	*cmd++ = 0;
+
+#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
+
+	/* Save shader sizes */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_VS_CTRL_REG0;
+	*cmd++ = drawctxt->shader_save_commands[2].gpuaddr;
+
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_FS_CTRL_REG0;
+	*cmd++ = drawctxt->shader_save_commands[3].gpuaddr;
+
+	/* Save shader offsets */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+	*cmd++ = drawctxt->shader_save_commands[1].gpuaddr;
+
+	/* Save constant sizes */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_VS_CTRL_REG1;
+	*cmd++ = drawctxt->constant_save_commands[1].gpuaddr;
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_FS_CTRL_REG1;
+	*cmd++ = drawctxt->constant_save_commands[2].gpuaddr;
+
+	/* Save FS constant offset */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+	*cmd++ = drawctxt->constant_save_commands[0].gpuaddr;
+
+	/* Save VS instruction store mode */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_VS_CTRL_REG0;
+	*cmd++ = drawctxt->cond_execs[0].gpuaddr;
+
+	/* Save FS instruction store mode */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_FS_CTRL_REG0;
+	*cmd++ = drawctxt->cond_execs[1].gpuaddr;
+#else
+
+	/* Shader save */
+	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000,
+			11+REG_TO_MEM_LOOP_COUNT_SHIFT,
+			(HLSQ_SHADOW_BASE + 0x1000) / 4,
+			drawctxt->shader_save_commands[2].gpuaddr);
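+	/*
+	 * rmw_regtomem() emits the same read/modify/write pattern that
+	 * is spelled out by hand below: read the register, AND off the
+	 * field of interest, rotate and OR in a constant, then
+	 * CP_REG_TO_MEM the result over the packet word at the given
+	 * GPU address.
+	 */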
+
+	/* CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0x00000000) | SP_FS_CTRL_REG0 */
+	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+	*cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
+	*cmd++ = 0x00000000;	/* AND value */
+	*cmd++ = A3XX_SP_FS_CTRL_REG0;	/* OR address */
+	/* CP_SCRATCH_REG2 = ( (CP_SCRATCH_REG2 & 0x7f000000) >> 21 )
+	   |  ((HLSQ_SHADOW_BASE+0x1000+SSIZE)/4) */
+	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+	*cmd++ = ((11 + REG_TO_MEM_LOOP_COUNT_SHIFT) << 24) |
+		A3XX_CP_SCRATCH_REG2;
+	*cmd++ = 0x7f000000;	/* AND value */
+	*cmd++ = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE) / 4;	/* OR value */
+
+	/*
+	 * CP_SCRATCH_REG3 = (CP_SCRATCH_REG3 & 0x00000000) |
+	 * SP_FS_OBJ_OFFSET_REG
+	 */
+
+	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+	*cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG3;
+	*cmd++ = 0x00000000;	/* AND value */
+	*cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;	/* OR address */
+	/*
+	 * CP_SCRATCH_REG3 = ( (CP_SCRATCH_REG3 & 0xfe000000) >> 25 ) |
+	 * 0x00000000
+	 */
+	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+	*cmd++ = A3XX_CP_SCRATCH_REG3;
+	*cmd++ = 0xfe000000;	/* AND value */
+	*cmd++ = 0x00000000;	/* OR value */
+	/*
+	 * CP_SCRATCH_REG2 =  (CP_SCRATCH_REG2 & 0xffffffff) | CP_SCRATCH_REG3
+	 */
+	*cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+	*cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
+	*cmd++ = 0xffffffff;	/* AND value */
+	*cmd++ = A3XX_CP_SCRATCH_REG3;	/* OR address */
+
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_CP_SCRATCH_REG2;
+	*cmd++ = drawctxt->shader_save_commands[3].gpuaddr;
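+	/*
+	 * The scratch-register sequence above folds two registers into
+	 * one CP_REG_TO_MEM header word: the dword count derived from
+	 * FS_LENGTH and the source offset derived from
+	 * SHADEROBJOFFSETINIC, since the FS save address depends on both.
+	 */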
+
+	/* Constant save */
+	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+			   17, (HLSQ_SHADOW_BASE + 0x2000) / 4,
+			   drawctxt->constant_save_commands[1].gpuaddr);
+
+	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+			   17, (HLSQ_SHADOW_BASE + 0x2000 + SSIZE) / 4,
+			   drawctxt->constant_save_commands[2].gpuaddr);
+
+	cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000,
+			   18, drawctxt->gpustate.gpuaddr & 0xfffffe00,
+			   drawctxt->constant_save_commands[2].gpuaddr
+			   + sizeof(unsigned int));
+
+	/* Modify constant save conditionals */
+	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+		0, 0, drawctxt->cond_execs[2].gpuaddr);
+
+	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+		0, 0, drawctxt->cond_execs[3].gpuaddr);
+
+	/* Save VS instruction store mode */
+
+	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x00000002,
+			   31, 0, drawctxt->cond_execs[0].gpuaddr);
+
+	/* Save FS instruction store mode */
+	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x00000002,
+			   31, 0, drawctxt->cond_execs[1].gpuaddr);
+
+#endif
+
+	create_ib1(drawctxt, drawctxt->save_fixup, start, cmd);
+
+	tmp_ctx.cmd = cmd;
+}
+
+/****************************************************************************/
+/* Functions to build context restore IBs                                   */
+/****************************************************************************/
+
+static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
+					 struct adreno_context *drawctxt,
+					 struct gmem_shadow_t *shadow)
+{
+	unsigned int *cmds = tmp_ctx.cmd;
+	unsigned int *start = cmds;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+	/* HLSQ_CONTROL_0_REG */
+	*cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
+		_SET(HLSQ_CTRL0REG_SPSHADERRESTART, 1) |
+		_SET(HLSQ_CTRL0REG_CHUNKDISABLE, 1) |
+		_SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1) |
+		_SET(HLSQ_CTRL0REG_TPFULLUPDATE, 1);
+	/* HLSQ_CONTROL_1_REG */
+	*cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS);
+	/* HLSQ_CONTROL_2_REG */
+	*cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
+	/* HLSQ_CONTROL_3_REG */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_RB_MRT_BUF_INFO0);
+	/* RB_MRT_BUF_INFO0 */
+	*cmds++ = _SET(RB_MRTBUFINFO_COLOR_FORMAT, RB_R8G8B8A8_UNORM) |
+		_SET(RB_MRTBUFINFO_COLOR_TILE_MODE, RB_TILINGMODE_32X32) |
+		_SET(RB_MRTBUFINFO_COLOR_BUF_PITCH,
+		(shadow->gmem_pitch * 4 * 8) / 256);
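+	/*
+	 * The pitch appears to be programmed in 256-bit units:
+	 * gmem_pitch pixels * 4 bytes * 8 bits / 256.
+	 */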
+	/* RB_MRT_BUF_BASE0 */
+	*cmds++ = _SET(RB_MRTBUFBASE_COLOR_BUF_BASE, tmp_ctx.gmem_base >> 5);
+
+	/* Texture samplers */
+	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
+	*cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+		| (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+	*cmds++ = (HLSQ_TP_TEX_SAMPLERS << CP_LOADSTATE_STATETYPE_SHIFT)
+		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+	*cmds++ = 0x00000240;
+	*cmds++ = 0x00000000;
+
+	/* Texture memobjs */
+	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 6);
+	*cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+		| (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+	*cmds++ = (HLSQ_TP_TEX_MEMOBJ << CP_LOADSTATE_STATETYPE_SHIFT)
+		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+	*cmds++ = 0x4cc06880;
+	*cmds++ = shadow->height | (shadow->width << 14);
+	*cmds++ = (shadow->pitch*4*8) << 9;
+	*cmds++ = 0x00000000;
+
+	/* Mipmap bases */
+	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 16);
+	*cmds++ = (224 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+		| (HLSQ_BLOCK_ID_TP_MIPMAP << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+		| (14 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+	*cmds++ = (HLSQ_TP_MIPMAP_BASE << CP_LOADSTATE_STATETYPE_SHIFT)
+		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+	*cmds++ = shadow->gmemshadow.gpuaddr;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
+	/* HLSQ_VS_CONTROL_REG */
+	*cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
+	/* HLSQ_FS_CONTROL_REG */
+	*cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
+		_SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 128) |
+		_SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 2);
+	/* HLSQ_CONST_VSPRESV_RANGE_REG */
+	*cmds++ = 0x00000000;
+	/* HLSQ_CONST_FSPRESV_RANGE_REG */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
+	/* SP_FS_LENGTH_REG */
+	*cmds++ = _SET(SP_SHADERLENGTH_LEN, 2);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
+	*cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
+	/* SP_VS_CTRL_REG0 */
+	*cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+		_SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+		_SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
+		_SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 2) |
+		_SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+		_SET(SP_VSCTRLREG0_VSLENGTH, 1);
+	/* SP_VS_CTRL_REG1 */
+	*cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 8);
+	/* SP_VS_PARAM_REG */
+	*cmds++ = _SET(SP_VSPARAMREG_POSREGID, 4) |
+		_SET(SP_VSPARAMREG_PSIZEREGID, 252) |
+		_SET(SP_VSPARAMREG_TOTALVSOUTVAR, 1);
+	/* SP_VS_OUT_REG0 */
+	*cmds++ = _SET(SP_VSOUTREG_COMPMASK0, 3);
+	/* SP_VS_OUT_REG1 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG2 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG3 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG4 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG5 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG6 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OUT_REG7 */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
+	*cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
+	/* SP_VS_VPC_DST_REG0 */
+	*cmds++ = _SET(SP_VSVPCDSTREG_OUTLOC0, 8);
+	/* SP_VS_VPC_DST_REG1 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_VPC_DST_REG2 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_VPC_DST_REG3 */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OBJ_OFFSET_REG */
+	*cmds++ = 0x00000000;
+	/* SP_VS_OBJ_START_REG */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
+	*cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
+	/* SP_VS_LENGTH_REG */
+	*cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
+	/* SP_FS_CTRL_REG0 */
+	*cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+		_SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+		_SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
+		_SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 2) |
+		_SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+		_SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+		_SET(SP_FSCTRLREG0_PIXLODENABLE, 1) |
+		_SET(SP_FSCTRLREG0_FSLENGTH, 2);
+	/* SP_FS_CTRL_REG1 */
+	*cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
+		_SET(SP_FSCTRLREG1_FSINITIALOUTSTANDING, 2) |
+		_SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
+	/* SP_FS_OBJ_OFFSET_REG */
+	*cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128);
+	/* SP_FS_OBJ_START_REG */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
+	/* SP_FS_FLAT_SHAD_MODE_REG0 */
+	*cmds++ = 0x00000000;
+	/* SP_FS_FLAT_SHAD_MODE_REG1 */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
+	/* SP_FS_OUTPUT_REG */
+	*cmds++ = _SET(SP_FSOUTREG_PAD0, SP_PIXEL_BASED);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
+	/* SP_FS_MRT_REG0 */
+	*cmds++ = _SET(SP_FSMRTREG_REGID, 4);
+	/* SP_FS_MRT_REG1 */
+	*cmds++ = 0;
+	/* SP_FS_MRT_REG2 */
+	*cmds++ = 0;
+	/* SP_FS_MRT_REG3 */
+	*cmds++ = 0;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
+	*cmds++ = CP_REG(A3XX_VPC_ATTR);
+	/* VPC_ATTR */
+	*cmds++ = _SET(VPC_VPCATTR_TOTALATTR, 2) |
+		_SET(VPC_VPCATTR_THRHDASSIGN, 1) |
+		_SET(VPC_VPCATTR_LMSIZE, 1);
+	/* VPC_PACK */
+	*cmds++ = _SET(VPC_VPCPACK_NUMFPNONPOSVAR, 2) |
+		_SET(VPC_VPCPACK_NUMNONPOSVSVAR, 2);
+	/* VPC_VARYING_INTERP_MODE_0 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_INTERP_MODE_1 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_INTERP_MODE_2 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_INTERP_MODE_3 */
+	*cmds++ = 0x00000000;
+	/* VPC_VARYING_PS_REPL_MODE_0 */
+	*cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0A,	1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+	/* VPC_VARYING_PS_REPL_MODE_1 */
+	*cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0A,	1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+	/* VPC_VARYING_PS_REPL_MODE_2 */
+	*cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0A,	1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+	/* VPC_VARYING_PS_REPL_MODE_3 */
+	*cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0A,	1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+		_SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
+	/* SP_SP_CTRL_REG */
+	*cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1);
+
+	/* Load vertex shader */
+	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
+	*cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+		| (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+		| (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+	*cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+	/* (sy)end; */
+	*cmds++ = 0x00000000; *cmds++ = 0x13000000;
+	/* nop; */
+	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
+	/* nop; */
+	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
+	/* nop; */
+	*cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+	/* Load fragment shader */
+	*cmds++ = cp_type3_packet(CP_LOAD_STATE, 18);
+	*cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+		| (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+		| (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+		| (2 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+	*cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+		| (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+	/* (sy)(rpt1)bary.f (ei)r0.z, (r)0, r0.x; */
+	*cmds++ = 0x00002000; *cmds++ = 0x57368902;
+	/* (rpt5)nop; */
+	*cmds++ = 0x00000000; *cmds++ = 0x00000500;
+	/* sam (f32)r0.xyzw, r0.z, s#0, t#0; */
+	*cmds++ = 0x00000005; *cmds++ = 0xa0c01f00;
+	/* (sy)mov.f32f32 r1.x, r0.x; */
+	*cmds++ = 0x00000000; *cmds++ = 0x30044004;
+	/* mov.f32f32 r1.y, r0.y; */
+	*cmds++ = 0x00000001; *cmds++ = 0x20044005;
+	/* mov.f32f32 r1.z, r0.z; */
+	*cmds++ = 0x00000002; *cmds++ = 0x20044006;
+	/* mov.f32f32 r1.w, r0.w; */
+	*cmds++ = 0x00000003; *cmds++ = 0x20044007;
+	/* end; */
+	*cmds++ = 0x00000000; *cmds++ = 0x03000000;
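+	/*
+	 * Note (inferred from the values above): the eight 64-bit
+	 * instructions fill two 256-bit units, matching NUMOFUNITS = 2
+	 * in the CP_LOAD_STATE header and FSLENGTH = 2 programmed in
+	 * SP_FS_CTRL_REG0 above.
+	 */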
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
+	/* VFD_CONTROL_0 */
+	*cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 8) |
+		_SET(VFD_CTRLREG0_PACKETSIZE, 2) |
+		_SET(VFD_CTRLREG0_STRMDECINSTRCNT, 2) |
+		_SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 2);
+	/* VFD_CONTROL_1 */
+	*cmds++ =  _SET(VFD_CTRLREG1_MAXSTORAGE, 2) |
+		_SET(VFD_CTRLREG1_REGID4VTX, 252) |
+		_SET(VFD_CTRLREG1_REGID4INST, 252);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
+	/* VFD_FETCH_INSTR_0_0 */
+	*cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 7) |
+		_SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 8) |
+		_SET(VFD_FETCHINSTRUCTIONS_SWITCHNEXT, 1) |
+		_SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
+	/* VFD_FETCH_INSTR_1_0 */
+	*cmds++ = _SET(VFD_BASEADDR_BASEADDR,
+		shadow->quad_vertices_restore.gpuaddr);
+	/* VFD_FETCH_INSTR_0_1 */
+	*cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
+		_SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
+		_SET(VFD_FETCHINSTRUCTIONS_INDEXDECODE, 1) |
+		_SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
+	/* VFD_FETCH_INSTR_1_1 */
+	*cmds++ = _SET(VFD_BASEADDR_BASEADDR,
+		shadow->quad_vertices_restore.gpuaddr + 16);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
+	/* VFD_DECODE_INSTR_0 */
+	*cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
+		_SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
+		_SET(VFD_DECODEINSTRUCTIONS_FORMAT, 1) |
+		_SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 8) |
+		_SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1) |
+		_SET(VFD_DECODEINSTRUCTIONS_SWITCHNEXT, 1);
+	/* VFD_DECODE_INSTR_1 */
+	*cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
+		_SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
+		_SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
+		_SET(VFD_DECODEINSTRUCTIONS_REGID, 4) |
+		_SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
+		_SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
+	/* RB_DEPTH_CONTROL */
+	*cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_NEVER);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
+	/* RB_STENCIL_CONTROL */
+	*cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_NEVER) |
+		_SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
+		_SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
+		_SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
+		_SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_NEVER) |
+		_SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
+		_SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
+		_SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
+	/* RB_MODE_CONTROL */
+	*cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RENDERING_PASS) |
+		_SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_RENDER_CONTROL);
+	/* RB_RENDER_CONTROL */
+	*cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
+		_SET(RB_RENDERCONTROL_ALPHA_TEST_FUNC, 7);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
+	/* RB_MSAA_CONTROL */
+	*cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
+		_SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
+	/* RB_MRT_CONTROL0 */
+	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+		_SET(RB_MRTCONTROL_ROP_CODE, 12) |
+		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
+	/* RB_MRT_BLENDCONTROL0 */
+	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+	/* RB_MRT_CONTROL1 */
+	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
+	/* RB_MRT_BLENDCONTROL1 */
+	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+	/* RB_MRT_CONTROL2 */
+	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
+	/* RB_MRT_BLENDCONTROL2 */
+	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+	/* RB_MRT_CONTROL3 */
+	*cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+		_SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+		_SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
+	/* RB_MRT_BLENDCONTROL3 */
+	*cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+		_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+		_SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
+	/* VFD_INDEX_MIN */
+	*cmds++ = 0x00000000;
+	/* VFD_INDEX_MAX */
+	*cmds++ = 0xFFFFFFFF;
+	/* VFD_INSTANCEID_OFFSET */
+	*cmds++ = 0x00000000;
+	/* VFD_INDEX_OFFSET */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
+	/* VFD_VS_THREADING_THRESHOLD */
+	*cmds++ = _SET(VFD_THREADINGTHRESHOLD_RESERVED6, 12) |
+		_SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
+	/* TPL1_TP_VS_TEX_OFFSET */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
+	/* TPL1_TP_FS_TEX_OFFSET */
+	*cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
+		_SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
+		_SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
+	/* GRAS_SC_CONTROL */
+	*cmds++ = _SET(GRAS_SC_CONTROL_RASTER_MODE, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
+	/* GRAS_SU_MODE_CONTROL */
+	*cmds++ = 0x00000000;
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
+	/* GRAS_SC_WINDOW_SCISSOR_TL */
+	*cmds++ = 0x00000000;
+	/* GRAS_SC_WINDOW_SCISSOR_BR */
+	*cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
+		_SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
+	/* GRAS_SC_SCREEN_SCISSOR_TL */
+	*cmds++ = 0x00000000;
+	/* GRAS_SC_SCREEN_SCISSOR_BR */
+	*cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
+		_SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+	*cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
+	/* GRAS_CL_VPORT_XOFFSET */
+	*cmds++ = 0x00000000;
+	/* GRAS_CL_VPORT_XSCALE */
+	*cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3F800000);
+	/* GRAS_CL_VPORT_YOFFSET */
+	*cmds++ = 0x00000000;
+	/* GRAS_CL_VPORT_YSCALE */
+	*cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3F800000);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+	*cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
+	/* GRAS_CL_VPORT_ZOFFSET */
+	*cmds++ = 0x00000000;
+	/* GRAS_CL_VPORT_ZSCALE */
+	*cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3F800000);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_GRAS_CL_CLIP_CNTL);
+	/* GRAS_CL_CLIP_CNTL */
+	*cmds++ = _SET(GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER, 1);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_SP_FS_IMAGE_OUTPUT_REG_0);
+	/* SP_FS_IMAGE_OUTPUT_REG_0 */
+	*cmds++ = _SET(SP_IMAGEOUTPUTREG_MRTFORMAT, SP_R8G8B8A8_UNORM);
+
+	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmds++ = CP_REG(A3XX_PC_PRIM_VTX_CNTL);
+	/* PC_PRIM_VTX_CONTROL */
+	*cmds++ = _SET(PC_PRIM_VTX_CONTROL_STRIDE_IN_VPC, 2) |
+		_SET(PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE,
+		PC_DRAW_TRIANGLES) |
+		_SET(PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE,
+		PC_DRAW_TRIANGLES) |
+		_SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);
+
+	*cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
+	*cmds++ = 0x00000000; /* Viz query info */
+	*cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_RECTLIST,
+					  PC_DI_SRC_SEL_AUTO_INDEX,
+					  PC_DI_INDEX_SIZE_16_BIT,
+					  PC_DI_IGNORE_VISIBILITY);
+	*cmds++ = 0x00000002; /* Num indices */
+
+	/* Create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
+
+	return cmds;
+}
+
+static void build_regrestore_cmds(struct adreno_device *adreno_dev,
+				  struct adreno_context *drawctxt)
+{
+	unsigned int *start = tmp_ctx.cmd;
+	unsigned int *cmd = start;
+	unsigned int *lcc_start;
+	int i;
+
+	/* Flush HLSQ lazy updates */
+	*cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
+	*cmd++ = 0x7;		/* HLSQ_FLUSH */
+	*cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+	*cmd++ = 0;
+
+	*cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
+	*cmd++ = 0x00000000;    /* No start addr for full invalidate */
+	*cmd++ = (unsigned int)
+		UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
+		UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
+		0;  /* No end addr for full invalidate */
+
+	lcc_start = cmd;
+
+	/* deferred cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ???); */
+	cmd++;
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	/* Force mismatch */
+	*cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
+#else
+	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+#endif
+
+	for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) {
+		cmd = reg_range(cmd, context_register_ranges[i * 2],
+				context_register_ranges[i * 2 + 1]);
+	}
+
+	lcc_start[0] = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT,
+					(cmd - lcc_start) - 1);
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	lcc_start[2] |= (0 << 24) | (4 << 16);	/* Disable shadowing. */
+#else
+	lcc_start[2] |= (1 << 24) | (4 << 16);
+#endif
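+	/*
+	 * lcc_start[0] was skipped above and is patched here with the
+	 * final CP_LOAD_CONSTANT_CONTEXT size once all register ranges
+	 * have been emitted; lcc_start[2], the first range word, also
+	 * carries the mode bits ((1 << 24) enables shadow writes, per
+	 * the #ifdef above).
+	 */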
+
+	for (i = 0; i < ARRAY_SIZE(global_registers); i++) {
+		*cmd++ = cp_type0_packet(global_registers[i], 1);
+		tmp_ctx.reg_values[i] = virt2gpu(cmd, &drawctxt->gpustate);
+		*cmd++ = 0x00000000;
+	}
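+	/*
+	 * Each global register gets its own type0 packet with a zero
+	 * placeholder; tmp_ctx.reg_values[] records the GPU address of
+	 * each placeholder so that the save-side commands (built
+	 * elsewhere) can deposit the live register values there.
+	 */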
+
+	create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
+	tmp_ctx.cmd = cmd;
+}
+
+static void build_constantrestore_cmds(struct adreno_device *adreno_dev,
+				       struct adreno_context *drawctxt)
+{
+	unsigned int *cmd = tmp_ctx.cmd;
+	unsigned int *start = cmd;
+	unsigned int mode = 4;	/* Indirect mode */
+	unsigned int stateblock;
+	unsigned int numunits;
+	unsigned int statetype;
+
+	drawctxt->cond_execs[2].hostptr = cmd;
+	drawctxt->cond_execs[2].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+	*cmd++ = 0;
+	drawctxt->cond_execs[3].hostptr = cmd;
+	drawctxt->cond_execs[3].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+	*cmd++ = 0;
+
+#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+	*cmd++ = 4 << 16;
+	*cmd++ = 0x0;
+#endif
+	/* HLSQ full update */
+	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+	*cmd++ = 0x68000240;	/* A3XX_HLSQ_CONTROL_0_REG */
+
+#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+	/* Re-enable shadowing */
+	*cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+	*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+	*cmd++ = (4 << 16) | (1 << 24);
+	*cmd++ = 0x0;
+#endif
+
+	/* Load vertex shader constants */
+	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+	*cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+	*cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+	*cmd++ = 0x0000ffff;
+	*cmd++ = 3; /* EXEC_COUNT */
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	drawctxt->constant_load_commands[0].hostptr = cmd;
+	drawctxt->constant_load_commands[0].gpuaddr = virt2gpu(cmd,
+		&drawctxt->gpustate);
+
+	/*
+	   From fixup:
+
+	   mode = 4 (indirect)
+	   stateblock = 4 (Vertex constants)
+	   numunits = SP_VS_CTRL_REG1.VSCONSTLENGTH * 2; (256bit units)
+
+	   From register spec:
+	   SP_VS_CTRL_REG1.VSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
+
+	   ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16);
+	 */
+
+	*cmd++ = 0;		/* ord1 */
+	*cmd++ = ((drawctxt->gpustate.gpuaddr) & 0xfffffffc) | 1;
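+	/*
+	 * Worked example (hypothetical values): with VSCONSTLENGTH = 2,
+	 * numunits = 4 and ord1 = (4<<22) | (4<<19) | (4<<16)
+	 * = 0x01240000.
+	 */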
+
+	/* Load fragment shader constants */
+	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+	*cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+	*cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+	*cmd++ = 0x0000ffff;
+	*cmd++ = 3; /* EXEC_COUNT */
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	drawctxt->constant_load_commands[1].hostptr = cmd;
+	drawctxt->constant_load_commands[1].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	/*
+	   From fixup:
+
+	   mode = 4 (indirect)
+	   stateblock = 6 (Fragment constants)
+	   numunits = SP_FS_CTRL_REG1.FSCONSTLENGTH * 2; (256bit units)
+
+	   From register spec:
+	   SP_FS_CTRL_REG1.FSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
+
+	   ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16);
+	 */
+
+	*cmd++ = 0;		/* ord1 */
+	drawctxt->constant_load_commands[2].hostptr = cmd;
+	drawctxt->constant_load_commands[2].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	/*
+	   From fixup:
+	   base = drawctxt->gpustate.gpuaddr (ALU constant shadow base)
+	   offset = SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET
+
+	   From register spec:
+	   SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [24:16]: Constant
+	   object start offset in on-chip RAM, 128-bit aligned.
+
+	   ord2 = base + offset | 1
+	   Because of the base alignment we can use
+	   ord2 = base | offset | 1
+	 */
+	*cmd++ = 0;		/* ord2 */
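+	/*
+	 * OR stands in for ADD here: the restore fixup masks the base
+	 * with 0xfffffe00, so the offset bits land in zero bits of the
+	 * base without carrying.
+	 */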
+
+	/* Restore VS texture memory objects */
+	stateblock = 0;
+	statetype = 1;
+	numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;
+
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+	*cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MEM_OBJECTS)
+	    & 0xfffffffc) | statetype;
+
+	/* Restore VS texture mipmap addresses */
+	stateblock = 1;
+	statetype = 1;
+	numunits = TEX_SIZE_MIPMAP / 4;
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+	*cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP)
+	    & 0xfffffffc) | statetype;
+
+	/* Restore VS texture sampler objects */
+	stateblock = 0;
+	statetype = 0;
+	numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+	*cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_SAMPLER_OBJ)
+	    & 0xfffffffc) | statetype;
+
+	/* Restore FS texture memory objects */
+	stateblock = 2;
+	statetype = 1;
+	numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+	*cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MEM_OBJECTS)
+	    & 0xfffffffc) | statetype;
+
+	/* Restore FS texture mipmap addresses */
+	stateblock = 3;
+	statetype = 1;
+	numunits = TEX_SIZE_MIPMAP / 4;
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+	*cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP)
+	    & 0xfffffffc) | statetype;
+
+	/* Restore FS texture sampler objects */
+	stateblock = 2;
+	statetype = 0;
+	numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	*cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+	*cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_SAMPLER_OBJ)
+	    & 0xfffffffc) | statetype;
+
+	create_ib1(drawctxt, drawctxt->constant_restore, start, cmd);
+	tmp_ctx.cmd = cmd;
+}
+
+static void build_shader_restore_cmds(struct adreno_device *adreno_dev,
+				      struct adreno_context *drawctxt)
+{
+	unsigned int *cmd = tmp_ctx.cmd;
+	unsigned int *start = cmd;
+
+	/* Vertex shader */
+	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+	*cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+	*cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+	*cmd++ = 1;
+	*cmd++ = 3;		/* EXEC_COUNT */
+
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	drawctxt->shader_load_commands[0].hostptr = cmd;
+	drawctxt->shader_load_commands[0].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	/*
+	   From fixup:
+
+	   mode = 4 (indirect)
+	   stateblock = 4 (Vertex shader)
+	   numunits = SP_VS_CTRL_REG0.VS_LENGTH
+
+	   From regspec:
+	   SP_VS_CTRL_REG0.VS_LENGTH [31:24]: VS length, unit = 256 bits.
+	   If bit 31 is 1 it indicates an overflow, i.e. an overly
+	   long shader.
+
+	   ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16)
+	 */
+	*cmd++ = 0;		/*ord1 */
+	*cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;
+
+	/* Fragment shader */
+	*cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+	*cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+	*cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+	*cmd++ = 1;
+	*cmd++ = 3;		/* EXEC_COUNT */
+
+	*cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+	drawctxt->shader_load_commands[1].hostptr = cmd;
+	drawctxt->shader_load_commands[1].gpuaddr =
+	    virt2gpu(cmd, &drawctxt->gpustate);
+	/*
+	   From fixup:
+
+	   mode = 4 (indirect)
+	   stateblock = 6 (Fragment shader)
+	   numunits = SP_FS_CTRL_REG0.FS_LENGTH
+
+	   From regspec:
+	   SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256 bits.
+	   If bit 31 is 1 it indicates an overflow, i.e. an overly
+	   long shader.
+
+	   ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16)
+	 */
+	*cmd++ = 0;		/*ord1 */
+	*cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
+		  + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;
+
+	create_ib1(drawctxt, drawctxt->shader_restore, start, cmd);
+	tmp_ctx.cmd = cmd;
+}
+
+static void build_hlsqcontrol_restore_cmds(struct adreno_device *adreno_dev,
+					   struct adreno_context *drawctxt)
+{
+	unsigned int *cmd = tmp_ctx.cmd;
+	unsigned int *start = cmd;
+
+	*cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+	*cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+	drawctxt->hlsqcontrol_restore_commands[0].hostptr = cmd;
+	drawctxt->hlsqcontrol_restore_commands[0].gpuaddr
+	    = virt2gpu(cmd, &drawctxt->gpustate);
+	*cmd++ = 0;
+
+	/* Create indirect buffer command for above command sequence */
+	create_ib1(drawctxt, drawctxt->hlsqcontrol_restore, start, cmd);
+
+	tmp_ctx.cmd = cmd;
+}
+
+/* IB that modifies the shader and constant sizes and offsets in restore IBs. */
+static void build_restore_fixup_cmds(struct adreno_device *adreno_dev,
+				     struct adreno_context *drawctxt)
+{
+	unsigned int *cmd = tmp_ctx.cmd;
+	unsigned int *start = cmd;
+
+#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
+	/* Save shader sizes */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_VS_CTRL_REG0;
+	*cmd++ = drawctxt->shader_load_commands[0].gpuaddr;
+
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_FS_CTRL_REG0;
+	*cmd++ = drawctxt->shader_load_commands[1].gpuaddr;
+
+	/* Save constant sizes */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_VS_CTRL_REG1;
+	*cmd++ = drawctxt->constant_load_commands[0].gpuaddr;
+
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_FS_CTRL_REG1;
+	*cmd++ = drawctxt->constant_load_commands[1].gpuaddr;
+
+	/* Save constant offsets */
+	*cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+	*cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+	*cmd++ = drawctxt->constant_load_commands[2].gpuaddr;
+#else
+	/* Save shader sizes */
+	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000,
+			   30, (4 << 19) | (4 << 16),
+			   drawctxt->shader_load_commands[0].gpuaddr);
+
+	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x7f000000,
+			   30, (6 << 19) | (4 << 16),
+			   drawctxt->shader_load_commands[1].gpuaddr);
+
+	/* Save constant sizes */
+	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+			   23, (4 << 19) | (4 << 16),
+			   drawctxt->constant_load_commands[0].gpuaddr);
+
+	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+			   23, (6 << 19) | (4 << 16),
+			   drawctxt->constant_load_commands[1].gpuaddr);
+
+	/* Modify constant restore conditionals */
+	cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+			0, 0, drawctxt->cond_execs[2].gpuaddr);
+
+	cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+			0, 0, drawctxt->cond_execs[3].gpuaddr);
+
+	/* Save fragment constant shadow offset */
+	cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000,
+			   18, (drawctxt->gpustate.gpuaddr & 0xfffffe00) | 1,
+			   drawctxt->constant_load_commands[2].gpuaddr);
+#endif
+
+	/* Use a mask value that avoids flushing the HLSQ, which would
+	   cause the HW to discard all the shader data */
+
+	cmd = rmw_regtomem(cmd,  A3XX_HLSQ_CONTROL_0_REG, 0x9ffffdff,
+		0, 0, drawctxt->hlsqcontrol_restore_commands[0].gpuaddr);
+
+	create_ib1(drawctxt, drawctxt->restore_fixup, start, cmd);
+
+	tmp_ctx.cmd = cmd;
+}
+
+static int a3xx_create_gpustate_shadow(struct adreno_device *adreno_dev,
+				     struct adreno_context *drawctxt)
+{
+	drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
+
+	build_regrestore_cmds(adreno_dev, drawctxt);
+	build_constantrestore_cmds(adreno_dev, drawctxt);
+	build_hlsqcontrol_restore_cmds(adreno_dev, drawctxt);
+	build_regconstantsave_cmds(adreno_dev, drawctxt);
+	build_shader_save_cmds(adreno_dev, drawctxt);
+	build_shader_restore_cmds(adreno_dev, drawctxt);
+	build_restore_fixup_cmds(adreno_dev, drawctxt);
+	build_save_fixup_cmds(adreno_dev, drawctxt);
+
+	return 0;
+}
+
+/* create buffers for saving/restoring registers, constants, & GMEM */
+static int a3xx_create_gmem_shadow(struct adreno_device *adreno_dev,
+				 struct adreno_context *drawctxt)
+{
+	calc_gmemsize(&drawctxt->context_gmem_shadow,
+		adreno_dev->gmemspace.sizebytes);
+	tmp_ctx.gmem_base = adreno_dev->gmemspace.gpu_base;
+
+	if (drawctxt->flags & CTXT_FLAGS_GMEM_SHADOW) {
+		int result =
+		    kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
+			drawctxt->pagetable,
+			drawctxt->context_gmem_shadow.size);
+
+		if (result)
+			return result;
+	} else {
+		memset(&drawctxt->context_gmem_shadow.gmemshadow, 0,
+		       sizeof(drawctxt->context_gmem_shadow.gmemshadow));
+
+		return 0;
+	}
+
+	build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
+		&tmp_ctx.cmd);
+
+	/* Do we need to idle? */
+	/* adreno_idle(&adreno_dev->dev, KGSL_TIMEOUT_DEFAULT); */
+
+	tmp_ctx.cmd = build_gmem2sys_cmds(adreno_dev, drawctxt,
+		&drawctxt->context_gmem_shadow);
+	tmp_ctx.cmd = build_sys2gmem_cmds(adreno_dev, drawctxt,
+		&drawctxt->context_gmem_shadow);
+
+	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
+		KGSL_CACHE_OP_FLUSH);
+
+	return 0;
+}
+
+static int a3xx_drawctxt_create(struct adreno_device *adreno_dev,
+	struct adreno_context *drawctxt)
+{
+	int ret;
+
+	/*
+	 * Allocate memory for the GPU state and the context commands.
+	 * Despite the name, this is much more than just storage for
+	 * the gpustate: it also contains command space for the GMEM
+	 * save as well as texture and vertex buffer storage.
+	 */
+
+	ret = kgsl_allocate(&drawctxt->gpustate,
+		drawctxt->pagetable, CONTEXT_SIZE);
+
+	if (ret)
+		return ret;
+
+	kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, CONTEXT_SIZE);
+	tmp_ctx.cmd = drawctxt->gpustate.hostptr + CMD_OFFSET;
+
+	if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+		ret = a3xx_create_gpustate_shadow(adreno_dev, drawctxt);
+		if (ret)
+			goto done;
+
+		drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
+	}
+
+	if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC))
+		ret = a3xx_create_gmem_shadow(adreno_dev, drawctxt);
+
+done:
+	if (ret)
+		kgsl_sharedmem_free(&drawctxt->gpustate);
+
+	return ret;
+}
+
+static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
+			   struct adreno_context *context)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+
+	if (context == NULL)
+		return;
+
+	if (context->flags & CTXT_FLAGS_GPU_HANG)
+		KGSL_CTXT_WARN(device,
+			       "Current active context has caused gpu hang\n");
+
+	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+		/* Fixup self modifying IBs for save operations */
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			context->save_fixup, 3);
+
+		/* save registers and constants. */
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			context->regconstant_save, 3);
+
+		if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
+			/* Save shader instructions */
+			adreno_ringbuffer_issuecmds(device,
+				KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
+
+			context->flags |= CTXT_FLAGS_SHADER_RESTORE;
+		}
+	}
+
+	if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
+	    (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
+		/*
+		 * Save GMEM (note: changes shader. Shader must
+		 * already be saved.)
+		 */
+
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+					    context->context_gmem_shadow.
+					    gmem_save, 3);
+		context->flags |= CTXT_FLAGS_GMEM_RESTORE;
+	}
+}
+
+static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
+			      struct adreno_context *context)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	unsigned int cmds[5];
+
+	if (context == NULL) {
+		/* No context - set the default pagetable and that's it */
+		kgsl_mmu_setstate(device, device->mmu.defaultpagetable);
+		return;
+	}
+
+	KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);
+
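+	/*
+	 * Write the context pointer into the memstore so that the currently
+	 * active context can be identified later
+	 */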
+	cmds[0] = cp_nop_packet(1);
+	cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+	cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
+	cmds[3] = device->memstore.gpuaddr +
+	    KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
+	cmds[4] = (unsigned int)context;
+	adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
+	kgsl_mmu_setstate(device, context->pagetable);
+
+	/*
+	 * Restore GMEM.  (note: changes shader.
+	 * Shader must not already be restored.)
+	 */
+
+	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+					    context->context_gmem_shadow.
+					    gmem_restore, 3);
+		context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
+	}
+
+	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			context->reg_restore, 3);
+
+		/* Fixup self modifying IBs for restore operations */
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			context->restore_fixup, 3);
+
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			context->constant_restore, 3);
+
+		if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
+			adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+				context->shader_restore, 3);
+
+		/* Restore HLSQ_CONTROL_0 register */
+		adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+			context->hlsqcontrol_restore, 3);
+	}
+}
+
+static void a3xx_rb_init(struct adreno_device *adreno_dev,
+			 struct adreno_ringbuffer *rb)
+{
+	unsigned int *cmds, cmds_gpu;
+	cmds = adreno_ringbuffer_allocspace(rb, 18);
+	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * (rb->wptr - 18);
+
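+	/*
+	 * As with the A2XX ME_INIT sequence, the first payload dword appears
+	 * to be a mask of which of the following init dwords are valid
+	 */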
+	GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 17));
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003f7);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000080);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000100);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000180);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00006600);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000150);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x0000014e);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000154);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	/* Protected mode control - turned off for A3XX */
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+	adreno_ringbuffer_submit(rb);
+}
+
+static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	const char *err = "";
+
+	switch (bit) {
+	case A3XX_INT_RBBM_AHB_ERROR: {
+		unsigned int reg;
+
+		adreno_regread(device, A3XX_RBBM_AHB_ERROR_STATUS, &reg);
+
+		/*
+		 * Log the word address of the faulting register so that it
+		 * matches the register specification
+		 */
+
+		KGSL_DRV_CRIT(device,
+			"RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
+			reg & (1 << 28) ? "WRITE" : "READ",
+			(reg & 0xFFFFF) >> 2, (reg >> 20) & 0x3,
+			(reg >> 24) & 0x3);
+
+		/* Clear the error */
+		adreno_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
+		return;
+	}
+	case A3XX_INT_RBBM_REG_TIMEOUT:
+		err = "RBBM: AHB register timeout";
+		break;
+	case A3XX_INT_RBBM_ME_MS_TIMEOUT:
+		err = "RBBM: ME master split timeout";
+		break;
+	case A3XX_INT_RBBM_PFP_MS_TIMEOUT:
+		err = "RBBM: PFP master split timeout";
+		break;
+	case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
+		err = "RBBM: ATB bus oveflow";
+		break;
+	case A3XX_INT_VFD_ERROR:
+		err = "VFD: Out of bounds access";
+		break;
+	case A3XX_INT_CP_T0_PACKET_IN_IB:
+		err = "ringbuffer TO packet in IB interrupt";
+		break;
+	case A3XX_INT_CP_OPCODE_ERROR:
+		err = "ringbuffer opcode error interrupt";
+		break;
+	case A3XX_INT_CP_RESERVED_BIT_ERROR:
+		err = "ringbuffer reserved bit error interrupt";
+		break;
+	case A3XX_INT_CP_HW_FAULT:
+		err = "ringbuffer hardware fault";
+		break;
+	case A3XX_INT_CP_REG_PROTECT_FAULT:
+		err = "ringbuffer protected mode error interrupt";
+		break;
+	case A3XX_INT_CP_AHB_ERROR_HALT:
+		err = "ringbuffer AHB error interrupt";
+		break;
+	case A3XX_INT_MISC_HANG_DETECT:
+		err = "MISC: GPU hang detected";
+		break;
+	case A3XX_INT_UCHE_OOB_ACCESS:
+		err = "UCHE:  Out of bounds access";
+		break;
+	}
+
+	KGSL_DRV_CRIT(device, "%s\n", err);
+	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+}
+
+static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
+{
+	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+	if (irq == A3XX_INT_CP_RB_INT) {
+		kgsl_sharedmem_writel(&rb->device->memstore,
+			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable), 0);
+		wmb();
+		KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
+	}
+
+	wake_up_interruptible_all(&rb->device->wait_queue);
+
+	/* Schedule work to free mem and issue ibs */
+	queue_work(rb->device->work_queue, &rb->device->ts_expired_ws);
+
+	atomic_notifier_call_chain(&rb->device->ts_notifier_list,
+				   rb->device->id, NULL);
+}
+
+#define A3XX_IRQ_CALLBACK(_c) { .func = _c }
+
+#define A3XX_INT_MASK \
+	((1 << A3XX_INT_RBBM_AHB_ERROR) |        \
+	 (1 << A3XX_INT_RBBM_REG_TIMEOUT) |      \
+	 (1 << A3XX_INT_RBBM_ME_MS_TIMEOUT) |    \
+	 (1 << A3XX_INT_RBBM_PFP_MS_TIMEOUT) |   \
+	 (1 << A3XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
+	 (1 << A3XX_INT_VFD_ERROR) |             \
+	 (1 << A3XX_INT_CP_T0_PACKET_IN_IB) |    \
+	 (1 << A3XX_INT_CP_OPCODE_ERROR) |       \
+	 (1 << A3XX_INT_CP_RESERVED_BIT_ERROR) | \
+	 (1 << A3XX_INT_CP_HW_FAULT) |           \
+	 (1 << A3XX_INT_CP_IB1_INT) |            \
+	 (1 << A3XX_INT_CP_IB2_INT) |            \
+	 (1 << A3XX_INT_CP_RB_INT) |             \
+	 (1 << A3XX_INT_CP_REG_PROTECT_FAULT) |  \
+	 (1 << A3XX_INT_CP_AHB_ERROR_HALT) |     \
+	 (1 << A3XX_INT_MISC_HANG_DETECT) |      \
+	 (1 << A3XX_INT_UCHE_OOB_ACCESS))
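+
+/*
+ * GPU_IDLE, CP_SW, CP_DMA and the *_DONE_TS interrupts are not included
+ * in the mask - only error conditions and the CP IB/RB interrupts are
+ * serviced from the IRQ path
+ */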
+
+static struct {
+	void (*func)(struct adreno_device *, int);
+} a3xx_irq_funcs[] = {
+	A3XX_IRQ_CALLBACK(NULL),               /* 0 - RBBM_GPU_IDLE */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 1 - RBBM_AHB_ERROR */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 2 - RBBM_REG_TIMEOUT */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 3 - RBBM_ME_MS_TIMEOUT */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 4 - RBBM_PFP_MS_TIMEOUT */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 5 - RBBM_ATB_BUS_OVERFLOW */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 6 - VFD_ERROR */
+	A3XX_IRQ_CALLBACK(NULL),	       /* 7 - CP_SW */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 8 - CP_T0_PACKET_IN_IB */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 9 - CP_OPCODE_ERROR */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 10 - CP_RESERVED_BIT_ERROR */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 11 - CP_HW_FAULT */
+	A3XX_IRQ_CALLBACK(NULL),	       /* 12 - CP_DMA */
+	A3XX_IRQ_CALLBACK(a3xx_cp_callback),   /* 13 - CP_IB2_INT */
+	A3XX_IRQ_CALLBACK(a3xx_cp_callback),   /* 14 - CP_IB1_INT */
+	A3XX_IRQ_CALLBACK(a3xx_cp_callback),   /* 15 - CP_RB_INT */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 16 - CP_REG_PROTECT_FAULT */
+	A3XX_IRQ_CALLBACK(NULL),	       /* 17 - CP_RB_DONE_TS */
+	A3XX_IRQ_CALLBACK(NULL),	       /* 18 - CP_VS_DONE_TS */
+	A3XX_IRQ_CALLBACK(NULL),	       /* 19 - CP_PS_DONE_TS */
+	A3XX_IRQ_CALLBACK(NULL),	       /* 20 - CACHE_FLUSH_TS */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 21 - CP_AHB_ERROR_HALT */
+	A3XX_IRQ_CALLBACK(NULL),	       /* 22 - Unused */
+	A3XX_IRQ_CALLBACK(NULL),	       /* 23 - Unused */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 24 - MISC_HANG_DETECT */
+	A3XX_IRQ_CALLBACK(a3xx_err_callback),  /* 25 - UCHE_OOB_ACCESS */
+	/* 26 to 31 - Unused */
+};
+
+static irqreturn_t a3xx_irq_handler(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned int status, tmp;
+	int i;
+
+	adreno_regread(&adreno_dev->dev, A3XX_RBBM_INT_0_STATUS, &status);
+
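+	/* Walk the pending status bits, dispatching each to its handler */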
+	for (tmp = status, i = 0; tmp && i < ARRAY_SIZE(a3xx_irq_funcs); i++) {
+		if (tmp & 1) {
+			if (a3xx_irq_funcs[i].func != NULL) {
+				a3xx_irq_funcs[i].func(adreno_dev, i);
+				ret = IRQ_HANDLED;
+			} else {
+				KGSL_DRV_CRIT(device,
+					"Unhandled interrupt bit %x\n", i);
+			}
+		}
+
+		tmp >>= 1;
+	}
+
+	if (status)
+		adreno_regwrite(&adreno_dev->dev, A3XX_RBBM_INT_CLEAR_CMD,
+			status);
+	return ret;
+}
+
+static void a3xx_irq_control(struct adreno_device *adreno_dev, int state)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+
+	if (state)
+		adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, A3XX_INT_MASK);
+	else
+		adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
+}
+
+static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+	unsigned int reg, val;
+
+	/* Freeze the counter */
+	adreno_regread(device, A3XX_RBBM_RBBM_CTL, &reg);
+	reg &= ~RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
+	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+
+	/* Read the value */
+	adreno_regread(device, A3XX_RBBM_PERFCTR_PWR_1_LO, &val);
+
+	/* Reset the counter */
+	reg |= RBBM_RBBM_CTL_RESET_PWR_CTR1;
+	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+
+	/* Re-enable the counter */
+	reg &= ~RBBM_RBBM_CTL_RESET_PWR_CTR1;
+	reg |= RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
+	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+
+	return val;
+}
+
+static void a3xx_start(struct adreno_device *adreno_dev)
+{
+	struct kgsl_device *device = &adreno_dev->dev;
+
+	/* Reset the core */
+	adreno_regwrite(device, A3XX_RBBM_SW_RESET_CMD,
+		0x00000001);
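+
+	/* Give the core time to come out of reset */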
+	msleep(20);
+
+	/*
+	 * enable fixed master AXI port of 0x0 for all clients to keep
+	 * traffic from going to random places
+	 */
+
+	adreno_regwrite(device, A3XX_VBIF_FIXED_SORT_EN, 0x0001003F);
+	adreno_regwrite(device, A3XX_VBIF_FIXED_SORT_SEL0, 0x00000000);
+	adreno_regwrite(device, A3XX_VBIF_FIXED_SORT_SEL1, 0x00000000);
+
+	/* Make all blocks contribute to the GPU BUSY perf counter */
+	adreno_regwrite(device, A3XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+	/* Enable the RBBM error reporting bits.  This lets us get
+	   useful information on failure */
+
+	adreno_regwrite(device, A3XX_RBBM_AHB_CTL0, 0x00000001);
+
+	/* Enable AHB error reporting */
+	adreno_regwrite(device, A3XX_RBBM_AHB_CTL1, 0xA6FFFFFF);
+
+	/* Turn on the power counters */
+	adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, 0x00003000);
+}
+
+struct adreno_gpudev adreno_a3xx_gpudev = {
+	.reg_rbbm_status = A3XX_RBBM_STATUS,
+	.reg_cp_pfp_ucode_addr = A3XX_CP_PFP_UCODE_ADDR,
+	.reg_cp_pfp_ucode_data = A3XX_CP_PFP_UCODE_DATA,
+
+	.ctxt_create = a3xx_drawctxt_create,
+	.ctxt_save = a3xx_drawctxt_save,
+	.ctxt_restore = a3xx_drawctxt_restore,
+	.rb_init = a3xx_rb_init,
+	.irq_control = a3xx_irq_control,
+	.irq_handler = a3xx_irq_handler,
+	.busy_cycles = a3xx_busy_cycles,
+	.start = a3xx_start,
+};
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 206a678..9bf85cf 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -19,6 +19,7 @@
 
 /* quad for copying GMEM to context shadow */
 #define QUAD_LEN 12
+#define QUAD_RESTORE_LEN 14
 
 static unsigned int gmem_copy_quad[QUAD_LEN] = {
 	0x00000000, 0x00000000, 0x3f800000,
@@ -27,6 +28,14 @@
 	0x00000000, 0x00000000, 0x3f800000
 };
 
+static unsigned int gmem_restore_quad[QUAD_RESTORE_LEN] = {
+	0x00000000, 0x3f800000, 0x3f800000,
+	0x00000000, 0x00000000, 0x00000000,
+	0x3f800000, 0x00000000, 0x00000000,
+	0x3f800000, 0x00000000, 0x00000000,
+	0x3f800000, 0x3f800000,
+};
+
 #define TEXCOORD_LEN 8
 
 static unsigned int gmem_copy_texcoord[TEXCOORD_LEN] = {
@@ -73,12 +82,12 @@
 	gmem_copy_quad[4] = uint2float(shadow->height);
 	gmem_copy_quad[9] = uint2float(shadow->width);
 
-	gmem_copy_quad[0] = 0;
-	gmem_copy_quad[6] = 0;
-	gmem_copy_quad[7] = 0;
-	gmem_copy_quad[10] = 0;
+	gmem_restore_quad[5] = uint2float(shadow->height);
+	gmem_restore_quad[7] = uint2float(shadow->width);
 
 	memcpy(shadow->quad_vertices.hostptr, gmem_copy_quad, QUAD_LEN << 2);
+	memcpy(shadow->quad_vertices_restore.hostptr, gmem_restore_quad,
+		QUAD_RESTORE_LEN << 2);
 
 	memcpy(shadow->quad_texcoords.hostptr, gmem_copy_texcoord,
 		TEXCOORD_LEN << 2);
@@ -103,6 +112,13 @@
 
 	cmd += QUAD_LEN;
 
+	/* Used by A3XX, but defined for both to keep the code simple */
+	shadow->quad_vertices_restore.hostptr = cmd;
+	shadow->quad_vertices_restore.gpuaddr =
+		virt2gpu(cmd, &drawctxt->gpustate);
+
+	cmd += QUAD_RESTORE_LEN;
+
 	/* tex coord buffer location (in GPU space) */
 	shadow->quad_texcoords.hostptr = cmd;
 	shadow->quad_texcoords.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 3047660..9a7ae3f 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -50,37 +50,56 @@
 struct gmem_shadow_t {
 	struct kgsl_memdesc gmemshadow;	/* Shadow buffer address */
 
-	/* 256 KB GMEM surface = 4 bytes-per-pixel x 256 pixels/row x
-	* 256 rows. */
-	/* width & height must be a multiples of 32, in case tiled textures
-	 * are used. */
-	enum COLORFORMATX format;
+	/*
+	 * 256 KB GMEM surface = 4 bytes-per-pixel x 256 pixels/row x
+	 * 256 rows. Width & height must be multiples of 32 in case tiled
+	 * textures are used
+	 */
+
+	enum COLORFORMATX format; /* Unused on A3XX */
 	unsigned int size;	/* Size of surface used to store GMEM */
 	unsigned int width;	/* Width of surface used to store GMEM */
 	unsigned int height;	/* Height of surface used to store GMEM */
 	unsigned int pitch;	/* Pitch of surface used to store GMEM */
 	unsigned int gmem_pitch;	/* Pitch value used for GMEM */
-	unsigned int *gmem_save_commands;
-	unsigned int *gmem_restore_commands;
+	unsigned int *gmem_save_commands;    /* Unused on A3XX */
+	unsigned int *gmem_restore_commands; /* Unused on A3XX */
 	unsigned int gmem_save[3];
 	unsigned int gmem_restore[3];
 	struct kgsl_memdesc quad_vertices;
 	struct kgsl_memdesc quad_texcoords;
+	struct kgsl_memdesc quad_vertices_restore;
 };
 
 struct adreno_context {
 	uint32_t flags;
 	struct kgsl_pagetable *pagetable;
 	struct kgsl_memdesc gpustate;
-	unsigned int reg_save[3];
 	unsigned int reg_restore[3];
 	unsigned int shader_save[3];
-	unsigned int shader_fixup[3];
 	unsigned int shader_restore[3];
-	unsigned int chicken_restore[3];
-	unsigned int bin_base_offset;
+
 	/* Information of the GMEM shadow that is created in context create */
 	struct gmem_shadow_t context_gmem_shadow;
+
+	/* A2XX specific items */
+	unsigned int reg_save[3];
+	unsigned int shader_fixup[3];
+	unsigned int chicken_restore[3];
+	unsigned int bin_base_offset;
+
+	/* A3XX specific items */
+	unsigned int regconstant_save[3];
+	unsigned int constant_restore[3];
+	unsigned int hlsqcontrol_restore[3];
+	unsigned int save_fixup[3];
+	unsigned int restore_fixup[3];
+	struct kgsl_memdesc shader_load_commands[2];
+	struct kgsl_memdesc shader_save_commands[4];
+	struct kgsl_memdesc constant_save_commands[3];
+	struct kgsl_memdesc constant_load_commands[3];
+	struct kgsl_memdesc cond_execs[4];
+	struct kgsl_memdesc hlsqcontrol_restore_commands[1];
 };
 
 int adreno_drawctxt_create(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h
index 8aea58c..1dffc32 100644
--- a/drivers/gpu/msm/adreno_pm4types.h
+++ b/drivers/gpu/msm/adreno_pm4types.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -157,6 +157,18 @@
 
 #define CP_SET_PROTECTED_MODE  0x5f /* sets the register protection mode */
 
+/*
+ * for a3xx
+ */
+
+#define CP_LOAD_STATE 0x30 /* load high level sequencer command */
+
+#define CP_LOADSTATE_DSTOFFSET_SHIFT 0x00000000
+#define CP_LOADSTATE_STATESRC_SHIFT 0x00000010
+#define CP_LOADSTATE_STATEBLOCKID_SHIFT 0x00000013
+#define CP_LOADSTATE_NUMOFUNITS_SHIFT 0x00000016
+#define CP_LOADSTATE_STATETYPE_SHIFT 0x00000000
+#define CP_LOADSTATE_EXTSRCADDR_SHIFT 0x00000002
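+
+/*
+ * Illustrative packing of the two CP_LOAD_STATE payload dwords using the
+ * shifts above (the field names here are descriptive placeholders, not
+ * taken from hardware documentation):
+ *
+ *   dword0 = (dst_offset << CP_LOADSTATE_DSTOFFSET_SHIFT) |
+ *            (state_src << CP_LOADSTATE_STATESRC_SHIFT) |
+ *            (state_block_id << CP_LOADSTATE_STATEBLOCKID_SHIFT) |
+ *            (num_of_units << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *   dword1 = (state_type << CP_LOADSTATE_STATETYPE_SHIFT) |
+ *            (ext_src_addr << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ */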
 
 /* packet header building macros */
 #define cp_type0_packet(regindx, cnt) \
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index f1748d7..a5c20dc 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -24,28 +24,11 @@
 #include "adreno_ringbuffer.h"
 
 #include "a2xx_reg.h"
+#include "a3xx_reg.h"
 
 #define GSL_RB_NOP_SIZEDWORDS				2
-/* protected mode error checking below register address 0x800
-*  note: if CP_INTERRUPT packet is used then checking needs
-*  to change to below register address 0x7C8
-*/
-#define GSL_RB_PROTECTED_MODE_CONTROL		0x200001F2
 
-/* Firmware file names
- * Legacy names must remain but replacing macro names to
- * match current kgsl model.
- * a200 is yamato
- * a220 is leia
- */
-#define A200_PFP_FW "yamato_pfp.fw"
-#define A200_PM4_FW "yamato_pm4.fw"
-#define A220_PFP_470_FW "leia_pfp_470.fw"
-#define A220_PM4_470_FW "leia_pm4_470.fw"
-#define A225_PFP_FW "a225_pfp.fw"
-#define A225_PM4_FW "a225_pm4.fw"
-
-static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
+void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
 {
 	BUG_ON(rb->wptr == 0);
 
@@ -104,8 +87,7 @@
 	} while ((freecmds != 0) && (freecmds <= numcmds));
 }
 
-
-static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
 					     unsigned int numcmds)
 {
 	unsigned int	*ptr = NULL;
@@ -231,10 +213,11 @@
 	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
 		adreno_dev->pfp_fw[0]);
 
-	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
+	adreno_regwrite(device, adreno_dev->gpudev->reg_cp_pfp_ucode_addr, 0);
 	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
-		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
-				     adreno_dev->pfp_fw[i]);
+		adreno_regwrite(device,
+			adreno_dev->gpudev->reg_cp_pfp_ucode_data,
+			adreno_dev->pfp_fw[i]);
 err:
 	return ret;
 }
@@ -244,10 +227,9 @@
 	int status;
 	/*cp_rb_cntl_u cp_rb_cntl; */
 	union reg_cp_rb_cntl cp_rb_cntl;
-	unsigned int *cmds, rb_cntl;
+	unsigned int rb_cntl;
 	struct kgsl_device *device = rb->device;
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-	uint cmds_gpu;
 
 	if (rb->flags & KGSL_FLAGS_STARTED)
 		return 0;
@@ -263,12 +245,15 @@
 	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
 			   (rb->sizedwords << 2));
 
-	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
-			     (rb->memptrs_desc.gpuaddr
-			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
+	if (adreno_is_a2xx(adreno_dev)) {
+		adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
+			(rb->memptrs_desc.gpuaddr
+			+ GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
 
-	/* setup WPTR delay */
-	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);
+		/* setup WPTR delay */
+		adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
+			0 /*0x70000010 */);
+	}
 
 	/*setup REG_CP_RB_CNTL */
 	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
@@ -287,7 +272,11 @@
 	*/
 	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
 
-	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
+	if (adreno_is_a2xx(adreno_dev)) {
+		/* WPTR polling */
+		cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
+	}
+
 	/* mem RPTR writebacks */
 	cp_rb_cntl.f.rb_no_update =  GSL_RB_CNTL_NO_UPDATE;
 
@@ -299,8 +288,36 @@
 			     rb->memptrs_desc.gpuaddr +
 			     GSL_RB_MEMPTRS_RPTR_OFFSET);
 
-	/* explicitly clear all cp interrupts */
-	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
+	if (adreno_is_a3xx(adreno_dev)) {
+		/* enable access protection to privileged registers */
+		adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);
+
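+		/*
+		 * Each CP_PROTECT_REG value appears to encode the base
+		 * register offset in the low bits and the size and access
+		 * attributes of the protected range in the upper bits
+		 * (inferred from the values below)
+		 */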
+		/* RBBM registers */
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);
+
+		/* CP registers */
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);
+
+		/* RB registers */
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);
+
+		/* VBIF registers */
+		adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
+	}
+
+	if (adreno_is_a2xx(adreno_dev)) {
+		/* explicitly clear all cp interrupts */
+		adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
+	}
 
 	/* setup scratch/timestamp */
 	adreno_regwrite(device, REG_SCRATCH_ADDR,
@@ -329,55 +346,8 @@
 	/* clear ME_HALT to start micro engine */
 	adreno_regwrite(device, REG_CP_ME_CNTL, 0);
 
-	/* ME_INIT */
-	cmds = adreno_ringbuffer_allocspace(rb, 19);
-	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
-
-	GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT);
-	/* All fields present (bits 9:0) */
-	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
-	/* Disable/Enable Real-Time Stream processing (present but ignored) */
-	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
-	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-
-	GSL_RB_WRITE(cmds, cmds_gpu,
-		SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
-	GSL_RB_WRITE(cmds, cmds_gpu,
-		SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
-	GSL_RB_WRITE(cmds, cmds_gpu,
-		SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
-	GSL_RB_WRITE(cmds, cmds_gpu,
-		SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
-	GSL_RB_WRITE(cmds, cmds_gpu,
-		SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
-	GSL_RB_WRITE(cmds, cmds_gpu,
-		SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
-	GSL_RB_WRITE(cmds, cmds_gpu,
-		SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
-	GSL_RB_WRITE(cmds, cmds_gpu,
-		SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
-
-	/* Instruction memory size: */
-	GSL_RB_WRITE(cmds, cmds_gpu,
-		     (adreno_encode_istore_size(adreno_dev)
-		      | adreno_dev->pix_shader_start));
-	/* Maximum Contexts */
-	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
-	/* Write Confirm Interval and The CP will wait the
-	* wait_interval * 16 clocks between polling  */
-	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-
-	/* NQ and External Memory Swap */
-	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-	/* Protected mode error checking */
-	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
-	/* Disable header dumping and Header dump address */
-	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-	/* Header dump size */
-	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
-
-	adreno_ringbuffer_submit(rb);
+	/* ME init is GPU-specific, so jump into the sub-function */
+	adreno_dev->gpudev->rb_init(adreno_dev, rb);
 
 	/* idle device to validate ME INIT */
 	status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
@@ -458,6 +428,7 @@
 				unsigned int flags, unsigned int *cmds,
 				int sizedwords)
 {
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
 	unsigned int *ringcmds;
 	unsigned int timestamp;
 	unsigned int total_sizedwords = sizedwords + 6;
@@ -471,6 +442,9 @@
 	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
 	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;
 
+	if (adreno_is_a3xx(adreno_dev))
+		total_sizedwords += 7;
+
 	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
 	rcmd_gpu = rb->buffer_desc.gpuaddr
 		+ sizeof(uint)*(rb->wptr-total_sizedwords);
@@ -504,6 +478,21 @@
 	/* start-of-pipeline and end-of-pipeline timestamps */
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
+
+	if (adreno_is_a3xx(adreno_dev)) {
+		/*
+		 * Flush HLSQ lazy updates to make sure there are no
+		 * resources pending for indirect loads after the timestamp
+		 */
+
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_EVENT_WRITE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x07); /* HLSQ_FLUSH */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
+	}
+
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
 	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
 	GSL_RB_WRITE(ringcmds, rcmd_gpu,
@@ -527,6 +516,15 @@
 		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
 	}
 
+	if (adreno_is_a3xx(adreno_dev)) {
+		/* Dummy set-constant to trigger context rollover */
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			cp_type3_packet(CP_SET_CONSTANT, 2));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu,
+			(0x4<<16)|(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000));
+		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
+	}
+
 	adreno_ringbuffer_submit(rb);
 
 	/* return timestamp of issued commands */
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index a90b0cb..caedf26 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -105,6 +105,13 @@
 
 #define GSL_RB_CNTL_POLL_EN 0x0 /* disable */
 
+/*
+ * protected mode error checking below register address 0x800
+ * note: if CP_INTERRUPT packet is used then checking needs
+ * to change to below register address 0x7C8
+ */
+#define GSL_RB_PROTECTED_MODE_CONTROL		0x200001F2
+
 int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
 				struct kgsl_context *context,
 				struct kgsl_ibdesc *ibdesc,
@@ -126,6 +133,8 @@
 					unsigned int *cmdaddr,
 					int sizedwords);
 
+void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb);
+
 void kgsl_cp_intrcallback(struct kgsl_device *device);
 
 int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
@@ -136,6 +145,9 @@
 adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
 			int num_rb_contents);
 
+unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+					     unsigned int numcmds);
+
 static inline int adreno_ringbuffer_count(struct adreno_ringbuffer *rb,
 	unsigned int rptr)
 {