gpu: msm: Add new Adreno driver
* Temporary place for this.
Change-Id: I83b5d75fbd201c352d011ed43f21ebe3576e058c
diff --git a/drivers/gpu/msm2/Kconfig b/drivers/gpu/msm2/Kconfig
new file mode 100644
index 0000000..ba63fbc
--- /dev/null
+++ b/drivers/gpu/msm2/Kconfig
@@ -0,0 +1,98 @@
+config MSM_KGSL
+ tristate "MSM 3D Graphics driver"
+ default n
+ depends on ARCH_MSM && !ARCH_MSM7X00A && !ARCH_MSM7X25
+ select GENERIC_ALLOCATOR
+ select FW_LOADER
+ ---help---
+ 3D graphics driver. Required to use hardware accelerated
+ OpenGL ES 2.0 and 1.1.
+
+config MSM_KGSL_CFF_DUMP
+ bool "Enable KGSL Common File Format (CFF) Dump Feature [Use with caution]"
+ default n
+ depends on MSM_KGSL
+ select RELAY
+ ---help---
+ This is an analysis and diagnostic feature only, and should only be
+ turned on during KGSL GPU diagnostics and will slow down the KGSL
+ performance sigificantly, hence *do not use in production builds*.
+ When enabled, CFF Dump is on at boot. It can be turned off at runtime
+ via 'echo 0 > /d/kgsl/cff_dump'. The log can be captured via
+ /d/kgsl-cff/cpu[0|1].
+
+config MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
+ bool "When selected will disable KGSL CFF Dump for context switches"
+ default n
+ depends on MSM_KGSL_CFF_DUMP
+ ---help---
+ Dumping all the memory for every context switch can produce quite
+ huge log files, to reduce this, turn this feature on.
+
+config MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+ bool "Disable human readable CP_STAT fields in post-mortem dump"
+ default n
+ depends on MSM_KGSL
+ ---help---
+ For a more compact kernel log the human readable output of
+ CP_STAT can be turned off with this option.
+
+config MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
+ bool "Disable dumping current IB1 and IB2 in post-mortem dump"
+ default n
+ depends on MSM_KGSL
+ ---help---
+ For a more compact kernel log the IB1 and IB2 embedded dump
+ can be turned off with this option. Some IB dumps take up
+ so much space that vital other information gets cut from the
+ post-mortem dump.
+
+config MSM_KGSL_PSTMRTMDMP_RB_HEX
+ bool "Use hex version for ring-buffer in post-mortem dump"
+ default n
+ depends on MSM_KGSL
+ ---help---
+ Use hex version for the ring-buffer in the post-mortem dump, instead
+ of the human readable version.
+
+config MSM_KGSL_2D
+ tristate "MSM 2D graphics driver. Required for OpenVG"
+ default y
+ depends on MSM_KGSL && !ARCH_MSM7X27 && !ARCH_MSM7X27A && !(ARCH_QSD8X50 && !MSM_SOC_REV_A)
+
+config MSM_KGSL_DRM
+ bool "Build a DRM interface for the MSM_KGSL driver"
+ depends on MSM_KGSL && DRM
+
+config KGSL_PER_PROCESS_PAGE_TABLE
+ bool "Enable Per Process page tables for the KGSL driver"
+ default n
+ depends on !MSM_KGSL_DRM
+ ---help---
+ The MMU will use per process pagetables when enabled.
+
+config MSM_KGSL_PAGE_TABLE_SIZE
+ hex "Size of pagetables"
+ default 0xFFF0000
+ ---help---
+ Sets the pagetable size used by the MMU. The max value
+ is 0xFFF0000 or (256M - 64K).
+
+config MSM_KGSL_PAGE_TABLE_COUNT
+ int "Minimum of concurrent pagetables to support"
+ default 8
+ depends on KGSL_PER_PROCESS_PAGE_TABLE
+ ---help---
+ Specify the number of pagetables to allocate at init time
+ This is the number of concurrent processes that are guaranteed to
+ to run at any time. Additional processes can be created dynamically
+ assuming there is enough contiguous memory to allocate the pagetable.
+
+config MSM_KGSL_MMU_PAGE_FAULT
+ bool "Force the GPU MMU to page fault for unmapped regions"
+ default y
+
+config MSM_KGSL_DISABLE_SHADOW_WRITES
+ bool "Disable register shadow writes for context switches"
+ default n
+ depends on MSM_KGSL
diff --git a/drivers/gpu/msm2/Makefile b/drivers/gpu/msm2/Makefile
new file mode 100644
index 0000000..895235f
--- /dev/null
+++ b/drivers/gpu/msm2/Makefile
@@ -0,0 +1,53 @@
+ccflags-y := -Iinclude/uapi/drm -Iinclude/drm -Idrivers/gpu/msm
+
+msm_kgsl_core-y = \
+ kgsl.o \
+ kgsl_trace.o \
+ kgsl_sharedmem.o \
+ kgsl_pwrctrl.o \
+ kgsl_pwrscale.o \
+ kgsl_mmu.o \
+ kgsl_gpummu.o \
+ kgsl_iommu.o \
+ kgsl_snapshot.o \
+ kgsl_events.o
+
+msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
+msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
+msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += kgsl_pwrscale_idlestats.o
+msm_kgsl_core-$(CONFIG_SYNC) += kgsl_sync.o
+
+msm_adreno-y += \
+ adreno_ringbuffer.o \
+ adreno_drawctxt.o \
+ adreno_dispatch.o \
+ adreno_postmortem.o \
+ adreno_snapshot.o \
+ adreno_trace.o \
+ adreno_coresight.o \
+ adreno_a2xx.o \
+ adreno_a2xx_trace.o \
+ adreno_a2xx_snapshot.o \
+ adreno_a3xx.o \
+ adreno_a4xx.o \
+ adreno_a3xx_trace.o \
+ adreno_a3xx_snapshot.o \
+ adreno.o \
+ adreno_cp_parser.o
+
+msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
+
+msm_z180-y += \
+ z180.o \
+ z180_postmortem.o \
+ z180_trace.o
+
+msm_kgsl_core-objs = $(msm_kgsl_core-y)
+msm_adreno-objs = $(msm_adreno-y)
+msm_z180-objs = $(msm_z180-y)
+
+obj-$(CONFIG_MSM_KGSL) += msm_kgsl_core.o
+obj-$(CONFIG_MSM_KGSL) += msm_adreno.o
+obj-$(CONFIG_MSM_KGSL_2D) += msm_z180.o
diff --git a/drivers/gpu/msm2/a2xx_reg.h b/drivers/gpu/msm2/a2xx_reg.h
new file mode 100644
index 0000000..c70c4eb
--- /dev/null
+++ b/drivers/gpu/msm2/a2xx_reg.h
@@ -0,0 +1,438 @@
+/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A200_REG_H
+#define __A200_REG_H
+
+enum VGT_EVENT_TYPE {
+ VS_DEALLOC = 0,
+ PS_DEALLOC = 1,
+ VS_DONE_TS = 2,
+ PS_DONE_TS = 3,
+ CACHE_FLUSH_TS = 4,
+ CONTEXT_DONE = 5,
+ CACHE_FLUSH = 6,
+ VIZQUERY_START = 7,
+ VIZQUERY_END = 8,
+ SC_WAIT_WC = 9,
+ RST_PIX_CNT = 13,
+ RST_VTX_CNT = 14,
+ TILE_FLUSH = 15,
+ CACHE_FLUSH_AND_INV_TS_EVENT = 20,
+ ZPASS_DONE = 21,
+ CACHE_FLUSH_AND_INV_EVENT = 22,
+ PERFCOUNTER_START = 23,
+ PERFCOUNTER_STOP = 24,
+ VS_FETCH_DONE = 27,
+ FACENESS_FLUSH = 28,
+};
+
+enum COLORFORMATX {
+ COLORX_4_4_4_4 = 0,
+ COLORX_1_5_5_5 = 1,
+ COLORX_5_6_5 = 2,
+ COLORX_8 = 3,
+ COLORX_8_8 = 4,
+ COLORX_8_8_8_8 = 5,
+ COLORX_S8_8_8_8 = 6,
+ COLORX_16_FLOAT = 7,
+ COLORX_16_16_FLOAT = 8,
+ COLORX_16_16_16_16_FLOAT = 9,
+ COLORX_32_FLOAT = 10,
+ COLORX_32_32_FLOAT = 11,
+ COLORX_32_32_32_32_FLOAT = 12,
+ COLORX_2_3_3 = 13,
+ COLORX_8_8_8 = 14,
+};
+
+enum SURFACEFORMAT {
+ FMT_1_REVERSE = 0,
+ FMT_1 = 1,
+ FMT_8 = 2,
+ FMT_1_5_5_5 = 3,
+ FMT_5_6_5 = 4,
+ FMT_6_5_5 = 5,
+ FMT_8_8_8_8 = 6,
+ FMT_2_10_10_10 = 7,
+ FMT_8_A = 8,
+ FMT_8_B = 9,
+ FMT_8_8 = 10,
+ FMT_Cr_Y1_Cb_Y0 = 11,
+ FMT_Y1_Cr_Y0_Cb = 12,
+ FMT_5_5_5_1 = 13,
+ FMT_8_8_8_8_A = 14,
+ FMT_4_4_4_4 = 15,
+ FMT_10_11_11 = 16,
+ FMT_11_11_10 = 17,
+ FMT_DXT1 = 18,
+ FMT_DXT2_3 = 19,
+ FMT_DXT4_5 = 20,
+ FMT_24_8 = 22,
+ FMT_24_8_FLOAT = 23,
+ FMT_16 = 24,
+ FMT_16_16 = 25,
+ FMT_16_16_16_16 = 26,
+ FMT_16_EXPAND = 27,
+ FMT_16_16_EXPAND = 28,
+ FMT_16_16_16_16_EXPAND = 29,
+ FMT_16_FLOAT = 30,
+ FMT_16_16_FLOAT = 31,
+ FMT_16_16_16_16_FLOAT = 32,
+ FMT_32 = 33,
+ FMT_32_32 = 34,
+ FMT_32_32_32_32 = 35,
+ FMT_32_FLOAT = 36,
+ FMT_32_32_FLOAT = 37,
+ FMT_32_32_32_32_FLOAT = 38,
+ FMT_32_AS_8 = 39,
+ FMT_32_AS_8_8 = 40,
+ FMT_16_MPEG = 41,
+ FMT_16_16_MPEG = 42,
+ FMT_8_INTERLACED = 43,
+ FMT_32_AS_8_INTERLACED = 44,
+ FMT_32_AS_8_8_INTERLACED = 45,
+ FMT_16_INTERLACED = 46,
+ FMT_16_MPEG_INTERLACED = 47,
+ FMT_16_16_MPEG_INTERLACED = 48,
+ FMT_DXN = 49,
+ FMT_8_8_8_8_AS_16_16_16_16 = 50,
+ FMT_DXT1_AS_16_16_16_16 = 51,
+ FMT_DXT2_3_AS_16_16_16_16 = 52,
+ FMT_DXT4_5_AS_16_16_16_16 = 53,
+ FMT_2_10_10_10_AS_16_16_16_16 = 54,
+ FMT_10_11_11_AS_16_16_16_16 = 55,
+ FMT_11_11_10_AS_16_16_16_16 = 56,
+ FMT_32_32_32_FLOAT = 57,
+ FMT_DXT3A = 58,
+ FMT_DXT5A = 59,
+ FMT_CTX1 = 60,
+ FMT_DXT3A_AS_1_1_1_1 = 61
+};
+
+#define REG_PERF_MODE_CNT 0x0
+#define REG_PERF_STATE_RESET 0x0
+#define REG_PERF_STATE_ENABLE 0x1
+#define REG_PERF_STATE_FREEZE 0x2
+
+#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE 4
+#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE 2
+#define RB_EDRAM_INFO_UNUSED0_SIZE 8
+#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE 18
+
+struct rb_edram_info_t {
+ unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
+ unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
+ unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
+ unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
+};
+
+union reg_rb_edram_info {
+ unsigned int val;
+ struct rb_edram_info_t f;
+};
+
+#define RBBM_READ_ERROR_ADDRESS_MASK 0x0001fffc
+#define RBBM_READ_ERROR_REQUESTER (1<<30)
+#define RBBM_READ_ERROR_ERROR (1<<31)
+
+#define CP_RB_CNTL_RB_BUFSZ_SIZE 6
+#define CP_RB_CNTL_UNUSED0_SIZE 2
+#define CP_RB_CNTL_RB_BLKSZ_SIZE 6
+#define CP_RB_CNTL_UNUSED1_SIZE 2
+#define CP_RB_CNTL_BUF_SWAP_SIZE 2
+#define CP_RB_CNTL_UNUSED2_SIZE 2
+#define CP_RB_CNTL_RB_POLL_EN_SIZE 1
+#define CP_RB_CNTL_UNUSED3_SIZE 6
+#define CP_RB_CNTL_RB_NO_UPDATE_SIZE 1
+#define CP_RB_CNTL_UNUSED4_SIZE 3
+#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE 1
+
+struct cp_rb_cntl_t {
+ unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
+ unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
+ unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
+ unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
+ unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
+ unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
+ unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
+ unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
+ unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
+ unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
+ unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
+};
+
+union reg_cp_rb_cntl {
+ unsigned int val:32;
+ struct cp_rb_cntl_t f;
+};
+
+#define RB_COLOR_INFO__COLOR_FORMAT_MASK 0x0000000fL
+#define RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT 0x00000004
+
+
+#define SQ_INT_CNTL__PS_WATCHDOG_MASK 0x00000001L
+#define SQ_INT_CNTL__VS_WATCHDOG_MASK 0x00000002L
+
+#define RBBM_INT_CNTL__RDERR_INT_MASK 0x00000001L
+#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK 0x00000002L
+#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK 0x00080000L
+
+#define RBBM_STATUS__CMDFIFO_AVAIL_MASK 0x0000001fL
+#define RBBM_STATUS__TC_BUSY_MASK 0x00000020L
+#define RBBM_STATUS__HIRQ_PENDING_MASK 0x00000100L
+#define RBBM_STATUS__CPRQ_PENDING_MASK 0x00000200L
+#define RBBM_STATUS__CFRQ_PENDING_MASK 0x00000400L
+#define RBBM_STATUS__PFRQ_PENDING_MASK 0x00000800L
+#define RBBM_STATUS__VGT_BUSY_NO_DMA_MASK 0x00001000L
+#define RBBM_STATUS__RBBM_WU_BUSY_MASK 0x00004000L
+#define RBBM_STATUS__CP_NRT_BUSY_MASK 0x00010000L
+#define RBBM_STATUS__MH_BUSY_MASK 0x00040000L
+#define RBBM_STATUS__MH_COHERENCY_BUSY_MASK 0x00080000L
+#define RBBM_STATUS__SX_BUSY_MASK 0x00200000L
+#define RBBM_STATUS__TPC_BUSY_MASK 0x00400000L
+#define RBBM_STATUS__SC_CNTX_BUSY_MASK 0x01000000L
+#define RBBM_STATUS__PA_BUSY_MASK 0x02000000L
+#define RBBM_STATUS__VGT_BUSY_MASK 0x04000000L
+#define RBBM_STATUS__SQ_CNTX17_BUSY_MASK 0x08000000L
+#define RBBM_STATUS__SQ_CNTX0_BUSY_MASK 0x10000000L
+#define RBBM_STATUS__RB_CNTX_BUSY_MASK 0x40000000L
+#define RBBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
+
+#define CP_INT_CNTL__SW_INT_MASK 0x00080000L
+#define CP_INT_CNTL__T0_PACKET_IN_IB_MASK 0x00800000L
+#define CP_INT_CNTL__OPCODE_ERROR_MASK 0x01000000L
+#define CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK 0x02000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_MASK 0x04000000L
+#define CP_INT_CNTL__IB_ERROR_MASK 0x08000000L
+#define CP_INT_CNTL__IB2_INT_MASK 0x20000000L
+#define CP_INT_CNTL__IB1_INT_MASK 0x40000000L
+#define CP_INT_CNTL__RB_INT_MASK 0x80000000L
+
+#define MASTER_INT_SIGNAL__MH_INT_STAT 0x00000020L
+#define MASTER_INT_SIGNAL__SQ_INT_STAT 0x04000000L
+#define MASTER_INT_SIGNAL__CP_INT_STAT 0x40000000L
+#define MASTER_INT_SIGNAL__RBBM_INT_STAT 0x80000000L
+
+#define RB_EDRAM_INFO__EDRAM_SIZE_MASK 0x0000000fL
+#define RB_EDRAM_INFO__EDRAM_RANGE_MASK 0xffffc000L
+
+#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
+#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
+#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
+#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
+#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
+#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
+#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
+#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
+#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
+#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
+#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
+#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
+
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+
+#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT 0x00000000
+#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT 0x00000004
+#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT 0x0000000e
+
+#define REG_CP_CSQ_IB1_STAT 0x01FE
+#define REG_CP_CSQ_IB2_STAT 0x01FF
+#define REG_CP_CSQ_RB_STAT 0x01FD
+#define REG_CP_DEBUG 0x01FC
+#define REG_CP_IB1_BASE 0x0458
+#define REG_CP_IB1_BUFSZ 0x0459
+#define REG_CP_IB2_BASE 0x045A
+#define REG_CP_IB2_BUFSZ 0x045B
+#define REG_CP_INT_ACK 0x01F4
+#define REG_CP_INT_CNTL 0x01F2
+#define REG_CP_INT_STATUS 0x01F3
+#define REG_CP_ME_CNTL 0x01F6
+#define REG_CP_ME_RAM_DATA 0x01FA
+#define REG_CP_ME_RAM_WADDR 0x01F8
+#define REG_CP_ME_RAM_RADDR 0x01F9
+#define REG_CP_ME_STATUS 0x01F7
+#define REG_CP_PFP_UCODE_ADDR 0x00C0
+#define REG_CP_PFP_UCODE_DATA 0x00C1
+#define REG_CP_QUEUE_THRESHOLDS 0x01D5
+#define REG_CP_RB_BASE 0x01C0
+#define REG_CP_RB_CNTL 0x01C1
+#define REG_CP_RB_RPTR 0x01C4
+#define REG_CP_RB_RPTR_ADDR 0x01C3
+#define REG_CP_RB_RPTR_WR 0x01C7
+#define REG_CP_RB_WPTR 0x01C5
+#define REG_CP_RB_WPTR_BASE 0x01C8
+#define REG_CP_RB_WPTR_DELAY 0x01C6
+#define REG_CP_STAT 0x047F
+#define REG_CP_STATE_DEBUG_DATA 0x01ED
+#define REG_CP_STATE_DEBUG_INDEX 0x01EC
+#define REG_CP_ST_BASE 0x044D
+#define REG_CP_ST_BUFSZ 0x044E
+
+#define REG_CP_PERFMON_CNTL 0x0444
+#define REG_CP_PERFCOUNTER_SELECT 0x0445
+#define REG_CP_PERFCOUNTER_LO 0x0446
+#define REG_CP_PERFCOUNTER_HI 0x0447
+
+#define REG_RBBM_PERFCOUNTER1_SELECT 0x0395
+#define REG_RBBM_PERFCOUNTER1_HI 0x0398
+#define REG_RBBM_PERFCOUNTER1_LO 0x0397
+
+#define REG_MASTER_INT_SIGNAL 0x03B7
+
+#define REG_PA_CL_VPORT_XSCALE 0x210F
+#define REG_PA_CL_VPORT_ZOFFSET 0x2114
+#define REG_PA_CL_VPORT_ZSCALE 0x2113
+#define REG_PA_CL_CLIP_CNTL 0x2204
+#define REG_PA_CL_VTE_CNTL 0x2206
+#define REG_PA_SC_AA_MASK 0x2312
+#define REG_PA_SC_LINE_CNTL 0x2300
+#define REG_PA_SC_SCREEN_SCISSOR_BR 0x200F
+#define REG_PA_SC_SCREEN_SCISSOR_TL 0x200E
+#define REG_PA_SC_VIZ_QUERY 0x2293
+#define REG_PA_SC_VIZ_QUERY_STATUS 0x0C44
+#define REG_PA_SC_WINDOW_OFFSET 0x2080
+#define REG_PA_SC_WINDOW_SCISSOR_BR 0x2082
+#define REG_PA_SC_WINDOW_SCISSOR_TL 0x2081
+#define REG_PA_SU_FACE_DATA 0x0C86
+#define REG_PA_SU_POINT_SIZE 0x2280
+#define REG_PA_SU_LINE_CNTL 0x2282
+#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383
+#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380
+#define REG_PA_SU_SC_MODE_CNTL 0x2205
+
+#define REG_PC_INDEX_OFFSET 0x2102
+
+#define REG_RBBM_CNTL 0x003B
+#define REG_RBBM_INT_ACK 0x03B6
+#define REG_RBBM_INT_CNTL 0x03B4
+#define REG_RBBM_INT_STATUS 0x03B5
+#define REG_RBBM_PATCH_RELEASE 0x0001
+#define REG_RBBM_PERIPHID1 0x03F9
+#define REG_RBBM_PERIPHID2 0x03FA
+#define REG_RBBM_DEBUG 0x039B
+#define REG_RBBM_DEBUG_OUT 0x03A0
+#define REG_RBBM_DEBUG_CNTL 0x03A1
+#define REG_RBBM_PM_OVERRIDE1 0x039C
+#define REG_RBBM_PM_OVERRIDE2 0x039D
+#define REG_RBBM_READ_ERROR 0x03B3
+#define REG_RBBM_SOFT_RESET 0x003C
+#define REG_RBBM_STATUS 0x05D0
+
+#define REG_RB_COLORCONTROL 0x2202
+#define REG_RB_COLOR_DEST_MASK 0x2326
+#define REG_RB_COLOR_MASK 0x2104
+#define REG_RB_COPY_CONTROL 0x2318
+#define REG_RB_DEPTHCONTROL 0x2200
+#define REG_RB_EDRAM_INFO 0x0F02
+#define REG_RB_MODECONTROL 0x2208
+#define REG_RB_SURFACE_INFO 0x2000
+#define REG_RB_SAMPLE_POS 0x220a
+
+#define REG_SCRATCH_ADDR 0x01DD
+#define REG_SCRATCH_REG0 0x0578
+#define REG_SCRATCH_REG2 0x057A
+#define REG_SCRATCH_UMSK 0x01DC
+
+#define REG_SQ_CF_BOOLEANS 0x4900
+#define REG_SQ_CF_LOOP 0x4908
+#define REG_SQ_GPR_MANAGEMENT 0x0D00
+#define REG_SQ_FLOW_CONTROL 0x0D01
+#define REG_SQ_INST_STORE_MANAGMENT 0x0D02
+#define REG_SQ_INT_ACK 0x0D36
+#define REG_SQ_INT_CNTL 0x0D34
+#define REG_SQ_INT_STATUS 0x0D35
+#define REG_SQ_PROGRAM_CNTL 0x2180
+#define REG_SQ_PS_PROGRAM 0x21F6
+#define REG_SQ_VS_PROGRAM 0x21F7
+#define REG_SQ_WRAPPING_0 0x2183
+#define REG_SQ_WRAPPING_1 0x2184
+
+#define REG_VGT_ENHANCE 0x2294
+#define REG_VGT_INDX_OFFSET 0x2102
+#define REG_VGT_MAX_VTX_INDX 0x2100
+#define REG_VGT_MIN_VTX_INDX 0x2101
+
+#define REG_TP0_CHICKEN 0x0E1E
+#define REG_TC_CNTL_STATUS 0x0E00
+#define REG_PA_SC_AA_CONFIG 0x2301
+#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL 0x2316
+#define REG_SQ_INTERPOLATOR_CNTL 0x2182
+#define REG_RB_DEPTH_INFO 0x2002
+#define REG_COHER_DEST_BASE_0 0x2006
+#define REG_RB_FOG_COLOR 0x2109
+#define REG_RB_STENCILREFMASK_BF 0x210C
+#define REG_PA_SC_LINE_STIPPLE 0x2283
+#define REG_SQ_PS_CONST 0x2308
+#define REG_RB_DEPTH_CLEAR 0x231D
+#define REG_RB_SAMPLE_COUNT_CTL 0x2324
+#define REG_SQ_CONSTANT_0 0x4000
+#define REG_SQ_FETCH_0 0x4800
+
+#define REG_COHER_BASE_PM4 0xA2A
+#define REG_COHER_STATUS_PM4 0xA2B
+#define REG_COHER_SIZE_PM4 0xA29
+
+/*registers added in adreno220*/
+#define REG_A220_PC_INDX_OFFSET REG_VGT_INDX_OFFSET
+#define REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
+#define REG_A220_PC_MAX_VTX_INDX REG_VGT_MAX_VTX_INDX
+#define REG_A220_RB_LRZ_VSC_CONTROL 0x2209
+#define REG_A220_GRAS_CONTROL 0x2210
+#define REG_A220_VSC_BIN_SIZE 0x0C01
+#define REG_A220_VSC_PIPE_DATA_LENGTH_7 0x0C1D
+
+/*registers added in adreno225*/
+#define REG_A225_RB_COLOR_INFO3 0x2005
+#define REG_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x2103
+#define REG_A225_GRAS_UCP0X 0x2340
+#define REG_A225_GRAS_UCP5W 0x2357
+#define REG_A225_GRAS_UCP_ENABLED 0x2360
+
+/* Debug registers used by snapshot */
+#define REG_PA_SU_DEBUG_CNTL 0x0C80
+#define REG_PA_SU_DEBUG_DATA 0x0C81
+#define REG_RB_DEBUG_CNTL 0x0F26
+#define REG_RB_DEBUG_DATA 0x0F27
+#define REG_PC_DEBUG_CNTL 0x0C38
+#define REG_PC_DEBUG_DATA 0x0C39
+#define REG_GRAS_DEBUG_CNTL 0x0C80
+#define REG_GRAS_DEBUG_DATA 0x0C81
+#define REG_SQ_DEBUG_MISC 0x0D05
+#define REG_SQ_DEBUG_INPUT_FSM 0x0DAE
+#define REG_SQ_DEBUG_CONST_MGR_FSM 0x0DAF
+#define REG_SQ_DEBUG_EXP_ALLOC 0x0DB3
+#define REG_SQ_DEBUG_FSM_ALU_0 0x0DB1
+#define REG_SQ_DEBUG_FSM_ALU_1 0x0DB2
+#define REG_SQ_DEBUG_PTR_BUFF 0x0DB4
+#define REG_SQ_DEBUG_GPR_VTX 0x0DB5
+#define REG_SQ_DEBUG_GPR_PIX 0x0DB6
+#define REG_SQ_DEBUG_TB_STATUS_SEL 0x0DB7
+#define REG_SQ_DEBUG_VTX_TB_0 0x0DB8
+#define REG_SQ_DEBUG_VTX_TB_1 0x0DB9
+#define REG_SQ_DEBUG_VTX_TB_STATE_MEM 0x0DBB
+#define REG_SQ_DEBUG_TP_FSM 0x0DB0
+#define REG_SQ_DEBUG_VTX_TB_STATUS_REG 0x0DBA
+#define REG_SQ_DEBUG_PIX_TB_0 0x0DBC
+#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x0DBD
+#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x0DBE
+#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x0DBF
+#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x0DC0
+#define REG_SQ_DEBUG_PIX_TB_STATE_MEM 0x0DC1
+#define REG_SQ_DEBUG_MISC_0 0x2309
+#define REG_SQ_DEBUG_MISC_1 0x230A
+
+#endif /* __A200_REG_H */
diff --git a/drivers/gpu/msm2/a3xx_reg.h b/drivers/gpu/msm2/a3xx_reg.h
new file mode 100644
index 0000000..676f46d
--- /dev/null
+++ b/drivers/gpu/msm2/a3xx_reg.h
@@ -0,0 +1,794 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _A300_REG_H
+#define _A300_REG_H
+
+/* Interrupt bit positions within RBBM_INT_0 */
+
+#define A3XX_INT_RBBM_GPU_IDLE 0
+#define A3XX_INT_RBBM_AHB_ERROR 1
+#define A3XX_INT_RBBM_REG_TIMEOUT 2
+#define A3XX_INT_RBBM_ME_MS_TIMEOUT 3
+#define A3XX_INT_RBBM_PFP_MS_TIMEOUT 4
+#define A3XX_INT_RBBM_ATB_BUS_OVERFLOW 5
+#define A3XX_INT_VFD_ERROR 6
+#define A3XX_INT_CP_SW_INT 7
+#define A3XX_INT_CP_T0_PACKET_IN_IB 8
+#define A3XX_INT_CP_OPCODE_ERROR 9
+#define A3XX_INT_CP_RESERVED_BIT_ERROR 10
+#define A3XX_INT_CP_HW_FAULT 11
+#define A3XX_INT_CP_DMA 12
+#define A3XX_INT_CP_IB2_INT 13
+#define A3XX_INT_CP_IB1_INT 14
+#define A3XX_INT_CP_RB_INT 15
+#define A3XX_INT_CP_REG_PROTECT_FAULT 16
+#define A3XX_INT_CP_RB_DONE_TS 17
+#define A3XX_INT_CP_VS_DONE_TS 18
+#define A3XX_INT_CP_PS_DONE_TS 19
+#define A3XX_INT_CACHE_FLUSH_TS 20
+#define A3XX_INT_CP_AHB_ERROR_HALT 21
+#define A3XX_INT_MISC_HANG_DETECT 24
+#define A3XX_INT_UCHE_OOB_ACCESS 25
+
+/* Register definitions */
+
+#define A3XX_RBBM_HW_VERSION 0x000
+#define A3XX_RBBM_HW_RELEASE 0x001
+#define A3XX_RBBM_HW_CONFIGURATION 0x002
+#define A3XX_RBBM_CLOCK_CTL 0x010
+#define A3XX_RBBM_SP_HYST_CNT 0x012
+#define A3XX_RBBM_SW_RESET_CMD 0x018
+#define A3XX_RBBM_AHB_CTL0 0x020
+#define A3XX_RBBM_AHB_CTL1 0x021
+#define A3XX_RBBM_AHB_CMD 0x022
+#define A3XX_RBBM_AHB_ERROR_STATUS 0x027
+#define A3XX_RBBM_GPR0_CTL 0x02E
+/* This the same register as on A2XX, just in a different place */
+#define A3XX_RBBM_STATUS 0x030
+#define A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x33
+#define A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x50
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x51
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x54
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x57
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x5A
+#define A3XX_RBBM_INT_CLEAR_CMD 0x061
+#define A3XX_RBBM_INT_0_MASK 0x063
+#define A3XX_RBBM_INT_0_STATUS 0x064
+#define A3XX_RBBM_PERFCTR_CTL 0x80
+#define A3XX_RBBM_PERFCTR_LOAD_CMD0 0x81
+#define A3XX_RBBM_PERFCTR_LOAD_CMD1 0x82
+#define A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x84
+#define A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x85
+#define A3XX_RBBM_PERFCOUNTER0_SELECT 0x86
+#define A3XX_RBBM_PERFCOUNTER1_SELECT 0x87
+#define A3XX_RBBM_GPU_BUSY_MASKED 0x88
+#define A3XX_RBBM_PERFCTR_CP_0_LO 0x90
+#define A3XX_RBBM_PERFCTR_CP_0_HI 0x91
+#define A3XX_RBBM_PERFCTR_RBBM_0_LO 0x92
+#define A3XX_RBBM_PERFCTR_RBBM_0_HI 0x93
+#define A3XX_RBBM_PERFCTR_RBBM_1_LO 0x94
+#define A3XX_RBBM_PERFCTR_RBBM_1_HI 0x95
+#define A3XX_RBBM_PERFCTR_PC_0_LO 0x96
+#define A3XX_RBBM_PERFCTR_PC_0_HI 0x97
+#define A3XX_RBBM_PERFCTR_PC_1_LO 0x98
+#define A3XX_RBBM_PERFCTR_PC_1_HI 0x99
+#define A3XX_RBBM_PERFCTR_PC_2_LO 0x9A
+#define A3XX_RBBM_PERFCTR_PC_2_HI 0x9B
+#define A3XX_RBBM_PERFCTR_PC_3_LO 0x9C
+#define A3XX_RBBM_PERFCTR_PC_3_HI 0x9D
+#define A3XX_RBBM_PERFCTR_VFD_0_LO 0x9E
+#define A3XX_RBBM_PERFCTR_VFD_0_HI 0x9F
+#define A3XX_RBBM_PERFCTR_VFD_1_LO 0xA0
+#define A3XX_RBBM_PERFCTR_VFD_1_HI 0xA1
+#define A3XX_RBBM_PERFCTR_HLSQ_0_LO 0xA2
+#define A3XX_RBBM_PERFCTR_HLSQ_0_HI 0xA3
+#define A3XX_RBBM_PERFCTR_HLSQ_1_LO 0xA4
+#define A3XX_RBBM_PERFCTR_HLSQ_1_HI 0xA5
+#define A3XX_RBBM_PERFCTR_HLSQ_2_LO 0xA6
+#define A3XX_RBBM_PERFCTR_HLSQ_2_HI 0xA7
+#define A3XX_RBBM_PERFCTR_HLSQ_3_LO 0xA8
+#define A3XX_RBBM_PERFCTR_HLSQ_3_HI 0xA9
+#define A3XX_RBBM_PERFCTR_HLSQ_4_LO 0xAA
+#define A3XX_RBBM_PERFCTR_HLSQ_4_HI 0xAB
+#define A3XX_RBBM_PERFCTR_HLSQ_5_LO 0xAC
+#define A3XX_RBBM_PERFCTR_HLSQ_5_HI 0xAD
+#define A3XX_RBBM_PERFCTR_VPC_0_LO 0xAE
+#define A3XX_RBBM_PERFCTR_VPC_0_HI 0xAF
+#define A3XX_RBBM_PERFCTR_VPC_1_LO 0xB0
+#define A3XX_RBBM_PERFCTR_VPC_1_HI 0xB1
+#define A3XX_RBBM_PERFCTR_TSE_0_LO 0xB2
+#define A3XX_RBBM_PERFCTR_TSE_0_HI 0xB3
+#define A3XX_RBBM_PERFCTR_TSE_1_LO 0xB4
+#define A3XX_RBBM_PERFCTR_TSE_1_HI 0xB5
+#define A3XX_RBBM_PERFCTR_RAS_0_LO 0xB6
+#define A3XX_RBBM_PERFCTR_RAS_0_HI 0xB7
+#define A3XX_RBBM_PERFCTR_RAS_1_LO 0xB8
+#define A3XX_RBBM_PERFCTR_RAS_1_HI 0xB9
+#define A3XX_RBBM_PERFCTR_UCHE_0_LO 0xBA
+#define A3XX_RBBM_PERFCTR_UCHE_0_HI 0xBB
+#define A3XX_RBBM_PERFCTR_UCHE_1_LO 0xBC
+#define A3XX_RBBM_PERFCTR_UCHE_1_HI 0xBD
+#define A3XX_RBBM_PERFCTR_UCHE_2_LO 0xBE
+#define A3XX_RBBM_PERFCTR_UCHE_2_HI 0xBF
+#define A3XX_RBBM_PERFCTR_UCHE_3_LO 0xC0
+#define A3XX_RBBM_PERFCTR_UCHE_3_HI 0xC1
+#define A3XX_RBBM_PERFCTR_UCHE_4_LO 0xC2
+#define A3XX_RBBM_PERFCTR_UCHE_4_HI 0xC3
+#define A3XX_RBBM_PERFCTR_UCHE_5_LO 0xC4
+#define A3XX_RBBM_PERFCTR_UCHE_5_HI 0xC5
+#define A3XX_RBBM_PERFCTR_TP_0_LO 0xC6
+#define A3XX_RBBM_PERFCTR_TP_0_HI 0xC7
+#define A3XX_RBBM_PERFCTR_TP_1_LO 0xC8
+#define A3XX_RBBM_PERFCTR_TP_1_HI 0xC9
+#define A3XX_RBBM_PERFCTR_TP_2_LO 0xCA
+#define A3XX_RBBM_PERFCTR_TP_2_HI 0xCB
+#define A3XX_RBBM_PERFCTR_TP_3_LO 0xCC
+#define A3XX_RBBM_PERFCTR_TP_3_HI 0xCD
+#define A3XX_RBBM_PERFCTR_TP_4_LO 0xCE
+#define A3XX_RBBM_PERFCTR_TP_4_HI 0xCF
+#define A3XX_RBBM_PERFCTR_TP_5_LO 0xD0
+#define A3XX_RBBM_PERFCTR_TP_5_HI 0xD1
+#define A3XX_RBBM_PERFCTR_SP_0_LO 0xD2
+#define A3XX_RBBM_PERFCTR_SP_0_HI 0xD3
+#define A3XX_RBBM_PERFCTR_SP_1_LO 0xD4
+#define A3XX_RBBM_PERFCTR_SP_1_HI 0xD5
+#define A3XX_RBBM_PERFCTR_SP_2_LO 0xD6
+#define A3XX_RBBM_PERFCTR_SP_2_HI 0xD7
+#define A3XX_RBBM_PERFCTR_SP_3_LO 0xD8
+#define A3XX_RBBM_PERFCTR_SP_3_HI 0xD9
+#define A3XX_RBBM_PERFCTR_SP_4_LO 0xDA
+#define A3XX_RBBM_PERFCTR_SP_4_HI 0xDB
+#define A3XX_RBBM_PERFCTR_SP_5_LO 0xDC
+#define A3XX_RBBM_PERFCTR_SP_5_HI 0xDD
+#define A3XX_RBBM_PERFCTR_SP_6_LO 0xDE
+#define A3XX_RBBM_PERFCTR_SP_6_HI 0xDF
+#define A3XX_RBBM_PERFCTR_SP_7_LO 0xE0
+#define A3XX_RBBM_PERFCTR_SP_7_HI 0xE1
+#define A3XX_RBBM_PERFCTR_RB_0_LO 0xE2
+#define A3XX_RBBM_PERFCTR_RB_0_HI 0xE3
+#define A3XX_RBBM_PERFCTR_RB_1_LO 0xE4
+#define A3XX_RBBM_PERFCTR_RB_1_HI 0xE5
+
+#define A3XX_RBBM_RBBM_CTL 0x100
+#define A3XX_RBBM_PERFCTR_PWR_0_LO 0x0EA
+#define A3XX_RBBM_PERFCTR_PWR_0_HI 0x0EB
+#define A3XX_RBBM_PERFCTR_PWR_1_LO 0x0EC
+#define A3XX_RBBM_PERFCTR_PWR_1_HI 0x0ED
+#define A3XX_RBBM_DEBUG_BUS_CTL 0x111
+#define A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x112
+#define A3XX_RBBM_DEBUG_BUS_STB_CTL0 0x11B
+#define A3XX_RBBM_DEBUG_BUS_STB_CTL1 0x11C
+#define A3XX_RBBM_INT_TRACE_BUS_CTL 0x11D
+#define A3XX_RBBM_EXT_TRACE_BUS_CTL 0x11E
+#define A3XX_RBBM_EXT_TRACE_STOP_CNT 0x11F
+#define A3XX_RBBM_EXT_TRACE_START_CNT 0x120
+#define A3XX_RBBM_EXT_TRACE_PERIOD_CNT 0x121
+#define A3XX_RBBM_EXT_TRACE_CMD 0x122
+
+/* Following two are same as on A2XX, just in a different place */
+#define A3XX_CP_PFP_UCODE_ADDR 0x1C9
+#define A3XX_CP_PFP_UCODE_DATA 0x1CA
+#define A3XX_CP_ROQ_ADDR 0x1CC
+#define A3XX_CP_ROQ_DATA 0x1CD
+#define A3XX_CP_MERCIU_ADDR 0x1D1
+#define A3XX_CP_MERCIU_DATA 0x1D2
+#define A3XX_CP_MERCIU_DATA2 0x1D3
+#define A3XX_CP_MEQ_ADDR 0x1DA
+#define A3XX_CP_MEQ_DATA 0x1DB
+#define A3XX_CP_PERFCOUNTER_SELECT 0x445
+#define A3XX_CP_WFI_PEND_CTR 0x01F5
+#define A3XX_CP_HW_FAULT 0x45C
+#define A3XX_CP_AHB_FAULT 0x54D
+#define A3XX_CP_PROTECT_CTRL 0x45E
+#define A3XX_CP_PROTECT_STATUS 0x45F
+#define A3XX_CP_PROTECT_REG_0 0x460
+#define A3XX_CP_PROTECT_REG_1 0x461
+#define A3XX_CP_PROTECT_REG_2 0x462
+#define A3XX_CP_PROTECT_REG_3 0x463
+#define A3XX_CP_PROTECT_REG_4 0x464
+#define A3XX_CP_PROTECT_REG_5 0x465
+#define A3XX_CP_PROTECT_REG_6 0x466
+#define A3XX_CP_PROTECT_REG_7 0x467
+#define A3XX_CP_PROTECT_REG_8 0x468
+#define A3XX_CP_PROTECT_REG_9 0x469
+#define A3XX_CP_PROTECT_REG_A 0x46A
+#define A3XX_CP_PROTECT_REG_B 0x46B
+#define A3XX_CP_PROTECT_REG_C 0x46C
+#define A3XX_CP_PROTECT_REG_D 0x46D
+#define A3XX_CP_PROTECT_REG_E 0x46E
+#define A3XX_CP_PROTECT_REG_F 0x46F
+#define A3XX_CP_SCRATCH_REG2 0x57A
+#define A3XX_CP_SCRATCH_REG3 0x57B
+#define A3XX_VSC_BIN_SIZE 0xC01
+#define A3XX_VSC_SIZE_ADDRESS 0xC02
+#define A3XX_VSC_PIPE_CONFIG_0 0xC06
+#define A3XX_VSC_PIPE_DATA_ADDRESS_0 0xC07
+#define A3XX_VSC_PIPE_DATA_LENGTH_0 0xC08
+#define A3XX_VSC_PIPE_CONFIG_1 0xC09
+#define A3XX_VSC_PIPE_DATA_ADDRESS_1 0xC0A
+#define A3XX_VSC_PIPE_DATA_LENGTH_1 0xC0B
+#define A3XX_VSC_PIPE_CONFIG_2 0xC0C
+#define A3XX_VSC_PIPE_DATA_ADDRESS_2 0xC0D
+#define A3XX_VSC_PIPE_DATA_LENGTH_2 0xC0E
+#define A3XX_VSC_PIPE_CONFIG_3 0xC0F
+#define A3XX_VSC_PIPE_DATA_ADDRESS_3 0xC10
+#define A3XX_VSC_PIPE_DATA_LENGTH_3 0xC11
+#define A3XX_VSC_PIPE_CONFIG_4 0xC12
+#define A3XX_VSC_PIPE_DATA_ADDRESS_4 0xC13
+#define A3XX_VSC_PIPE_DATA_LENGTH_4 0xC14
+#define A3XX_VSC_PIPE_CONFIG_5 0xC15
+#define A3XX_VSC_PIPE_DATA_ADDRESS_5 0xC16
+#define A3XX_VSC_PIPE_DATA_LENGTH_5 0xC17
+#define A3XX_VSC_PIPE_CONFIG_6 0xC18
+#define A3XX_VSC_PIPE_DATA_ADDRESS_6 0xC19
+#define A3XX_VSC_PIPE_DATA_LENGTH_6 0xC1A
+#define A3XX_VSC_PIPE_CONFIG_7 0xC1B
+#define A3XX_VSC_PIPE_DATA_ADDRESS_7 0xC1C
+#define A3XX_VSC_PIPE_DATA_LENGTH_7 0xC1D
+#define A3XX_PC_PERFCOUNTER0_SELECT 0xC48
+#define A3XX_PC_PERFCOUNTER1_SELECT 0xC49
+#define A3XX_PC_PERFCOUNTER2_SELECT 0xC4A
+#define A3XX_PC_PERFCOUNTER3_SELECT 0xC4B
+#define A3XX_GRAS_TSE_DEBUG_ECO 0xC81
+#define A3XX_GRAS_PERFCOUNTER0_SELECT 0xC88
+#define A3XX_GRAS_PERFCOUNTER1_SELECT 0xC89
+#define A3XX_GRAS_PERFCOUNTER2_SELECT 0xC8A
+#define A3XX_GRAS_PERFCOUNTER3_SELECT 0xC8B
+#define A3XX_GRAS_CL_USER_PLANE_X0 0xCA0
+#define A3XX_GRAS_CL_USER_PLANE_Y0 0xCA1
+#define A3XX_GRAS_CL_USER_PLANE_Z0 0xCA2
+#define A3XX_GRAS_CL_USER_PLANE_W0 0xCA3
+#define A3XX_GRAS_CL_USER_PLANE_X1 0xCA4
+#define A3XX_GRAS_CL_USER_PLANE_Y1 0xCA5
+#define A3XX_GRAS_CL_USER_PLANE_Z1 0xCA6
+#define A3XX_GRAS_CL_USER_PLANE_W1 0xCA7
+#define A3XX_GRAS_CL_USER_PLANE_X2 0xCA8
+#define A3XX_GRAS_CL_USER_PLANE_Y2 0xCA9
+#define A3XX_GRAS_CL_USER_PLANE_Z2 0xCAA
+#define A3XX_GRAS_CL_USER_PLANE_W2 0xCAB
+#define A3XX_GRAS_CL_USER_PLANE_X3 0xCAC
+#define A3XX_GRAS_CL_USER_PLANE_Y3 0xCAD
+#define A3XX_GRAS_CL_USER_PLANE_Z3 0xCAE
+#define A3XX_GRAS_CL_USER_PLANE_W3 0xCAF
+#define A3XX_GRAS_CL_USER_PLANE_X4 0xCB0
+#define A3XX_GRAS_CL_USER_PLANE_Y4 0xCB1
+#define A3XX_GRAS_CL_USER_PLANE_Z4 0xCB2
+#define A3XX_GRAS_CL_USER_PLANE_W4 0xCB3
+#define A3XX_GRAS_CL_USER_PLANE_X5 0xCB4
+#define A3XX_GRAS_CL_USER_PLANE_Y5 0xCB5
+#define A3XX_GRAS_CL_USER_PLANE_Z5 0xCB6
+#define A3XX_GRAS_CL_USER_PLANE_W5 0xCB7
+#define A3XX_RB_GMEM_BASE_ADDR 0xCC0
+#define A3XX_RB_DEBUG_ECO_CONTROLS_ADDR 0xCC1
+#define A3XX_RB_PERFCOUNTER0_SELECT 0xCC6
+#define A3XX_RB_PERFCOUNTER1_SELECT 0xCC7
+#define A3XX_RB_FRAME_BUFFER_DIMENSION 0xCE0
+#define A3XX_HLSQ_PERFCOUNTER0_SELECT 0xE00
+#define A3XX_HLSQ_PERFCOUNTER1_SELECT 0xE01
+#define A3XX_HLSQ_PERFCOUNTER2_SELECT 0xE02
+#define A3XX_HLSQ_PERFCOUNTER3_SELECT 0xE03
+#define A3XX_HLSQ_PERFCOUNTER4_SELECT 0xE04
+#define A3XX_HLSQ_PERFCOUNTER5_SELECT 0xE05
+#define A3XX_VFD_PERFCOUNTER0_SELECT 0xE44
+#define A3XX_VFD_PERFCOUNTER1_SELECT 0xE45
+#define A3XX_VPC_VPC_DEBUG_RAM_SEL 0xE61
+#define A3XX_VPC_VPC_DEBUG_RAM_READ 0xE62
+#define A3XX_VPC_PERFCOUNTER0_SELECT 0xE64
+#define A3XX_VPC_PERFCOUNTER1_SELECT 0xE65
+#define A3XX_UCHE_CACHE_MODE_CONTROL_REG 0xE82
+#define A3XX_UCHE_PERFCOUNTER0_SELECT 0xE84
+#define A3XX_UCHE_PERFCOUNTER1_SELECT 0xE85
+#define A3XX_UCHE_PERFCOUNTER2_SELECT 0xE86
+#define A3XX_UCHE_PERFCOUNTER3_SELECT 0xE87
+#define A3XX_UCHE_PERFCOUNTER4_SELECT 0xE88
+#define A3XX_UCHE_PERFCOUNTER5_SELECT 0xE89
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG 0xEA0
+#define A3XX_SP_PERFCOUNTER0_SELECT 0xEC4
+#define A3XX_SP_PERFCOUNTER1_SELECT 0xEC5
+#define A3XX_SP_PERFCOUNTER2_SELECT 0xEC6
+#define A3XX_SP_PERFCOUNTER3_SELECT 0xEC7
+#define A3XX_SP_PERFCOUNTER4_SELECT 0xEC8
+#define A3XX_SP_PERFCOUNTER5_SELECT 0xEC9
+#define A3XX_SP_PERFCOUNTER6_SELECT 0xECA
+#define A3XX_SP_PERFCOUNTER7_SELECT 0xECB
+#define A3XX_TP_PERFCOUNTER0_SELECT 0xF04
+#define A3XX_TP_PERFCOUNTER1_SELECT 0xF05
+#define A3XX_TP_PERFCOUNTER2_SELECT 0xF06
+#define A3XX_TP_PERFCOUNTER3_SELECT 0xF07
+#define A3XX_TP_PERFCOUNTER4_SELECT 0xF08
+#define A3XX_TP_PERFCOUNTER5_SELECT 0xF09
+#define A3XX_GRAS_CL_CLIP_CNTL 0x2040
+#define A3XX_GRAS_CL_GB_CLIP_ADJ 0x2044
+#define A3XX_GRAS_CL_VPORT_XOFFSET 0x2048
+#define A3XX_GRAS_CL_VPORT_XSCALE 0x2049
+#define A3XX_GRAS_CL_VPORT_YOFFSET 0x204A
+#define A3XX_GRAS_CL_VPORT_YSCALE 0x204B
+#define A3XX_GRAS_CL_VPORT_ZOFFSET 0x204C
+#define A3XX_GRAS_CL_VPORT_ZSCALE 0x204D
+#define A3XX_GRAS_SU_POINT_MINMAX 0x2068
+#define A3XX_GRAS_SU_POINT_SIZE 0x2069
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x206C
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x206D
+#define A3XX_GRAS_SU_MODE_CONTROL 0x2070
+#define A3XX_GRAS_SC_CONTROL 0x2072
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x2074
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x2075
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x2079
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x207A
+#define A3XX_RB_MODE_CONTROL 0x20C0
+#define A3XX_RB_RENDER_CONTROL 0x20C1
+#define A3XX_RB_MSAA_CONTROL 0x20C2
+#define A3XX_RB_ALPHA_REFERENCE 0x20C3
+#define A3XX_RB_MRT_CONTROL0 0x20C4
+#define A3XX_RB_MRT_BUF_INFO0 0x20C5
+#define A3XX_RB_MRT_BUF_BASE0 0x20C6
+#define A3XX_RB_MRT_BLEND_CONTROL0 0x20C7
+#define A3XX_RB_MRT_CONTROL1 0x20C8
+#define A3XX_RB_MRT_BUF_INFO1 0x20C9
+#define A3XX_RB_MRT_BUF_BASE1 0x20CA
+#define A3XX_RB_MRT_BLEND_CONTROL1 0x20CB
+#define A3XX_RB_MRT_CONTROL2 0x20CC
+#define A3XX_RB_MRT_BUF_INFO2 0x20CD
+#define A3XX_RB_MRT_BUF_BASE2 0x20CE
+#define A3XX_RB_MRT_BLEND_CONTROL2 0x20CF
+#define A3XX_RB_MRT_CONTROL3 0x20D0
+#define A3XX_RB_MRT_BUF_INFO3 0x20D1
+#define A3XX_RB_MRT_BUF_BASE3 0x20D2
+#define A3XX_RB_MRT_BLEND_CONTROL3 0x20D3
+#define A3XX_RB_BLEND_RED 0x20E4
+#define A3XX_RB_BLEND_GREEN 0x20E5
+#define A3XX_RB_BLEND_BLUE 0x20E6
+#define A3XX_RB_BLEND_ALPHA 0x20E7
+#define A3XX_RB_CLEAR_COLOR_DW0 0x20E8
+#define A3XX_RB_CLEAR_COLOR_DW1 0x20E9
+#define A3XX_RB_CLEAR_COLOR_DW2 0x20EA
+#define A3XX_RB_CLEAR_COLOR_DW3 0x20EB
+#define A3XX_RB_COPY_CONTROL 0x20EC
+#define A3XX_RB_COPY_DEST_BASE 0x20ED
+#define A3XX_RB_COPY_DEST_PITCH 0x20EE
+#define A3XX_RB_COPY_DEST_INFO 0x20EF
+#define A3XX_RB_DEPTH_CONTROL 0x2100
+#define A3XX_RB_DEPTH_CLEAR 0x2101
+#define A3XX_RB_DEPTH_BUF_INFO 0x2102
+#define A3XX_RB_DEPTH_BUF_PITCH 0x2103
+#define A3XX_RB_STENCIL_CONTROL 0x2104
+#define A3XX_RB_STENCIL_CLEAR 0x2105
+#define A3XX_RB_STENCIL_BUF_INFO 0x2106
+#define A3XX_RB_STENCIL_BUF_PITCH 0x2107
+#define A3XX_RB_STENCIL_REF_MASK 0x2108
+#define A3XX_RB_STENCIL_REF_MASK_BF 0x2109
+#define A3XX_RB_LRZ_VSC_CONTROL 0x210C
+#define A3XX_RB_WINDOW_OFFSET 0x210E
+#define A3XX_RB_SAMPLE_COUNT_CONTROL 0x2110
+#define A3XX_RB_SAMPLE_COUNT_ADDR 0x2111
+#define A3XX_RB_Z_CLAMP_MIN 0x2114
+#define A3XX_RB_Z_CLAMP_MAX 0x2115
+#define A3XX_PC_VSTREAM_CONTROL 0x21E4
+#define A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x21EA
+#define A3XX_PC_PRIM_VTX_CNTL 0x21EC
+#define A3XX_PC_RESTART_INDEX 0x21ED
+#define A3XX_HLSQ_CONTROL_0_REG 0x2200
+#define A3XX_HLSQ_CONTROL_1_REG 0x2201
+#define A3XX_HLSQ_CONTROL_2_REG 0x2202
+#define A3XX_HLSQ_CONTROL_3_REG 0x2203
+#define A3XX_HLSQ_VS_CONTROL_REG 0x2204
+#define A3XX_HLSQ_FS_CONTROL_REG 0x2205
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x2206
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x2207
+#define A3XX_HLSQ_CL_NDRANGE_0_REG 0x220A
+#define A3XX_HLSQ_CL_NDRANGE_1_REG 0x220B
+#define A3XX_HLSQ_CL_NDRANGE_2_REG 0x220C
+#define A3XX_HLSQ_CL_NDRANGE_3_REG 0x220D
+#define A3XX_HLSQ_CL_NDRANGE_4_REG 0x220E
+#define A3XX_HLSQ_CL_NDRANGE_5_REG 0x220F
+#define A3XX_HLSQ_CL_NDRANGE_6_REG 0x2210
+#define A3XX_HLSQ_CL_CONTROL_0_REG 0x2211
+#define A3XX_HLSQ_CL_CONTROL_1_REG 0x2212
+#define A3XX_HLSQ_CL_KERNEL_CONST_REG 0x2214
+#define A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x2215
+#define A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x2216
+#define A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x2217
+#define A3XX_HLSQ_CL_WG_OFFSET_REG 0x221A
+#define A3XX_VFD_CONTROL_0 0x2240
+#define A3XX_VFD_INDEX_MIN 0x2242
+#define A3XX_VFD_INDEX_MAX 0x2243
+#define A3XX_VFD_FETCH_INSTR_0_0 0x2246
+#define A3XX_VFD_FETCH_INSTR_0_4 0x224E
+#define A3XX_VFD_FETCH_INSTR_1_F 0x2265
+#define A3XX_VFD_DECODE_INSTR_0 0x2266
+#define A3XX_VFD_VS_THREADING_THRESHOLD 0x227E
+#define A3XX_VPC_ATTR 0x2280
+#define A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x228B
+#define A3XX_SP_SP_CTRL_REG 0x22C0
+#define A3XX_SP_VS_CTRL_REG0 0x22C4
+#define A3XX_SP_VS_CTRL_REG1 0x22C5
+#define A3XX_SP_VS_PARAM_REG 0x22C6
+#define A3XX_SP_VS_OUT_REG_0 0x22C7
+#define A3XX_SP_VS_OUT_REG_1 0x22C8
+#define A3XX_SP_VS_OUT_REG_2 0x22C9
+#define A3XX_SP_VS_OUT_REG_3 0x22CA
+#define A3XX_SP_VS_OUT_REG_4 0x22CB
+#define A3XX_SP_VS_OUT_REG_5 0x22CC
+#define A3XX_SP_VS_OUT_REG_6 0x22CD
+#define A3XX_SP_VS_OUT_REG_7 0x22CE
+#define A3XX_SP_VS_VPC_DST_REG_0 0x22D0
+#define A3XX_SP_VS_VPC_DST_REG_1 0x22D1
+#define A3XX_SP_VS_VPC_DST_REG_2 0x22D2
+#define A3XX_SP_VS_VPC_DST_REG_3 0x22D3
+#define A3XX_SP_VS_OBJ_OFFSET_REG 0x22D4
+#define A3XX_SP_VS_OBJ_START_REG 0x22D5
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG 0x22D6
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG 0x22D7
+#define A3XX_SP_VS_PVT_MEM_SIZE_REG 0x22D8
+#define A3XX_SP_VS_LENGTH_REG 0x22DF
+#define A3XX_SP_FS_CTRL_REG0 0x22E0
+#define A3XX_SP_FS_CTRL_REG1 0x22E1
+#define A3XX_SP_FS_OBJ_OFFSET_REG 0x22E2
+#define A3XX_SP_FS_OBJ_START_REG 0x22E3
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG 0x22E4
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG 0x22E5
+#define A3XX_SP_FS_PVT_MEM_SIZE_REG 0x22E6
+#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x22E8
+#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x22E9
+#define A3XX_SP_FS_OUTPUT_REG 0x22EC
+#define A3XX_SP_FS_MRT_REG_0 0x22F0
+#define A3XX_SP_FS_MRT_REG_1 0x22F1
+#define A3XX_SP_FS_MRT_REG_2 0x22F2
+#define A3XX_SP_FS_MRT_REG_3 0x22F3
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_0 0x22F4
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_1 0x22F5
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_2 0x22F6
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_3 0x22F7
+#define A3XX_SP_FS_LENGTH_REG 0x22FF
+#define A3XX_TPL1_TP_VS_TEX_OFFSET 0x2340
+#define A3XX_TPL1_TP_FS_TEX_OFFSET 0x2342
+#define A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x2343
+#define A3XX_VBIF_CLKON 0x3001
+#define A3XX_VBIF_FIXED_SORT_EN 0x300C
+#define A3XX_VBIF_FIXED_SORT_SEL0 0x300D
+#define A3XX_VBIF_FIXED_SORT_SEL1 0x300E
+#define A3XX_VBIF_ABIT_SORT 0x301C
+#define A3XX_VBIF_ABIT_SORT_CONF 0x301D
+#define A3XX_VBIF_GATE_OFF_WRREQ_EN 0x302A
+#define A3XX_VBIF_IN_RD_LIM_CONF0 0x302C
+#define A3XX_VBIF_IN_RD_LIM_CONF1 0x302D
+#define A3XX_VBIF_IN_WR_LIM_CONF0 0x3030
+#define A3XX_VBIF_IN_WR_LIM_CONF1 0x3031
+#define A3XX_VBIF_OUT_RD_LIM_CONF0 0x3034
+#define A3XX_VBIF_OUT_WR_LIM_CONF0 0x3035
+#define A3XX_VBIF_DDR_OUT_MAX_BURST 0x3036
+#define A3XX_VBIF_ARB_CTL 0x303C
+#define A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x3049
+#define A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x3058
+#define A3XX_VBIF_OUT_AXI_AOOO_EN 0x305E
+#define A3XX_VBIF_OUT_AXI_AOOO 0x305F
+#define A3XX_VBIF_PERF_CNT_EN 0x3070
+#define A3XX_VBIF_PERF_CNT_CLR 0x3071
+#define A3XX_VBIF_PERF_CNT_SEL 0x3072
+#define A3XX_VBIF_PERF_CNT0_LO 0x3073
+#define A3XX_VBIF_PERF_CNT0_HI 0x3074
+#define A3XX_VBIF_PERF_CNT1_LO 0x3075
+#define A3XX_VBIF_PERF_CNT1_HI 0x3076
+#define A3XX_VBIF_PERF_PWR_CNT0_LO 0x3077
+#define A3XX_VBIF_PERF_PWR_CNT0_HI 0x3078
+#define A3XX_VBIF_PERF_PWR_CNT1_LO 0x3079
+#define A3XX_VBIF_PERF_PWR_CNT1_HI 0x307a
+#define A3XX_VBIF_PERF_PWR_CNT2_LO 0x307b
+#define A3XX_VBIF_PERF_PWR_CNT2_HI 0x307c
+
+/* Bit flags for RBBM_CTL */
+#define RBBM_RBBM_CTL_RESET_PWR_CTR0 BIT(0)
+#define RBBM_RBBM_CTL_RESET_PWR_CTR1 BIT(1)
+#define RBBM_RBBM_CTL_ENABLE_PWR_CTR0 BIT(16)
+#define RBBM_RBBM_CTL_ENABLE_PWR_CTR1 BIT(17)
+
+/* Bit flag for RBMM_PERFCTR_CTL */
+#define RBBM_PERFCTR_CTL_ENABLE BIT(0)
+
+/* Various flags used by the context switch code */
+
+#define SP_MULTI 0
+#define SP_BUFFER_MODE 1
+#define SP_TWO_VTX_QUADS 0
+#define SP_PIXEL_BASED 0
+#define SP_R8G8B8A8_UNORM 8
+#define SP_FOUR_PIX_QUADS 1
+
+#define HLSQ_DIRECT 0
+#define HLSQ_BLOCK_ID_SP_VS 4
+#define HLSQ_SP_VS_INSTR 0
+#define HLSQ_SP_FS_INSTR 0
+#define HLSQ_BLOCK_ID_SP_FS 6
+#define HLSQ_TWO_PIX_QUADS 0
+#define HLSQ_TWO_VTX_QUADS 0
+#define HLSQ_BLOCK_ID_TP_TEX 2
+#define HLSQ_TP_TEX_SAMPLERS 0
+#define HLSQ_TP_TEX_MEMOBJ 1
+#define HLSQ_BLOCK_ID_TP_MIPMAP 3
+#define HLSQ_TP_MIPMAP_BASE 1
+#define HLSQ_FOUR_PIX_QUADS 1
+
+#define RB_FACTOR_ONE 1
+#define RB_BLEND_OP_ADD 0
+#define RB_FACTOR_ZERO 0
+#define RB_DITHER_DISABLE 0
+#define RB_DITHER_ALWAYS 1
+#define RB_FRAG_NEVER 0
+#define RB_ENDIAN_NONE 0
+#define RB_R8G8B8A8_UNORM 8
+#define RB_RESOLVE_PASS 2
+#define RB_CLEAR_MODE_RESOLVE 1
+#define RB_TILINGMODE_LINEAR 0
+#define RB_REF_NEVER 0
+#define RB_FRAG_LESS 1
+#define RB_REF_ALWAYS 7
+#define RB_STENCIL_KEEP 0
+#define RB_RENDERING_PASS 0
+#define RB_TILINGMODE_32X32 2
+
+#define PC_DRAW_TRIANGLES 2
+#define PC_DI_PT_RECTLIST 8
+#define PC_DI_SRC_SEL_AUTO_INDEX 2
+#define PC_DI_INDEX_SIZE_16_BIT 0
+#define PC_DI_IGNORE_VISIBILITY 0
+#define PC_DI_PT_TRILIST 4
+#define PC_DI_SRC_SEL_IMMEDIATE 1
+#define PC_DI_INDEX_SIZE_32_BIT 1
+
+#define UCHE_ENTIRE_CACHE 1
+#define UCHE_OP_INVALIDATE 1
+
+/*
+ * The following are bit field shifts within some of the registers defined
+ * above. These are used in the context switch code in conjunction with the
+ * _SET macro
+ */
+
+#define GRAS_CL_CLIP_CNTL_CLIP_DISABLE 16
+#define GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 12
+#define GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 21
+#define GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 19
+#define GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 20
+#define GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 17
+#define GRAS_CL_VPORT_XSCALE_VPORT_XSCALE 0
+#define GRAS_CL_VPORT_YSCALE_VPORT_YSCALE 0
+#define GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE 0
+#define GRAS_SC_CONTROL_RASTER_MODE 12
+#define GRAS_SC_CONTROL_RENDER_MODE 4
+#define GRAS_SC_SCREEN_SCISSOR_BR_BR_X 0
+#define GRAS_SC_SCREEN_SCISSOR_BR_BR_Y 16
+#define GRAS_SC_WINDOW_SCISSOR_BR_BR_X 0
+#define GRAS_SC_WINDOW_SCISSOR_BR_BR_Y 16
+#define GRAS_SU_CTRLMODE_LINEHALFWIDTH 03
+#define HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY 16
+#define HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY 0
+#define HLSQ_CTRL0REG_CHUNKDISABLE 26
+#define HLSQ_CTRL0REG_CONSTSWITCHMODE 27
+#define HLSQ_CTRL0REG_FSSUPERTHREADENABLE 6
+#define HLSQ_CTRL0REG_FSTHREADSIZE 4
+#define HLSQ_CTRL0REG_LAZYUPDATEDISABLE 28
+#define HLSQ_CTRL0REG_RESERVED2 10
+#define HLSQ_CTRL0REG_SPCONSTFULLUPDATE 29
+#define HLSQ_CTRL0REG_SPSHADERRESTART 9
+#define HLSQ_CTRL0REG_TPFULLUPDATE 30
+#define HLSQ_CTRL1REG_RESERVED1 9
+#define HLSQ_CTRL1REG_VSSUPERTHREADENABLE 8
+#define HLSQ_CTRL1REG_VSTHREADSIZE 6
+#define HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD 26
+#define HLSQ_FSCTRLREG_FSCONSTLENGTH 0
+#define HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET 12
+#define HLSQ_FSCTRLREG_FSINSTRLENGTH 24
+#define HLSQ_VSCTRLREG_VSINSTRLENGTH 24
+#define PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE 8
+#define PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE 5
+#define PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST 25
+#define PC_PRIM_VTX_CONTROL_STRIDE_IN_VPC 0
+#define PC_DRAW_INITIATOR_PRIM_TYPE 0
+#define PC_DRAW_INITIATOR_SOURCE_SELECT 6
+#define PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE 9
+#define PC_DRAW_INITIATOR_INDEX_SIZE 0x0B
+#define PC_DRAW_INITIATOR_SMALL_INDEX 0x0D
+#define PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x0E
+#define RB_COPYCONTROL_COPY_GMEM_BASE 14
+#define RB_COPYCONTROL_RESOLVE_CLEAR_MODE 4
+#define RB_COPYDESTBASE_COPY_DEST_BASE 4
+#define RB_COPYDESTINFO_COPY_COMPONENT_ENABLE 14
+#define RB_COPYDESTINFO_COPY_DEST_ENDIAN 18
+#define RB_COPYDESTINFO_COPY_DEST_FORMAT 2
+#define RB_COPYDESTINFO_COPY_DEST_TILE 0
+#define RB_COPYDESTPITCH_COPY_DEST_PITCH 0
+#define RB_DEPTHCONTROL_Z_TEST_FUNC 4
+#define RB_MODECONTROL_RENDER_MODE 8
+#define RB_MODECONTROL_MARB_CACHE_SPLIT_MODE 15
+#define RB_MODECONTROL_PACKER_TIMER_ENABLE 16
+#define RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE 21
+#define RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR 24
+#define RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR 16
+#define RB_MRTBLENDCONTROL_CLAMP_ENABLE 29
+#define RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE 5
+#define RB_MRTBLENDCONTROL_RGB_DEST_FACTOR 8
+#define RB_MRTBLENDCONTROL_RGB_SRC_FACTOR 0
+#define RB_MRTBUFBASE_COLOR_BUF_BASE 4
+#define RB_MRTBUFINFO_COLOR_BUF_PITCH 17
+#define RB_MRTBUFINFO_COLOR_FORMAT 0
+#define RB_MRTBUFINFO_COLOR_TILE_MODE 6
+#define RB_MRTCONTROL_COMPONENT_ENABLE 24
+#define RB_MRTCONTROL_DITHER_MODE 12
+#define RB_MRTCONTROL_READ_DEST_ENABLE 3
+#define RB_MRTCONTROL_ROP_CODE 8
+#define RB_MSAACONTROL_MSAA_DISABLE 10
+#define RB_MSAACONTROL_SAMPLE_MASK 16
+#define RB_RENDERCONTROL_ALPHA_TEST_FUNC 24
+#define RB_RENDERCONTROL_BIN_WIDTH 4
+#define RB_RENDERCONTROL_DISABLE_COLOR_PIPE 12
+#define RB_STENCILCONTROL_STENCIL_FAIL 11
+#define RB_STENCILCONTROL_STENCIL_FAIL_BF 23
+#define RB_STENCILCONTROL_STENCIL_FUNC 8
+#define RB_STENCILCONTROL_STENCIL_FUNC_BF 20
+#define RB_STENCILCONTROL_STENCIL_ZFAIL 17
+#define RB_STENCILCONTROL_STENCIL_ZFAIL_BF 29
+#define RB_STENCILCONTROL_STENCIL_ZPASS 14
+#define RB_STENCILCONTROL_STENCIL_ZPASS_BF 26
+#define SP_FSCTRLREG0_FSFULLREGFOOTPRINT 10
+#define SP_FSCTRLREG0_FSHALFREGFOOTPRINT 4
+#define SP_FSCTRLREG0_FSICACHEINVALID 2
+#define SP_FSCTRLREG0_FSINOUTREGOVERLAP 18
+#define SP_FSCTRLREG0_FSINSTRBUFFERMODE 1
+#define SP_FSCTRLREG0_FSLENGTH 24
+#define SP_FSCTRLREG0_FSSUPERTHREADMODE 21
+#define SP_FSCTRLREG0_FSTHREADMODE 0
+#define SP_FSCTRLREG0_FSTHREADSIZE 20
+#define SP_FSCTRLREG0_PIXLODENABLE 22
+#define SP_FSCTRLREG1_FSCONSTLENGTH 0
+#define SP_FSCTRLREG1_FSINITIALOUTSTANDING 20
+#define SP_FSCTRLREG1_HALFPRECVAROFFSET 24
+#define SP_FSMRTREG_REGID 0
+#define SP_FSMRTREG_PRECISION 8
+#define SP_FSOUTREG_PAD0 2
+#define SP_IMAGEOUTPUTREG_MRTFORMAT 0
+#define SP_IMAGEOUTPUTREG_DEPTHOUTMODE 3
+#define SP_IMAGEOUTPUTREG_PAD0 6
+#define SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET 16
+#define SP_OBJOFFSETREG_SHADEROBJOFFSETINIC 25
+#define SP_SHADERLENGTH_LEN 0
+#define SP_SPCTRLREG_CONSTMODE 18
+#define SP_SPCTRLREG_LOMODE 22
+#define SP_SPCTRLREG_SLEEPMODE 20
+#define SP_VSCTRLREG0_VSFULLREGFOOTPRINT 10
+#define SP_VSCTRLREG0_VSICACHEINVALID 2
+#define SP_VSCTRLREG0_VSINSTRBUFFERMODE 1
+#define SP_VSCTRLREG0_VSLENGTH 24
+#define SP_VSCTRLREG0_VSSUPERTHREADMODE 21
+#define SP_VSCTRLREG0_VSTHREADMODE 0
+#define SP_VSCTRLREG0_VSTHREADSIZE 20
+#define SP_VSCTRLREG1_VSINITIALOUTSTANDING 24
+#define SP_VSOUTREG_COMPMASK0 9
+#define SP_VSPARAMREG_POSREGID 0
+#define SP_VSPARAMREG_PSIZEREGID 8
+#define SP_VSPARAMREG_TOTALVSOUTVAR 20
+#define SP_VSVPCDSTREG_OUTLOC0 0
+#define TPL1_TPTEXOFFSETREG_BASETABLEPTR 16
+#define TPL1_TPTEXOFFSETREG_MEMOBJOFFSET 8
+#define TPL1_TPTEXOFFSETREG_SAMPLEROFFSET 0
+#define UCHE_INVALIDATE1REG_OPCODE 0x1C
+#define UCHE_INVALIDATE1REG_ALLORPORTION 0x1F
+#define VFD_BASEADDR_BASEADDR 0
+#define VFD_CTRLREG0_PACKETSIZE 18
+#define VFD_CTRLREG0_STRMDECINSTRCNT 22
+#define VFD_CTRLREG0_STRMFETCHINSTRCNT 27
+#define VFD_CTRLREG0_TOTALATTRTOVS 0
+#define VFD_CTRLREG1_MAXSTORAGE 0
+#define VFD_CTRLREG1_REGID4INST 24
+#define VFD_CTRLREG1_REGID4VTX 16
+#define VFD_DECODEINSTRUCTIONS_CONSTFILL 4
+#define VFD_DECODEINSTRUCTIONS_FORMAT 6
+#define VFD_DECODEINSTRUCTIONS_LASTCOMPVALID 29
+#define VFD_DECODEINSTRUCTIONS_REGID 12
+#define VFD_DECODEINSTRUCTIONS_SHIFTCNT 24
+#define VFD_DECODEINSTRUCTIONS_SWITCHNEXT 30
+#define VFD_DECODEINSTRUCTIONS_WRITEMASK 0
+#define VFD_FETCHINSTRUCTIONS_BUFSTRIDE 7
+#define VFD_FETCHINSTRUCTIONS_FETCHSIZE 0
+#define VFD_FETCHINSTRUCTIONS_INDEXDECODE 18
+#define VFD_FETCHINSTRUCTIONS_STEPRATE 24
+#define VFD_FETCHINSTRUCTIONS_SWITCHNEXT 17
+#define VFD_THREADINGTHRESHOLD_REGID_VTXCNT 8
+#define VFD_THREADINGTHRESHOLD_REGID_THRESHOLD 0
+#define VFD_THREADINGTHRESHOLD_RESERVED6 4
+#define VPC_VPCATTR_LMSIZE 28
+#define VPC_VPCATTR_THRHDASSIGN 12
+#define VPC_VPCATTR_TOTALATTR 0
+#define VPC_VPCPACK_NUMFPNONPOSVAR 8
+#define VPC_VPCPACK_NUMNONPOSVSVAR 16
+#define VPC_VPCVARPSREPLMODE_COMPONENT08 0
+#define VPC_VPCVARPSREPLMODE_COMPONENT09 2
+#define VPC_VPCVARPSREPLMODE_COMPONENT0A 4
+#define VPC_VPCVARPSREPLMODE_COMPONENT0B 6
+#define VPC_VPCVARPSREPLMODE_COMPONENT0C 8
+#define VPC_VPCVARPSREPLMODE_COMPONENT0D 10
+#define VPC_VPCVARPSREPLMODE_COMPONENT0E 12
+#define VPC_VPCVARPSREPLMODE_COMPONENT0F 14
+#define VPC_VPCVARPSREPLMODE_COMPONENT10 16
+#define VPC_VPCVARPSREPLMODE_COMPONENT11 18
+#define VPC_VPCVARPSREPLMODE_COMPONENT12 20
+#define VPC_VPCVARPSREPLMODE_COMPONENT13 22
+#define VPC_VPCVARPSREPLMODE_COMPONENT14 24
+#define VPC_VPCVARPSREPLMODE_COMPONENT15 26
+#define VPC_VPCVARPSREPLMODE_COMPONENT16 28
+#define VPC_VPCVARPSREPLMODE_COMPONENT17 30
+
+/* RBBM Debug bus block IDs */
+#define RBBM_BLOCK_ID_NONE 0x0
+#define RBBM_BLOCK_ID_CP 0x1
+#define RBBM_BLOCK_ID_RBBM 0x2
+#define RBBM_BLOCK_ID_VBIF 0x3
+#define RBBM_BLOCK_ID_HLSQ 0x4
+#define RBBM_BLOCK_ID_UCHE 0x5
+#define RBBM_BLOCK_ID_PC 0x8
+#define RBBM_BLOCK_ID_VFD 0x9
+#define RBBM_BLOCK_ID_VPC 0xa
+#define RBBM_BLOCK_ID_TSE 0xb
+#define RBBM_BLOCK_ID_RAS 0xc
+#define RBBM_BLOCK_ID_VSC 0xd
+#define RBBM_BLOCK_ID_SP_0 0x10
+#define RBBM_BLOCK_ID_SP_1 0x11
+#define RBBM_BLOCK_ID_SP_2 0x12
+#define RBBM_BLOCK_ID_SP_3 0x13
+#define RBBM_BLOCK_ID_TPL1_0 0x18
+#define RBBM_BLOCK_ID_TPL1_1 0x19
+#define RBBM_BLOCK_ID_TPL1_2 0x1a
+#define RBBM_BLOCK_ID_TPL1_3 0x1b
+#define RBBM_BLOCK_ID_RB_0 0x20
+#define RBBM_BLOCK_ID_RB_1 0x21
+#define RBBM_BLOCK_ID_RB_2 0x22
+#define RBBM_BLOCK_ID_RB_3 0x23
+#define RBBM_BLOCK_ID_MARB_0 0x28
+#define RBBM_BLOCK_ID_MARB_1 0x29
+#define RBBM_BLOCK_ID_MARB_2 0x2a
+#define RBBM_BLOCK_ID_MARB_3 0x2b
+
+/* RBBM_CLOCK_CTL default value */
+#define A305_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAA
+#define A305C_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAA
+#define A320_RBBM_CLOCK_CTL_DEFAULT 0xBFFFFFFF
+#define A330_RBBM_CLOCK_CTL_DEFAULT 0xBFFCFFFF
+#define A330v2_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAA
+#define A305B_RBBM_CLOCK_CTL_DEFAULT 0xAAAAAAAA
+
+#define A330_RBBM_GPR0_CTL_DEFAULT 0x00000000
+#define A330v2_RBBM_GPR0_CTL_DEFAULT 0x05515455
+
+/* COUNTABLE FOR SP PERFCOUNTER */
+#define SP_FS_FULL_ALU_INSTRUCTIONS 0x0E
+#define SP_ALU_ACTIVE_CYCLES 0x1D
+#define SP0_ICL1_MISSES 0x1A
+#define SP_FS_CFLOW_INSTRUCTIONS 0x0C
+
+/* VBIF PERFCOUNTER ENA/CLR values */
+#define VBIF_PERF_CNT_0 BIT(0)
+#define VBIF_PERF_CNT_1 BIT(1)
+#define VBIF_PERF_PWR_CNT_0 BIT(2)
+#define VBIF_PERF_PWR_CNT_1 BIT(3)
+#define VBIF_PERF_PWR_CNT_2 BIT(4)
+
+/* VBIF PERFCOUNTER SEL values */
+#define VBIF_PERF_CNT_0_SEL 0
+#define VBIF_PERF_CNT_0_SEL_MASK 0x7f
+#define VBIF_PERF_CNT_1_SEL 8
+#define VBIF_PERF_CNT_1_SEL_MASK 0x7f00
+
+/* VBIF countables */
+#define VBIF_DDR_TOTAL_CYCLES 110
+
+#endif
diff --git a/drivers/gpu/msm2/a4xx_reg.h b/drivers/gpu/msm2/a4xx_reg.h
new file mode 100644
index 0000000..56147f7
--- /dev/null
+++ b/drivers/gpu/msm2/a4xx_reg.h
@@ -0,0 +1,92 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _A4XX_REG_H
+#define _A4XX_REG_H
+
+/* RB registers */
+#define A4XX_RB_GMEM_BASE_ADDR 0xcc0
+
+/* RBBM registers */
+#define A4XX_RBBM_AHB_CMD 0x25
+#define A4XX_RBBM_SP_HYST_CNT 0x21
+#define A4XX_RBBM_AHB_CTL0 0x23
+#define A4XX_RBBM_AHB_CTL1 0x24
+#define A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x2b
+#define A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x2f
+#define A4XX_RBBM_AHB_ERROR_STATUS 0x18f
+#define A4XX_RBBM_STATUS 0x191
+#define A4XX_RBBM_INT_CLEAR_CMD 0x36
+#define A4XX_RBBM_INT_0_MASK 0x37
+#define A4XX_RBBM_INT_0_STATUS 0x17d
+#define A4XX_RBBM_PERFCTR_CTL 0x170
+#define A4XX_RBBM_PERFCTR_LOAD_CMD0 0x171
+#define A4XX_RBBM_PERFCTR_LOAD_CMD1 0x172
+#define A4XX_RBBM_PERFCTR_LOAD_CMD2 0x173
+#define A4XX_RBBM_GPU_BUSY_MASKED 0x17a
+#define A4XX_RBBM_PERFCTR_PWR_1_LO 0x168
+
+/* CP registers */
+#define A4XX_CP_SCRATCH_REG0 0x578
+#define A4XX_CP_SCRATCH_UMASK 0x228
+#define A4XX_CP_SCRATCH_ADDR 0x229
+#define A4XX_CP_RB_BASE 0x200
+#define A4XX_CP_RB_CNTL 0x201
+#define A4XX_CP_RB_WPTR 0x205
+#define A4XX_CP_RB_RPTR_ADDR 0x203
+#define A4XX_CP_RB_RPTR 0x204
+#define A4XX_CP_IB1_BASE 0x206
+#define A4XX_CP_IB1_BUFSZ 0x207
+#define A4XX_CP_IB2_BASE 0x208
+#define A4XX_CP_IB2_BUFSZ 0x209
+#define A4XX_CP_WFI_PEND_CTR 0x4d2
+#define A4XX_CP_ME_CNTL 0x22d
+#define A4XX_CP_ME_RAM_WADDR 0x225
+#define A4XX_CP_ME_RAM_RADDR 0x226
+#define A4XX_CP_ME_RAM_DATA 0x227
+#define A4XX_CP_PFP_UCODE_ADDR 0x223
+#define A4XX_CP_PFP_UCODE_DATA 0x224
+#define A4XX_CP_PROTECT_CTRL 0x250
+#define A4XX_CP_DEBUG 0x22e
+
+/* SP registers */
+#define A4XX_SP_VS_OBJ_START 0x22e1
+#define A4XX_SP_VS_PVT_MEM_ADDR 0x22e3
+#define A4XX_SP_FS_OBJ_START 0x22eb
+#define A4XX_SP_FS_PVT_MEM_ADDR 0x22ed
+
+/* VPC registers */
+#define A4XX_VPC_DEBUG_RAM_SEL 0xe60
+#define A4XX_VPC_DEBUG_RAM_READ 0xe61
+
+/* VSC registers */
+#define A4XX_VSC_SIZE_ADDRESS 0xc01
+#define A4XX_VSC_PIPE_DATA_ADDRESS_0 0xc10
+#define A4XX_VSC_PIPE_DATA_LENGTH_7 0xc1f
+
+/* VFD registers */
+#define A4XX_VFD_CONTROL_0 0x2200
+#define A4XX_VFD_FETCH_INSTR_0_0 0x220a
+#define A4XX_VFD_FETCH_INSTR_1_31 0x2287
+
+/* VBIF */
+#define A4XX_VBIF_ABIT_SORT 0x301c
+#define A4XX_VBIF_ABIT_SORT_CONF 0x301d
+#define A4XX_VBIF_GATE_OFF_WRREQ_EN 0x302a
+#define A4XX_VBIF_IN_RD_LIM_CONF0 0x302c
+#define A4XX_VBIF_IN_RD_LIM_CONF1 0x302d
+#define A4XX_VBIF_IN_WR_LIM_CONF0 0x3030
+#define A4XX_VBIF_IN_WR_LIM_CONF1 0x3031
+#define A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x3049
+
+#endif /* _A400_REG_H */
diff --git a/drivers/gpu/msm2/adreno.c b/drivers/gpu/msm2/adreno.c
new file mode 100644
index 0000000..72d7410
--- /dev/null
+++ b/drivers/gpu/msm2/adreno.c
@@ -0,0 +1,2891 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/msm_kgsl.h>
+#include <linux/delay.h>
+#include <linux/of_coresight.h>
+
+#include <mach/socinfo.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_iommu.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_trace.h"
+
+#include "a2xx_reg.h"
+#include "a3xx_reg.h"
+
+#define DRIVER_VERSION_MAJOR 3
+#define DRIVER_VERSION_MINOR 1
+
+/* Number of times to try hard reset */
+#define NUM_TIMES_RESET_RETRY 5
+
+/* Adreno MH arbiter config*/
+#define ADRENO_CFG_MHARB \
+ (0x10 \
+ | (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
+ | (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
+ | (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
+ | (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
+
+#define ADRENO_MMU_CONFIG \
+ (0x01 \
+ | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
+
+#define KGSL_LOG_LEVEL_DEFAULT 3
+
+static const struct kgsl_functable adreno_functable;
+
+static struct adreno_device device_3d0 = {
+ .dev = {
+ KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
+ .name = DEVICE_3D0_NAME,
+ .id = KGSL_DEVICE_3D0,
+ .mh = {
+ .mharb = ADRENO_CFG_MHARB,
+ /* Remove 1k boundary check in z470 to avoid a GPU
+ * hang. Notice that this solution won't work if
+ * both EBI and SMI are used
+ */
+ .mh_intf_cfg1 = 0x00032f07,
+ /* turn off memory protection unit by setting
+ acceptable physical address range to include
+ all pages. */
+ .mpu_base = 0x00000000,
+ .mpu_range = 0xFFFFF000,
+ },
+ .mmu = {
+ .config = ADRENO_MMU_CONFIG,
+ },
+ .pwrctrl = {
+ .irq_name = KGSL_3D0_IRQ,
+ },
+ .iomemname = KGSL_3D0_REG_MEMORY,
+ .shadermemname = KGSL_3D0_SHADER_MEMORY,
+ .ftbl = &adreno_functable,
+ .cmd_log = KGSL_LOG_LEVEL_DEFAULT,
+ .ctxt_log = KGSL_LOG_LEVEL_DEFAULT,
+ .drv_log = KGSL_LOG_LEVEL_DEFAULT,
+ .mem_log = KGSL_LOG_LEVEL_DEFAULT,
+ .pwr_log = KGSL_LOG_LEVEL_DEFAULT,
+ .pm_dump_enable = 0,
+ },
+ .gmem_base = 0,
+ .gmem_size = SZ_256K,
+ .pfp_fw = NULL,
+ .pm4_fw = NULL,
+ .wait_timeout = 0, /* in milliseconds, 0 means disabled */
+ .ib_check_level = 0,
+ .ft_policy = KGSL_FT_DEFAULT_POLICY,
+ .ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
+ .fast_hang_detect = 1,
+ .long_ib_detect = 1,
+};
+
+unsigned int ft_detect_regs[FT_DETECT_REGS_COUNT];
+
+/*
+ * This is the master list of all GPU cores that are supported by this
+ * driver.
+ */
+
+#define ANY_ID (~0)
+#define NO_VER (~0)
+
+static const struct {
+ enum adreno_gpurev gpurev;
+ unsigned int core, major, minor, patchid;
+ const char *pm4fw;
+ const char *pfpfw;
+ struct adreno_gpudev *gpudev;
+ unsigned int istore_size;
+ unsigned int pix_shader_start;
+ /* Size of an instruction in dwords */
+ unsigned int instruction_size;
+ /* size of gmem for gpu*/
+ unsigned int gmem_size;
+ /* version of pm4 microcode that supports sync_lock
+ between CPU and GPU for IOMMU-v0 programming */
+ unsigned int sync_lock_pm4_ver;
+ /* version of pfp microcode that supports sync_lock
+ between CPU and GPU for IOMMU-v0 programming */
+ unsigned int sync_lock_pfp_ver;
+ /* PM4 jump table index */
+ unsigned int pm4_jt_idx;
+ /* PM4 jump table load addr */
+ unsigned int pm4_jt_addr;
+ /* PFP jump table index */
+ unsigned int pfp_jt_idx;
+ /* PFP jump table load addr */
+ unsigned int pfp_jt_addr;
+
+} adreno_gpulist[] = {
+ { ADRENO_REV_A200, 0, 2, ANY_ID, ANY_ID,
+ "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
+ 512, 384, 3, SZ_256K, NO_VER, NO_VER },
+ { ADRENO_REV_A203, 0, 1, 1, ANY_ID,
+ "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
+ 512, 384, 3, SZ_256K, NO_VER, NO_VER },
+ { ADRENO_REV_A205, 0, 1, 0, ANY_ID,
+ "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
+ 512, 384, 3, SZ_256K, NO_VER, NO_VER },
+ { ADRENO_REV_A220, 2, 1, ANY_ID, ANY_ID,
+ "leia_pm4_470.fw", "leia_pfp_470.fw", &adreno_a2xx_gpudev,
+ 512, 384, 3, SZ_512K, NO_VER, NO_VER },
+ /*
+ * patchlevel 5 (8960v2) needs special pm4 firmware to work around
+ * a hardware problem.
+ */
+ { ADRENO_REV_A225, 2, 2, 0, 5,
+ "a225p5_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
+ 1536, 768, 3, SZ_512K, NO_VER, NO_VER },
+ { ADRENO_REV_A225, 2, 2, 0, 6,
+ "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
+ 1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
+ { ADRENO_REV_A225, 2, 2, ANY_ID, ANY_ID,
+ "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
+ 1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
+ /* A3XX doesn't use the pix_shader_start */
+ { ADRENO_REV_A305, 3, 0, 5, 0,
+ "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_256K, 0x3FF037, 0x3FF016 },
+ /* A3XX doesn't use the pix_shader_start */
+ { ADRENO_REV_A320, 3, 2, ANY_ID, ANY_ID,
+ "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_512K, 0x3FF037, 0x3FF016 },
+ { ADRENO_REV_A330, 3, 3, 0, ANY_ID,
+ "a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_1M, NO_VER, NO_VER, 0x8AD, 0x2E4, 0x201, 0x200 },
+ { ADRENO_REV_A305B, 3, 0, 5, 0x10,
+ "a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_128K, NO_VER, NO_VER, 0x8AD, 0x2E4,
+ 0x201, 0x200 },
+ /* 8226v2 */
+ { ADRENO_REV_A305B, 3, 0, 5, 0x12,
+ "a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_128K, NO_VER, NO_VER, 0x8AD, 0x2E4,
+ 0x201, 0x200 },
+ { ADRENO_REV_A305C, 3, 0, 5, 0x20,
+ "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
+ { ADRENO_REV_A420, 4, 2, 0, ANY_ID,
+ "a420_pm4.fw", "a420_pfp.fw", &adreno_a4xx_gpudev,
+ 512, 0, 2, (SZ_1M + SZ_512K), NO_VER, NO_VER },
+};
+
+/**
+ * adreno_perfcounter_init: Reserve kernel performance counters
+ * @device: device to configure
+ *
+ * The kernel needs/wants a certain group of performance counters for
+ * its own activities. Reserve these performance counters at init time
+ * to ensure that they are always reserved for the kernel. The performance
+ * counters used by the kernel can be obtained by the user, but these
+ * performance counters will remain active as long as the device is alive.
+ */
+
+static int adreno_perfcounter_init(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (adreno_dev->gpudev->perfcounter_init)
+ return adreno_dev->gpudev->perfcounter_init(adreno_dev);
+ return 0;
+};
+
+/**
+ * adreno_perfcounter_close: Release counters initialized by
+ * adreno_perfcounter_init
+ * @device: device to realease counters for
+ *
+ */
+static void adreno_perfcounter_close(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ if (adreno_dev->gpudev->perfcounter_close)
+ return adreno_dev->gpudev->perfcounter_close(adreno_dev);
+}
+
+/**
+ * adreno_perfcounter_start: Enable performance counters
+ * @adreno_dev: Adreno device to configure
+ *
+ * Ensure all performance counters are enabled that are allocated. Since
+ * the device was most likely stopped, we can't trust that the counters
+ * are still valid so make it so.
+ * Returns 0 on success else error code
+ */
+
+static int adreno_perfcounter_start(struct adreno_device *adreno_dev)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ unsigned int i, j;
+ int ret = 0;
+
+ if (NULL == counters)
+ return 0;
+ /* group id iter */
+ for (i = 0; i < counters->group_count; i++) {
+ group = &(counters->groups[i]);
+
+ /* countable iter */
+ for (j = 0; j < group->reg_count; j++) {
+ if (group->regs[j].countable ==
+ KGSL_PERFCOUNTER_NOT_USED ||
+ group->regs[j].countable ==
+ KGSL_PERFCOUNTER_BROKEN)
+ continue;
+
+ if (adreno_dev->gpudev->perfcounter_enable)
+ ret = adreno_dev->gpudev->perfcounter_enable(
+ adreno_dev, i, j,
+ group->regs[j].countable);
+ if (ret)
+ goto done;
+ }
+ }
+done:
+ return ret;
+}
+
+/**
+ * adreno_perfcounter_read_group: Determine which countables are in counters
+ * @adreno_dev: Adreno device to configure
+ * @reads: List of kgsl_perfcounter_read_groups
+ * @count: Length of list
+ *
+ * Read the performance counters for the groupid/countable pairs and return
+ * the 64 bit result for each pair
+ */
+
+int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
+ struct kgsl_perfcounter_read_group *reads, unsigned int count)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ struct kgsl_perfcounter_read_group *list = NULL;
+ unsigned int i, j;
+ int ret = 0;
+
+ if (NULL == counters)
+ return -EINVAL;
+
+ /* sanity check for later */
+ if (!adreno_dev->gpudev->perfcounter_read)
+ return -EINVAL;
+
+ /* sanity check params passed in */
+ if (reads == NULL || count == 0 || count > 100)
+ return -EINVAL;
+
+
+ list = kmalloc(sizeof(struct kgsl_perfcounter_read_group) * count,
+ GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ if (copy_from_user(list, reads,
+ sizeof(struct kgsl_perfcounter_read_group) * count)) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ /* verify valid inputs group ids and countables */
+ for (i = 0; i < count; i++) {
+ if (list[i].groupid >= counters->group_count)
+ return -EINVAL;
+ }
+
+ /* list iterator */
+ for (j = 0; j < count; j++) {
+ list[j].value = 0;
+
+ group = &(counters->groups[list[j].groupid]);
+
+ /* group/counter iterator */
+ for (i = 0; i < group->reg_count; i++) {
+ if (group->regs[i].countable == list[j].countable) {
+ list[j].value =
+ adreno_dev->gpudev->perfcounter_read(
+ adreno_dev, list[j].groupid, i);
+ break;
+ }
+ }
+ }
+
+ /* write the data */
+ if (copy_to_user(reads, list,
+ sizeof(struct kgsl_perfcounter_read_group) *
+ count) != 0)
+ ret = -EFAULT;
+
+done:
+ kfree(list);
+ return ret;
+}
+
+/**
+ * adreno_perfcounter_query_group: Determine which countables are in counters
+ * @adreno_dev: Adreno device to configure
+ * @groupid: Desired performance counter group
+ * @countables: Return list of all countables in the groups counters
+ * @count: Max length of the array
+ * @max_counters: max counters for the groupid
+ *
+ * Query the current state of counters for the group.
+ */
+
+int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
+ unsigned int groupid, unsigned int *countables, unsigned int count,
+ unsigned int *max_counters)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ unsigned int i;
+
+ *max_counters = 0;
+
+ if (NULL == counters)
+ return -EINVAL;
+
+ if (groupid >= counters->group_count)
+ return -EINVAL;
+
+ group = &(counters->groups[groupid]);
+ *max_counters = group->reg_count;
+
+ /*
+ * if NULL countable or *count of zero, return max reg_count in
+ * *max_counters and return success
+ */
+ if (countables == NULL || count == 0)
+ return 0;
+
+ /*
+ * Go through all available counters. Write upto *count * countable
+ * values.
+ */
+ for (i = 0; i < group->reg_count && i < count; i++) {
+ if (copy_to_user(&countables[i], &(group->regs[i].countable),
+ sizeof(unsigned int)) != 0)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/**
+ * adreno_perfcounter_get: Try to put a countable in an available counter
+ * @adreno_dev: Adreno device to configure
+ * @groupid: Desired performance counter group
+ * @countable: Countable desired to be in a counter
+ * @offset: Return offset of the countable
+ * @flags: Used to setup kernel perf counters
+ *
+ * Try to place a countable in an available counter. If the countable is
+ * already in a counter, reference count the counter/countable pair resource
+ * and return success
+ */
+
+int adreno_perfcounter_get(struct adreno_device *adreno_dev,
+ unsigned int groupid, unsigned int countable, unsigned int *offset,
+ unsigned int flags)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+ unsigned int i, empty = -1;
+ int ret = 0;
+
+ /* always clear return variables */
+ if (offset)
+ *offset = 0;
+
+ if (NULL == counters)
+ return -EINVAL;
+
+ if (groupid >= counters->group_count)
+ return -EINVAL;
+
+ group = &(counters->groups[groupid]);
+
+ /*
+ * Check if the countable is already associated with a counter.
+ * Refcount and return the offset, otherwise, try and find an empty
+ * counter and assign the countable to it.
+ */
+ for (i = 0; i < group->reg_count; i++) {
+ if (group->regs[i].countable == countable) {
+ /* Countable already associated with counter */
+ if (flags & PERFCOUNTER_FLAG_KERNEL)
+ group->regs[i].kernelcount++;
+ else
+ group->regs[i].usercount++;
+
+ if (offset)
+ *offset = group->regs[i].offset;
+ return 0;
+ } else if (group->regs[i].countable ==
+ KGSL_PERFCOUNTER_NOT_USED) {
+ /* keep track of unused counter */
+ empty = i;
+ }
+ }
+
+ /* no available counters, so do nothing else */
+ if (empty == -1)
+ return -EBUSY;
+
+ /* enable the new counter */
+ ret = adreno_dev->gpudev->perfcounter_enable(adreno_dev, groupid, empty,
+ countable);
+ if (ret)
+ return ret;
+ /* initialize the new counter */
+ group->regs[empty].countable = countable;
+
+ /* set initial kernel and user count */
+ if (flags & PERFCOUNTER_FLAG_KERNEL) {
+ group->regs[empty].kernelcount = 1;
+ group->regs[empty].usercount = 0;
+ } else {
+ group->regs[empty].kernelcount = 0;
+ group->regs[empty].usercount = 1;
+ }
+
+ if (offset)
+ *offset = group->regs[empty].offset;
+
+ return ret;
+}
+
+
+/**
+ * adreno_perfcounter_put: Release a countable from counter resource
+ * @adreno_dev: Adreno device to configure
+ * @groupid: Desired performance counter group
+ * @countable: Countable desired to be freed from a counter
+ * @flags: Flag to determine if kernel or user space request
+ *
+ * Put a performance counter/countable pair that was previously received. If
+ * noone else is using the countable, free up the counter for others.
+ */
+int adreno_perfcounter_put(struct adreno_device *adreno_dev,
+ unsigned int groupid, unsigned int countable, unsigned int flags)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_group *group;
+
+ unsigned int i;
+
+ if (NULL == counters)
+ return -EINVAL;
+
+ if (groupid >= counters->group_count)
+ return -EINVAL;
+
+ group = &(counters->groups[groupid]);
+
+ /*
+ * Find if the counter/countable pair is used currently.
+ * Start cycling through registers in the bank.
+ */
+ for (i = 0; i < group->reg_count; i++) {
+ /* check if countable assigned is what we are looking for */
+ if (group->regs[i].countable == countable) {
+ /* found pair, book keep count based on request type */
+ if (flags & PERFCOUNTER_FLAG_KERNEL &&
+ group->regs[i].kernelcount > 0)
+ group->regs[i].kernelcount--;
+ else if (group->regs[i].usercount > 0)
+ group->regs[i].usercount--;
+ else
+ break;
+
+ /* mark available if not used anymore */
+ if (group->regs[i].kernelcount == 0 &&
+ group->regs[i].usercount == 0)
+ group->regs[i].countable =
+ KGSL_PERFCOUNTER_NOT_USED;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ return adreno_dev->gpudev->irq_handler(adreno_dev);
+}
+
+static void adreno_cleanup_pt(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
+
+ kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
+
+ kgsl_mmu_unmap(pagetable, &device->memstore);
+
+ kgsl_mmu_unmap(pagetable, &adreno_dev->pwron_fixup);
+
+ kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
+}
+
+static int adreno_setup_pt(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable)
+{
+ int result;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc);
+
+ if (!result)
+ result = kgsl_mmu_map_global(pagetable, &rb->memptrs_desc);
+
+ if (!result)
+ result = kgsl_mmu_map_global(pagetable, &device->memstore);
+
+ if (!result)
+ result = kgsl_mmu_map_global(pagetable,
+ &adreno_dev->pwron_fixup);
+
+
+ if (!result)
+ result = kgsl_mmu_map_global(pagetable,
+ &device->mmu.setstate_memory);
+
+ if (result) {
+ /* On error clean up what we have wrought */
+ adreno_cleanup_pt(device, pagetable);
+ return result;
+ }
+
+ /*
+ * Set the mpu end to the last "normal" global memory we use.
+ * For the IOMMU, this will be used to restrict access to the
+ * mapped registers.
+ */
+ device->mh.mpu_range = device->mmu.setstate_memory.gpuaddr +
+ device->mmu.setstate_memory.size;
+
+ return 0;
+}
+
+static unsigned int _adreno_iommu_setstate_v0(struct kgsl_device *device,
+ unsigned int *cmds_orig,
+ phys_addr_t pt_val,
+ int num_iommu_units, uint32_t flags)
+{
+ phys_addr_t reg_pt_val;
+ unsigned int *cmds = cmds_orig;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int i;
+
+ if (cpu_is_msm8960())
+ cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ else
+ cmds += adreno_add_bank_change_cmds(cmds,
+ KGSL_IOMMU_CONTEXT_USER,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ /* Acquire GPU-CPU sync Lock here */
+ cmds += kgsl_mmu_sync_lock(&device->mmu, cmds);
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ /*
+ * We need to perfrom the following operations for all
+ * IOMMU units
+ */
+ for (i = 0; i < num_iommu_units; i++) {
+ reg_pt_val = kgsl_mmu_get_default_ttbr0(&device->mmu,
+ i, KGSL_IOMMU_CONTEXT_USER);
+ reg_pt_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+ reg_pt_val |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
+ /*
+ * Set address of the new pagetable by writng to IOMMU
+ * TTBR0 register
+ */
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER, KGSL_IOMMU_CTX_TTBR0);
+ *cmds++ = reg_pt_val;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ /*
+ * Read back the ttbr0 register as a barrier to ensure
+ * above writes have completed
+ */
+ cmds += adreno_add_read_cmds(device, cmds,
+ kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER, KGSL_IOMMU_CTX_TTBR0),
+ reg_pt_val,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ }
+ }
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ /*
+ * tlb flush
+ */
+ for (i = 0; i < num_iommu_units; i++) {
+ reg_pt_val = (pt_val + kgsl_mmu_get_default_ttbr0(
+ &device->mmu,
+ i, KGSL_IOMMU_CONTEXT_USER));
+ reg_pt_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+ reg_pt_val |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
+
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TLBIALL);
+ *cmds++ = 1;
+
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ cmds += adreno_add_read_cmds(device, cmds,
+ kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TTBR0),
+ reg_pt_val,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ }
+ }
+
+ /* Release GPU-CPU sync Lock here */
+ cmds += kgsl_mmu_sync_unlock(&device->mmu, cmds);
+
+ if (cpu_is_msm8960())
+ cmds += adreno_add_change_mh_phys_limit_cmds(cmds,
+ kgsl_mmu_get_reg_gpuaddr(&device->mmu, 0,
+ 0, KGSL_IOMMU_GLOBAL_BASE),
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ else
+ cmds += adreno_add_bank_change_cmds(cmds,
+ KGSL_IOMMU_CONTEXT_PRIV,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ return cmds - cmds_orig;
+}
+
+static unsigned int _adreno_iommu_setstate_v1(struct kgsl_device *device,
+ unsigned int *cmds_orig,
+ phys_addr_t pt_val,
+ int num_iommu_units, uint32_t flags)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ phys_addr_t ttbr0_val;
+ unsigned int reg_pt_val;
+ unsigned int *cmds = cmds_orig;
+ int i;
+ unsigned int ttbr0, tlbiall, tlbstatus, tlbsync, mmu_ctrl;
+
+ for (i = 0; i < num_iommu_units; i++) {
+ ttbr0_val = kgsl_mmu_get_default_ttbr0(&device->mmu,
+ i, KGSL_IOMMU_CONTEXT_USER);
+ ttbr0_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+ ttbr0_val |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ mmu_ctrl = kgsl_mmu_get_reg_ahbaddr(
+ &device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL) >> 2;
+
+ ttbr0 = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TTBR0) >> 2;
+
+ if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+ /*
+ * glue commands together until next
+ * WAIT_FOR_ME
+ */
+ cmds += adreno_wait_reg_eq(cmds,
+ adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_WFI_PEND_CTR),
+ 1, 0xFFFFFFFF, 0xF);
+
+ /* set the iommu lock bit */
+ *cmds++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmds++ = mmu_ctrl;
+ /* AND to unmask the lock bit */
+ *cmds++ =
+ ~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
+ /* OR to set the IOMMU lock bit */
+ *cmds++ =
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT;
+ /* wait for smmu to lock */
+ cmds += adreno_wait_reg_eq(cmds, mmu_ctrl,
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE,
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE, 0xF);
+ }
+ /* set ttbr0 */
+ if (sizeof(phys_addr_t) > sizeof(unsigned long)) {
+ reg_pt_val = ttbr0_val & 0xFFFFFFFF;
+ *cmds++ = cp_type0_packet(ttbr0, 1);
+ *cmds++ = reg_pt_val;
+ reg_pt_val = (unsigned int)
+ ((ttbr0_val & 0xFFFFFFFF00000000ULL) >> 32);
+ *cmds++ = cp_type0_packet(ttbr0 + 1, 1);
+ *cmds++ = reg_pt_val;
+ } else {
+ reg_pt_val = ttbr0_val;
+ *cmds++ = cp_type0_packet(ttbr0, 1);
+ *cmds++ = reg_pt_val;
+ }
+ if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
+ /* unlock the IOMMU lock */
+ *cmds++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmds++ = mmu_ctrl;
+ /* AND to unmask the lock bit */
+ *cmds++ =
+ ~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
+ /* OR with 0 so lock bit is unset */
+ *cmds++ = 0;
+ /* release all commands with wait_for_me */
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
+ *cmds++ = 0;
+ }
+ }
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ tlbiall = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TLBIALL) >> 2;
+ *cmds++ = cp_type0_packet(tlbiall, 1);
+ *cmds++ = 1;
+
+ tlbsync = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TLBSYNC) >> 2;
+ *cmds++ = cp_type0_packet(tlbsync, 1);
+ *cmds++ = 0;
+
+ tlbstatus = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TLBSTATUS) >> 2;
+ cmds += adreno_wait_reg_eq(cmds, tlbstatus, 0,
+ KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE, 0xF);
+ }
+ }
+ return cmds - cmds_orig;
+}
+
+/**
+ * adreno_use_default_setstate() - Use CPU instead of the GPU to manage the mmu?
+ * @adreno_dev: the device
+ *
+ * In many cases it is preferable to poke the iommu or gpummu directly rather
+ * than using the GPU command stream. If we are idle or trying to go to a low
+ * power state, using the command stream will be slower and asynchronous, which
+ * needlessly complicates the power state transitions. Additionally,
+ * the hardware simulators do not support command stream MMU operations so
+ * the command stream can never be used if we are capturing CFF data.
+ *
+ */
+static bool adreno_use_default_setstate(struct adreno_device *adreno_dev)
+{
+ return (adreno_isidle(&adreno_dev->dev) ||
+ KGSL_STATE_ACTIVE != adreno_dev->dev.state ||
+ atomic_read(&adreno_dev->dev.active_cnt) == 0 ||
+ adreno_dev->dev.cff_dump_enable);
+}
+
+static int adreno_iommu_setstate(struct kgsl_device *device,
+ unsigned int context_id,
+ uint32_t flags)
+{
+ phys_addr_t pt_val;
+ unsigned int link[230];
+ unsigned int *cmds = &link[0];
+ int sizedwords = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int num_iommu_units;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_ctx = NULL;
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned int result;
+
+ if (adreno_use_default_setstate(adreno_dev)) {
+ kgsl_mmu_device_setstate(&device->mmu, flags);
+ return 0;
+ }
+ num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
+
+ context = kgsl_context_get(device, context_id);
+ if (context == NULL) {
+ kgsl_mmu_device_setstate(&device->mmu, KGSL_CONTEXT_INVALID);
+ return -EINVAL;
+ }
+
+ adreno_ctx = ADRENO_CONTEXT(context);
+
+ result = kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER);
+ if (result)
+ goto done;
+
+ pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
+ device->mmu.hwpagetable);
+
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ /* naming mismatch */
+ if (msm_soc_version_supports_iommu_v1())
+ cmds += _adreno_iommu_setstate_v0(device, cmds, pt_val,
+ num_iommu_units, flags);
+ else
+ cmds += _adreno_iommu_setstate_v1(device, cmds, pt_val,
+ num_iommu_units, flags);
+
+ sizedwords += (cmds - &link[0]);
+ if (sizedwords == 0) {
+ KGSL_DRV_ERR(device, "no commands generated\n");
+ BUG();
+ }
+ /* invalidate all base pointers */
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x7fff;
+ sizedwords += 2;
+
+ if (sizedwords > (sizeof(link)/sizeof(unsigned int))) {
+ KGSL_DRV_ERR(device, "Temp command buffer overflow\n");
+ BUG();
+ }
+ /*
+ * This returns the per context timestamp but we need to
+ * use the global timestamp for iommu clock disablement
+ */
+ adreno_ringbuffer_issuecmds(device, adreno_ctx, KGSL_CMD_FLAGS_PMODE,
+ &link[0], sizedwords);
+
+ kgsl_mmu_disable_clk_on_ts(&device->mmu,
+ rb->global_ts, true);
+
+done:
+ kgsl_context_put(context);
+ return result;
+}
+
+static int adreno_gpummu_setstate(struct kgsl_device *device,
+ unsigned int context_id,
+ uint32_t flags)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int link[32];
+ unsigned int *cmds = &link[0];
+ int sizedwords = 0;
+ unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
+ struct kgsl_context *context;
+ struct adreno_context *adreno_ctx = NULL;
+ int ret = 0;
+
+ /*
+ * Fix target freeze issue by adding TLB flush for each submit
+ * on A20X based targets.
+ */
+ if (adreno_is_a20x(adreno_dev))
+ flags |= KGSL_MMUFLAGS_TLBFLUSH;
+ /*
+ * If possible, then set the state via the command stream to avoid
+ * a CPU idle. Otherwise, use the default setstate which uses register
+ * writes For CFF dump we must idle and use the registers so that it is
+ * easier to filter out the mmu accesses from the dump
+ */
+ if (!adreno_use_default_setstate(adreno_dev)) {
+ context = kgsl_context_get(device, context_id);
+ if (context == NULL)
+ return -EINVAL;
+ adreno_ctx = ADRENO_CONTEXT(context);
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ /* wait for graphics pipe to be idle */
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ /* set page table base */
+ *cmds++ = cp_type0_packet(MH_MMU_PT_BASE, 1);
+ *cmds++ = kgsl_mmu_get_pt_base_addr(&device->mmu,
+ device->mmu.hwpagetable);
+ sizedwords += 4;
+ }
+
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) {
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE,
+ 1);
+ *cmds++ = 0x00000000;
+ sizedwords += 2;
+ }
+ *cmds++ = cp_type0_packet(MH_MMU_INVALIDATE, 1);
+ *cmds++ = mh_mmu_invalidate;
+ sizedwords += 2;
+ }
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE &&
+ adreno_is_a20x(adreno_dev)) {
+ /* HW workaround: to resolve MMU page fault interrupts
+ * caused by the VGT.It prevents the CP PFP from filling
+ * the VGT DMA request fifo too early,thereby ensuring
+ * that the VGT will not fetch vertex/bin data until
+ * after the page table base register has been updated.
+ *
+ * Two null DRAW_INDX_BIN packets are inserted right
+ * after the page table base update, followed by a
+ * wait for idle. The null packets will fill up the
+ * VGT DMA request fifo and prevent any further
+ * vertex/bin updates from occurring until the wait
+ * has finished. */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = (0x4 << 16) |
+ (REG_PA_SU_SC_MODE_CNTL - 0x2000);
+ *cmds++ = 0; /* disable faceness generation */
+ *cmds++ = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
+ *cmds++ = device->mmu.setstate_memory.gpuaddr;
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
+ *cmds++ = 0; /* viz query info */
+ *cmds++ = 0x0003C004; /* draw indicator */
+ *cmds++ = 0; /* bin base */
+ *cmds++ = 3; /* bin size */
+ *cmds++ =
+ device->mmu.setstate_memory.gpuaddr; /* dma base */
+ *cmds++ = 6; /* dma size */
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
+ *cmds++ = 0; /* viz query info */
+ *cmds++ = 0x0003C004; /* draw indicator */
+ *cmds++ = 0; /* bin base */
+ *cmds++ = 3; /* bin size */
+ /* dma base */
+ *cmds++ = device->mmu.setstate_memory.gpuaddr;
+ *cmds++ = 6; /* dma size */
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ sizedwords += 21;
+ }
+
+
+ if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) {
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x7fff; /* invalidate all base pointers */
+ sizedwords += 2;
+ }
+
+ ret = adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ KGSL_CMD_FLAGS_PMODE,
+ &link[0], sizedwords);
+
+ kgsl_context_put(context);
+ } else {
+ kgsl_mmu_device_setstate(&device->mmu, flags);
+ }
+
+ return ret;
+}
+
+static int adreno_setstate(struct kgsl_device *device,
+ unsigned int context_id,
+ uint32_t flags)
+{
+ /* call the mmu specific handler */
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
+ return adreno_gpummu_setstate(device, context_id, flags);
+ else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ return adreno_iommu_setstate(device, context_id, flags);
+
+ return 0;
+}
+
+static unsigned int
+a3xx_getchipid(struct kgsl_device *device)
+{
+ struct kgsl_device_platform_data *pdata =
+ kgsl_device_get_drvdata(device);
+
+ /*
+ * All current A3XX chipids are detected at the SOC level. Leave this
+ * function here to support any future GPUs that have working
+ * chip ID registers
+ */
+
+ return pdata->chipid;
+}
+
+static unsigned int
+a2xx_getchipid(struct kgsl_device *device)
+{
+ unsigned int chipid = 0;
+ unsigned int coreid, majorid, minorid, patchid, revid;
+ struct kgsl_device_platform_data *pdata =
+ kgsl_device_get_drvdata(device);
+
+ /* If the chip id is set at the platform level, then just use that */
+
+ if (pdata->chipid != 0)
+ return pdata->chipid;
+
+ kgsl_regread(device, REG_RBBM_PERIPHID1, &coreid);
+ kgsl_regread(device, REG_RBBM_PERIPHID2, &majorid);
+ kgsl_regread(device, REG_RBBM_PATCH_RELEASE, &revid);
+
+ /*
+ * adreno 22x gpus are indicated by coreid 2,
+ * but REG_RBBM_PERIPHID1 always contains 0 for this field
+ */
+ if (cpu_is_msm8x60())
+ chipid = 2 << 24;
+ else
+ chipid = (coreid & 0xF) << 24;
+
+ chipid |= ((majorid >> 4) & 0xF) << 16;
+
+ minorid = ((revid >> 0) & 0xFF);
+
+ patchid = ((revid >> 16) & 0xFF);
+
+ /* 8x50 returns 0 for patch release, but it should be 1 */
+ /* 8x25 returns 0 for minor id, but it should be 1 */
+ if (cpu_is_qsd8x50())
+ patchid = 1;
+ else if (cpu_is_msm8625() && minorid == 0)
+ minorid = 1;
+
+ chipid |= (minorid << 8) | patchid;
+
+ return chipid;
+}
+
+static unsigned int
+adreno_getchipid(struct kgsl_device *device)
+{
+ struct kgsl_device_platform_data *pdata =
+ kgsl_device_get_drvdata(device);
+
+ /*
+ * All A3XX chipsets will have pdata set, so assume !pdata->chipid is
+ * an A2XX processor
+ */
+
+ if (pdata->chipid == 0 || ADRENO_CHIPID_MAJOR(pdata->chipid) == 2)
+ return a2xx_getchipid(device);
+ else
+ return a3xx_getchipid(device);
+}
+
+static inline bool _rev_match(unsigned int id, unsigned int entry)
+{
+ return (entry == ANY_ID || entry == id);
+}
+
+static void
+adreno_identify_gpu(struct adreno_device *adreno_dev)
+{
+ unsigned int i, core, major, minor, patchid;
+
+ adreno_dev->chip_id = adreno_getchipid(&adreno_dev->dev);
+
+ core = ADRENO_CHIPID_CORE(adreno_dev->chip_id);
+ major = ADRENO_CHIPID_MAJOR(adreno_dev->chip_id);
+ minor = ADRENO_CHIPID_MINOR(adreno_dev->chip_id);
+ patchid = ADRENO_CHIPID_PATCH(adreno_dev->chip_id);
+
+ for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
+ if (core == adreno_gpulist[i].core &&
+ _rev_match(major, adreno_gpulist[i].major) &&
+ _rev_match(minor, adreno_gpulist[i].minor) &&
+ _rev_match(patchid, adreno_gpulist[i].patchid))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(adreno_gpulist)) {
+ adreno_dev->gpurev = ADRENO_REV_UNKNOWN;
+ return;
+ }
+
+ adreno_dev->gpurev = adreno_gpulist[i].gpurev;
+ adreno_dev->gpudev = adreno_gpulist[i].gpudev;
+ adreno_dev->pfp_fwfile = adreno_gpulist[i].pfpfw;
+ adreno_dev->pm4_fwfile = adreno_gpulist[i].pm4fw;
+ adreno_dev->istore_size = adreno_gpulist[i].istore_size;
+ adreno_dev->pix_shader_start = adreno_gpulist[i].pix_shader_start;
+ adreno_dev->instruction_size = adreno_gpulist[i].instruction_size;
+ adreno_dev->gmem_size = adreno_gpulist[i].gmem_size;
+ adreno_dev->pm4_jt_idx = adreno_gpulist[i].pm4_jt_idx;
+ adreno_dev->pm4_jt_addr = adreno_gpulist[i].pm4_jt_addr;
+ adreno_dev->pfp_jt_idx = adreno_gpulist[i].pfp_jt_idx;
+ adreno_dev->pfp_jt_addr = adreno_gpulist[i].pfp_jt_addr;
+ adreno_dev->gpulist_index = i;
+ /*
+ * Initialize uninitialzed gpu registers, only needs to be done once
+ * Make all offsets that are not initialized to ADRENO_REG_UNUSED
+ */
+ for (i = 0; i < ADRENO_REG_REGISTER_MAX; i++) {
+ if (adreno_dev->gpudev->reg_offsets->offset_0 != i &&
+ !adreno_dev->gpudev->reg_offsets->offsets[i]) {
+ adreno_dev->gpudev->reg_offsets->offsets[i] =
+ ADRENO_REG_UNUSED;
+ }
+ }
+}
+
+static struct platform_device_id adreno_id_table[] = {
+ { DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
+ {},
+};
+
+MODULE_DEVICE_TABLE(platform, adreno_id_table);
+
+static struct of_device_id adreno_match_table[] = {
+ { .compatible = "qcom,kgsl-3d0", },
+ {}
+};
+
+static inline int adreno_of_read_property(struct device_node *node,
+ const char *prop, unsigned int *ptr)
+{
+ int ret = of_property_read_u32(node, prop, ptr);
+ if (ret)
+ KGSL_CORE_ERR("Unable to read '%s'\n", prop);
+ return ret;
+}
+
+static struct device_node *adreno_of_find_subnode(struct device_node *parent,
+ const char *name)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(parent, child) {
+ if (of_device_is_compatible(child, name))
+ return child;
+ }
+
+ return NULL;
+}
+
+static int adreno_of_get_pwrlevels(struct device_node *parent,
+ struct kgsl_device_platform_data *pdata)
+{
+ struct device_node *node, *child;
+ int ret = -EINVAL;
+
+ node = adreno_of_find_subnode(parent, "qcom,gpu-pwrlevels");
+
+ if (node == NULL) {
+ KGSL_CORE_ERR("Unable to find 'qcom,gpu-pwrlevels'\n");
+ return -EINVAL;
+ }
+
+ pdata->num_levels = 0;
+
+ for_each_child_of_node(node, child) {
+ unsigned int index;
+ struct kgsl_pwrlevel *level;
+
+ if (adreno_of_read_property(child, "reg", &index))
+ goto done;
+
+ if (index >= KGSL_MAX_PWRLEVELS) {
+ KGSL_CORE_ERR("Pwrlevel index %d is out of range\n",
+ index);
+ continue;
+ }
+
+ if (index >= pdata->num_levels)
+ pdata->num_levels = index + 1;
+
+ level = &pdata->pwrlevel[index];
+
+ if (adreno_of_read_property(child, "qcom,gpu-freq",
+ &level->gpu_freq))
+ goto done;
+
+ if (adreno_of_read_property(child, "qcom,bus-freq",
+ &level->bus_freq))
+ goto done;
+
+ if (adreno_of_read_property(child, "qcom,io-fraction",
+ &level->io_fraction))
+ level->io_fraction = 0;
+ }
+
+ if (adreno_of_read_property(parent, "qcom,initial-pwrlevel",
+ &pdata->init_level))
+ pdata->init_level = 1;
+
+ if (adreno_of_read_property(parent, "qcom,step-pwrlevel",
+ &pdata->step_mul))
+ pdata->step_mul = 1;
+
+ if (pdata->init_level < 0 || pdata->init_level > pdata->num_levels) {
+ KGSL_CORE_ERR("Initial power level out of range\n");
+ pdata->init_level = 1;
+ }
+
+ ret = 0;
+done:
+ return ret;
+
+}
+
+static int adreno_of_get_iommu(struct device_node *parent,
+ struct kgsl_device_platform_data *pdata)
+{
+ struct device_node *node, *child;
+ struct kgsl_device_iommu_data *data = NULL;
+ struct kgsl_iommu_ctx *ctxs = NULL;
+ u32 reg_val[2];
+ int ctx_index = 0;
+
+ node = of_parse_phandle(parent, "iommu", 0);
+ if (node == NULL)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*data));
+ goto err;
+ }
+
+ if (of_property_read_u32_array(node, "reg", reg_val, 2))
+ goto err;
+
+ data->physstart = reg_val[0];
+ data->physend = data->physstart + reg_val[1] - 1;
+ data->iommu_halt_enable = of_property_read_bool(node,
+ "qcom,iommu-enable-halt");
+
+ data->iommu_ctx_count = 0;
+
+ for_each_child_of_node(node, child)
+ data->iommu_ctx_count++;
+
+ ctxs = kzalloc(data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx),
+ GFP_KERNEL);
+
+ if (ctxs == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx));
+ goto err;
+ }
+
+ for_each_child_of_node(node, child) {
+ int ret = of_property_read_string(child, "label",
+ &ctxs[ctx_index].iommu_ctx_name);
+
+ if (ret) {
+ KGSL_CORE_ERR("Unable to read KGSL IOMMU 'label'\n");
+ goto err;
+ }
+
+ ret = of_property_read_u32_array(child, "reg", reg_val, 2);
+ if (ret) {
+ KGSL_CORE_ERR("Unable to read KGSL IOMMU 'reg'\n");
+ goto err;
+ }
+ if (msm_soc_version_supports_iommu_v1())
+ ctxs[ctx_index].ctx_id = (reg_val[0] -
+ data->physstart) >> KGSL_IOMMU_CTX_SHIFT;
+ else
+ ctxs[ctx_index].ctx_id = ((reg_val[0] -
+ data->physstart) >> KGSL_IOMMU_CTX_SHIFT) - 8;
+
+ ctx_index++;
+ }
+
+ data->iommu_ctxs = ctxs;
+
+ pdata->iommu_data = data;
+ pdata->iommu_count = 1;
+
+ return 0;
+
+err:
+ kfree(ctxs);
+ kfree(data);
+
+ return -EINVAL;
+}
+
+static int adreno_of_get_pdata(struct platform_device *pdev)
+{
+ struct kgsl_device_platform_data *pdata = NULL;
+ struct kgsl_device *device;
+ int ret = -EINVAL;
+
+ pdev->id_entry = adreno_id_table;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata)
+ return 0;
+
+ if (of_property_read_string(pdev->dev.of_node, "label", &pdev->name)) {
+ KGSL_CORE_ERR("Unable to read 'label'\n");
+ goto err;
+ }
+
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,id", &pdev->id))
+ goto err;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (pdata == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*pdata));
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,chipid",
+ &pdata->chipid))
+ goto err;
+
+ /* pwrlevel Data */
+ ret = adreno_of_get_pwrlevels(pdev->dev.of_node, pdata);
+ if (ret)
+ goto err;
+
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
+ &pdata->idle_timeout))
+ pdata->idle_timeout = HZ/12;
+
+ pdata->strtstp_sleepwake = of_property_read_bool(pdev->dev.of_node,
+ "qcom,strtstp-sleepwake");
+
+ if (adreno_of_read_property(pdev->dev.of_node, "qcom,clk-map",
+ &pdata->clk_map))
+ goto err;
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+
+ if (device->id != KGSL_DEVICE_3D0)
+ goto err;
+
+ /* Bus Scale Data */
+
+ pdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (IS_ERR_OR_NULL(pdata->bus_scale_table)) {
+ ret = PTR_ERR(pdata->bus_scale_table);
+ if (!ret)
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
+ if (ret)
+ goto err;
+
+ pdata->coresight_pdata = of_get_coresight_platform_data(&pdev->dev,
+ pdev->dev.of_node);
+
+ pdev->dev.platform_data = pdata;
+ return 0;
+
+err:
+ if (pdata) {
+ if (pdata->iommu_data)
+ kfree(pdata->iommu_data->iommu_ctxs);
+
+ kfree(pdata->iommu_data);
+ }
+
+ kfree(pdata);
+
+ return ret;
+}
+
+#ifdef CONFIG_MSM_OCMEM
+static int
+adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
+{
+ if (!(adreno_is_a330(adreno_dev) ||
+ adreno_is_a305b(adreno_dev)))
+ return 0;
+
+ /* OCMEM is only needed once, do not support consective allocation */
+ if (adreno_dev->ocmem_hdl != NULL)
+ return 0;
+
+ adreno_dev->ocmem_hdl =
+ ocmem_allocate(OCMEM_GRAPHICS, adreno_dev->gmem_size);
+ if (adreno_dev->ocmem_hdl == NULL)
+ return -ENOMEM;
+
+ adreno_dev->gmem_size = adreno_dev->ocmem_hdl->len;
+ adreno_dev->ocmem_base = adreno_dev->ocmem_hdl->addr;
+
+ return 0;
+}
+
+static void
+adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
+{
+ if (!(adreno_is_a330(adreno_dev) ||
+ adreno_is_a305b(adreno_dev)))
+ return;
+
+ if (adreno_dev->ocmem_hdl == NULL)
+ return;
+
+ ocmem_free(OCMEM_GRAPHICS, adreno_dev->ocmem_hdl);
+ adreno_dev->ocmem_hdl = NULL;
+}
+#else
+static int
+adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
+{
+ return 0;
+}
+
+static void
+adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
+{
+}
+#endif
+
+static int __devinit
+adreno_probe(struct platform_device *pdev)
+{
+ struct kgsl_device *device;
+ struct kgsl_device_platform_data *pdata = NULL;
+ struct adreno_device *adreno_dev;
+ int status = -EINVAL;
+ bool is_dt;
+
+ is_dt = of_match_device(adreno_match_table, &pdev->dev);
+
+ if (is_dt && pdev->dev.of_node) {
+ status = adreno_of_get_pdata(pdev);
+ if (status)
+ goto error_return;
+ }
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+ adreno_dev = ADRENO_DEVICE(device);
+ device->parentdev = &pdev->dev;
+
+ status = adreno_ringbuffer_init(device);
+ if (status != 0)
+ goto error;
+
+ status = kgsl_device_platform_probe(device);
+ if (status)
+ goto error_close_rb;
+
+ status = adreno_dispatcher_init(adreno_dev);
+ if (status)
+ goto error_close_device;
+
+ adreno_debugfs_init(device);
+ adreno_ft_init_sysfs(device);
+
+ kgsl_pwrscale_init(device);
+ kgsl_pwrscale_attach_policy(device, ADRENO_DEFAULT_PWRSCALE_POLICY);
+
+ device->flags &= ~KGSL_FLAGS_SOFT_RESET;
+ pdata = kgsl_device_get_drvdata(device);
+
+ adreno_coresight_init(pdev);
+
+ return 0;
+
+error_close_device:
+ kgsl_device_platform_remove(device);
+error_close_rb:
+ adreno_ringbuffer_close(&adreno_dev->ringbuffer);
+error:
+ device->parentdev = NULL;
+error_return:
+ return status;
+}
+
+static int __devexit adreno_remove(struct platform_device *pdev)
+{
+ struct kgsl_device *device;
+ struct adreno_device *adreno_dev;
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+ adreno_dev = ADRENO_DEVICE(device);
+
+ adreno_coresight_remove(pdev);
+
+ kgsl_pwrscale_detach_policy(device);
+ kgsl_pwrscale_close(device);
+
+ adreno_dispatcher_close(adreno_dev);
+ adreno_ringbuffer_close(&adreno_dev->ringbuffer);
+ adreno_perfcounter_close(device);
+ kgsl_device_platform_remove(device);
+
+ clear_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
+
+ return 0;
+}
+
+static int adreno_init(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ int i;
+ int ret;
+
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ /*
+ * initialization only needs to be done once initially until
+ * device is shutdown
+ */
+ if (test_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv))
+ return 0;
+
+ /* Power up the device */
+ kgsl_pwrctrl_enable(device);
+
+ /* Identify the specific GPU */
+ adreno_identify_gpu(adreno_dev);
+
+ if (adreno_ringbuffer_read_pm4_ucode(device)) {
+ KGSL_DRV_ERR(device, "Reading pm4 microcode failed %s\n",
+ adreno_dev->pm4_fwfile);
+ BUG_ON(1);
+ }
+
+ if (adreno_ringbuffer_read_pfp_ucode(device)) {
+ KGSL_DRV_ERR(device, "Reading pfp microcode failed %s\n",
+ adreno_dev->pfp_fwfile);
+ BUG_ON(1);
+ }
+
+ if (adreno_dev->gpurev == ADRENO_REV_UNKNOWN) {
+ KGSL_DRV_ERR(device, "Unknown chip ID %x\n",
+ adreno_dev->chip_id);
+ BUG_ON(1);
+ }
+
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ /*
+ * Check if firmware supports the sync lock PM4 packets needed
+ * for IOMMUv1
+ */
+
+ if ((adreno_dev->pm4_fw_version >=
+ adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pm4_ver) &&
+ (adreno_dev->pfp_fw_version >=
+ adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pfp_ver))
+ device->mmu.flags |= KGSL_MMU_FLAGS_IOMMU_SYNC;
+
+ rb->global_ts = 0;
+
+ /* Initialize ft detection register offsets */
+ ft_detect_regs[0] = adreno_getreg(adreno_dev,
+ ADRENO_REG_RBBM_STATUS);
+ ft_detect_regs[1] = adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_RB_RPTR);
+ ft_detect_regs[2] = adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_IB1_BASE);
+ ft_detect_regs[3] = adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_IB1_BUFSZ);
+ ft_detect_regs[4] = adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_IB2_BASE);
+ ft_detect_regs[5] = adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_IB2_BUFSZ);
+ for (i = 6; i < FT_DETECT_REGS_COUNT; i++)
+ ft_detect_regs[i] = 0;
+
+ ret = adreno_perfcounter_init(device);
+
+ /* Power down the device */
+ kgsl_pwrctrl_disable(device);
+
+ if (ret)
+ goto done;
+
+ /* Certain targets need the fixup. You know who you are */
+ if (adreno_is_a330v2(adreno_dev))
+ adreno_a3xx_pwron_fixup_init(adreno_dev);
+
+ set_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
+done:
+ return ret;
+}
+
+static int adreno_start(struct kgsl_device *device)
+{
+ int status = -EINVAL;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int state = device->state;
+ unsigned int regulator_left_on = 0;
+
+ kgsl_cffdump_open(device);
+
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+
+ regulator_left_on = (regulator_is_enabled(device->pwrctrl.gpu_reg) ||
+ (device->pwrctrl.gpu_cx &&
+ regulator_is_enabled(device->pwrctrl.gpu_cx)));
+
+ /* Clear any GPU faults that might have been left over */
+ adreno_set_gpu_fault(adreno_dev, 0);
+
+ /* Power up the device */
+ kgsl_pwrctrl_enable(device);
+
+ /* Set the bit to indicate that we've just powered on */
+ set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);
+
+ /* Set up a2xx special case */
+ if (adreno_is_a2xx(adreno_dev)) {
+ /*
+ * the MH_CLNT_INTF_CTRL_CONFIG registers aren't present
+ * on older gpus
+ */
+ if (adreno_is_a20x(adreno_dev)) {
+ device->mh.mh_intf_cfg1 = 0;
+ device->mh.mh_intf_cfg2 = 0;
+ }
+
+ kgsl_mh_start(device);
+ }
+
+ status = kgsl_mmu_start(device);
+ if (status)
+ goto error_clk_off;
+
+ status = adreno_ocmem_gmem_malloc(adreno_dev);
+ if (status) {
+ KGSL_DRV_ERR(device, "OCMEM malloc failed\n");
+ goto error_mmu_off;
+ }
+
+ if (regulator_left_on && adreno_dev->gpudev->soft_reset) {
+ /*
+ * Reset the GPU for A3xx. A2xx does a soft reset in
+ * the start function.
+ */
+ adreno_dev->gpudev->soft_reset(adreno_dev);
+ }
+
+ /* Start the GPU */
+ adreno_dev->gpudev->start(adreno_dev);
+
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ device->ftbl->irqctrl(device, 1);
+
+ status = adreno_ringbuffer_start(&adreno_dev->ringbuffer);
+ if (status)
+ goto error_irq_off;
+
+ status = adreno_perfcounter_start(adreno_dev);
+ if (status)
+ goto error_rb_stop;
+
+ /* Start the dispatcher */
+ adreno_dispatcher_start(adreno_dev);
+
+ device->reset_counter++;
+
+ return 0;
+
+error_rb_stop:
+ adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+error_irq_off:
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+error_mmu_off:
+ kgsl_mmu_stop(&device->mmu);
+
+error_clk_off:
+ kgsl_pwrctrl_disable(device);
+ /* set the state back to original state */
+ kgsl_pwrctrl_set_state(device, state);
+
+ return status;
+}
+
+static int adreno_stop(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (adreno_dev->drawctxt_active)
+ kgsl_context_put(&adreno_dev->drawctxt_active->base);
+
+ adreno_dev->drawctxt_active = NULL;
+
+ adreno_dispatcher_stop(adreno_dev);
+ adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+
+ kgsl_mmu_stop(&device->mmu);
+
+ device->ftbl->irqctrl(device, 0);
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+ del_timer_sync(&device->idle_timer);
+
+ adreno_ocmem_gmem_free(adreno_dev);
+
+ /* Power down the device */
+ kgsl_pwrctrl_disable(device);
+
+ kgsl_cffdump_close(device);
+
+ return 0;
+}
+
+/**
+ * adreno_reset() - Helper function to reset the GPU
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Helper function to reset the GPU hardware by toggling the footswitch
+ */
+int adreno_reset(struct kgsl_device *device)
+{
+ int ret = -EINVAL;
+ struct kgsl_mmu *mmu = &device->mmu;
+ int i = 0;
+
+ /* Try soft reset first, for non mmu fault case only */
+ if (!atomic_read(&mmu->fault)) {
+ ret = adreno_soft_reset(device);
+ if (ret)
+ KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
+ }
+ if (ret) {
+ /* If soft reset failed/skipped, then pull the power */
+ adreno_stop(device);
+
+ /* Keep trying to start the device until it works */
+ for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
+ ret = adreno_start(device);
+ if (!ret)
+ break;
+
+ msleep(20);
+ }
+ }
+ if (ret)
+ return ret;
+
+ if (0 != i)
+ KGSL_DRV_WARN(device, "Device hard reset tried %d tries\n", i);
+
+ /*
+ * If active_cnt is non-zero then the system was active before
+ * going into a reset - put it back in that state
+ */
+
+ if (atomic_read(&device->active_cnt))
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+
+ /* Set the page table back to the default page table */
+ kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+ KGSL_MEMSTORE_GLOBAL);
+
+ return ret;
+}
+
+/**
+ * _ft_sysfs_store() - Common routine to write to FT sysfs files
+ * @buf: value to write
+ * @count: size of the value to write
+ * @sysfs_cfg: KGSL FT sysfs config to write
+ *
+ * This is a common routine to write to FT sysfs files.
+ */
+static int _ft_sysfs_store(const char *buf, size_t count, unsigned int *ptr)
+{
+ char temp[20];
+ unsigned long val;
+ int rc;
+
+ snprintf(temp, sizeof(temp), "%.*s",
+ (int)min(count, sizeof(temp) - 1), buf);
+ rc = kstrtoul(temp, 0, &val);
+ if (rc)
+ return rc;
+
+ *ptr = val;
+
+ return count;
+}
+
+/**
+ * _get_adreno_dev() - Routine to get a pointer to adreno dev
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value to write
+ * @count: size of the value to write
+ */
+struct adreno_device *_get_adreno_dev(struct device *dev)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ return device ? ADRENO_DEVICE(device) : NULL;
+}
+
+/**
+ * _ft_policy_store() - Routine to configure FT policy
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value to write
+ * @count: size of the value to write
+ *
+ * FT policy can be set to any of the options below.
+ * KGSL_FT_DISABLE -> BIT(0) Set to disable FT
+ * KGSL_FT_REPLAY -> BIT(1) Set to enable replay
+ * KGSL_FT_SKIPIB -> BIT(2) Set to skip IB
+ * KGSL_FT_SKIPFRAME -> BIT(3) Set to skip frame
+ * by default set FT policy to KGSL_FT_DEFAULT_POLICY
+ */
+static int _ft_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+ int ret;
+ if (adreno_dev == NULL)
+ return 0;
+
+ mutex_lock(&adreno_dev->dev.mutex);
+ ret = _ft_sysfs_store(buf, count, &adreno_dev->ft_policy);
+ mutex_unlock(&adreno_dev->dev.mutex);
+
+ return ret;
+}
+
+/**
+ * _ft_policy_show() - Routine to read FT policy
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value read
+ *
+ * This is a routine to read current FT policy
+ */
+static int _ft_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+ if (adreno_dev == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "0x%X\n", adreno_dev->ft_policy);
+}
+
+/**
+ * _ft_pagefault_policy_store() - Routine to configure FT
+ * pagefault policy
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value to write
+ * @count: size of the value to write
+ *
+ * FT pagefault policy can be set to any of the options below.
+ * KGSL_FT_PAGEFAULT_INT_ENABLE -> BIT(0) set to enable pagefault INT
+ * KGSL_FT_PAGEFAULT_GPUHALT_ENABLE -> BIT(1) Set to enable GPU HALT on
+ * pagefaults. This stalls the GPU on a pagefault on IOMMU v1 HW.
+ * KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE -> BIT(2) Set to log only one
+ * pagefault per page.
+ * KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT -> BIT(3) Set to log only one
+ * pagefault per INT.
+ */
+static int _ft_pagefault_policy_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+ int ret;
+ if (adreno_dev == NULL)
+ return 0;
+
+ mutex_lock(&adreno_dev->dev.mutex);
+ ret = _ft_sysfs_store(buf, count, &adreno_dev->ft_pf_policy);
+ mutex_unlock(&adreno_dev->dev.mutex);
+
+ return ret;
+}
+
+/**
+ * _ft_pagefault_policy_show() - Routine to read FT pagefault
+ * policy
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value read
+ *
+ * This is a routine to read current FT pagefault policy
+ */
+static int _ft_pagefault_policy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+ if (adreno_dev == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "0x%X\n", adreno_dev->ft_pf_policy);
+}
+
+/**
+ * _ft_fast_hang_detect_store() - Routine to configure FT fast
+ * hang detect policy
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value to write
+ * @count: size of the value to write
+ *
+ * 0x1 - Enable fast hang detection
+ * 0x0 - Disable fast hang detection
+ */
+static int _ft_fast_hang_detect_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+ int ret;
+ if (adreno_dev == NULL)
+ return 0;
+
+ mutex_lock(&adreno_dev->dev.mutex);
+ ret = _ft_sysfs_store(buf, count, &adreno_dev->fast_hang_detect);
+ mutex_unlock(&adreno_dev->dev.mutex);
+
+ return ret;
+
+}
+
+/**
+ * _ft_fast_hang_detect_show() - Routine to read FT fast
+ * hang detect policy
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value read
+ */
+static int _ft_fast_hang_detect_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+ if (adreno_dev == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (adreno_dev->fast_hang_detect ? 1 : 0));
+}
+
+/**
+ * _ft_long_ib_detect_store() - Routine to configure FT long IB
+ * detect policy
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value to write
+ * @count: size of the value to write
+ *
+ * 0x0 - Enable long IB detection
+ * 0x1 - Disable long IB detection
+ */
+static int _ft_long_ib_detect_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+ int ret;
+ if (adreno_dev == NULL)
+ return 0;
+
+ mutex_lock(&adreno_dev->dev.mutex);
+ ret = _ft_sysfs_store(buf, count, &adreno_dev->long_ib_detect);
+ mutex_unlock(&adreno_dev->dev.mutex);
+
+ return ret;
+
+}
+
+/**
+ * _ft_long_ib_detect_show() - Routine to read FT long IB
+ * detect policy
+ * @dev: device ptr
+ * @attr: Device attribute
+ * @buf: value read
+ */
+static int _ft_long_ib_detect_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct adreno_device *adreno_dev = _get_adreno_dev(dev);
+ if (adreno_dev == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ (adreno_dev->long_ib_detect ? 1 : 0));
+}
+
+
+#define FT_DEVICE_ATTR(name) \
+ DEVICE_ATTR(name, 0644, _ ## name ## _show, _ ## name ## _store);
+
+FT_DEVICE_ATTR(ft_policy);
+FT_DEVICE_ATTR(ft_pagefault_policy);
+FT_DEVICE_ATTR(ft_fast_hang_detect);
+FT_DEVICE_ATTR(ft_long_ib_detect);
+
+
+const struct device_attribute *ft_attr_list[] = {
+ &dev_attr_ft_policy,
+ &dev_attr_ft_pagefault_policy,
+ &dev_attr_ft_fast_hang_detect,
+ &dev_attr_ft_long_ib_detect,
+ NULL,
+};
+
+int adreno_ft_init_sysfs(struct kgsl_device *device)
+{
+ return kgsl_create_device_sysfs_files(device->dev, ft_attr_list);
+}
+
+void adreno_ft_uninit_sysfs(struct kgsl_device *device)
+{
+ kgsl_remove_device_sysfs_files(device->dev, ft_attr_list);
+}
+
+static int adreno_getproperty(struct kgsl_device *device,
+ enum kgsl_property_type type,
+ void *value,
+ unsigned int sizebytes)
+{
+ int status = -EINVAL;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ switch (type) {
+ case KGSL_PROP_DEVICE_INFO:
+ {
+ struct kgsl_devinfo devinfo;
+
+ if (sizebytes != sizeof(devinfo)) {
+ status = -EINVAL;
+ break;
+ }
+
+ memset(&devinfo, 0, sizeof(devinfo));
+ devinfo.device_id = device->id+1;
+ devinfo.chip_id = adreno_dev->chip_id;
+ devinfo.mmu_enabled = kgsl_mmu_enabled();
+ devinfo.gpu_id = adreno_dev->gpurev;
+ devinfo.gmem_gpubaseaddr = adreno_dev->gmem_base;
+ devinfo.gmem_sizebytes = adreno_dev->gmem_size;
+
+ if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
+ 0) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ case KGSL_PROP_DEVICE_SHADOW:
+ {
+ struct kgsl_shadowprop shadowprop;
+
+ if (sizebytes != sizeof(shadowprop)) {
+ status = -EINVAL;
+ break;
+ }
+ memset(&shadowprop, 0, sizeof(shadowprop));
+ if (device->memstore.hostptr) {
+ /*NOTE: with mmu enabled, gpuaddr doesn't mean
+ * anything to mmap().
+ */
+ shadowprop.gpuaddr = device->memstore.gpuaddr;
+ shadowprop.size = device->memstore.size;
+ /* GSL needs this to be set, even if it
+ appears to be meaningless */
+ shadowprop.flags = KGSL_FLAGS_INITIALIZED |
+ KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS;
+ }
+ if (copy_to_user(value, &shadowprop,
+ sizeof(shadowprop))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ case KGSL_PROP_MMU_ENABLE:
+ {
+ int mmu_prop = kgsl_mmu_enabled();
+
+ if (sizebytes != sizeof(int)) {
+ status = -EINVAL;
+ break;
+ }
+ if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ case KGSL_PROP_INTERRUPT_WAITS:
+ {
+ int int_waits = 1;
+ if (sizebytes != sizeof(int)) {
+ status = -EINVAL;
+ break;
+ }
+ if (copy_to_user(value, &int_waits, sizeof(int))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ }
+
+ return status;
+}
+
+static int adreno_setproperty(struct kgsl_device *device,
+ enum kgsl_property_type type,
+ void *value,
+ unsigned int sizebytes)
+{
+ int status = -EINVAL;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ switch (type) {
+ case KGSL_PROP_PWRCTRL: {
+ unsigned int enable;
+
+ if (sizebytes != sizeof(enable))
+ break;
+
+ if (copy_from_user(&enable, (void __user *) value,
+ sizeof(enable))) {
+ status = -EFAULT;
+ break;
+ }
+
+ if (enable) {
+ adreno_dev->fast_hang_detect = 1;
+ kgsl_pwrscale_enable(device);
+ } else {
+ kgsl_pwrctrl_wake(device);
+ device->pwrctrl.ctrl_flags = KGSL_PWR_ON;
+ adreno_dev->fast_hang_detect = 0;
+ kgsl_pwrscale_disable(device);
+ }
+
+ status = 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * adreno_hw_isidle() - Check if the GPU core is idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Return true if the RBBM status register for the GPU type indicates that the
+ * hardware is idle
+ */
+static bool adreno_hw_isidle(struct kgsl_device *device)
+{
+ unsigned int reg_rbbm_status;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Don't consider ourselves idle if there is an IRQ pending */
+ if (adreno_dev->gpudev->irq_pending(adreno_dev))
+ return false;
+
+ /* Read the correct RBBM status for the GPU type */
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
+ ®_rbbm_status);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ if (reg_rbbm_status == 0x110)
+ return true;
+ } else if (adreno_is_a3xx(adreno_dev) || adreno_is_a4xx(adreno_dev)) {
+ if (!(reg_rbbm_status & 0x80000000))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * adreno_soft_reset() - Do a soft reset of the GPU hardware
+ * @device: KGSL device to soft reset
+ *
+ * "soft reset" the GPU hardware - this is a fast path GPU reset
+ * The GPU hardware is reset but we never pull power so we can skip
+ * a lot of the standard adreno_stop/adreno_start sequence
+ */
+int adreno_soft_reset(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int ret;
+
+ if (!adreno_dev->gpudev->soft_reset) {
+ dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
+ return -EINVAL;
+ }
+
+ adreno_dev->drawctxt_active = NULL;
+
+ /* Stop the ringbuffer */
+ adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+
+ if (kgsl_pwrctrl_isenabled(device))
+ device->ftbl->irqctrl(device, 0);
+
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+ adreno_set_gpu_fault(adreno_dev, 0);
+
+ /* Delete the idle timer */
+ del_timer_sync(&device->idle_timer);
+
+ /* Make sure we are totally awake */
+ kgsl_pwrctrl_enable(device);
+
+ /* Reset the GPU */
+ adreno_dev->gpudev->soft_reset(adreno_dev);
+
+ /* Reinitialize the GPU */
+ adreno_dev->gpudev->start(adreno_dev);
+
+ /* Enable IRQ */
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ device->ftbl->irqctrl(device, 1);
+
+ /*
+ * If we have offsets for the jump tables we can try to do a warm start,
+ * otherwise do a full ringbuffer restart
+ */
+
+ if (adreno_dev->pm4_jt_idx)
+ ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
+ else
+ ret = adreno_ringbuffer_start(&adreno_dev->ringbuffer);
+
+ if (ret)
+ return ret;
+
+ device->reset_counter++;
+
+ return 0;
+}
+
+/**
+ * adreno_isidle() - return true if the GPU hardware is idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Return true if the GPU hardware is idle and there are no commands pending in
+ * the ringbuffer
+ */
+bool adreno_isidle(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int rptr;
+
+ /* If the device isn't active, don't force it on. */
+ if (device->state != KGSL_STATE_ACTIVE)
+ return true;
+
+ rptr = adreno_get_rptr(&adreno_dev->ringbuffer);
+
+ if (rptr == adreno_dev->ringbuffer.wptr)
+ return adreno_hw_isidle(device);
+
+ return false;
+}
+
+/**
+ * adreno_idle() - wait for the GPU hardware to go idle
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
+ */
+
+int adreno_idle(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned long wait = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+
+ /*
+ * Make sure the device mutex is held so the dispatcher can't send any
+ * more commands to the hardware
+ */
+
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (adreno_is_a3xx(adreno_dev) || adreno_is_a4xx(adreno_dev))
+ kgsl_cffdump_regpoll(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+ 0x00000000, 0x80000000);
+ else
+ kgsl_cffdump_regpoll(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+ 0x110, 0x110);
+
+ while (time_before(jiffies, wait)) {
+ /*
+ * If we fault, stop waiting and return an error. The dispatcher
+ * will clean up the fault from the work queue, but we need to
+ * make sure we don't block it by waiting for an idle that
+ * will never come.
+ */
+
+ if (adreno_gpu_fault(adreno_dev) != 0)
+ return -EDEADLK;
+
+ if (adreno_isidle(device))
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * adreno_drain() - Drain the dispatch queue
+ * @device: Pointer to the KGSL device structure for the GPU
+ *
+ * Tell the dispatcher to pause - this has the effect of draining the inflight
+ * command batches
+ */
+static int adreno_drain(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ adreno_dispatcher_pause(adreno_dev);
+ return 0;
+}
+
+/* Caller must hold the device mutex. */
+static int adreno_suspend_context(struct kgsl_device *device)
+{
+ int status = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* switch to NULL ctxt */
+ if (adreno_dev->drawctxt_active != NULL) {
+ adreno_drawctxt_switch(adreno_dev, NULL, 0);
+ status = adreno_idle(device);
+ }
+
+ return status;
+}
+
+/* Find a memory structure attached to an adreno context */
+
+struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
+ phys_addr_t pt_base, unsigned int gpuaddr, unsigned int size)
+{
+ struct kgsl_context *context;
+ int next = 0;
+ struct kgsl_memdesc *desc = NULL;
+
+ read_lock(&device->context_lock);
+ while (1) {
+ context = idr_get_next(&device->context_idr, &next);
+ if (context == NULL)
+ break;
+
+ if (kgsl_mmu_pt_equal(&device->mmu,
+ context->proc_priv->pagetable,
+ pt_base)) {
+ struct adreno_context *adreno_context;
+
+ adreno_context = ADRENO_CONTEXT(context);
+ desc = &adreno_context->gpustate;
+ if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
+ break;
+
+ desc = &adreno_context->context_gmem_shadow.gmemshadow;
+ if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
+ break;
+ }
+ next = next + 1;
+ desc = NULL;
+ }
+ read_unlock(&device->context_lock);
+ return desc;
+}
+
+/*
+ * adreno_find_region() - Find corresponding allocation for a given address
+ * @device: Device on which address operates
+ * @pt_base: The pagetable in which address is mapped
+ * @gpuaddr: The gpu address
+ * @size: Size in bytes of the address
+ * @entry: If the allocation is part of user space allocation then the mem
+ * entry is returned in this parameter. Caller is supposed to decrement
+ * refcount on this entry after its done using it.
+ *
+ * Finds an allocation descriptor for a given gpu address range
+ *
+ * Returns the descriptor on success else NULL
+ */
+struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
+ phys_addr_t pt_base,
+ unsigned int gpuaddr,
+ unsigned int size,
+ struct kgsl_mem_entry **entry)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *ringbuffer = &adreno_dev->ringbuffer;
+
+ *entry = NULL;
+ if (kgsl_gpuaddr_in_memdesc(&ringbuffer->buffer_desc, gpuaddr, size))
+ return &ringbuffer->buffer_desc;
+
+ if (kgsl_gpuaddr_in_memdesc(&ringbuffer->memptrs_desc, gpuaddr, size))
+ return &ringbuffer->memptrs_desc;
+
+ if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr, size))
+ return &device->memstore;
+
+ if (kgsl_gpuaddr_in_memdesc(&adreno_dev->pwron_fixup, gpuaddr, size))
+ return &adreno_dev->pwron_fixup;
+
+ if (kgsl_gpuaddr_in_memdesc(&device->mmu.setstate_memory, gpuaddr,
+ size))
+ return &device->mmu.setstate_memory;
+
+ *entry = kgsl_get_mem_entry(device, pt_base, gpuaddr, size);
+
+ if (*entry)
+ return &((*entry)->memdesc);
+
+ return adreno_find_ctxtmem(device, pt_base, gpuaddr, size);
+}
+
+/*
+ * adreno_convertaddr() - Convert a gpu address to kernel mapped address
+ * @device: Device on which the address operates
+ * @pt_base: The pagetable in which address is mapped
+ * @gpuaddr: The start address
+ * @size: The length of address range
+ * @entry: If the allocation is part of user space allocation then the mem
+ * entry is returned in this parameter. Caller is supposed to decrement
+ * refcount on this entry after its done using it.
+ *
+ * Returns the converted host pointer on success else NULL
+ */
+uint8_t *adreno_convertaddr(struct kgsl_device *device, phys_addr_t pt_base,
+ unsigned int gpuaddr, unsigned int size,
+ struct kgsl_mem_entry **entry)
+{
+ struct kgsl_memdesc *memdesc;
+
+ memdesc = adreno_find_region(device, pt_base, gpuaddr, size, entry);
+
+ return memdesc ? kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr) : NULL;
+}
+
+
+/**
+ * adreno_read - General read function to read adreno device memory
+ * @device - Pointer to the GPU device struct (for adreno device)
+ * @base - Base address (kernel virtual) where the device memory is mapped
+ * @offsetwords - Offset in words from the base address, of the memory that
+ * is to be read
+ * @value - Value read from the device memory
+ * @mem_len - Length of the device memory mapped to the kernel
+ */
+static void adreno_read(struct kgsl_device *device, void *base,
+ unsigned int offsetwords, unsigned int *value,
+ unsigned int mem_len)
+{
+
+ unsigned int *reg;
+ BUG_ON(offsetwords*sizeof(uint32_t) >= mem_len);
+ reg = (unsigned int *)(base + (offsetwords << 2));
+
+ if (!in_interrupt())
+ kgsl_pre_hwaccess(device);
+
+ /*ensure this read finishes before the next one.
+ * i.e. act like normal readl() */
+ *value = __raw_readl(reg);
+ rmb();
+}
+
+/**
+ * adreno_regread - Used to read adreno device registers
+ * @offsetwords - Word (4 Bytes) offset to the register to be read
+ * @value - Value read from device register
+ */
+static void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+ unsigned int *value)
+{
+ adreno_read(device, device->reg_virt, offsetwords, value,
+ device->reg_len);
+}
+
+/**
+ * adreno_shadermem_regread - Used to read GPU (adreno) shader memory
+ * @device - GPU device whose shader memory is to be read
+ * @offsetwords - Offset in words, of the shader memory address to be read
+ * @value - Pointer to where the read shader mem value is to be stored
+ */
+void adreno_shadermem_regread(struct kgsl_device *device,
+ unsigned int offsetwords, unsigned int *value)
+{
+ adreno_read(device, device->shader_mem_virt, offsetwords, value,
+ device->shader_mem_len);
+}
+
+static void adreno_regwrite(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value)
+{
+ unsigned int *reg;
+
+ BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
+
+ if (!in_interrupt())
+ kgsl_pre_hwaccess(device);
+
+ kgsl_trace_regwrite(device, offsetwords, value);
+
+ kgsl_cffdump_regwrite(device, offsetwords << 2, value);
+ reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
+
+ /*ensure previous writes post before this one,
+ * i.e. act like normal writel() */
+ wmb();
+ __raw_writel(value, reg);
+}
+
+/**
+ * adreno_waittimestamp - sleep while waiting for the specified timestamp
+ * @device - pointer to a KGSL device structure
+ * @context - pointer to the active kgsl context
+ * @timestamp - GPU timestamp to wait for
+ * @msecs - amount of time to wait (in milliseconds)
+ *
+ * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
+ */
+static int adreno_waittimestamp(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
+{
+ int ret;
+ struct adreno_context *drawctxt;
+
+ if (context == NULL) {
+ /* If they are doing then complain once */
+ dev_WARN_ONCE(device->dev, 1,
+ "IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
+ return -ENOTTY;
+ }
+
+ /* Return -EINVAL if the context has been detached */
+ if (kgsl_context_detached(context))
+ return -EINVAL;
+
+ ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
+ timestamp, msecs_to_jiffies(msecs));
+
+ /* If the context got invalidated then return a specific error */
+ drawctxt = ADRENO_CONTEXT(context);
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ ret = -EDEADLK;
+
+ return ret;
+}
+
+static unsigned int adreno_readtimestamp(struct kgsl_device *device,
+ struct kgsl_context *context, enum kgsl_timestamp_type type)
+{
+ unsigned int timestamp = 0;
+ unsigned int id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
+
+ switch (type) {
+ case KGSL_TIMESTAMP_QUEUED: {
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ timestamp = adreno_context_timestamp(context,
+ &adreno_dev->ringbuffer);
+ break;
+ }
+ case KGSL_TIMESTAMP_CONSUMED:
+ kgsl_sharedmem_readl(&device->memstore, ×tamp,
+ KGSL_MEMSTORE_OFFSET(id, soptimestamp));
+ break;
+ case KGSL_TIMESTAMP_RETIRED:
+ kgsl_sharedmem_readl(&device->memstore, ×tamp,
+ KGSL_MEMSTORE_OFFSET(id, eoptimestamp));
+ break;
+ }
+
+ rmb();
+
+ return timestamp;
+}
+
+static long adreno_ioctl(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int result = 0;
+
+ switch (cmd) {
+ case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET: {
+ struct kgsl_drawctxt_set_bin_base_offset *binbase = data;
+ struct kgsl_context *context;
+
+ binbase = data;
+
+ context = kgsl_context_get_owner(dev_priv,
+ binbase->drawctxt_id);
+ if (context) {
+ adreno_drawctxt_set_bin_base_offset(
+ device, context, binbase->offset);
+ } else {
+ result = -EINVAL;
+ KGSL_DRV_ERR(device,
+ "invalid drawctxt drawctxt_id %d "
+ "device_id=%d\n",
+ binbase->drawctxt_id, device->id);
+ }
+
+ kgsl_context_put(context);
+ break;
+ }
+ case IOCTL_KGSL_PERFCOUNTER_GET: {
+ struct kgsl_perfcounter_get *get = data;
+ result = adreno_perfcounter_get(adreno_dev, get->groupid,
+ get->countable, &get->offset, PERFCOUNTER_FLAG_NONE);
+ break;
+ }
+ case IOCTL_KGSL_PERFCOUNTER_PUT: {
+ struct kgsl_perfcounter_put *put = data;
+ result = adreno_perfcounter_put(adreno_dev, put->groupid,
+ put->countable, PERFCOUNTER_FLAG_NONE);
+ break;
+ }
+ case IOCTL_KGSL_PERFCOUNTER_QUERY: {
+ struct kgsl_perfcounter_query *query = data;
+ result = adreno_perfcounter_query_group(adreno_dev,
+ query->groupid, query->countables,
+ query->count, &query->max_counters);
+ break;
+ }
+ case IOCTL_KGSL_PERFCOUNTER_READ: {
+ struct kgsl_perfcounter_read *read = data;
+ result = adreno_perfcounter_read_group(adreno_dev,
+ read->reads, read->count);
+ break;
+ }
+ default:
+ KGSL_DRV_INFO(dev_priv->device,
+ "invalid ioctl code %08x\n", cmd);
+ result = -ENOIOCTLCMD;
+ break;
+ }
+ return result;
+
+}
+
+static inline s64 adreno_ticks_to_us(u32 ticks, u32 gpu_freq)
+{
+ gpu_freq /= 1000000;
+ return ticks / gpu_freq;
+}
+
+static void adreno_power_stats(struct kgsl_device *device,
+ struct kgsl_power_stats *stats)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ unsigned int cycles = 0;
+
+ /*
+ * Get the busy cycles counted since the counter was last reset.
+ * If we're not currently active, there shouldn't have been
+ * any cycles since the last time this function was called.
+ */
+ if (device->state == KGSL_STATE_ACTIVE)
+ cycles = adreno_dev->gpudev->busy_cycles(adreno_dev);
+
+ /*
+ * In order to calculate idle you have to have run the algorithm
+ * at least once to get a start time.
+ */
+ if (pwr->time != 0) {
+ s64 tmp = ktime_to_us(ktime_get());
+ stats->total_time = tmp - pwr->time;
+ pwr->time = tmp;
+ stats->busy_time = adreno_ticks_to_us(cycles, device->pwrctrl.
+ pwrlevels[device->pwrctrl.active_pwrlevel].
+ gpu_freq);
+ } else {
+ stats->total_time = 0;
+ stats->busy_time = 0;
+ pwr->time = ktime_to_us(ktime_get());
+ }
+}
+
+void adreno_irqctrl(struct kgsl_device *device, int state)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ adreno_dev->gpudev->irq_control(adreno_dev, state);
+}
+
+static unsigned int adreno_gpuid(struct kgsl_device *device,
+ unsigned int *chipid)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Some applications need to know the chip ID too, so pass
+ * that as a parameter */
+
+ if (chipid != NULL)
+ *chipid = adreno_dev->chip_id;
+
+ /* Standard KGSL gpuid format:
+ * top word is 0x0002 for 2D or 0x0003 for 3D
+ * Bottom word is core specific identifer
+ */
+
+ return (0x0003 << 16) | ((int) adreno_dev->gpurev);
+}
+
+static void adreno_resume(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ adreno_dispatcher_resume(adreno_dev);
+}
+
+static const struct kgsl_functable adreno_functable = {
+ /* Mandatory functions */
+ .regread = adreno_regread,
+ .regwrite = adreno_regwrite,
+ .idle = adreno_idle,
+ .isidle = adreno_isidle,
+ .suspend_context = adreno_suspend_context,
+ .init = adreno_init,
+ .start = adreno_start,
+ .stop = adreno_stop,
+ .getproperty = adreno_getproperty,
+ .waittimestamp = adreno_waittimestamp,
+ .readtimestamp = adreno_readtimestamp,
+ .issueibcmds = adreno_ringbuffer_issueibcmds,
+ .ioctl = adreno_ioctl,
+ .setup_pt = adreno_setup_pt,
+ .cleanup_pt = adreno_cleanup_pt,
+ .power_stats = adreno_power_stats,
+ .irqctrl = adreno_irqctrl,
+ .gpuid = adreno_gpuid,
+ .snapshot = adreno_snapshot,
+ .irq_handler = adreno_irq_handler,
+ .drain = adreno_drain,
+ /* Optional functions */
+ .setstate = adreno_setstate,
+ .drawctxt_create = adreno_drawctxt_create,
+ .drawctxt_detach = adreno_drawctxt_detach,
+ .drawctxt_destroy = adreno_drawctxt_destroy,
+ .setproperty = adreno_setproperty,
+ .postmortem_dump = adreno_dump,
+ .drawctxt_sched = adreno_drawctxt_sched,
+ .resume = adreno_resume,
+};
+
+static struct platform_driver adreno_platform_driver = {
+ .probe = adreno_probe,
+ .remove = __devexit_p(adreno_remove),
+ .suspend = kgsl_suspend_driver,
+ .resume = kgsl_resume_driver,
+ .id_table = adreno_id_table,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVICE_3D_NAME,
+ .pm = &kgsl_pm_ops,
+ .of_match_table = adreno_match_table,
+ }
+};
+
+static int __init kgsl_3d_init(void)
+{
+ return platform_driver_register(&adreno_platform_driver);
+}
+
+static void __exit kgsl_3d_exit(void)
+{
+ platform_driver_unregister(&adreno_platform_driver);
+}
+
+module_init(kgsl_3d_init);
+module_exit(kgsl_3d_exit);
+
+MODULE_DESCRIPTION("3D Graphics driver");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kgsl_3d");
diff --git a/drivers/gpu/msm2/adreno.h b/drivers/gpu/msm2/adreno.h
new file mode 100644
index 0000000..881e39c
--- /dev/null
+++ b/drivers/gpu/msm2/adreno.h
@@ -0,0 +1,875 @@
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_H
+#define __ADRENO_H
+
+#include "kgsl_device.h"
+#include "adreno_drawctxt.h"
+#include "adreno_ringbuffer.h"
+#include "kgsl_iommu.h"
+#include <mach/ocmem.h>
+
+#define DEVICE_3D_NAME "kgsl-3d"
+#define DEVICE_3D0_NAME "kgsl-3d0"
+
+#define ADRENO_DEVICE(device) \
+ KGSL_CONTAINER_OF(device, struct adreno_device, dev)
+
+#define ADRENO_CONTEXT(device) \
+ KGSL_CONTAINER_OF(device, struct adreno_context, base)
+
+#define ADRENO_CHIPID_CORE(_id) (((_id) >> 24) & 0xFF)
+#define ADRENO_CHIPID_MAJOR(_id) (((_id) >> 16) & 0xFF)
+#define ADRENO_CHIPID_MINOR(_id) (((_id) >> 8) & 0xFF)
+#define ADRENO_CHIPID_PATCH(_id) ((_id) & 0xFF)
+
+/* Flags to control command packet settings */
+#define KGSL_CMD_FLAGS_NONE 0
+#define KGSL_CMD_FLAGS_PMODE BIT(0)
+#define KGSL_CMD_FLAGS_INTERNAL_ISSUE BIT(1)
+#define KGSL_CMD_FLAGS_WFI BIT(2)
+#define KGSL_CMD_FLAGS_PWRON_FIXUP BIT(3)
+
+/* Command identifiers */
+#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
+#define KGSL_CMD_IDENTIFIER 0x2EEDFACE
+#define KGSL_CMD_INTERNAL_IDENTIFIER 0x2EEDD00D
+#define KGSL_START_OF_IB_IDENTIFIER 0x2EADEABE
+#define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD
+#define KGSL_END_OF_FRAME_IDENTIFIER 0x2E0F2E0F
+#define KGSL_NOP_IB_IDENTIFIER 0x20F20F20
+#define KGSL_PWRON_FIXUP_IDENTIFIER 0x2AFAFAFA
+
+#ifdef CONFIG_MSM_SCM
+#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz)
+#elif defined CONFIG_MSM_SLEEP_STATS_DEVICE
+#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_idlestats)
+#else
+#define ADRENO_DEFAULT_PWRSCALE_POLICY NULL
+#endif
+
+void adreno_debugfs_init(struct kgsl_device *device);
+
+#define ADRENO_ISTORE_START 0x5000 /* Istore offset */
+
+#define ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW 50
+
+/* One cannot wait forever for the core to idle, so set an upper limit to the
+ * amount of time to wait for the core to go idle
+ */
+
+#define ADRENO_IDLE_TIMEOUT (20 * 1000)
+
+enum adreno_gpurev {
+ ADRENO_REV_UNKNOWN = 0,
+ ADRENO_REV_A200 = 200,
+ ADRENO_REV_A203 = 203,
+ ADRENO_REV_A205 = 205,
+ ADRENO_REV_A220 = 220,
+ ADRENO_REV_A225 = 225,
+ ADRENO_REV_A305 = 305,
+ ADRENO_REV_A305C = 306,
+ ADRENO_REV_A320 = 320,
+ ADRENO_REV_A330 = 330,
+ ADRENO_REV_A305B = 335,
+ ADRENO_REV_A420 = 420,
+};
+
+enum coresight_debug_reg {
+ DEBUG_BUS_CTL,
+ TRACE_STOP_CNT,
+ TRACE_START_CNT,
+ TRACE_PERIOD_CNT,
+ TRACE_CMD,
+ TRACE_BUS_CTL,
+};
+
+#define ADRENO_SOFT_FAULT BIT(0)
+#define ADRENO_HARD_FAULT BIT(1)
+#define ADRENO_TIMEOUT_FAULT BIT(2)
+#define ADRENO_IOMMU_PAGE_FAULT BIT(3)
+
+/*
+ * Maximum size of the dispatcher ringbuffer - the actual inflight size will be
+ * smaller then this but this size will allow for a larger range of inflight
+ * sizes that can be chosen at runtime
+ */
+
+#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
+
+/**
+ * struct adreno_dispatcher - container for the adreno GPU dispatcher
+ * @mutex: Mutex to protect the structure
+ * @state: Current state of the dispatcher (active or paused)
+ * @timer: Timer to monitor the progress of the command batches
+ * @inflight: Number of command batch operations pending in the ringbuffer
+ * @fault: Non-zero if a fault was detected.
+ * @pending: Priority list of contexts waiting to submit command batches
+ * @plist_lock: Spin lock to protect the pending queue
+ * @cmdqueue: Queue of command batches currently flight
+ * @head: pointer to the head of of the cmdqueue. This is the oldest pending
+ * operation
+ * @tail: pointer to the tail of the cmdqueue. This is the most recently
+ * submitted operation
+ * @work: work_struct to put the dispatcher in a work queue
+ * @kobj: kobject for the dispatcher directory in the device sysfs node
+ */
+struct adreno_dispatcher {
+ struct mutex mutex;
+ unsigned int state;
+ struct timer_list timer;
+ struct timer_list fault_timer;
+ unsigned int inflight;
+ atomic_t fault;
+ struct plist_head pending;
+ spinlock_t plist_lock;
+ struct kgsl_cmdbatch *cmdqueue[ADRENO_DISPATCH_CMDQUEUE_SIZE];
+ unsigned int head;
+ unsigned int tail;
+ struct work_struct work;
+ struct kobject kobj;
+};
+
+struct adreno_gpudev;
+
+struct adreno_device {
+ struct kgsl_device dev; /* Must be first field in this struct */
+ unsigned long priv;
+ unsigned int chip_id;
+ enum adreno_gpurev gpurev;
+ unsigned long gmem_base;
+ unsigned int gmem_size;
+ struct adreno_context *drawctxt_active;
+ const char *pfp_fwfile;
+ unsigned int *pfp_fw;
+ size_t pfp_fw_size;
+ unsigned int pfp_fw_version;
+ const char *pm4_fwfile;
+ unsigned int *pm4_fw;
+ size_t pm4_fw_size;
+ unsigned int pm4_fw_version;
+ struct adreno_ringbuffer ringbuffer;
+ unsigned int mharb;
+ struct adreno_gpudev *gpudev;
+ unsigned int wait_timeout;
+ unsigned int pm4_jt_idx;
+ unsigned int pm4_jt_addr;
+ unsigned int pfp_jt_idx;
+ unsigned int pfp_jt_addr;
+ unsigned int istore_size;
+ unsigned int pix_shader_start;
+ unsigned int instruction_size;
+ unsigned int ib_check_level;
+ unsigned int fast_hang_detect;
+ unsigned int ft_policy;
+ unsigned int long_ib_detect;
+ unsigned int long_ib;
+ unsigned int long_ib_ts;
+ unsigned int ft_pf_policy;
+ unsigned int gpulist_index;
+ struct ocmem_buf *ocmem_hdl;
+ unsigned int ocmem_base;
+ unsigned int gpu_cycles;
+ struct adreno_dispatcher dispatcher;
+ struct kgsl_memdesc pwron_fixup;
+ unsigned int pwron_fixup_dwords;
+};
+
+/**
+ * enum adreno_device_flags - Private flags for the adreno_device
+ * @ADRENO_DEVICE_PWRON - Set during init after a power collapse
+ * @ADRENO_DEVICE_PWRON_FIXUP - Set if the target requires the shader fixup
+ * after power collapse
+ */
+enum adreno_device_flags {
+ ADRENO_DEVICE_PWRON = 0,
+ ADRENO_DEVICE_PWRON_FIXUP = 1,
+ ADRENO_DEVICE_INITIALIZED = 2,
+};
+
+#define PERFCOUNTER_FLAG_NONE 0x0
+#define PERFCOUNTER_FLAG_KERNEL 0x1
+
+/* Structs to maintain the list of active performance counters */
+
+/**
+ * struct adreno_perfcount_register: register state
+ * @countable: countable the register holds
+ * @kernelcount: number of user space users of the register
+ * @usercount: number of kernel users of the register
+ * @offset: register hardware offset
+ * @load_bit: The bit number in LOAD register which corresponds to this counter
+ * @select: The countable register offset
+ */
+struct adreno_perfcount_register {
+ unsigned int countable;
+ unsigned int kernelcount;
+ unsigned int usercount;
+ unsigned int offset;
+ int load_bit;
+ unsigned int select;
+};
+
+/**
+ * struct adreno_perfcount_group: registers for a hardware group
+ * @regs: available registers for this group
+ * @reg_count: total registers for this group
+ */
+struct adreno_perfcount_group {
+ struct adreno_perfcount_register *regs;
+ unsigned int reg_count;
+};
+
+/**
+ * adreno_perfcounts: all available perfcounter groups
+ * @groups: available groups for this device
+ * @group_count: total groups for this device
+ */
+struct adreno_perfcounters {
+ struct adreno_perfcount_group *groups;
+ unsigned int group_count;
+};
+
+#define ADRENO_PERFCOUNTER_GROUP(core, name) { core##_perfcounters_##name, \
+ ARRAY_SIZE(core##_perfcounters_##name) }
+
+/**
+ * adreno_regs: List of registers that are used in kgsl driver for all
+ * 3D devices. Each device type has different offset value for the same
+ * register, so an array of register offsets are declared for every device
+ * and are indexed by the enumeration values defined in this enum
+ */
+enum adreno_regs {
+ ADRENO_REG_CP_DEBUG,
+ ADRENO_REG_CP_ME_RAM_WADDR,
+ ADRENO_REG_CP_ME_RAM_DATA,
+ ADRENO_REG_CP_PFP_UCODE_DATA,
+ ADRENO_REG_CP_PFP_UCODE_ADDR,
+ ADRENO_REG_CP_WFI_PEND_CTR,
+ ADRENO_REG_CP_RB_BASE,
+ ADRENO_REG_CP_RB_RPTR_ADDR,
+ ADRENO_REG_CP_RB_RPTR,
+ ADRENO_REG_CP_RB_WPTR,
+ ADRENO_REG_CP_PROTECT_CTRL,
+ ADRENO_REG_CP_ME_CNTL,
+ ADRENO_REG_CP_RB_CNTL,
+ ADRENO_REG_CP_IB1_BASE,
+ ADRENO_REG_CP_IB1_BUFSZ,
+ ADRENO_REG_CP_IB2_BASE,
+ ADRENO_REG_CP_IB2_BUFSZ,
+ ADRENO_REG_CP_TIMESTAMP,
+ ADRENO_REG_CP_ME_RAM_RADDR,
+ ADRENO_REG_SCRATCH_ADDR,
+ ADRENO_REG_SCRATCH_UMSK,
+ ADRENO_REG_SCRATCH_REG2,
+ ADRENO_REG_RBBM_STATUS,
+ ADRENO_REG_RBBM_PERFCTR_CTL,
+ ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
+ ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
+ ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
+ ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
+ ADRENO_REG_RBBM_INT_0_MASK,
+ ADRENO_REG_RBBM_INT_0_STATUS,
+ ADRENO_REG_RBBM_AHB_ERROR_STATUS,
+ ADRENO_REG_RBBM_PM_OVERRIDE2,
+ ADRENO_REG_RBBM_AHB_CMD,
+ ADRENO_REG_RBBM_INT_CLEAR_CMD,
+ ADRENO_REG_VPC_DEBUG_RAM_SEL,
+ ADRENO_REG_VPC_DEBUG_RAM_READ,
+ ADRENO_REG_VSC_PIPE_DATA_ADDRESS_0,
+ ADRENO_REG_VSC_PIPE_DATA_LENGTH_7,
+ ADRENO_REG_VSC_SIZE_ADDRESS,
+ ADRENO_REG_VFD_CONTROL_0,
+ ADRENO_REG_VFD_FETCH_INSTR_0_0,
+ ADRENO_REG_VFD_FETCH_INSTR_1_F,
+ ADRENO_REG_VFD_INDEX_MAX,
+ ADRENO_REG_SP_VS_PVT_MEM_ADDR_REG,
+ ADRENO_REG_SP_FS_PVT_MEM_ADDR_REG,
+ ADRENO_REG_SP_VS_OBJ_START_REG,
+ ADRENO_REG_SP_FS_OBJ_START_REG,
+ ADRENO_REG_PA_SC_AA_CONFIG,
+ ADRENO_REG_SQ_GPR_MANAGEMENT,
+ ADRENO_REG_SQ_INST_STORE_MANAGMENT,
+ ADRENO_REG_TC_CNTL_STATUS,
+ ADRENO_REG_TP0_CHICKEN,
+ ADRENO_REG_RBBM_RBBM_CTL,
+ ADRENO_REG_REGISTER_MAX,
+};
+
+/**
+ * adreno_reg_offsets: Holds array of register offsets
+ * @offsets: Offset array of size defined by enum adreno_regs
+ * @offset_0: This is the index of the register in offset array whose value
+ * is 0. 0 is a valid register offset and during initialization of the
+ * offset array we need to know if an offset value is correctly defined to 0
+ */
+struct adreno_reg_offsets {
+ unsigned int *const offsets;
+ enum adreno_regs offset_0;
+};
+
+#define ADRENO_REG_UNUSED 0xFFFFFFFF
+#define ADRENO_REG_DEFINE(_offset, _reg) [_offset] = _reg
+
+/*
+ * struct adreno_vbif_data - Describes vbif register value pair
+ * @reg: Offset to vbif register
+ * @val: The value that should be programmed in the register at reg
+ */
+struct adreno_vbif_data {
+ unsigned int reg;
+ unsigned int val;
+};
+
+/*
+ * struct adreno_vbif_platform - Holds an array of vbif reg value pairs
+ * for a particular core
+ * @devfunc: Pointer to platform/core identification function
+ * @vbif: Array of reg value pairs for vbif registers
+ */
+struct adreno_vbif_platform {
+ int(*devfunc)(struct adreno_device *);
+ const struct adreno_vbif_data *vbif;
+};
+
+struct adreno_gpudev {
+ /*
+ * These registers are in a different location on different devices,
+ * so define them in the structure and use them as variables.
+ */
+ const struct adreno_reg_offsets *reg_offsets;
+ /* keeps track of when we need to execute the draw workaround code */
+ int ctx_switches_since_last_draw;
+
+ struct adreno_perfcounters *perfcounters;
+
+ /* GPU specific function hooks */
+ int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
+ int (*ctxt_save)(struct adreno_device *, struct adreno_context *);
+ int (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
+ int (*ctxt_draw_workaround)(struct adreno_device *,
+ struct adreno_context *);
+ irqreturn_t (*irq_handler)(struct adreno_device *);
+ void (*irq_control)(struct adreno_device *, int);
+ unsigned int (*irq_pending)(struct adreno_device *);
+ void * (*snapshot)(struct adreno_device *, void *, int *, int);
+ int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
+ int (*perfcounter_init)(struct adreno_device *);
+ void (*perfcounter_close)(struct adreno_device *);
+ void (*start)(struct adreno_device *);
+ unsigned int (*busy_cycles)(struct adreno_device *);
+ int (*perfcounter_enable)(struct adreno_device *, unsigned int group,
+ unsigned int counter, unsigned int countable);
+ uint64_t (*perfcounter_read)(struct adreno_device *adreno_dev,
+ unsigned int group, unsigned int counter);
+ int (*coresight_enable) (struct kgsl_device *device);
+ void (*coresight_disable) (struct kgsl_device *device);
+ void (*coresight_config_debug_reg) (struct kgsl_device *device,
+ int debug_reg, unsigned int val);
+ void (*postmortem_dump)(struct adreno_device *adreno_dev);
+ void (*soft_reset)(struct adreno_device *device);
+};
+
+#define FT_DETECT_REGS_COUNT 12
+
+struct log_field {
+ bool show;
+ const char *display;
+};
+
+/* Fault Tolerance policy flags */
+#define KGSL_FT_OFF 0
+#define KGSL_FT_REPLAY 1
+#define KGSL_FT_SKIPIB 2
+#define KGSL_FT_SKIPFRAME 3
+#define KGSL_FT_DISABLE 4
+#define KGSL_FT_TEMP_DISABLE 5
+#define KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPIB))
+
+/* This internal bit is used to skip the PM dump on replayed command batches */
+#define KGSL_FT_SKIP_PMDUMP 31
+
+/* Pagefault policy flags */
+#define KGSL_FT_PAGEFAULT_INT_ENABLE BIT(0)
+#define KGSL_FT_PAGEFAULT_GPUHALT_ENABLE BIT(1)
+#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE BIT(2)
+#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT BIT(3)
+#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY KGSL_FT_PAGEFAULT_INT_ENABLE
+
+#define ADRENO_FT_TYPES \
+ { BIT(KGSL_FT_OFF), "off" }, \
+ { BIT(KGSL_FT_REPLAY), "replay" }, \
+ { BIT(KGSL_FT_SKIPIB), "skipib" }, \
+ { BIT(KGSL_FT_SKIPFRAME), "skipframe" }, \
+ { BIT(KGSL_FT_DISABLE), "disable" }, \
+ { BIT(KGSL_FT_TEMP_DISABLE), "temp" }
+
+extern struct adreno_gpudev adreno_a2xx_gpudev;
+extern struct adreno_gpudev adreno_a3xx_gpudev;
+extern struct adreno_gpudev adreno_a4xx_gpudev;
+
+/* A2XX register sets defined in adreno_a2xx.c */
+extern const unsigned int a200_registers[];
+extern const unsigned int a220_registers[];
+extern const unsigned int a225_registers[];
+extern const unsigned int a200_registers_count;
+extern const unsigned int a220_registers_count;
+extern const unsigned int a225_registers_count;
+
+/* A3XX register set defined in adreno_a3xx.c */
+extern const unsigned int a3xx_registers[];
+extern const unsigned int a3xx_registers_count;
+
+extern const unsigned int a3xx_hlsq_registers[];
+extern const unsigned int a3xx_hlsq_registers_count;
+
+extern const unsigned int a330_registers[];
+extern const unsigned int a330_registers_count;
+
+/* A4XX register set defined in adreno_a4xx.c */
+extern const unsigned int a4xx_registers[];
+extern const unsigned int a4xx_registers_count;
+
+extern unsigned int ft_detect_regs[];
+
+int adreno_coresight_enable(struct coresight_device *csdev);
+void adreno_coresight_disable(struct coresight_device *csdev);
+void adreno_coresight_remove(struct platform_device *pdev);
+int adreno_coresight_init(struct platform_device *pdev);
+
+int adreno_idle(struct kgsl_device *device);
+bool adreno_isidle(struct kgsl_device *device);
+
+void adreno_shadermem_regread(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value);
+
+int adreno_dump(struct kgsl_device *device, int manual);
+void adreno_dump_fields(struct kgsl_device *device,
+ const char *start, const struct log_field *lines,
+ int num);
+unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device
+ *adreno_dev);
+
+struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
+ phys_addr_t pt_base,
+ unsigned int gpuaddr,
+ unsigned int size,
+ struct kgsl_mem_entry **entry);
+
+uint8_t *adreno_convertaddr(struct kgsl_device *device,
+ phys_addr_t pt_base, unsigned int gpuaddr, unsigned int size,
+ struct kgsl_mem_entry **entry);
+
+struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
+ phys_addr_t pt_base, unsigned int gpuaddr, unsigned int size);
+
+void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
+ int hang);
+
+void adreno_dispatcher_start(struct adreno_device *adreno_dev);
+int adreno_dispatcher_init(struct adreno_device *adreno_dev);
+void adreno_dispatcher_close(struct adreno_device *adreno_dev);
+int adreno_dispatcher_idle(struct adreno_device *adreno_dev,
+ unsigned int timeout);
+void adreno_dispatcher_irq_fault(struct kgsl_device *device);
+void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
+
+int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp);
+
+void adreno_dispatcher_schedule(struct kgsl_device *device);
+void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
+void adreno_dispatcher_resume(struct adreno_device *adreno_dev);
+void adreno_dispatcher_queue_context(struct kgsl_device *device,
+ struct adreno_context *drawctxt);
+int adreno_reset(struct kgsl_device *device);
+
+int adreno_ft_init_sysfs(struct kgsl_device *device);
+void adreno_ft_uninit_sysfs(struct kgsl_device *device);
+
+int adreno_perfcounter_get(struct adreno_device *adreno_dev,
+ unsigned int groupid, unsigned int countable, unsigned int *offset,
+ unsigned int flags);
+
+int adreno_perfcounter_put(struct adreno_device *adreno_dev,
+ unsigned int groupid, unsigned int countable, unsigned int flags);
+
+int adreno_soft_reset(struct kgsl_device *device);
+
+int adreno_a3xx_pwron_fixup_init(struct adreno_device *adreno_dev);
+
+static inline int adreno_is_a200(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A200);
+}
+
+static inline int adreno_is_a203(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A203);
+}
+
+static inline int adreno_is_a205(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A205);
+}
+
+static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev <= 209);
+}
+
+static inline int adreno_is_a220(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A220);
+}
+
+static inline int adreno_is_a225(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A220 ||
+ adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev <= 299);
+}
+
+static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
+{
+ return ((adreno_dev->gpurev >= 300) && (adreno_dev->gpurev < 400));
+}
+
+static inline int adreno_is_a305(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A305);
+}
+
+static inline int adreno_is_a305b(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A305B);
+}
+
+static inline int adreno_is_a305c(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A305C);
+}
+
+static inline int adreno_is_a320(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A320);
+}
+
+static inline int adreno_is_a330(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A330);
+}
+
+static inline int adreno_is_a330v2(struct adreno_device *adreno_dev)
+{
+ return ((adreno_dev->gpurev == ADRENO_REV_A330) &&
+ (ADRENO_CHIPID_PATCH(adreno_dev->chip_id) > 0));
+}
+
+
+static inline int adreno_is_a4xx(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev >= 400);
+}
+
+static inline int adreno_is_a420(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A420);
+}
+
+static inline int adreno_rb_ctxtswitch(unsigned int *cmd)
+{
+ return (cmd[0] == cp_nop_packet(1) &&
+ cmd[1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER);
+}
+
+/**
+ * adreno_context_timestamp() - Return the last queued timestamp for the context
+ * @k_ctxt: Pointer to the KGSL context to query
+ * @rb: Pointer to the ringbuffer structure for the GPU
+ *
+ * Return the last queued context for the given context. This is used to verify
+ * that incoming requests are not using an invalid (unsubmitted) timestamp
+ */
+static inline int adreno_context_timestamp(struct kgsl_context *k_ctxt,
+ struct adreno_ringbuffer *rb)
+{
+ if (k_ctxt) {
+ struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
+ return a_ctxt->timestamp;
+ }
+ return rb->global_ts;
+}
+
+/**
+ * adreno_encode_istore_size - encode istore size in CP format
+ * @adreno_dev - The 3D device.
+ *
+ * Encode the istore size into the format expected that the
+ * CP_SET_SHADER_BASES and CP_ME_INIT commands:
+ * bits 31:29 - istore size as encoded by this function
+ * bits 27:16 - vertex shader start offset in instructions
+ * bits 11:0 - pixel shader start offset in instructions.
+ */
+static inline int adreno_encode_istore_size(struct adreno_device *adreno_dev)
+{
+ unsigned int size;
+ /* in a225 the CP microcode multiplies the encoded
+ * value by 3 while decoding.
+ */
+ if (adreno_is_a225(adreno_dev))
+ size = adreno_dev->istore_size/3;
+ else
+ size = adreno_dev->istore_size;
+
+ return (ilog2(size) - 5) << 29;
+}
+
+static inline int __adreno_add_idle_indirect_cmds(unsigned int *cmds,
+ unsigned int nop_gpuaddr)
+{
+ /* Adding an indirect buffer ensures that the prefetch stalls until
+ * the commands in indirect buffer have completed. We need to stall
+ * prefetch with a nop indirect buffer when updating pagetables
+ * because it provides stabler synchronization */
+ *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+ *cmds++ = nop_gpuaddr;
+ *cmds++ = 2;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ return 5;
+}
+
+static inline int adreno_add_change_mh_phys_limit_cmds(unsigned int *cmds,
+ unsigned int new_phys_limit,
+ unsigned int nop_gpuaddr)
+{
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
+ *cmds++ = new_phys_limit;
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ return cmds - start;
+}
+
+static inline int adreno_add_bank_change_cmds(unsigned int *cmds,
+ int cur_ctx_bank,
+ unsigned int nop_gpuaddr)
+{
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
+ *cmds++ = (cur_ctx_bank ? 0 : 0x20);
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ return cmds - start;
+}
+
+/*
+ * adreno_read_cmds - Add pm4 packets to perform read
+ * @device - Pointer to device structure
+ * @cmds - Pointer to memory where read commands need to be added
+ * @addr - gpu address of the read
+ * @val - The GPU will wait until the data at address addr becomes
+ * equal to value
+ */
+static inline int adreno_add_read_cmds(struct kgsl_device *device,
+ unsigned int *cmds, unsigned int addr,
+ unsigned int val, unsigned int nop_gpuaddr)
+{
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
+ /* MEM SPACE = memory, FUNCTION = equals */
+ *cmds++ = 0x13;
+ *cmds++ = addr;
+ *cmds++ = val;
+ *cmds++ = 0xFFFFFFFF;
+ *cmds++ = 0xFFFFFFFF;
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ return cmds - start;
+}
+
+/*
+ * adreno_idle_cmds - Add pm4 packets for GPU idle
+ * @adreno_dev - Pointer to device structure
+ * @cmds - Pointer to memory where idle commands need to be added
+ */
+static inline int adreno_add_idle_cmds(struct adreno_device *adreno_dev,
+ unsigned int *cmds)
+{
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ if ((adreno_dev->gpurev == ADRENO_REV_A305) ||
+ (adreno_dev->gpurev == ADRENO_REV_A305C) ||
+ (adreno_dev->gpurev == ADRENO_REV_A320)) {
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
+ *cmds++ = 0x00000000;
+ }
+
+ return cmds - start;
+}
+
+/*
+ * adreno_wait_reg_eq() - Add a CP_WAIT_REG_EQ command
+ * @cmds: Pointer to memory where commands are to be added
+ * @addr: Regiater address to poll for
+ * @val: Value to poll for
+ * @mask: The value against which register value is masked
+ * @interval: wait interval
+ */
+static inline int adreno_wait_reg_eq(unsigned int *cmds, unsigned int addr,
+ unsigned int val, unsigned int mask,
+ unsigned int interval)
+{
+ unsigned int *start = cmds;
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_EQ, 4);
+ *cmds++ = addr;
+ *cmds++ = val;
+ *cmds++ = mask;
+ *cmds++ = interval;
+ return cmds - start;
+}
+
+/*
+ * adreno_checkreg_off() - Checks the validity of a register enum
+ * @adreno_dev: Pointer to adreno device
+ * @offset_name: The register enum that is checked
+ */
+static inline bool adreno_checkreg_off(struct adreno_device *adreno_dev,
+ enum adreno_regs offset_name)
+{
+ if (offset_name >= ADRENO_REG_REGISTER_MAX ||
+ ADRENO_REG_UNUSED ==
+ adreno_dev->gpudev->reg_offsets->offsets[offset_name]) {
+ BUG_ON(1);
+ }
+ return true;
+}
+
+/*
+ * adreno_readreg() - Read a register by getting its offset from the
+ * offset array defined in gpudev node
+ * @adreno_dev: Pointer to the the adreno device
+ * @offset_name: The register enum that is to be read
+ * @val: Register value read is placed here
+ */
+static inline void adreno_readreg(struct adreno_device *adreno_dev,
+ enum adreno_regs offset_name, unsigned int *val)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ if (adreno_checkreg_off(adreno_dev, offset_name))
+ kgsl_regread(device,
+ adreno_dev->gpudev->reg_offsets->offsets[offset_name],
+ val);
+}
+
+/*
+ * adreno_writereg() - Write a register by getting its offset from the
+ * offset array defined in gpudev node
+ * @adreno_dev: Pointer to the the adreno device
+ * @offset_name: The register enum that is to be written
+ * @val: Value to write
+ */
+static inline void adreno_writereg(struct adreno_device *adreno_dev,
+ enum adreno_regs offset_name, unsigned int val)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ if (adreno_checkreg_off(adreno_dev, offset_name))
+ kgsl_regwrite(device,
+ adreno_dev->gpudev->reg_offsets->offsets[offset_name], val);
+}
+
+/*
+ * adreno_getreg() - Returns the offset value of a register from the
+ * register offset array in the gpudev node
+ * @adreno_dev: Pointer to the the adreno device
+ * @offset_name: The register enum whore offset is returned
+ */
+static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev,
+ enum adreno_regs offset_name)
+{
+ if (!adreno_checkreg_off(adreno_dev, offset_name))
+ return ADRENO_REG_REGISTER_MAX;
+ return adreno_dev->gpudev->reg_offsets->offsets[offset_name];
+}
+
+/**
+ * adreno_gpu_fault() - Return the current state of the GPU
+ * @adreno_dev: A ponter to the adreno_device to query
+ *
+ * Return 0 if there is no fault or positive with the last type of fault that
+ * occurred
+ */
+static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
+{
+ smp_rmb();
+ return atomic_read(&adreno_dev->dispatcher.fault);
+}
+
+/**
+ * adreno_set_gpu_fault() - Set the current fault status of the GPU
+ * @adreno_dev: A pointer to the adreno_device to set
+ * @state: fault state to set
+ *
+ */
+static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
+ int state)
+{
+ /* only set the fault bit w/o overwriting other bits */
+ atomic_add(state, &adreno_dev->dispatcher.fault);
+ smp_wmb();
+}
+
+/*
+ * adreno_vbif_start() - Program VBIF registers, called in device start
+ * @device: Pointer to device whose vbif data is to be programmed
+ * @vbif_platforms: list register value pair of vbif for a family
+ * of adreno cores
+ * @num_platforms: Number of platforms contained in vbif_platforms
+ */
+static inline void adreno_vbif_start(struct kgsl_device *device,
+ const struct adreno_vbif_platform *vbif_platforms,
+ int num_platforms)
+{
+ int i;
+ const struct adreno_vbif_data *vbif = NULL;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ for (i = 0; i < num_platforms; i++) {
+ if (vbif_platforms[i].devfunc(adreno_dev)) {
+ vbif = vbif_platforms[i].vbif;
+ break;
+ }
+ }
+ BUG_ON(vbif == NULL);
+ while (vbif->reg != 0) {
+ kgsl_regwrite(device, vbif->reg, vbif->val);
+ vbif++;
+ }
+}
+
+#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm2/adreno_a2xx.c b/drivers/gpu/msm2/adreno_a2xx.c
new file mode 100644
index 0000000..1f4544f
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a2xx.c
@@ -0,0 +1,2307 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "adreno.h"
+#include "adreno_a2xx_trace.h"
+
+/*
+ * These are the registers that are dumped with GPU snapshot
+ * and postmortem. The lists are dword offset pairs in the
+ * form of {start offset, end offset} inclusive.
+ */
+
+/* A200, A205 */
+const unsigned int a200_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+ 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+ 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+ 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+ 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+ 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+ 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+ 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+ 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+ 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+ 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+ 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+};
+
+const unsigned int a220_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+ 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+ 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+ 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+ 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+ 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+ 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+ 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+ 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+ 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+ 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+ 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+ 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+ 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+ 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+ 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+ 0x4900, 0x4900, 0x4908, 0x4908,
+};
+
+const unsigned int a225_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+ 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+ 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+ 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+ 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+ 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+ 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+ 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+ 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+ 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+ 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+ 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+ 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+ 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+ 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+ 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+ 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+ 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+ 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+ 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+ 0x4908, 0x4908,
+};
+
+const unsigned int a200_registers_count = ARRAY_SIZE(a200_registers) / 2;
+const unsigned int a220_registers_count = ARRAY_SIZE(a220_registers) / 2;
+const unsigned int a225_registers_count = ARRAY_SIZE(a225_registers) / 2;
+
+/*
+ *
+ * Memory Map for Register, Constant & Instruction Shadow, and Command Buffers
+ * (34.5KB)
+ *
+ * +---------------------+------------+-------------+---+---------------------+
+ * | ALU Constant Shadow | Reg Shadow | C&V Buffers |Tex| Shader Instr Shadow |
+ * +---------------------+------------+-------------+---+---------------------+
+ * ________________________________/ \____________________
+ * / |
+ * +--------------+-----------+------+-----------+------------------------+
+ * | Restore Regs | Save Regs | Quad | Gmem Save | Gmem Restore | unused |
+ * +--------------+-----------+------+-----------+------------------------+
+ *
+ * 8K - ALU Constant Shadow (8K aligned)
+ * 4K - H/W Register Shadow (8K aligned)
+ * 4K - Command and Vertex Buffers
+ * - Indirect command buffer : Const/Reg restore
+ * - includes Loop & Bool const shadows
+ * - Indirect command buffer : Const/Reg save
+ * - Quad vertices & texture coordinates
+ * - Indirect command buffer : Gmem save
+ * - Indirect command buffer : Gmem restore
+ * - Unused (padding to 8KB boundary)
+ * <1K - Texture Constant Shadow (768 bytes) (8K aligned)
+ * 18K - Shader Instruction Shadow
+ * - 6K vertex (32 byte aligned)
+ * - 6K pixel (32 byte aligned)
+ * - 6K shared (32 byte aligned)
+ *
+ * Note: Reading constants into a shadow, one at a time using REG_TO_MEM, takes
+ * 3 DWORDS per DWORD transfered, plus 1 DWORD for the shadow, for a total of
+ * 16 bytes per constant. If the texture constants were transfered this way,
+ * the Command & Vertex Buffers section would extend past the 16K boundary.
+ * By moving the texture constant shadow area to start at 16KB boundary, we
+ * only require approximately 40 bytes more memory, but are able to use the
+ * LOAD_CONSTANT_CONTEXT shadowing feature for the textures, speeding up
+ * context switching.
+ *
+ * [Using LOAD_CONSTANT_CONTEXT shadowing feature for the Loop and/or Bool
+ * constants would require an additional 8KB each, for alignment.]
+ *
+ */
+
+/* Constants */
+
+#define ALU_CONSTANTS 2048 /* DWORDS */
+#define NUM_REGISTERS 1024 /* DWORDS */
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+#define CMD_BUFFER_LEN 9216 /* DWORDS */
+#else
+#define CMD_BUFFER_LEN 3072 /* DWORDS */
+#endif
+#define TEX_CONSTANTS (32*6) /* DWORDS */
+#define BOOL_CONSTANTS 8 /* DWORDS */
+#define LOOP_CONSTANTS 56 /* DWORDS */
+
+/* LOAD_CONSTANT_CONTEXT shadow size */
+#define LCC_SHADOW_SIZE 0x2000 /* 8KB */
+
+#define ALU_SHADOW_SIZE LCC_SHADOW_SIZE /* 8KB */
+#define REG_SHADOW_SIZE 0x1000 /* 4KB */
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+#define CMD_BUFFER_SIZE 0x9000 /* 36KB */
+#else
+#define CMD_BUFFER_SIZE 0x3000 /* 12KB */
+#endif
+#define TEX_SHADOW_SIZE (TEX_CONSTANTS*4) /* 768 bytes */
+
+#define REG_OFFSET LCC_SHADOW_SIZE
+#define CMD_OFFSET (REG_OFFSET + REG_SHADOW_SIZE)
+#define TEX_OFFSET (CMD_OFFSET + CMD_BUFFER_SIZE)
+#define SHADER_OFFSET ((TEX_OFFSET + TEX_SHADOW_SIZE + 32) & ~31)
+
+static inline int _shader_shadow_size(struct adreno_device *adreno_dev)
+{
+ return adreno_dev->istore_size *
+ (adreno_dev->instruction_size * sizeof(unsigned int));
+}
+
+static inline int _context_size(struct adreno_device *adreno_dev)
+{
+ return SHADER_OFFSET + 3*_shader_shadow_size(adreno_dev);
+}
+
+/* A scratchpad used to build commands during context create */
+
+static struct tmp_ctx {
+ unsigned int *start; /* Command & Vertex buffer start */
+ unsigned int *cmd; /* Next available dword in C&V buffer */
+
+ /* address of buffers, needed when creating IB1 command buffers. */
+ uint32_t bool_shadow; /* bool constants */
+ uint32_t loop_shadow; /* loop constants */
+
+ uint32_t shader_shared; /* shared shader instruction shadow */
+ uint32_t shader_vertex; /* vertex shader instruction shadow */
+ uint32_t shader_pixel; /* pixel shader instruction shadow */
+
+ /* Addresses in command buffer where separately handled registers
+ * are saved
+ */
+ uint32_t reg_values[33];
+ uint32_t chicken_restore;
+
+ uint32_t gmem_base; /* Base gpu address of GMEM */
+
+} tmp_ctx;
+
+/* context save (gmem -> sys) */
+
+/* pre-compiled vertex shader program
+*
+* attribute vec4 P;
+* void main(void)
+* {
+* gl_Position = P;
+* }
+*/
+#define GMEM2SYS_VTX_PGM_LEN 0x12
+
+static unsigned int gmem2sys_vtx_pgm[GMEM2SYS_VTX_PGM_LEN] = {
+ 0x00011003, 0x00001000, 0xc2000000,
+ 0x00001004, 0x00001000, 0xc4000000,
+ 0x00001005, 0x00002000, 0x00000000,
+ 0x1cb81000, 0x00398a88, 0x00000003,
+ 0x140f803e, 0x00000000, 0xe2010100,
+ 0x14000000, 0x00000000, 0xe2000000
+};
+
+/* pre-compiled fragment shader program
+*
+* precision highp float;
+* uniform vec4 clear_color;
+* void main(void)
+* {
+* gl_FragColor = clear_color;
+* }
+*/
+
+#define GMEM2SYS_FRAG_PGM_LEN 0x0c
+
+static unsigned int gmem2sys_frag_pgm[GMEM2SYS_FRAG_PGM_LEN] = {
+ 0x00000000, 0x1002c400, 0x10000000,
+ 0x00001003, 0x00002000, 0x00000000,
+ 0x140f8000, 0x00000000, 0x22000000,
+ 0x14000000, 0x00000000, 0xe2000000
+};
+
+/* context restore (sys -> gmem) */
+/* pre-compiled vertex shader program
+*
+* attribute vec4 position;
+* attribute vec4 texcoord;
+* varying vec4 texcoord0;
+* void main()
+* {
+* gl_Position = position;
+* texcoord0 = texcoord;
+* }
+*/
+
+#define SYS2GMEM_VTX_PGM_LEN 0x18
+
+static unsigned int sys2gmem_vtx_pgm[SYS2GMEM_VTX_PGM_LEN] = {
+ 0x00052003, 0x00001000, 0xc2000000, 0x00001005,
+ 0x00001000, 0xc4000000, 0x00001006, 0x10071000,
+ 0x20000000, 0x18981000, 0x0039ba88, 0x00000003,
+ 0x12982000, 0x40257b08, 0x00000002, 0x140f803e,
+ 0x00000000, 0xe2010100, 0x140f8000, 0x00000000,
+ 0xe2020200, 0x14000000, 0x00000000, 0xe2000000
+};
+
+/* pre-compiled fragment shader program
+*
+* precision mediump float;
+* uniform sampler2D tex0;
+* varying vec4 texcoord0;
+* void main()
+* {
+* gl_FragColor = texture2D(tex0, texcoord0.xy);
+* }
+*/
+
+#define SYS2GMEM_FRAG_PGM_LEN 0x0f
+
+static unsigned int sys2gmem_frag_pgm[SYS2GMEM_FRAG_PGM_LEN] = {
+ 0x00011002, 0x00001000, 0xc4000000, 0x00001003,
+ 0x10041000, 0x20000000, 0x10000001, 0x1ffff688,
+ 0x00000002, 0x140f8000, 0x00000000, 0xe2000000,
+ 0x14000000, 0x00000000, 0xe2000000
+};
+
+/* shader texture constants (sysmem -> gmem) */
+#define SYS2GMEM_TEX_CONST_LEN 6
+
+static unsigned int sys2gmem_tex_const[SYS2GMEM_TEX_CONST_LEN] = {
+ /* Texture, FormatXYZW=Unsigned, ClampXYZ=Wrap/Repeat,
+ * RFMode=ZeroClamp-1, Dim=1:2d
+ */
+ 0x00000002, /* Pitch = TBD */
+
+ /* Format=6:8888_WZYX, EndianSwap=0:None, ReqSize=0:256bit, DimHi=0,
+ * NearestClamp=1:OGL Mode
+ */
+ 0x00000800, /* Address[31:12] = TBD */
+
+ /* Width, Height, EndianSwap=0:None */
+ 0, /* Width & Height = TBD */
+
+ /* NumFormat=0:RF, DstSelXYZW=XYZW, ExpAdj=0, MagFilt=MinFilt=0:Point,
+ * Mip=2:BaseMap
+ */
+ 0 << 1 | 1 << 4 | 2 << 7 | 3 << 10 | 2 << 23,
+
+ /* VolMag=VolMin=0:Point, MinMipLvl=0, MaxMipLvl=1, LodBiasH=V=0,
+ * Dim3d=0
+ */
+ 0,
+
+ /* BorderColor=0:ABGRBlack, ForceBC=0:diable, TriJuice=0, Aniso=0,
+ * Dim=1:2d, MipPacking=0
+ */
+ 1 << 9 /* Mip Address[31:12] = TBD */
+};
+
+#define NUM_COLOR_FORMATS 13
+
+static enum SURFACEFORMAT surface_format_table[NUM_COLOR_FORMATS] = {
+ FMT_4_4_4_4, /* COLORX_4_4_4_4 */
+ FMT_1_5_5_5, /* COLORX_1_5_5_5 */
+ FMT_5_6_5, /* COLORX_5_6_5 */
+ FMT_8, /* COLORX_8 */
+ FMT_8_8, /* COLORX_8_8 */
+ FMT_8_8_8_8, /* COLORX_8_8_8_8 */
+ FMT_8_8_8_8, /* COLORX_S8_8_8_8 */
+ FMT_16_FLOAT, /* COLORX_16_FLOAT */
+ FMT_16_16_FLOAT, /* COLORX_16_16_FLOAT */
+ FMT_16_16_16_16_FLOAT, /* COLORX_16_16_16_16_FLOAT */
+ FMT_32_FLOAT, /* COLORX_32_FLOAT */
+ FMT_32_32_FLOAT, /* COLORX_32_32_FLOAT */
+ FMT_32_32_32_32_FLOAT, /* COLORX_32_32_32_32_FLOAT */
+};
+
+static unsigned int format2bytesperpixel[NUM_COLOR_FORMATS] = {
+ 2, /* COLORX_4_4_4_4 */
+ 2, /* COLORX_1_5_5_5 */
+ 2, /* COLORX_5_6_5 */
+ 1, /* COLORX_8 */
+ 2, /* COLORX_8_8 8*/
+ 4, /* COLORX_8_8_8_8 */
+ 4, /* COLORX_S8_8_8_8 */
+ 2, /* COLORX_16_FLOAT */
+ 4, /* COLORX_16_16_FLOAT */
+ 8, /* COLORX_16_16_16_16_FLOAT */
+ 4, /* COLORX_32_FLOAT */
+ 8, /* COLORX_32_32_FLOAT */
+ 16, /* COLORX_32_32_32_32_FLOAT */
+};
+
+/* shader linkage info */
+#define SHADER_CONST_ADDR (11 * 6 + 3)
+
+
+static unsigned int *program_shader(unsigned int *cmds, int vtxfrag,
+ unsigned int *shader_pgm, int dwords)
+{
+ /* load the patched vertex shader stream */
+ *cmds++ = cp_type3_packet(CP_IM_LOAD_IMMEDIATE, 2 + dwords);
+ /* 0=vertex shader, 1=fragment shader */
+ *cmds++ = vtxfrag;
+ /* instruction start & size (in 32-bit words) */
+ *cmds++ = ((0 << 16) | dwords);
+
+ memcpy(cmds, shader_pgm, dwords << 2);
+ cmds += dwords;
+
+ return cmds;
+}
+
+static unsigned int *reg_to_mem(unsigned int *cmds, uint32_t dst,
+ uint32_t src, int dwords)
+{
+ while (dwords-- > 0) {
+ *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmds++ = src++;
+ *cmds++ = dst;
+ dst += 4;
+ }
+
+ return cmds;
+}
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+
+static void build_reg_to_mem_range(unsigned int start, unsigned int end,
+ unsigned int **cmd,
+ struct adreno_context *drawctxt)
+{
+ unsigned int i = start;
+
+ for (i = start; i <= end; i++) {
+ *(*cmd)++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *(*cmd)++ = i;
+ *(*cmd)++ =
+ ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) +
+ (i - 0x2000) * 4;
+ }
+}
+
+#endif
+
+/* chicken restore */
+static unsigned int *build_chicken_restore_cmds(
+ struct adreno_context *drawctxt)
+{
+ unsigned int *start = tmp_ctx.cmd;
+ unsigned int *cmds = start;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+
+ *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
+ tmp_ctx.chicken_restore = virt2gpu(cmds, &drawctxt->gpustate);
+ *cmds++ = 0x00000000;
+
+ /* create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, drawctxt->chicken_restore, start, cmds);
+
+ return cmds;
+}
+
+/****************************************************************************/
+/* context save */
+/****************************************************************************/
+
+static const unsigned int register_ranges_a20x[] = {
+ REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
+ REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+ REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+ REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+ REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+ REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+ REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+ REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+ REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+ REG_VGT_MAX_VTX_INDX, REG_RB_FOG_COLOR,
+ REG_RB_DEPTHCONTROL, REG_RB_MODECONTROL,
+ REG_PA_SU_POINT_SIZE, REG_PA_SC_LINE_STIPPLE,
+ REG_PA_SC_VIZ_QUERY, REG_PA_SC_VIZ_QUERY,
+ REG_VGT_VERTEX_REUSE_BLOCK_CNTL, REG_RB_DEPTH_CLEAR
+};
+
+static const unsigned int register_ranges_a220[] = {
+ REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
+ REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+ REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+ REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+ REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+ REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+ REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+ REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+ REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+ REG_A220_PC_MAX_VTX_INDX, REG_A220_PC_INDX_OFFSET,
+ REG_RB_COLOR_MASK, REG_RB_FOG_COLOR,
+ REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL,
+ REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL,
+ REG_RB_MODECONTROL, REG_RB_SAMPLE_POS,
+ REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL,
+ REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
+ REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
+ REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR
+};
+
+static const unsigned int register_ranges_a225[] = {
+ REG_RB_SURFACE_INFO, REG_A225_RB_COLOR_INFO3,
+ REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+ REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+ REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+ REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+ REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+ REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+ REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+ REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+ REG_A220_PC_MAX_VTX_INDX, REG_A225_PC_MULTI_PRIM_IB_RESET_INDX,
+ REG_RB_COLOR_MASK, REG_RB_FOG_COLOR,
+ REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL,
+ REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL,
+ REG_RB_MODECONTROL, REG_RB_SAMPLE_POS,
+ REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL,
+ REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
+ REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
+ REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR,
+ REG_A225_GRAS_UCP0X, REG_A225_GRAS_UCP5W,
+ REG_A225_GRAS_UCP_ENABLED, REG_A225_GRAS_UCP_ENABLED
+};
+
+
+/* save h/w regs, alu constants, texture contants, etc. ...
+* requires: bool_shadow_gpuaddr, loop_shadow_gpuaddr
+*/
+static void build_regsave_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *start = tmp_ctx.cmd;
+ unsigned int *cmd = start;
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ /* Make sure the HW context has the correct register values
+ * before reading them. */
+ *cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
+ *cmd++ = 0;
+
+ {
+ unsigned int i = 0;
+ unsigned int reg_array_size = 0;
+ const unsigned int *ptr_register_ranges;
+
+ /* Based on chip id choose the register ranges */
+ if (adreno_is_a220(adreno_dev)) {
+ ptr_register_ranges = register_ranges_a220;
+ reg_array_size = ARRAY_SIZE(register_ranges_a220);
+ } else if (adreno_is_a225(adreno_dev)) {
+ ptr_register_ranges = register_ranges_a225;
+ reg_array_size = ARRAY_SIZE(register_ranges_a225);
+ } else {
+ ptr_register_ranges = register_ranges_a20x;
+ reg_array_size = ARRAY_SIZE(register_ranges_a20x);
+ }
+
+
+ /* Write HW registers into shadow */
+ for (i = 0; i < (reg_array_size/2) ; i++) {
+ build_reg_to_mem_range(ptr_register_ranges[i*2],
+ ptr_register_ranges[i*2+1],
+ &cmd, drawctxt);
+ }
+ }
+
+ /* Copy ALU constants */
+ cmd =
+ reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000,
+ REG_SQ_CONSTANT_0, ALU_CONSTANTS);
+
+ /* Copy Tex constants */
+ cmd =
+ reg_to_mem(cmd,
+ (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000,
+ REG_SQ_FETCH_0, TEX_CONSTANTS);
+#else
+
+ /* Insert a wait for idle packet before reading the registers.
+ * This is to fix a hang/reset seen during stress testing. In this
+ * hang, CP encountered a timeout reading SQ's boolean constant
+ * register. There is logic in the HW that blocks reading of this
+ * register when the SQ block is not idle, which we believe is
+ * contributing to the hang.*/
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ /* H/w registers are already shadowed; just need to disable shadowing
+ * to prevent corruption.
+ */
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+ *cmd++ = 4 << 16; /* regs, start=0 */
+ *cmd++ = 0x0; /* count = 0 */
+
+ /* ALU constants are already shadowed; just need to disable shadowing
+ * to prevent corruption.
+ */
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+ *cmd++ = 0 << 16; /* ALU, start=0 */
+ *cmd++ = 0x0; /* count = 0 */
+
+ /* Tex constants are already shadowed; just need to disable shadowing
+ * to prevent corruption.
+ */
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+ *cmd++ = 1 << 16; /* Tex, start=0 */
+ *cmd++ = 0x0; /* count = 0 */
+#endif
+
+ /* Need to handle some of the registers separately */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_SQ_GPR_MANAGEMENT;
+ *cmd++ = tmp_ctx.reg_values[0];
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_TP0_CHICKEN;
+ *cmd++ = tmp_ctx.reg_values[1];
+
+ if (adreno_is_a22x(adreno_dev)) {
+ unsigned int i;
+ unsigned int j = 2;
+ for (i = REG_A220_VSC_BIN_SIZE; i <=
+ REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = i;
+ *cmd++ = tmp_ctx.reg_values[j];
+ j++;
+ }
+ }
+
+ /* Copy Boolean constants */
+ cmd = reg_to_mem(cmd, tmp_ctx.bool_shadow, REG_SQ_CF_BOOLEANS,
+ BOOL_CONSTANTS);
+
+ /* Copy Loop constants */
+ cmd = reg_to_mem(cmd, tmp_ctx.loop_shadow,
+ REG_SQ_CF_LOOP, LOOP_CONSTANTS);
+
+ /* create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, drawctxt->reg_save, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+/*copy colour, depth, & stencil buffers from graphics memory to system memory*/
+static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = shadow->gmem_save_commands;
+ unsigned int *start = cmds;
+ /* Calculate the new offset based on the adjusted base */
+ unsigned int bytesperpixel = format2bytesperpixel[shadow->format];
+ unsigned int addr = shadow->gmemshadow.gpuaddr;
+ unsigned int offset = (addr - (addr & 0xfffff000)) / bytesperpixel;
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+ /* Store TP0_CHICKEN register */
+ *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmds++ = REG_TP0_CHICKEN;
+
+ *cmds++ = tmp_ctx.chicken_restore;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+ }
+
+ /* Set TP0_CHICKEN to zero */
+ *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
+ *cmds++ = 0x00000000;
+
+ /* Set PA_SC_AA_CONFIG to 0 */
+ *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+ *cmds++ = 0x00000000;
+
+ /* program shader */
+
+ /* load shader vtx constants ... 5 dwords */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
+ *cmds++ = (0x1 << 16) | SHADER_CONST_ADDR;
+ *cmds++ = 0;
+ /* valid(?) vtx constant flag & addr */
+ *cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
+ /* limit = 12 dwords */
+ *cmds++ = 0x00000030;
+
+ /* Invalidate L2 cache to make sure vertices are updated */
+ *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1);
+ *cmds++ = 0x1;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
+ *cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX);
+ *cmds++ = 0x00ffffff; /* REG_VGT_MAX_VTX_INDX */
+ *cmds++ = 0x0; /* REG_VGT_MIN_VTX_INDX */
+ *cmds++ = 0x00000000; /* REG_VGT_INDX_OFFSET */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SC_AA_MASK);
+ *cmds++ = 0x0000ffff; /* REG_PA_SC_AA_MASK */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLORCONTROL);
+ *cmds++ = 0x00000c20;
+
+ /* Repartition shaders */
+ *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+ *cmds++ = adreno_dev->pix_shader_start;
+
+ /* Invalidate Vertex & Pixel instruction code address and sizes */
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x00003F00;
+
+ *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
+ *cmds++ = adreno_encode_istore_size(adreno_dev)
+ | adreno_dev->pix_shader_start;
+
+ /* load the patched vertex shader stream */
+ cmds = program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN);
+
+ /* Load the patched fragment shader stream */
+ cmds =
+ program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN);
+
+ /* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL);
+ if (adreno_is_a22x(adreno_dev))
+ *cmds++ = 0x10018001;
+ else
+ *cmds++ = 0x10010001;
+ *cmds++ = 0x00000008;
+
+ /* resolve */
+
+ /* PA_CL_VTE_CNTL */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_CL_VTE_CNTL);
+ /* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
+ *cmds++ = 0x00000b00;
+
+ /* program surface info */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_RB_SURFACE_INFO);
+ *cmds++ = shadow->gmem_pitch; /* pitch, MSAA = 1 */
+
+ /* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
+ * Base=gmem_base
+ */
+ /* gmem base assumed 4K aligned. */
+ BUG_ON(tmp_ctx.gmem_base & 0xFFF);
+ *cmds++ =
+ (shadow->
+ format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base;
+
+ /* disable Z */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_DEPTHCONTROL);
+ if (adreno_is_a22x(adreno_dev))
+ *cmds++ = 0x08;
+ else
+ *cmds++ = 0;
+
+ /* set REG_PA_SU_SC_MODE_CNTL
+ * Front_ptype = draw triangles
+ * Back_ptype = draw triangles
+ * Provoking vertex = last
+ */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL);
+ *cmds++ = 0x00080240;
+
+ /* Use maximum scissor values -- quad vertices already have the
+ * correct bounds */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
+ *cmds++ = (0 << 16) | 0;
+ *cmds++ = (0x1fff << 16) | (0x1fff);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
+ *cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
+ *cmds++ = (0x1fff << 16) | (0x1fff);
+
+ /* load the viewport so that z scale = clear depth and
+ * z offset = 0.0f
+ */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE);
+ *cmds++ = 0xbf800000; /* -1.0f */
+ *cmds++ = 0x0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLOR_MASK);
+ *cmds++ = 0x0000000f; /* R = G = B = 1:enabled */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK);
+ *cmds++ = 0xffffffff;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_SQ_WRAPPING_0);
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+
+ /* load the stencil ref value
+ * $AAM - do this later
+ */
+
+ /* load the COPY state */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
+ *cmds++ = CP_REG(REG_RB_COPY_CONTROL);
+ *cmds++ = 0; /* RB_COPY_CONTROL */
+ *cmds++ = addr & 0xfffff000; /* RB_COPY_DEST_BASE */
+ *cmds++ = shadow->pitch >> 5; /* RB_COPY_DEST_PITCH */
+
+ /* Endian=none, Linear, Format=RGBA8888,Swap=0,!Dither,
+ * MaskWrite:R=G=B=A=1
+ */
+ *cmds++ = 0x0003c008 |
+ (shadow->format << RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT);
+ /* Make sure we stay in offsetx field. */
+ BUG_ON(offset & 0xfffff000);
+ *cmds++ = offset;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_MODECONTROL);
+ *cmds++ = 0x6; /* EDRAM copy */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL);
+ *cmds++ = 0x00010000;
+
+ if (adreno_is_a22x(adreno_dev)) {
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
+ *cmds++ = 0x0000000;
+
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
+ *cmds++ = 0; /* viz query info. */
+ /* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore*/
+ *cmds++ = 0x00004088;
+ *cmds++ = 3; /* NumIndices=3 */
+ } else {
+ /* queue the draw packet */
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2);
+ *cmds++ = 0; /* viz query info. */
+ /* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
+ *cmds++ = 0x00030088;
+ }
+
+ /* create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, shadow->gmem_save, start, cmds);
+
+ return cmds;
+}
+
+/* context restore */
+
+/*copy colour, depth, & stencil buffers from system memory to graphics memory*/
+static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = shadow->gmem_restore_commands;
+ unsigned int *start = cmds;
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+ /* Store TP0_CHICKEN register */
+ *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmds++ = REG_TP0_CHICKEN;
+ *cmds++ = tmp_ctx.chicken_restore;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+ }
+
+ /* Set TP0_CHICKEN to zero */
+ *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
+ *cmds++ = 0x00000000;
+
+ /* Set PA_SC_AA_CONFIG to 0 */
+ *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+ *cmds++ = 0x00000000;
+ /* shader constants */
+
+ /* vertex buffer constants */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
+
+ *cmds++ = (0x1 << 16) | (9 * 6);
+ /* valid(?) vtx constant flag & addr */
+ *cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
+ /* limit = 12 dwords */
+ *cmds++ = 0x00000030;
+ /* valid(?) vtx constant flag & addr */
+ *cmds++ = shadow->quad_texcoords.gpuaddr | 0x3;
+ /* limit = 8 dwords */
+ *cmds++ = 0x00000020;
+ *cmds++ = 0;
+ *cmds++ = 0;
+
+ /* Invalidate L2 cache to make sure vertices are updated */
+ *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1);
+ *cmds++ = 0x1;
+
+ cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN);
+
+ /* Repartition shaders */
+ *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+ *cmds++ = adreno_dev->pix_shader_start;
+
+ /* Invalidate Vertex & Pixel instruction code address and sizes */
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x00000300; /* 0x100 = Vertex, 0x200 = Pixel */
+
+ *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
+ *cmds++ = adreno_encode_istore_size(adreno_dev)
+ | adreno_dev->pix_shader_start;
+
+ /* Load the patched fragment shader stream */
+ cmds =
+ program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN);
+
+ /* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL);
+ *cmds++ = 0x10030002;
+ *cmds++ = 0x00000008;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SC_AA_MASK);
+ *cmds++ = 0x0000ffff; /* REG_PA_SC_AA_MASK */
+
+ if (!adreno_is_a22x(adreno_dev)) {
+ /* PA_SC_VIZ_QUERY */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SC_VIZ_QUERY);
+ *cmds++ = 0x0; /*REG_PA_SC_VIZ_QUERY */
+ }
+
+ /* RB_COLORCONTROL */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLORCONTROL);
+ *cmds++ = 0x00000c20;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
+ *cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX);
+ *cmds++ = 0x00ffffff; /* mmVGT_MAX_VTX_INDX */
+ *cmds++ = 0x0; /* mmVGT_MIN_VTX_INDX */
+ *cmds++ = 0x00000000; /* mmVGT_INDX_OFFSET */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_VGT_VERTEX_REUSE_BLOCK_CNTL);
+ *cmds++ = 0x00000002; /* mmVGT_VERTEX_REUSE_BLOCK_CNTL */
+ *cmds++ = 0x00000002; /* mmVGT_OUT_DEALLOC_CNTL */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_SQ_INTERPOLATOR_CNTL);
+ *cmds++ = 0xffffffff; /* mmSQ_INTERPOLATOR_CNTL */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SC_AA_CONFIG);
+ *cmds++ = 0x00000000; /* REG_PA_SC_AA_CONFIG */
+
+ /* set REG_PA_SU_SC_MODE_CNTL
+ * Front_ptype = draw triangles
+ * Back_ptype = draw triangles
+ * Provoking vertex = last
+ */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL);
+ *cmds++ = 0x00080240;
+
+ /* texture constants */
+ *cmds++ =
+ cp_type3_packet(CP_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
+ *cmds++ = (0x1 << 16) | (0 * 6);
+ memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN << 2);
+ cmds[0] |= (shadow->pitch >> 5) << 22;
+ cmds[1] |=
+ shadow->gmemshadow.gpuaddr | surface_format_table[shadow->format];
+ cmds[2] |= (shadow->width - 1) | (shadow->height - 1) << 13;
+ cmds += SYS2GMEM_TEX_CONST_LEN;
+
+ /* program surface info */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_RB_SURFACE_INFO);
+ *cmds++ = shadow->gmem_pitch; /* pitch, MSAA = 1 */
+
+ /* RB_COLOR_INFO Endian=none, Linear, Format=RGBA8888, Swap=0,
+ * Base=gmem_base
+ */
+ *cmds++ =
+ (shadow->
+ format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base;
+
+ /* RB_DEPTHCONTROL */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_DEPTHCONTROL);
+
+ if (adreno_is_a22x(adreno_dev))
+ *cmds++ = 8; /* disable Z */
+ else
+ *cmds++ = 0; /* disable Z */
+
+ /* Use maximum scissor values -- quad vertices already
+ * have the correct bounds */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
+ *cmds++ = (0 << 16) | 0;
+ *cmds++ = ((0x1fff) << 16) | 0x1fff;
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
+ *cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
+ *cmds++ = ((0x1fff) << 16) | 0x1fff;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_CL_VTE_CNTL);
+ /* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
+ *cmds++ = 0x00000b00;
+
+ /*load the viewport so that z scale = clear depth and z offset = 0.0f */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE);
+ *cmds++ = 0xbf800000;
+ *cmds++ = 0x0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLOR_MASK);
+ *cmds++ = 0x0000000f; /* R = G = B = 1:enabled */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK);
+ *cmds++ = 0xffffffff;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_SQ_WRAPPING_0);
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+
+ /* load the stencil ref value
+ * $AAM - do this later
+ */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_MODECONTROL);
+ /* draw pixels with color and depth/stencil component */
+ *cmds++ = 0x4;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL);
+ *cmds++ = 0x00010000;
+
+ if (adreno_is_a22x(adreno_dev)) {
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
+ *cmds++ = 0x0000000;
+
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
+ *cmds++ = 0; /* viz query info. */
+ /* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore*/
+ *cmds++ = 0x00004088;
+ *cmds++ = 3; /* NumIndices=3 */
+ } else {
+ /* queue the draw packet */
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2);
+ *cmds++ = 0; /* viz query info. */
+ /* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
+ *cmds++ = 0x00030088;
+ }
+
+ /* create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
+
+ return cmds;
+}
+
+static void build_regrestore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *start = tmp_ctx.cmd;
+ unsigned int *cmd = start;
+
+ unsigned int i = 0;
+ unsigned int reg_array_size = 0;
+ const unsigned int *ptr_register_ranges;
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ /* H/W Registers */
+ /* deferred cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ???); */
+ cmd++;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ /* Force mismatch */
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
+#else
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+#endif
+
+ /* Based on chip id choose the registers ranges*/
+ if (adreno_is_a220(adreno_dev)) {
+ ptr_register_ranges = register_ranges_a220;
+ reg_array_size = ARRAY_SIZE(register_ranges_a220);
+ } else if (adreno_is_a225(adreno_dev)) {
+ ptr_register_ranges = register_ranges_a225;
+ reg_array_size = ARRAY_SIZE(register_ranges_a225);
+ } else {
+ ptr_register_ranges = register_ranges_a20x;
+ reg_array_size = ARRAY_SIZE(register_ranges_a20x);
+ }
+
+
+ for (i = 0; i < (reg_array_size/2); i++) {
+ cmd = reg_range(cmd, ptr_register_ranges[i*2],
+ ptr_register_ranges[i*2+1]);
+ }
+
+ /* Now we know how many register blocks we have, we can compute command
+ * length
+ */
+ start[2] =
+ cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
+ /* Enable shadowing for the entire register block. */
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ start[4] |= (0 << 24) | (4 << 16); /* Disable shadowing. */
+#else
+ start[4] |= (1 << 24) | (4 << 16);
+#endif
+
+ /* Need to handle some of the registers separately */
+ *cmd++ = cp_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
+ tmp_ctx.reg_values[0] = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0x00040400;
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+ *cmd++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
+ tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0x00000000;
+
+ if (adreno_is_a22x(adreno_dev)) {
+ unsigned int i;
+ unsigned int j = 2;
+ for (i = REG_A220_VSC_BIN_SIZE; i <=
+ REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
+ *cmd++ = cp_type0_packet(i, 1);
+ tmp_ctx.reg_values[j] = virt2gpu(cmd,
+ &drawctxt->gpustate);
+ *cmd++ = 0x00000000;
+ j++;
+ }
+ }
+
+ /* ALU Constants */
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ *cmd++ = (0 << 24) | (0 << 16) | 0; /* Disable shadowing */
+#else
+ *cmd++ = (1 << 24) | (0 << 16) | 0;
+#endif
+ *cmd++ = ALU_CONSTANTS;
+
+ /* Texture Constants */
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ /* Disable shadowing */
+ *cmd++ = (0 << 24) | (1 << 16) | 0;
+#else
+ *cmd++ = (1 << 24) | (1 << 16) | 0;
+#endif
+ *cmd++ = TEX_CONSTANTS;
+
+ /* Boolean Constants */
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + BOOL_CONSTANTS);
+ *cmd++ = (2 << 16) | 0;
+
+ /* the next BOOL_CONSTANT dwords is the shadow area for
+ * boolean constants.
+ */
+ tmp_ctx.bool_shadow = virt2gpu(cmd, &drawctxt->gpustate);
+ cmd += BOOL_CONSTANTS;
+
+ /* Loop Constants */
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + LOOP_CONSTANTS);
+ *cmd++ = (3 << 16) | 0;
+
+ /* the next LOOP_CONSTANTS dwords is the shadow area for
+ * loop constants.
+ */
+ tmp_ctx.loop_shadow = virt2gpu(cmd, &drawctxt->gpustate);
+ cmd += LOOP_CONSTANTS;
+
+ /* create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+static void
+build_shader_save_restore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *save, *restore, *fixup;
+ unsigned int *startSizeVtx, *startSizePix, *startSizeShared;
+ unsigned int *partition1;
+ unsigned int *shaderBases, *partition2;
+
+ /* compute vertex, pixel and shared instruction shadow GPU addresses */
+ tmp_ctx.shader_vertex = drawctxt->gpustate.gpuaddr + SHADER_OFFSET;
+ tmp_ctx.shader_pixel = tmp_ctx.shader_vertex
+ + _shader_shadow_size(adreno_dev);
+ tmp_ctx.shader_shared = tmp_ctx.shader_pixel
+ + _shader_shadow_size(adreno_dev);
+
+ /* restore shader partitioning and instructions */
+
+ restore = cmd; /* start address */
+
+ /* Invalidate Vertex & Pixel instruction code address and sizes */
+ *cmd++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmd++ = 0x00000300; /* 0x100 = Vertex, 0x200 = Pixel */
+
+ /* Restore previous shader vertex & pixel instruction bases. */
+ *cmd++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
+ shaderBases = cmd++; /* TBD #5: shader bases (from fixup) */
+
+ /* write the shader partition information to a scratch register */
+ *cmd++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+ partition1 = cmd++; /* TBD #4a: partition info (from save) */
+
+ /* load vertex shader instructions from the shadow. */
+ *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
+ *cmd++ = tmp_ctx.shader_vertex + 0x0; /* 0x0 = Vertex */
+ startSizeVtx = cmd++; /* TBD #1: start/size (from save) */
+
+ /* load pixel shader instructions from the shadow. */
+ *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
+ *cmd++ = tmp_ctx.shader_pixel + 0x1; /* 0x1 = Pixel */
+ startSizePix = cmd++; /* TBD #2: start/size (from save) */
+
+ /* load shared shader instructions from the shadow. */
+ *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
+ *cmd++ = tmp_ctx.shader_shared + 0x2; /* 0x2 = Shared */
+ startSizeShared = cmd++; /* TBD #3: start/size (from save) */
+
+ /* create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, drawctxt->shader_restore, restore, cmd);
+
+ /*
+ * fixup SET_SHADER_BASES data
+ *
+ * since self-modifying PM4 code is being used here, a seperate
+ * command buffer is used for this fixup operation, to ensure the
+ * commands are not read by the PM4 engine before the data fields
+ * have been written.
+ */
+
+ fixup = cmd; /* start address */
+
+ /* write the shader partition information to a scratch register */
+ *cmd++ = cp_type0_packet(REG_SCRATCH_REG2, 1);
+ partition2 = cmd++; /* TBD #4b: partition info (from save) */
+
+ /* mask off unused bits, then OR with shader instruction memory size */
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = REG_SCRATCH_REG2;
+ /* AND off invalid bits. */
+ *cmd++ = 0x0FFF0FFF;
+ /* OR in instruction memory size. */
+ *cmd++ = adreno_encode_istore_size(adreno_dev);
+
+ /* write the computed value to the SET_SHADER_BASES data field */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_SCRATCH_REG2;
+ /* TBD #5: shader bases (to restore) */
+ *cmd++ = virt2gpu(shaderBases, &drawctxt->gpustate);
+
+ /* create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, drawctxt->shader_fixup, fixup, cmd);
+
+ /* save shader partitioning and instructions */
+
+ save = cmd; /* start address */
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ /* fetch the SQ_INST_STORE_MANAGMENT register value,
+ * store the value in the data fields of the SET_CONSTANT commands
+ * above.
+ */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_SQ_INST_STORE_MANAGMENT;
+ /* TBD #4a: partition info (to restore) */
+ *cmd++ = virt2gpu(partition1, &drawctxt->gpustate);
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_SQ_INST_STORE_MANAGMENT;
+ /* TBD #4b: partition info (to fixup) */
+ *cmd++ = virt2gpu(partition2, &drawctxt->gpustate);
+
+
+ /* store the vertex shader instructions */
+ *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
+ *cmd++ = tmp_ctx.shader_vertex + 0x0; /* 0x0 = Vertex */
+ /* TBD #1: start/size (to restore) */
+ *cmd++ = virt2gpu(startSizeVtx, &drawctxt->gpustate);
+
+ /* store the pixel shader instructions */
+ *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
+ *cmd++ = tmp_ctx.shader_pixel + 0x1; /* 0x1 = Pixel */
+ /* TBD #2: start/size (to restore) */
+ *cmd++ = virt2gpu(startSizePix, &drawctxt->gpustate);
+
+ /* store the shared shader instructions if vertex base is nonzero */
+
+ *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
+ *cmd++ = tmp_ctx.shader_shared + 0x2; /* 0x2 = Shared */
+ /* TBD #3: start/size (to restore) */
+ *cmd++ = virt2gpu(startSizeShared, &drawctxt->gpustate);
+
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ /* create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, drawctxt->shader_save, save, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+/* create buffers for saving/restoring registers, constants, & GMEM */
+static int a2xx_create_gpustate_shadow(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
+
+ /* build indirect command buffers to save & restore regs/constants */
+ build_regrestore_cmds(adreno_dev, drawctxt);
+ build_regsave_cmds(adreno_dev, drawctxt);
+
+ build_shader_save_restore_cmds(adreno_dev, drawctxt);
+
+ return 0;
+}
+
+/* create buffers for saving/restoring registers, constants, & GMEM */
+static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ int result;
+
+ calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
+ tmp_ctx.gmem_base = adreno_dev->gmem_base;
+
+ result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
+ drawctxt->base.proc_priv->pagetable,
+ drawctxt->context_gmem_shadow.size);
+
+ if (result)
+ return result;
+
+ /* set the gmem shadow flag for the context */
+ drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;
+
+ /* blank out gmem shadow. */
+ kgsl_sharedmem_set(drawctxt->base.device,
+ &drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
+ drawctxt->context_gmem_shadow.size);
+
+ /* build quad vertex buffer */
+ build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
+ &tmp_ctx.cmd);
+
+ /* build TP0_CHICKEN register restore command buffer */
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE))
+ tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt);
+
+ /* build indirect command buffers to save & restore gmem */
+ drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd;
+ tmp_ctx.cmd =
+ build_gmem2sys_cmds(adreno_dev, drawctxt,
+ &drawctxt->context_gmem_shadow);
+ drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd;
+ tmp_ctx.cmd =
+ build_sys2gmem_cmds(adreno_dev, drawctxt,
+ &drawctxt->context_gmem_shadow);
+
+ kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
+ KGSL_CACHE_OP_FLUSH);
+
+ kgsl_cffdump_syncmem(drawctxt->base.device,
+ &drawctxt->context_gmem_shadow.gmemshadow,
+ drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
+ drawctxt->context_gmem_shadow.gmemshadow.size, false);
+
+ return 0;
+}
+
+static int a2xx_drawctxt_create(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ int ret;
+
+ /*
+ * Allocate memory for the GPU state and the context commands.
+ * Despite the name, this is much more then just storage for
+ * the gpustate. This contains command space for gmem save
+ * and texture and vertex buffer storage too
+ */
+
+ ret = kgsl_allocate(&drawctxt->gpustate,
+ drawctxt->base.proc_priv->pagetable, _context_size(adreno_dev));
+
+ if (ret)
+ return ret;
+
+ kgsl_sharedmem_set(drawctxt->base.device, &drawctxt->gpustate,
+ 0, 0, _context_size(adreno_dev));
+
+ tmp_ctx.cmd = tmp_ctx.start
+ = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+ ret = a2xx_create_gpustate_shadow(adreno_dev, drawctxt);
+ if (ret)
+ goto done;
+
+ drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
+ }
+
+ if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC)) {
+ ret = a2xx_create_gmem_shadow(adreno_dev, drawctxt);
+ if (ret)
+ goto done;
+ }
+
+ /* Flush and sync the gpustate memory */
+
+ kgsl_cache_range_op(&drawctxt->gpustate,
+ KGSL_CACHE_OP_FLUSH);
+
+ kgsl_cffdump_syncmem(drawctxt->base.device,
+ &drawctxt->gpustate, drawctxt->gpustate.gpuaddr,
+ drawctxt->gpustate.size, false);
+
+done:
+ if (ret)
+ kgsl_sharedmem_free(&drawctxt->gpustate);
+
+ return ret;
+}
+
+static int a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int cmd[11];
+ unsigned int *cmds = &cmd[0];
+
+ if (adreno_is_a225(adreno_dev)) {
+ adreno_dev->gpudev->ctx_switches_since_last_draw++;
+ /* If there have been > than
+ * ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW calls to context
+ * switches w/o gmem being saved then we need to execute
+ * this workaround */
+ if (adreno_dev->gpudev->ctx_switches_since_last_draw >
+ ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW)
+ adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
+ else
+ return 0;
+ /*
+ * Issue an empty draw call to avoid possible hangs due to
+ * repeated idles without intervening draw calls.
+ * On adreno 225 the PC block has a cache that is only
+ * flushed on draw calls and repeated idles can make it
+ * overflow. The gmem save path contains draw calls so
+ * this workaround isn't needed there.
+ */
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = (0x4 << 16) | (REG_PA_SU_SC_MODE_CNTL - 0x2000);
+ *cmds++ = 0;
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 5);
+ *cmds++ = 0;
+ *cmds++ = 1<<14;
+ *cmds++ = 0;
+ *cmds++ = device->mmu.setstate_memory.gpuaddr;
+ *cmds++ = 0;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ } else {
+ /* On Adreno 20x/220, if the events for shader space reuse
+ * gets dropped, the CP block would wait indefinitely.
+ * Sending CP_SET_SHADER_BASES packet unblocks the CP from
+ * this wait.
+ */
+ *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
+ *cmds++ = adreno_encode_istore_size(adreno_dev)
+ | adreno_dev->pix_shader_start;
+ }
+
+ return adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE, &cmd[0], cmds - cmd);
+}
+
+static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ int ret;
+
+ if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
+ return 0;
+
+ if (context->state == ADRENO_CONTEXT_STATE_INVALID)
+ return 0;
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+ kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
+ context->reg_save[1],
+ context->reg_save[2] << 2, true);
+ /* save registers and constants. */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->reg_save, 3);
+
+ if (ret)
+ return ret;
+
+ if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
+ kgsl_cffdump_syncmem(context->base.device,
+ &context->gpustate,
+ context->shader_save[1],
+ context->shader_save[2] << 2, true);
+ /* save shader partitioning and instructions. */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->shader_save, 3);
+
+ kgsl_cffdump_syncmem(context->base.device,
+ &context->gpustate,
+ context->shader_fixup[1],
+ context->shader_fixup[2] << 2, true);
+ /*
+ * fixup shader partitioning parameter for
+ * SET_SHADER_BASES.
+ */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->shader_fixup, 3);
+
+ if (ret)
+ return ret;
+
+ context->flags |= CTXT_FLAGS_SHADER_RESTORE;
+ }
+ }
+
+ if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
+ (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
+ kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
+ context->context_gmem_shadow.gmem_save[1],
+ context->context_gmem_shadow.gmem_save[2] << 2, true);
+ /* save gmem.
+ * (note: changes shader. shader must already be saved.)
+ */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->context_gmem_shadow.gmem_save, 3);
+
+ if (ret)
+ return ret;
+ kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
+ context->chicken_restore[1],
+ context->chicken_restore[2] << 2, true);
+
+ /* Restore TP0_CHICKEN */
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->chicken_restore, 3);
+
+ if (ret)
+ return ret;
+ }
+ adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
+
+ context->flags |= CTXT_FLAGS_GMEM_RESTORE;
+ } else if (adreno_is_a2xx(adreno_dev))
+ return a2xx_drawctxt_draw_workaround(adreno_dev, context);
+
+ return 0;
+}
+
+static int a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int cmds[5];
+ int ret = 0;
+
+ if (context == NULL) {
+ /* No context - set the default pagetable and thats it */
+ unsigned int id;
+ /*
+ * If there isn't a current context, the kgsl_mmu_setstate
+ * will use the CPU path so we don't need to give
+ * it a valid context id.
+ */
+ id = (adreno_dev->drawctxt_active != NULL)
+ ? adreno_dev->drawctxt_active->base.id
+ : KGSL_CONTEXT_INVALID;
+ kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+ id);
+ return 0;
+ }
+
+ cmds[0] = cp_nop_packet(1);
+ cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+ cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
+ cmds[3] = device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
+ cmds[4] = context->base.id;
+ ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ cmds, 5);
+ if (ret)
+ return ret;
+
+ kgsl_mmu_setstate(&device->mmu, context->base.proc_priv->pagetable,
+ context->base.id);
+
+ /* restore gmem.
+ * (note: changes shader. shader must not already be restored.)
+ */
+ if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
+ kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
+ context->context_gmem_shadow.gmem_restore[1],
+ context->context_gmem_shadow.gmem_restore[2] << 2,
+ true);
+
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->context_gmem_shadow.gmem_restore, 3);
+ if (ret)
+ return ret;
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+ kgsl_cffdump_syncmem(context->base.device,
+ &context->gpustate,
+ context->chicken_restore[1],
+ context->chicken_restore[2] << 2, true);
+
+ /* Restore TP0_CHICKEN */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->chicken_restore, 3);
+ if (ret)
+ return ret;
+ }
+
+ context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
+ }
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+ kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
+ context->reg_restore[1],
+ context->reg_restore[2] << 2, true);
+
+ /* restore registers and constants. */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+ if (ret)
+ return ret;
+
+ /* restore shader instructions & partitioning. */
+ if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
+ kgsl_cffdump_syncmem(context->base.device,
+ &context->gpustate,
+ context->shader_restore[1],
+ context->shader_restore[2] << 2, true);
+
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->shader_restore, 3);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (adreno_is_a20x(adreno_dev)) {
+ cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
+ cmds[1] = context->bin_base_offset;
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, cmds, 2);
+ }
+
+ return ret;
+}
+
+/*
+ * Interrupt management
+ *
+ * a2xx interrupt control is distributed among the various
+ * hardware components (RB, CP, MMU). The main interrupt
+ * tells us which component fired the interrupt, but one needs
+ * to go to the individual component to find out why. The
+ * following functions provide the broken out support for
+ * managing the interrupts
+ */
+
+#define RBBM_INT_MASK RBBM_INT_CNTL__RDERR_INT_MASK
+
+#define CP_INT_MASK \
+ (CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \
+ CP_INT_CNTL__OPCODE_ERROR_MASK | \
+ CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK | \
+ CP_INT_CNTL__RESERVED_BIT_ERROR_MASK | \
+ CP_INT_CNTL__IB_ERROR_MASK | \
+ CP_INT_CNTL__IB1_INT_MASK | \
+ CP_INT_CNTL__RB_INT_MASK)
+
+#define VALID_STATUS_COUNT_MAX 10
+
+static struct {
+ unsigned int mask;
+ const char *message;
+} kgsl_cp_error_irqs[] = {
+ { CP_INT_CNTL__T0_PACKET_IN_IB_MASK,
+ "ringbuffer TO packet in IB interrupt" },
+ { CP_INT_CNTL__OPCODE_ERROR_MASK,
+ "ringbuffer opcode error interrupt" },
+ { CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK,
+ "ringbuffer protected mode error interrupt" },
+ { CP_INT_CNTL__RESERVED_BIT_ERROR_MASK,
+ "ringbuffer reserved bit error interrupt" },
+ { CP_INT_CNTL__IB_ERROR_MASK,
+ "ringbuffer IB error interrupt" },
+};
+
+static void a2xx_cp_intrcallback(struct kgsl_device *device)
+{
+ unsigned int status = 0, num_reads = 0, master_status = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ int i;
+
+ kgsl_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
+ while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
+ (master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
+ kgsl_regread(device, REG_CP_INT_STATUS, &status);
+ kgsl_regread(device, REG_MASTER_INT_SIGNAL,
+ &master_status);
+ num_reads++;
+ }
+ if (num_reads > 1)
+ KGSL_DRV_WARN(device,
+ "Looped %d times to read REG_CP_INT_STATUS\n",
+ num_reads);
+
+ trace_kgsl_a2xx_irq_status(device, master_status, status);
+
+ if (!status) {
+ if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
+ /*
+ * This indicates that we could not read CP_INT_STAT.
+ * As a precaution schedule the dispatcher to check
+ * things out. Since we did not ack any interrupts this
+ * interrupt will be generated again
+ */
+ KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
+ adreno_dispatcher_schedule(device);
+ } else
+ KGSL_DRV_WARN(device, "Spurious interrput detected\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
+ if (status & kgsl_cp_error_irqs[i].mask) {
+ KGSL_CMD_CRIT(rb->device, "%s\n",
+ kgsl_cp_error_irqs[i].message);
+ /*
+ * on fatal errors, turn off the interrupts to
+ * avoid storming. This has the side effect of
+ * forcing a PM dump when the timestamp times out
+ */
+
+ kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
+ }
+ }
+
+ /* only ack bits we understand */
+ status &= CP_INT_MASK;
+ kgsl_regwrite(device, REG_CP_INT_ACK, status);
+
+ if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ adreno_dispatcher_schedule(device);
+ }
+}
+
+static void a2xx_rbbm_intrcallback(struct kgsl_device *device)
+{
+ unsigned int status = 0;
+ unsigned int rderr = 0;
+ unsigned int addr = 0;
+ const char *source;
+
+ kgsl_regread(device, REG_RBBM_INT_STATUS, &status);
+
+ if (status & RBBM_INT_CNTL__RDERR_INT_MASK) {
+ kgsl_regread(device, REG_RBBM_READ_ERROR, &rderr);
+ source = (rderr & RBBM_READ_ERROR_REQUESTER)
+ ? "host" : "cp";
+ /* convert to dword address */
+ addr = (rderr & RBBM_READ_ERROR_ADDRESS_MASK) >> 2;
+
+ /*
+ * Log CP_INT_STATUS interrupts from the CP at a
+ * lower level because they can happen frequently
+ * and are worked around in a2xx_irq_handler.
+ */
+ if (addr == REG_CP_INT_STATUS &&
+ rderr & RBBM_READ_ERROR_ERROR &&
+ rderr & RBBM_READ_ERROR_REQUESTER)
+ KGSL_DRV_WARN(device,
+ "rbbm read error interrupt: %s reg: %04X\n",
+ source, addr);
+ else
+ KGSL_DRV_CRIT(device,
+ "rbbm read error interrupt: %s reg: %04X\n",
+ source, addr);
+ }
+
+ status &= RBBM_INT_MASK;
+ kgsl_regwrite(device, REG_RBBM_INT_ACK, status);
+}
+
+irqreturn_t a2xx_irq_handler(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ irqreturn_t result = IRQ_NONE;
+ unsigned int status;
+
+ kgsl_regread(device, REG_MASTER_INT_SIGNAL, &status);
+
+ if (status & MASTER_INT_SIGNAL__MH_INT_STAT) {
+ kgsl_mh_intrcallback(device);
+ result = IRQ_HANDLED;
+ }
+
+ if (status & MASTER_INT_SIGNAL__CP_INT_STAT) {
+ a2xx_cp_intrcallback(device);
+ result = IRQ_HANDLED;
+ }
+
+ if (status & MASTER_INT_SIGNAL__RBBM_INT_STAT) {
+ a2xx_rbbm_intrcallback(device);
+ result = IRQ_HANDLED;
+ }
+
+ return result;
+}
+
+static void a2xx_irq_control(struct adreno_device *adreno_dev, int state)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (state) {
+ kgsl_regwrite(device, REG_RBBM_INT_CNTL, RBBM_INT_MASK);
+ kgsl_regwrite(device, REG_CP_INT_CNTL, CP_INT_MASK);
+ kgsl_regwrite(device, MH_INTERRUPT_MASK,
+ kgsl_mmu_get_int_mask());
+ } else {
+ kgsl_regwrite(device, REG_RBBM_INT_CNTL, 0);
+ kgsl_regwrite(device, REG_CP_INT_CNTL, 0);
+ kgsl_regwrite(device, MH_INTERRUPT_MASK, 0);
+ }
+
+ /* Force the writes to post before touching the IRQ line */
+ wmb();
+}
+
+static unsigned int a2xx_irq_pending(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int status;
+
+ kgsl_regread(device, REG_MASTER_INT_SIGNAL, &status);
+
+ return (status &
+ (MASTER_INT_SIGNAL__MH_INT_STAT |
+ MASTER_INT_SIGNAL__CP_INT_STAT |
+ MASTER_INT_SIGNAL__RBBM_INT_STAT)) ? 1 : 0;
+}
+
+static int a2xx_rb_init(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ unsigned int *cmds, cmds_gpu;
+
+ /* ME_INIT */
+ cmds = adreno_ringbuffer_allocspace(rb, NULL, 19);
+ if (cmds == NULL)
+ return -ENOMEM;
+
+ cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
+
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT,
+ 18));
+ /* All fields present (bits 9:0) */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x000003ff);
+ /* Disable/Enable Real-Time Stream processing (present but ignored) */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
+
+ /* Instruction memory size: */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ (adreno_encode_istore_size(adreno_dev)
+ | adreno_dev->pix_shader_start));
+ /* Maximum Contexts */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000001);
+ /* Write Confirm Interval and The CP will wait the
+ * wait_interval * 16 clocks between polling */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+
+ /* NQ and External Memory Swap */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ /* Protected mode error checking
+ * If iommu is used then protection needs to be turned off
+ * to enable context bank switching */
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0);
+ else
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ GSL_RB_PROTECTED_MODE_CONTROL);
+ /* Disable header dumping and Header dump address */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ /* Header dump size */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+
+ adreno_ringbuffer_submit(rb);
+
+ return 0;
+}
+
+static unsigned int a2xx_busy_cycles(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int reg, val;
+
+ /* Freeze the counter */
+ kgsl_regwrite(device, REG_CP_PERFMON_CNTL,
+ REG_PERF_MODE_CNT | REG_PERF_STATE_FREEZE);
+
+ /* Get the value */
+ kgsl_regread(device, REG_RBBM_PERFCOUNTER1_LO, &val);
+
+ /* Reset the counter */
+ kgsl_regwrite(device, REG_CP_PERFMON_CNTL,
+ REG_PERF_MODE_CNT | REG_PERF_STATE_RESET);
+
+ /* Re-Enable the performance monitors */
+ kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, ®);
+ kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, (reg | 0x40));
+ kgsl_regwrite(device, REG_RBBM_PERFCOUNTER1_SELECT, 0x1);
+ kgsl_regwrite(device, REG_CP_PERFMON_CNTL,
+ REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
+
+ return val;
+}
+
+static void a2xx_gmeminit(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ union reg_rb_edram_info rb_edram_info;
+ unsigned int gmem_size;
+ unsigned int edram_value = 0;
+
+ /* get edram_size value equivalent */
+ gmem_size = (adreno_dev->gmem_size >> 14);
+ while (gmem_size >>= 1)
+ edram_value++;
+
+ rb_edram_info.val = 0;
+
+ rb_edram_info.f.edram_size = edram_value;
+ rb_edram_info.f.edram_mapping_mode = 0; /* EDRAM_MAP_UPPER */
+
+ /* must be aligned to size */
+ rb_edram_info.f.edram_range = (adreno_dev->gmem_base >> 14);
+
+ kgsl_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val);
+}
+
+static void a2xx_start(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ /*
+ * We need to make sure all blocks are powered up and clocked
+ * before issuing a soft reset. The overrides will then be
+ * turned off (set to 0)
+ */
+ kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0xfffffffe);
+ kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+ /*
+ * Only reset CP block if all blocks have previously been
+ * reset
+ */
+ if (!(device->flags & KGSL_FLAGS_SOFT_RESET) ||
+ !adreno_is_a22x(adreno_dev)) {
+ kgsl_regwrite(device, REG_RBBM_SOFT_RESET,
+ 0xFFFFFFFF);
+ device->flags |= KGSL_FLAGS_SOFT_RESET;
+ } else {
+ kgsl_regwrite(device, REG_RBBM_SOFT_RESET,
+ 0x00000001);
+ }
+ /*
+ * The core is in an indeterminate state until the reset
+ * completes after 30ms.
+ */
+ msleep(30);
+
+ kgsl_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000);
+
+ if (adreno_is_a225(adreno_dev)) {
+ /* Enable large instruction store for A225 */
+ kgsl_regwrite(device, REG_SQ_FLOW_CONTROL,
+ 0x18000000);
+ }
+
+ if (adreno_is_a20x(adreno_dev))
+ /* For A20X based targets increase number of clocks
+ * that RBBM will wait before de-asserting Register
+ * Clock Active signal */
+ kgsl_regwrite(device, REG_RBBM_CNTL, 0x0000FFFF);
+ else
+ kgsl_regwrite(device, REG_RBBM_CNTL, 0x00004442);
+
+ kgsl_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
+ kgsl_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
+
+ if (cpu_is_msm8960())
+ kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0x200);
+ else
+ kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0);
+
+ if (!adreno_is_a22x(adreno_dev))
+ kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0);
+ else
+ kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80);
+
+ kgsl_regwrite(device, REG_RBBM_DEBUG, 0x00080000);
+
+ /* Make sure interrupts are disabled */
+ kgsl_regwrite(device, REG_RBBM_INT_CNTL, 0);
+ kgsl_regwrite(device, REG_CP_INT_CNTL, 0);
+ kgsl_regwrite(device, REG_SQ_INT_CNTL, 0);
+
+ a2xx_gmeminit(adreno_dev);
+}
+
+static void a2xx_postmortem_dump(struct adreno_device *adreno_dev)
+{
+ unsigned int r1, r2, r3, rbbm_status;
+ unsigned int cp_stat, rb_count;
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ kgsl_regread(device, REG_RBBM_STATUS, &rbbm_status);
+
+ kgsl_regread(device, REG_RBBM_PM_OVERRIDE1, &r2);
+ kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &r3);
+ KGSL_LOG_DUMP(device,
+ "RBBM: STATUS = %08X | PM_OVERRIDE1 = %08X | PM_OVERRIDE2 = %08X\n",
+ rbbm_status, r2, r3);
+
+ kgsl_regread(device, REG_RBBM_INT_CNTL, &r1);
+ kgsl_regread(device, REG_RBBM_INT_STATUS, &r2);
+ kgsl_regread(device, REG_RBBM_READ_ERROR, &r3);
+ KGSL_LOG_DUMP(device,
+ "INT_CNTL = %08X | INT_STATUS = %08X | READ_ERROR = %08X\n",
+ r1, r2, r3);
+
+ {
+ char cmdFifo[16];
+ struct log_field lines[] = {
+ {rbbm_status & 0x001F, cmdFifo},
+ {rbbm_status & BIT(5), "TC busy "},
+ {rbbm_status & BIT(8), "HIRQ pending"},
+ {rbbm_status & BIT(9), "CPRQ pending"},
+ {rbbm_status & BIT(10), "CFRQ pending"},
+ {rbbm_status & BIT(11), "PFRQ pending"},
+ {rbbm_status & BIT(12), "VGT 0DMA bsy"},
+ {rbbm_status & BIT(14), "RBBM WU busy"},
+ {rbbm_status & BIT(16), "CP NRT busy "},
+ {rbbm_status & BIT(18), "MH busy "},
+ {rbbm_status & BIT(19), "MH chncy bsy"},
+ {rbbm_status & BIT(21), "SX busy "},
+ {rbbm_status & BIT(22), "TPC busy "},
+ {rbbm_status & BIT(24), "SC CNTX busy"},
+ {rbbm_status & BIT(25), "PA busy "},
+ {rbbm_status & BIT(26), "VGT busy "},
+ {rbbm_status & BIT(27), "SQ cntx1 bsy"},
+ {rbbm_status & BIT(28), "SQ cntx0 bsy"},
+ {rbbm_status & BIT(30), "RB busy "},
+ {rbbm_status & BIT(31), "Grphs pp bsy"},
+ };
+ snprintf(cmdFifo, sizeof(cmdFifo), "CMD FIFO=%01X ",
+ rbbm_status & 0xf);
+ adreno_dump_fields(device, " STATUS=", lines,
+ ARRAY_SIZE(lines));
+ }
+
+ kgsl_regread(device, REG_CP_RB_BASE, &r1);
+ kgsl_regread(device, REG_CP_RB_CNTL, &r2);
+ rb_count = 2 << (r2 & (BIT(6)-1));
+ kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
+ KGSL_LOG_DUMP(device,
+ " RPTR = %08X | WPTR = %08X | RPTR_WR = %08X"
+ "\n", r1, r2, r3);
+
+ kgsl_regread(device, REG_CP_IB1_BASE, &r1);
+ kgsl_regread(device, REG_CP_IB1_BUFSZ, &r2);
+ KGSL_LOG_DUMP(device, "CP_IB1: BASE = %08X | BUFSZ = %d\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_IB2_BASE, &r1);
+ kgsl_regread(device, REG_CP_IB2_BUFSZ, &r2);
+ KGSL_LOG_DUMP(device, "CP_IB2: BASE = %08X | BUFSZ = %d\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_INT_CNTL, &r1);
+ kgsl_regread(device, REG_CP_INT_STATUS, &r2);
+ KGSL_LOG_DUMP(device, "CP_INT: CNTL = %08X | STATUS = %08X\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_ME_CNTL, &r1);
+ kgsl_regread(device, REG_CP_ME_STATUS, &r2);
+ kgsl_regread(device, REG_MASTER_INT_SIGNAL, &r3);
+ KGSL_LOG_DUMP(device,
+ "CP_ME: CNTL = %08X | STATUS = %08X | MSTR_INT_SGNL = "
+ "%08X\n", r1, r2, r3);
+
+ kgsl_regread(device, REG_CP_STAT, &cp_stat);
+ KGSL_LOG_DUMP(device, "CP_STAT = %08X\n", cp_stat);
+#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(0), "WR_BSY 0"},
+ {cp_stat & BIT(1), "RD_RQ_BSY 1"},
+ {cp_stat & BIT(2), "RD_RTN_BSY 2"},
+ };
+ adreno_dump_fields(device, " MIU=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(5), "RING_BUSY 5"},
+ {cp_stat & BIT(6), "NDRCTS_BSY 6"},
+ {cp_stat & BIT(7), "NDRCT2_BSY 7"},
+ {cp_stat & BIT(9), "ST_BUSY 9"},
+ {cp_stat & BIT(10), "BUSY 10"},
+ };
+ adreno_dump_fields(device, " CSF=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(11), "RNG_Q_BSY 11"},
+ {cp_stat & BIT(12), "NDRCTS_Q_B12"},
+ {cp_stat & BIT(13), "NDRCT2_Q_B13"},
+ {cp_stat & BIT(16), "ST_QUEUE_B16"},
+ {cp_stat & BIT(17), "PFP_BUSY 17"},
+ };
+ adreno_dump_fields(device, " RING=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(3), "RBIU_BUSY 3"},
+ {cp_stat & BIT(4), "RCIU_BUSY 4"},
+ {cp_stat & BIT(18), "MQ_RG_BSY 18"},
+ {cp_stat & BIT(19), "MQ_NDRS_BS19"},
+ {cp_stat & BIT(20), "MQ_NDR2_BS20"},
+ {cp_stat & BIT(21), "MIU_WC_STL21"},
+ {cp_stat & BIT(22), "CP_NRT_BSY22"},
+ {cp_stat & BIT(23), "3D_BUSY 23"},
+ {cp_stat & BIT(26), "ME_BUSY 26"},
+ {cp_stat & BIT(29), "ME_WC_BSY 29"},
+ {cp_stat & BIT(30), "MIU_FF EM 30"},
+ {cp_stat & BIT(31), "CP_BUSY 31"},
+ };
+ adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns));
+ }
+#endif
+
+ kgsl_regread(device, REG_SCRATCH_REG0, &r1);
+ KGSL_LOG_DUMP(device, "SCRATCH_REG0 = %08X\n", r1);
+
+ kgsl_regread(device, REG_COHER_SIZE_PM4, &r1);
+ kgsl_regread(device, REG_COHER_BASE_PM4, &r2);
+ kgsl_regread(device, REG_COHER_STATUS_PM4, &r3);
+ KGSL_LOG_DUMP(device,
+ "COHER: SIZE_PM4 = %08X | BASE_PM4 = %08X | STATUS_PM4"
+ " = %08X\n", r1, r2, r3);
+
+ kgsl_regread(device, MH_AXI_ERROR, &r1);
+ KGSL_LOG_DUMP(device, "MH: AXI_ERROR = %08X\n", r1);
+
+ kgsl_regread(device, MH_MMU_PAGE_FAULT, &r1);
+ kgsl_regread(device, MH_MMU_CONFIG, &r2);
+ kgsl_regread(device, MH_MMU_MPU_BASE, &r3);
+ KGSL_LOG_DUMP(device,
+ "MH_MMU: PAGE_FAULT = %08X | CONFIG = %08X | MPU_BASE ="
+ " %08X\n", r1, r2, r3);
+
+ kgsl_regread(device, MH_MMU_MPU_END, &r1);
+ kgsl_regread(device, MH_MMU_VA_RANGE, &r2);
+ r3 = kgsl_mmu_get_current_ptbase(&device->mmu);
+ KGSL_LOG_DUMP(device,
+ " MPU_END = %08X | VA_RANGE = %08X | PT_BASE ="
+ " %08X\n", r1, r2, r3);
+
+ KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ",
+ kgsl_mmu_get_ptsize(&device->mmu));
+
+ kgsl_regread(device, MH_MMU_TRAN_ERROR, &r1);
+ KGSL_LOG_DUMP(device, " TRAN_ERROR = %08X\n", r1);
+
+ kgsl_regread(device, MH_INTERRUPT_MASK, &r1);
+ kgsl_regread(device, MH_INTERRUPT_STATUS, &r2);
+ KGSL_LOG_DUMP(device,
+ "MH_INTERRUPT: MASK = %08X | STATUS = %08X\n", r1, r2);
+}
+
+/* Register offset defines for A2XX */
+static unsigned int a2xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_DEBUG, REG_CP_DEBUG),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_WADDR, REG_CP_ME_RAM_WADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_DATA, REG_CP_ME_RAM_DATA),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_DATA, REG_CP_PFP_UCODE_DATA),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_ADDR, REG_CP_PFP_UCODE_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, REG_CP_RB_BASE),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR, REG_CP_RB_RPTR_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, REG_CP_RB_RPTR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, REG_CP_RB_WPTR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, REG_CP_ME_CNTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, REG_CP_RB_CNTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, REG_CP_IB1_BASE),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, REG_CP_IB1_BUFSZ),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, REG_CP_IB2_BASE),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, REG_CP_IB2_BUFSZ),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_TIMESTAMP, REG_CP_TIMESTAMP),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_RADDR, REG_CP_ME_RAM_RADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_ADDR, REG_SCRATCH_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_UMSK, REG_SCRATCH_UMSK),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, REG_RBBM_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_PA_SC_AA_CONFIG, REG_PA_SC_AA_CONFIG),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PM_OVERRIDE2, REG_RBBM_PM_OVERRIDE2),
+ ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_REG2, REG_SCRATCH_REG2),
+ ADRENO_REG_DEFINE(ADRENO_REG_SQ_GPR_MANAGEMENT, REG_SQ_GPR_MANAGEMENT),
+ ADRENO_REG_DEFINE(ADRENO_REG_SQ_INST_STORE_MANAGMENT,
+ REG_SQ_INST_STORE_MANAGMENT),
+ ADRENO_REG_DEFINE(ADRENO_REG_TC_CNTL_STATUS, REG_TC_CNTL_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_TP0_CHICKEN, REG_TP0_CHICKEN),
+};
+
+const struct adreno_reg_offsets a2xx_reg_offsets = {
+ .offsets = a2xx_register_offsets,
+ .offset_0 = ADRENO_REG_REGISTER_MAX,
+};
+
+/* Defined in adreno_a2xx_snapshot.c */
+void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
+ int *remain, int hang);
+
+struct adreno_gpudev adreno_a2xx_gpudev = {
+ .reg_offsets = &a2xx_reg_offsets,
+
+ .ctxt_create = a2xx_drawctxt_create,
+ .ctxt_save = a2xx_drawctxt_save,
+ .ctxt_restore = a2xx_drawctxt_restore,
+ .ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
+ .irq_handler = a2xx_irq_handler,
+ .irq_control = a2xx_irq_control,
+ .irq_pending = a2xx_irq_pending,
+ .snapshot = a2xx_snapshot,
+ .rb_init = a2xx_rb_init,
+ .busy_cycles = a2xx_busy_cycles,
+ .start = a2xx_start,
+ .postmortem_dump = a2xx_postmortem_dump,
+};
diff --git a/drivers/gpu/msm2/adreno_a2xx_snapshot.c b/drivers/gpu/msm2/adreno_a2xx_snapshot.c
new file mode 100644
index 0000000..5134ed6
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a2xx_snapshot.c
@@ -0,0 +1,383 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_snapshot.h"
+
+#define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \
+ + sizeof(struct kgsl_snapshot_debug))
+
+/* Dump the SX debug registers into a GPU snapshot debug section */
+
+#define SXDEBUG_COUNT 0x1B
+
+static int a2xx_snapshot_sxdebug(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(SXDEBUG_COUNT)) {
+ SNAPSHOT_ERR_NOMEM(device, "SX DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_SX;
+ header->size = SXDEBUG_COUNT;
+
+ for (i = 0; i < SXDEBUG_COUNT; i++) {
+ kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1B00 | i);
+ kgsl_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
+ }
+
+ kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+
+ return DEBUG_SECTION_SZ(SXDEBUG_COUNT);
+}
+
+#define CPDEBUG_COUNT 0x20
+
+static int a2xx_snapshot_cpdebug(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(CPDEBUG_COUNT)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP;
+ header->size = CPDEBUG_COUNT;
+
+ for (i = 0; i < CPDEBUG_COUNT; i++) {
+ kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628);
+ kgsl_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
+ }
+
+ kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+
+ return DEBUG_SECTION_SZ(CPDEBUG_COUNT);
+}
+
+/*
+ * The contents of the SQ debug sections are dword pairs:
+ * [register offset]:[value]
+ * This macro writes both dwords for the given register
+ */
+
+#define SQ_DEBUG_WRITE(_device, _reg, _data, _offset) \
+ do { _data[(_offset)++] = (_reg); \
+ kgsl_regread(_device, (_reg), &_data[(_offset)++]); \
+ } while (0)
+
+#define SQ_DEBUG_BANK_SIZE 23
+
+static int a2xx_snapshot_sqdebug(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, offset = 0;
+ int size = SQ_DEBUG_BANK_SIZE * 2 * 2;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "SQ Debug");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_SQ;
+ header->size = size;
+
+ for (i = 0; i < 2; i++) {
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_CONST_MGR_FSM+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_EXP_ALLOC+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_FSM_ALU_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_FSM_ALU_1+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_GPR_PIX+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_GPR_VTX+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_INPUT_FSM+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC_1+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATE_MEM+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device,
+ REG_SQ_DEBUG_PIX_TB_STATUS_REG_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device,
+ REG_SQ_DEBUG_PIX_TB_STATUS_REG_1+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device,
+ REG_SQ_DEBUG_PIX_TB_STATUS_REG_2+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device,
+ REG_SQ_DEBUG_PIX_TB_STATUS_REG_3+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PTR_BUFF+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_TB_STATUS_SEL+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_TP_FSM+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_1+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATE_MEM+i*0x1000,
+ data, offset);
+ }
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+#define SQ_DEBUG_THREAD_SIZE 7
+
+static int a2xx_snapshot_sqthreaddebug(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, offset = 0;
+ int size = SQ_DEBUG_THREAD_SIZE * 2 * 16;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "SQ THREAD DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_SQTHREAD;
+ header->size = size;
+
+ for (i = 0; i < 16; i++) {
+ kgsl_regwrite(device, REG_SQ_DEBUG_TB_STATUS_SEL,
+ i | (6<<4) | (i<<7) | (1<<11) | (1<<12)
+ | (i<<16) | (6<<20) | (i<<23));
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATE_MEM,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATUS_REG,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATE_MEM,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_0,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_1,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_2,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_3,
+ data, offset);
+ }
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+#define MIUDEBUG_COUNT 0x10
+
+static int a2xx_snapshot_miudebug(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(MIUDEBUG_COUNT)) {
+ SNAPSHOT_ERR_NOMEM(device, "MIU DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_MIU;
+ header->size = MIUDEBUG_COUNT;
+
+ for (i = 0; i < MIUDEBUG_COUNT; i++) {
+ kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1600 | i);
+ kgsl_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
+ }
+
+ kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+
+ return DEBUG_SECTION_SZ(MIUDEBUG_COUNT);
+}
+
+/* Snapshot the istore memory */
+static int a2xx_snapshot_istore(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_istore *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int count, i;
+
+ count = adreno_dev->istore_size * adreno_dev->instruction_size;
+
+ if (remain < (count * 4) + sizeof(*header)) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Not enough memory for the istore section");
+ return 0;
+ }
+
+ header->count = adreno_dev->istore_size;
+
+ for (i = 0; i < count; i++)
+ kgsl_regread(device, ADRENO_ISTORE_START + i, &data[i]);
+
+ return (count * 4) + sizeof(*header);
+}
+
+/* A2XX GPU snapshot function - this is where all of the A2XX specific
+ * bits and pieces are grabbed into the snapshot memory
+ */
+
+void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
+ int *remain, int hang)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_snapshot_registers_list list;
+ struct kgsl_snapshot_registers regs;
+ unsigned int pmoverride;
+
+ /* Choose the register set to dump */
+
+ if (adreno_is_a20x(adreno_dev)) {
+ regs.regs = (unsigned int *) a200_registers;
+ regs.count = a200_registers_count;
+ } else if (adreno_is_a220(adreno_dev)) {
+ regs.regs = (unsigned int *) a220_registers;
+ regs.count = a220_registers_count;
+ } else if (adreno_is_a225(adreno_dev)) {
+ regs.regs = (unsigned int *) a225_registers;
+ regs.count = a225_registers_count;
+ }
+
+ list.registers = ®s;
+ list.count = 1;
+
+ /* Master set of (non debug) registers */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
+ kgsl_snapshot_dump_regs, &list);
+
+ /* CP_STATE_DEBUG indexed registers */
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_CP_STATE_DEBUG_INDEX,
+ REG_CP_STATE_DEBUG_DATA, 0x0, 0x14);
+
+ /* CP_ME indexed registers */
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
+ 64, 44);
+
+ /*
+ * Need to temporarily turn off clock gating for the debug bus to
+ * work
+ */
+
+ kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &pmoverride);
+ kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF);
+
+ /* SX debug registers */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_sxdebug, NULL);
+
+ /* SU debug indexed registers (only for < 470) */
+ if (!adreno_is_a22x(adreno_dev))
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_PA_SU_DEBUG_CNTL,
+ REG_PA_SU_DEBUG_DATA,
+ 0, 0x1B);
+
+ /* CP debug registers */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_cpdebug, NULL);
+
+ /* MH debug indexed registers */
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, MH_DEBUG_CTRL, MH_DEBUG_DATA, 0x0, 0x40);
+
+ /* Leia only register sets */
+ if (adreno_is_a22x(adreno_dev)) {
+ /* RB DEBUG indexed regisers */
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA, 0, 8);
+
+ /* RB DEBUG indexed registers bank 2 */
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA + 0x1000,
+ 0, 8);
+
+ /* PC_DEBUG indexed registers */
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_PC_DEBUG_CNTL, REG_PC_DEBUG_DATA, 0, 8);
+
+ /* GRAS_DEBUG indexed registers */
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_GRAS_DEBUG_CNTL, REG_GRAS_DEBUG_DATA, 0, 4);
+
+ /* MIU debug registers */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_miudebug, NULL);
+
+ /* SQ DEBUG debug registers */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_sqdebug, NULL);
+
+ /*
+ * Reading SQ THREAD causes bad things to happen on a running
+ * system, so only read it if the GPU is already hung
+ */
+
+ if (hang) {
+ /* SQ THREAD debug registers */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_sqthreaddebug, NULL);
+ }
+ }
+
+ /*
+ * Only dump the istore on a hang - reading it on a running system
+ * has a non zero chance of hanging the GPU.
+ */
+
+ if (adreno_is_a2xx(adreno_dev) && hang) {
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_ISTORE, snapshot, remain,
+ a2xx_snapshot_istore, NULL);
+ }
+
+
+ /* Reset the clock gating */
+ kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, pmoverride);
+
+ return snapshot;
+}
diff --git a/drivers/gpu/msm2/adreno_a2xx_trace.c b/drivers/gpu/msm2/adreno_a2xx_trace.c
new file mode 100644
index 0000000..87c930b
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a2xx_trace.c
@@ -0,0 +1,19 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "adreno.h"
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "adreno_a2xx_trace.h"
diff --git a/drivers/gpu/msm2/adreno_a2xx_trace.h b/drivers/gpu/msm2/adreno_a2xx_trace.h
new file mode 100644
index 0000000..af355d6
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a2xx_trace.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_ADRENO_A2XX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ADRENO_A2XX_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE adreno_a2xx_trace
+
+#include <linux/tracepoint.h>
+
+struct kgsl_device;
+
+/*
+ * Tracepoint for a2xx irq. Includes status info
+ */
+TRACE_EVENT(kgsl_a2xx_irq_status,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int master_status,
+ unsigned int status),
+
+ TP_ARGS(device, master_status, status),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, master_status)
+ __field(unsigned int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->master_status = master_status;
+ __entry->status = status;
+ ),
+
+ TP_printk(
+ "d_name=%s master=%s status=%s",
+ __get_str(device_name),
+ __entry->master_status ? __print_flags(__entry->master_status,
+ "|",
+ { MASTER_INT_SIGNAL__MH_INT_STAT, "MH" },
+ { MASTER_INT_SIGNAL__SQ_INT_STAT, "SQ" },
+ { MASTER_INT_SIGNAL__CP_INT_STAT, "CP" },
+ { MASTER_INT_SIGNAL__RBBM_INT_STAT, "RBBM" }) : "None",
+ __entry->status ? __print_flags(__entry->status, "|",
+ { CP_INT_CNTL__SW_INT_MASK, "SW" },
+ { CP_INT_CNTL__T0_PACKET_IN_IB_MASK,
+ "T0_PACKET_IN_IB" },
+ { CP_INT_CNTL__OPCODE_ERROR_MASK, "OPCODE_ERROR" },
+ { CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK,
+ "PROTECTED_MODE_ERROR" },
+ { CP_INT_CNTL__RESERVED_BIT_ERROR_MASK,
+ "RESERVED_BIT_ERROR" },
+ { CP_INT_CNTL__IB_ERROR_MASK, "IB_ERROR" },
+ { CP_INT_CNTL__IB2_INT_MASK, "IB2" },
+ { CP_INT_CNTL__IB1_INT_MASK, "IB1" },
+ { CP_INT_CNTL__RB_INT_MASK, "RB" }) : "None"
+ )
+);
+
+#endif /* _ADRENO_A2XX_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm2/adreno_a3xx.c b/drivers/gpu/msm2/adreno_a3xx.c
new file mode 100644
index 0000000..b563c13
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a3xx.c
@@ -0,0 +1,4406 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "a3xx_reg.h"
+#include "adreno_a3xx_trace.h"
+
+/*
+ * Set of registers to dump for A3XX on postmortem and snapshot.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+
+const unsigned int a3xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
+ 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
+ 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
+ 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
+ 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
+ 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
+ 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
+ 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
+ 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
+ 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
+ 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
+ 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5,
+ 0x0e41, 0x0e45, 0x0e64, 0x0e65,
+ 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
+ 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
+ 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
+ 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
+ 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
+ 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
+ 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
+ 0x2240, 0x227e,
+ 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
+ 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
+ 0x22ff, 0x22ff, 0x2340, 0x2343,
+ 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
+ 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
+ 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
+ 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
+ 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
+ 0x25f0, 0x25f0,
+ 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
+ 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
+ 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
+ 0x300C, 0x300E, 0x301C, 0x301D,
+ 0x302A, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031, 0x3034, 0x3036,
+ 0x303C, 0x303C, 0x305E, 0x305F,
+};
+
+const unsigned int a3xx_registers_count = ARRAY_SIZE(a3xx_registers) / 2;
+
+/* Removed the following HLSQ register ranges from being read during
+ * fault tolerance since reading the registers may cause the device to hang:
+ */
+const unsigned int a3xx_hlsq_registers[] = {
+ 0x0e00, 0x0e05, 0x0e0c, 0x0e0c, 0x0e22, 0x0e23,
+ 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a,
+ 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
+};
+
+const unsigned int a3xx_hlsq_registers_count =
+ ARRAY_SIZE(a3xx_hlsq_registers) / 2;
+
+/* The set of additional registers to be dumped for A330 */
+
+const unsigned int a330_registers[] = {
+ 0x1d0, 0x1d0, 0x1d4, 0x1d4, 0x453, 0x453,
+};
+
+const unsigned int a330_registers_count = ARRAY_SIZE(a330_registers) / 2;
+
+/* Simple macro to facilitate bit setting in the gmem2sys and sys2gmem
+ * functions.
+ */
+
+#define _SET(_shift, _val) ((_val) << (_shift))
+
+/*
+ ****************************************************************************
+ *
+ * Context state shadow structure:
+ *
+ * +---------------------+------------+-------------+---------------------+---+
+ * | ALU Constant Shadow | Reg Shadow | C&V Buffers | Shader Instr Shadow |Tex|
+ * +---------------------+------------+-------------+---------------------+---+
+ *
+ * 8K - ALU Constant Shadow (8K aligned)
+ * 4K - H/W Register Shadow (8K aligned)
+ * 5K - Command and Vertex Buffers
+ * 8K - Shader Instruction Shadow
+ * ~6K - Texture Constant Shadow
+ *
+ *
+ ***************************************************************************
+ */
+
+/* Sizes of all sections in state shadow memory */
+#define ALU_SHADOW_SIZE (8*1024) /* 8KB */
+#define REG_SHADOW_SIZE (4*1024) /* 4KB */
+#define CMD_BUFFER_SIZE (5*1024) /* 5KB */
+#define TEX_SIZE_MEM_OBJECTS 896 /* bytes */
+#define TEX_SIZE_MIPMAP 1936 /* bytes */
+#define TEX_SIZE_SAMPLER_OBJ 256 /* bytes */
+#define TEX_SHADOW_SIZE \
+ ((TEX_SIZE_MEM_OBJECTS + TEX_SIZE_MIPMAP + \
+ TEX_SIZE_SAMPLER_OBJ)*2) /* ~6KB */
+#define SHADER_SHADOW_SIZE (8*1024) /* 8KB */
+
+/* Total context size, excluding GMEM shadow */
+#define CONTEXT_SIZE \
+ (ALU_SHADOW_SIZE+REG_SHADOW_SIZE + \
+ CMD_BUFFER_SIZE+SHADER_SHADOW_SIZE + \
+ TEX_SHADOW_SIZE)
+
+/* Offsets to different sections in context shadow memory */
+#define REG_OFFSET ALU_SHADOW_SIZE
+#define CMD_OFFSET (REG_OFFSET+REG_SHADOW_SIZE)
+#define SHADER_OFFSET (CMD_OFFSET+CMD_BUFFER_SIZE)
+#define TEX_OFFSET (SHADER_OFFSET+SHADER_SHADOW_SIZE)
+#define VS_TEX_OFFSET_MEM_OBJECTS TEX_OFFSET
+#define VS_TEX_OFFSET_MIPMAP (VS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS)
+#define VS_TEX_OFFSET_SAMPLER_OBJ (VS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP)
+#define FS_TEX_OFFSET_MEM_OBJECTS \
+ (VS_TEX_OFFSET_SAMPLER_OBJ+TEX_SIZE_SAMPLER_OBJ)
+#define FS_TEX_OFFSET_MIPMAP (FS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS)
+#define FS_TEX_OFFSET_SAMPLER_OBJ (FS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP)
+
+/* The offset for fragment shader data in HLSQ context */
+#define SSIZE (16*1024)
+
+#define HLSQ_SAMPLER_OFFSET 0x000
+#define HLSQ_MEMOBJ_OFFSET 0x400
+#define HLSQ_MIPMAP_OFFSET 0x800
+
+/* Use shadow RAM */
+#define HLSQ_SHADOW_BASE (0x10000+SSIZE*2)
+
+#define REG_TO_MEM_LOOP_COUNT_SHIFT 18
+
+#define BUILD_PC_DRAW_INITIATOR(prim_type, source_select, index_size, \
+ vis_cull_mode) \
+ (((prim_type) << PC_DRAW_INITIATOR_PRIM_TYPE) | \
+ ((source_select) << PC_DRAW_INITIATOR_SOURCE_SELECT) | \
+ ((index_size & 1) << PC_DRAW_INITIATOR_INDEX_SIZE) | \
+ ((index_size >> 1) << PC_DRAW_INITIATOR_SMALL_INDEX) | \
+ ((vis_cull_mode) << PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE) | \
+ (1 << PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE))
+
+/*
+ * List of context registers (starting from dword offset 0x2000).
+ * Each line contains start and end of a range of registers.
+ */
+static const unsigned int context_register_ranges[] = {
+ A3XX_GRAS_CL_CLIP_CNTL, A3XX_GRAS_CL_CLIP_CNTL,
+ A3XX_GRAS_CL_GB_CLIP_ADJ, A3XX_GRAS_CL_GB_CLIP_ADJ,
+ A3XX_GRAS_CL_VPORT_XOFFSET, A3XX_GRAS_CL_VPORT_ZSCALE,
+ A3XX_GRAS_SU_POINT_MINMAX, A3XX_GRAS_SU_POINT_SIZE,
+ A3XX_GRAS_SU_POLY_OFFSET_SCALE, A3XX_GRAS_SU_POLY_OFFSET_OFFSET,
+ A3XX_GRAS_SU_MODE_CONTROL, A3XX_GRAS_SU_MODE_CONTROL,
+ A3XX_GRAS_SC_CONTROL, A3XX_GRAS_SC_CONTROL,
+ A3XX_GRAS_SC_SCREEN_SCISSOR_TL, A3XX_GRAS_SC_SCREEN_SCISSOR_BR,
+ A3XX_GRAS_SC_WINDOW_SCISSOR_TL, A3XX_GRAS_SC_WINDOW_SCISSOR_BR,
+ A3XX_RB_MODE_CONTROL, A3XX_RB_MRT_BLEND_CONTROL3,
+ A3XX_RB_BLEND_RED, A3XX_RB_COPY_DEST_INFO,
+ A3XX_RB_DEPTH_CONTROL, A3XX_RB_DEPTH_CONTROL,
+ A3XX_PC_VSTREAM_CONTROL, A3XX_PC_VSTREAM_CONTROL,
+ A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, A3XX_PC_VERTEX_REUSE_BLOCK_CNTL,
+ A3XX_PC_PRIM_VTX_CNTL, A3XX_PC_RESTART_INDEX,
+ A3XX_HLSQ_CONTROL_0_REG, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG,
+ A3XX_HLSQ_CL_NDRANGE_0_REG, A3XX_HLSQ_CL_NDRANGE_0_REG,
+ A3XX_HLSQ_CL_NDRANGE_2_REG, A3XX_HLSQ_CL_CONTROL_1_REG,
+ A3XX_HLSQ_CL_KERNEL_CONST_REG, A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG,
+ A3XX_HLSQ_CL_WG_OFFSET_REG, A3XX_HLSQ_CL_WG_OFFSET_REG,
+ A3XX_VFD_CONTROL_0, A3XX_VFD_VS_THREADING_THRESHOLD,
+ A3XX_SP_SP_CTRL_REG, A3XX_SP_SP_CTRL_REG,
+ A3XX_SP_VS_CTRL_REG0, A3XX_SP_VS_OUT_REG_7,
+ A3XX_SP_VS_VPC_DST_REG_0, A3XX_SP_VS_PVT_MEM_SIZE_REG,
+ A3XX_SP_VS_LENGTH_REG, A3XX_SP_FS_PVT_MEM_SIZE_REG,
+ A3XX_SP_FS_FLAT_SHAD_MODE_REG_0, A3XX_SP_FS_FLAT_SHAD_MODE_REG_1,
+ A3XX_SP_FS_OUTPUT_REG, A3XX_SP_FS_OUTPUT_REG,
+ A3XX_SP_FS_MRT_REG_0, A3XX_SP_FS_IMAGE_OUTPUT_REG_3,
+ A3XX_SP_FS_LENGTH_REG, A3XX_SP_FS_LENGTH_REG,
+ A3XX_TPL1_TP_VS_TEX_OFFSET, A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
+ A3XX_VPC_ATTR, A3XX_VPC_VARY_CYLWRAP_ENABLE_1,
+};
+
+/* Global registers that need to be saved separately */
+static const unsigned int global_registers[] = {
+ A3XX_GRAS_CL_USER_PLANE_X0, A3XX_GRAS_CL_USER_PLANE_Y0,
+ A3XX_GRAS_CL_USER_PLANE_Z0, A3XX_GRAS_CL_USER_PLANE_W0,
+ A3XX_GRAS_CL_USER_PLANE_X1, A3XX_GRAS_CL_USER_PLANE_Y1,
+ A3XX_GRAS_CL_USER_PLANE_Z1, A3XX_GRAS_CL_USER_PLANE_W1,
+ A3XX_GRAS_CL_USER_PLANE_X2, A3XX_GRAS_CL_USER_PLANE_Y2,
+ A3XX_GRAS_CL_USER_PLANE_Z2, A3XX_GRAS_CL_USER_PLANE_W2,
+ A3XX_GRAS_CL_USER_PLANE_X3, A3XX_GRAS_CL_USER_PLANE_Y3,
+ A3XX_GRAS_CL_USER_PLANE_Z3, A3XX_GRAS_CL_USER_PLANE_W3,
+ A3XX_GRAS_CL_USER_PLANE_X4, A3XX_GRAS_CL_USER_PLANE_Y4,
+ A3XX_GRAS_CL_USER_PLANE_Z4, A3XX_GRAS_CL_USER_PLANE_W4,
+ A3XX_GRAS_CL_USER_PLANE_X5, A3XX_GRAS_CL_USER_PLANE_Y5,
+ A3XX_GRAS_CL_USER_PLANE_Z5, A3XX_GRAS_CL_USER_PLANE_W5,
+ A3XX_VSC_BIN_SIZE,
+ A3XX_VSC_PIPE_CONFIG_0, A3XX_VSC_PIPE_CONFIG_1,
+ A3XX_VSC_PIPE_CONFIG_2, A3XX_VSC_PIPE_CONFIG_3,
+ A3XX_VSC_PIPE_CONFIG_4, A3XX_VSC_PIPE_CONFIG_5,
+ A3XX_VSC_PIPE_CONFIG_6, A3XX_VSC_PIPE_CONFIG_7,
+ A3XX_VSC_PIPE_DATA_ADDRESS_0, A3XX_VSC_PIPE_DATA_ADDRESS_1,
+ A3XX_VSC_PIPE_DATA_ADDRESS_2, A3XX_VSC_PIPE_DATA_ADDRESS_3,
+ A3XX_VSC_PIPE_DATA_ADDRESS_4, A3XX_VSC_PIPE_DATA_ADDRESS_5,
+ A3XX_VSC_PIPE_DATA_ADDRESS_6, A3XX_VSC_PIPE_DATA_ADDRESS_7,
+ A3XX_VSC_PIPE_DATA_LENGTH_0, A3XX_VSC_PIPE_DATA_LENGTH_1,
+ A3XX_VSC_PIPE_DATA_LENGTH_2, A3XX_VSC_PIPE_DATA_LENGTH_3,
+ A3XX_VSC_PIPE_DATA_LENGTH_4, A3XX_VSC_PIPE_DATA_LENGTH_5,
+ A3XX_VSC_PIPE_DATA_LENGTH_6, A3XX_VSC_PIPE_DATA_LENGTH_7,
+ A3XX_VSC_SIZE_ADDRESS
+};
+
+#define GLOBAL_REGISTER_COUNT ARRAY_SIZE(global_registers)
+
+/* A scratchpad used to build commands during context create */
+static struct tmp_ctx {
+ unsigned int *cmd; /* Next available dword in C&V buffer */
+
+ /* Addresses in comamnd buffer where registers are saved */
+ uint32_t reg_values[GLOBAL_REGISTER_COUNT];
+ uint32_t gmem_base; /* Base GPU address of GMEM */
+} tmp_ctx;
+
+#ifndef GSL_CONTEXT_SWITCH_CPU_SYNC
+/*
+ * Function for executing dest = ( (reg & and) ROL rol ) | or
+ */
+static unsigned int *rmw_regtomem(unsigned int *cmd,
+ unsigned int reg, unsigned int and,
+ unsigned int rol, unsigned int or,
+ unsigned int dest)
+{
+ /* CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0x00000000) | reg */
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
+ *cmd++ = 0x00000000; /* AND value */
+ *cmd++ = reg; /* OR address */
+
+ /* CP_SCRATCH_REG2 = ( (CP_SCRATCH_REG2 & and) ROL rol ) | or */
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (rol << 24) | A3XX_CP_SCRATCH_REG2;
+ *cmd++ = and; /* AND value */
+ *cmd++ = or; /* OR value */
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_CP_SCRATCH_REG2;
+ *cmd++ = dest;
+
+ return cmd;
+}
+#endif
+
+static void build_regconstantsave_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start;
+ unsigned int i;
+
+ drawctxt->constant_save_commands[0].hostptr = cmd;
+ drawctxt->constant_save_commands[0].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ cmd++;
+
+ start = cmd;
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ /*
+ * Context registers are already shadowed; just need to
+ * disable shadowing to prevent corruption.
+ */
+
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+ *cmd++ = 4 << 16; /* regs, start=0 */
+ *cmd++ = 0x0; /* count = 0 */
+
+#else
+ /*
+ * Make sure the HW context has the correct register values before
+ * reading them.
+ */
+
+ /* Write context registers into shadow */
+ for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) {
+ unsigned int start = context_register_ranges[i * 2];
+ unsigned int end = context_register_ranges[i * 2 + 1];
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = ((end - start + 1) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ start;
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET)
+ & 0xFFFFE000) + (start - 0x2000) * 4;
+ }
+#endif
+
+ /* Need to handle some of the global registers separately */
+ for (i = 0; i < ARRAY_SIZE(global_registers); i++) {
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = global_registers[i];
+ *cmd++ = tmp_ctx.reg_values[i];
+ }
+
+ /* Save vertex shader constants */
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+ *cmd++ = 0x0000FFFF;
+ *cmd++ = 3; /* EXEC_COUNT */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ drawctxt->constant_save_commands[1].hostptr = cmd;
+ drawctxt->constant_save_commands[1].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ /*
+ From fixup:
+
+ dwords = SP_VS_CTRL_REG1.VSCONSTLENGTH / 4
+ src = (HLSQ_SHADOW_BASE + 0x2000) / 4
+
+ From register spec:
+ SP_VS_CTRL_REG1.VSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
+ */
+ *cmd++ = 0; /* (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
+ /* ALU constant shadow base */
+ *cmd++ = drawctxt->gpustate.gpuaddr & 0xfffffffc;
+
+ /* Save fragment shader constants */
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+ *cmd++ = 0x0000FFFF;
+ *cmd++ = 3; /* EXEC_COUNT */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ drawctxt->constant_save_commands[2].hostptr = cmd;
+ drawctxt->constant_save_commands[2].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ /*
+ From fixup:
+
+ dwords = SP_FS_CTRL_REG1.FSCONSTLENGTH / 4
+ src = (HLSQ_SHADOW_BASE + 0x2000 + SSIZE) / 4
+
+ From register spec:
+ SP_FS_CTRL_REG1.FSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
+ */
+ *cmd++ = 0; /* (dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
+
+ /*
+ From fixup:
+
+ base = drawctxt->gpustate.gpuaddr (ALU constant shadow base)
+ offset = SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET
+
+ From register spec:
+ SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [16:24]: Constant object
+ start offset in on chip RAM,
+ 128bit aligned
+
+ dst = base + offset
+ Because of the base alignment we can use
+ dst = base | offset
+ */
+ *cmd++ = 0; /* dst */
+
+ /* Save VS texture memory objects */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr +
+ VS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc;
+
+ /* Save VS texture mipmap pointers */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP) & 0xfffffffc;
+
+ /* Save VS texture sampler objects */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr +
+ VS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc;
+
+ /* Save FS texture memory objects */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET + SSIZE) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr +
+ FS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc;
+
+ /* Save FS texture mipmap pointers */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET + SSIZE) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP) & 0xfffffffc;
+
+ /* Save FS texture sampler objects */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET + SSIZE) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr +
+ FS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc;
+
+ /* Create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, drawctxt->regconstant_save, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device
+ *adreno_dev)
+{
+ if (adreno_is_a305(adreno_dev))
+ return A305_RBBM_CLOCK_CTL_DEFAULT;
+ else if (adreno_is_a305c(adreno_dev))
+ return A305C_RBBM_CLOCK_CTL_DEFAULT;
+ else if (adreno_is_a320(adreno_dev))
+ return A320_RBBM_CLOCK_CTL_DEFAULT;
+ else if (adreno_is_a330v2(adreno_dev))
+ return A330v2_RBBM_CLOCK_CTL_DEFAULT;
+ else if (adreno_is_a330(adreno_dev))
+ return A330_RBBM_CLOCK_CTL_DEFAULT;
+ else if (adreno_is_a305b(adreno_dev))
+ return A305B_RBBM_CLOCK_CTL_DEFAULT;
+
+ BUG_ON(1);
+}
+
+/* Copy GMEM contents to system memory shadow. */
+static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = tmp_ctx.cmd;
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
+ *cmds++ = adreno_a3xx_rbbm_clock_ctl_default(adreno_dev);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
+
+ /* RB_MODE_CONTROL */
+ *cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RESOLVE_PASS) |
+ _SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1) |
+ _SET(RB_MODECONTROL_PACKER_TIMER_ENABLE, 1);
+ /* RB_RENDER_CONTROL */
+ *cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
+ _SET(RB_RENDERCONTROL_DISABLE_COLOR_PIPE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_RB_COPY_CONTROL);
+ /* RB_COPY_CONTROL */
+ *cmds++ = _SET(RB_COPYCONTROL_RESOLVE_CLEAR_MODE,
+ RB_CLEAR_MODE_RESOLVE) |
+ _SET(RB_COPYCONTROL_COPY_GMEM_BASE,
+ tmp_ctx.gmem_base >> 14);
+ /* RB_COPY_DEST_BASE */
+ *cmds++ = _SET(RB_COPYDESTBASE_COPY_DEST_BASE,
+ shadow->gmemshadow.gpuaddr >> 5);
+ /* RB_COPY_DEST_PITCH */
+ *cmds++ = _SET(RB_COPYDESTPITCH_COPY_DEST_PITCH,
+ (shadow->pitch * 4) / 32);
+ /* RB_COPY_DEST_INFO */
+ *cmds++ = _SET(RB_COPYDESTINFO_COPY_DEST_TILE,
+ RB_TILINGMODE_LINEAR) |
+ _SET(RB_COPYDESTINFO_COPY_DEST_FORMAT, RB_R8G8B8A8_UNORM) |
+ _SET(RB_COPYDESTINFO_COPY_COMPONENT_ENABLE, 0X0F) |
+ _SET(RB_COPYDESTINFO_COPY_DEST_ENDIAN, RB_ENDIAN_NONE);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
+ /* GRAS_SC_CONTROL */
+ *cmds++ = _SET(GRAS_SC_CONTROL_RENDER_MODE, 2);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
+ /* VFD_CONTROL_0 */
+ *cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 4) |
+ _SET(VFD_CTRLREG0_PACKETSIZE, 2) |
+ _SET(VFD_CTRLREG0_STRMDECINSTRCNT, 1) |
+ _SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 1);
+ /* VFD_CONTROL_1 */
+ *cmds++ = _SET(VFD_CTRLREG1_MAXSTORAGE, 1) |
+ _SET(VFD_CTRLREG1_REGID4VTX, 252) |
+ _SET(VFD_CTRLREG1_REGID4INST, 252);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
+ /* VFD_FETCH_INSTR_0_0 */
+ *cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
+ _SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
+ _SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
+ /* VFD_FETCH_INSTR_1_0 */
+ *cmds++ = _SET(VFD_BASEADDR_BASEADDR,
+ shadow->quad_vertices.gpuaddr);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
+ /* VFD_DECODE_INSTR_0 */
+ *cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
+ _SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
+ _SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
+ _SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+ /* HLSQ_CONTROL_0_REG */
+ *cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
+ _SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
+ _SET(HLSQ_CTRL0REG_RESERVED2, 1) |
+ _SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1);
+ /* HLSQ_CONTROL_1_REG */
+ *cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
+ _SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1);
+ /* HLSQ_CONTROL_2_REG */
+ *cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
+ /* HLSQ_CONTROL_3_REG */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
+ /* HLSQ_VS_CONTROL_REG */
+ *cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
+ /* HLSQ_FS_CONTROL_REG */
+ *cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
+ _SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 128) |
+ _SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 1);
+ /* HLSQ_CONST_VSPRESV_RANGE_REG */
+ *cmds++ = 0x00000000;
+ /* HLSQ_CONST_FSPRESV_RANGE_REQ */
+ *cmds++ = _SET(HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY, 32) |
+ _SET(HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY, 32);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
+ /* SP_FS_LENGTH_REG */
+ *cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
+ /* SP_SP_CTRL_REG */
+ *cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1) |
+ _SET(SP_SPCTRLREG_LOMODE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
+ *cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
+ /* SP_VS_CTRL_REG0 */
+ *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+ _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
+ _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 1) |
+ _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+ _SET(SP_VSCTRLREG0_VSSUPERTHREADMODE, 1) |
+ _SET(SP_VSCTRLREG0_VSLENGTH, 1);
+ /* SP_VS_CTRL_REG1 */
+ *cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 4);
+ /* SP_VS_PARAM_REG */
+ *cmds++ = _SET(SP_VSPARAMREG_PSIZEREGID, 252);
+ /* SP_VS_OUT_REG_0 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG_1 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG_2 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG_3 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG_4 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG_5 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG_6 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG_7 */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
+ *cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
+ /* SP_VS_VPC_DST_REG_0 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_VPC_DST_REG_1 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_VPC_DST_REG_2 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_VPC_DST_REG_3 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OBJ_OFFSET_REG */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OBJ_START_REG */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
+ *cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
+ /* SP_VS_LENGTH_REG */
+ *cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
+ /* SP_FS_CTRL_REG0 */
+ *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+ _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+ _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+ _SET(SP_FSCTRLREG0_FSLENGTH, 1);
+ /* SP_FS_CTRL_REG1 */
+ *cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
+ _SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
+ /* SP_FS_OBJ_OFFSET_REG */
+ *cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128) |
+ _SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 127);
+ /* SP_FS_OBJ_START_REG */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
+ /* SP_FS_FLAT_SHAD_MODE_REG_0 */
+ *cmds++ = 0x00000000;
+ /* SP_FS_FLAT_SHAD_MODE_REG_1 */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
+ /* SP_FS_OUTPUT_REG */
+ *cmds++ = _SET(SP_IMAGEOUTPUTREG_DEPTHOUTMODE, SP_PIXEL_BASED);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
+ /* SP_FS_MRT_REG_0 */
+ *cmds++ = _SET(SP_FSMRTREG_PRECISION, 1);
+
+ /* SP_FS_MRT_REG_1 */
+ *cmds++ = 0x00000000;
+ /* SP_FS_MRT_REG_2 */
+ *cmds++ = 0x00000000;
+ /* SP_FS_MRT_REG_3 */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
+ *cmds++ = CP_REG(A3XX_VPC_ATTR);
+ /* VPC_ATTR */
+ *cmds++ = _SET(VPC_VPCATTR_THRHDASSIGN, 1) |
+ _SET(VPC_VPCATTR_LMSIZE, 1);
+ /* VPC_PACK */
+ *cmds++ = 0x00000000;
+ /* VPC_VARRYING_INTERUPT_MODE_0 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARRYING_INTERUPT_MODE_1 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARRYING_INTERUPT_MODE_2 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARRYING_INTERUPT_MODE_3 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARYING_PS_REPL_MODE_0 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARYING_PS_REPL_MODE_1 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARYING_PS_REPL_MODE_2 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARYING_PS_REPL_MODE_3 */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
+ *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+
+ /* (sy)(rpt3)mov.f32f32 r0.y, (r)r1.y; */
+ *cmds++ = 0x00000000; *cmds++ = 0x13001000;
+ /* end; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ /* nop; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ /* nop; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
+ *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+
+ /* (sy)(rpt3)mov.f32f32 r0.y, (r)c0.x; */
+ *cmds++ = 0x00000000; *cmds++ = 0x30201b00;
+ /* end; */
+ *cmds++ = 0x00000000; *cmds++ = 0x03000000;
+ /* nop; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ /* nop; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
+ /* RB_MSAA_CONTROL */
+ *cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
+ _SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
+ /* RB_DEPTH_CONTROL */
+ *cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_NEVER);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
+ /* RB_STENCIL_CONTROL */
+ *cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_NEVER) |
+ _SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_NEVER) |
+ _SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
+ /* GRAS_SU_MODE_CONTROL */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
+ /* RB_MRT_CONTROL0 */
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
+ /* RB_MRT_BLEND_CONTROL0 */
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+ /* RB_MRT_CONTROL1 */
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
+ /* RB_MRT_BLEND_CONTROL1 */
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+ /* RB_MRT_CONTROL2 */
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
+ /* RB_MRT_BLEND_CONTROL2 */
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+ /* RB_MRT_CONTROL3 */
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
+ /* RB_MRT_BLEND_CONTROL3 */
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
+ /* VFD_INDEX_MIN */
+ *cmds++ = 0x00000000;
+ /* VFD_INDEX_MAX */
+ *cmds++ = 0x155;
+ /* VFD_INSTANCEID_OFFSET */
+ *cmds++ = 0x00000000;
+ /* VFD_INDEX_OFFSET */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
+ /* VFD_VS_THREADING_THRESHOLD */
+ *cmds++ = _SET(VFD_THREADINGTHRESHOLD_REGID_THRESHOLD, 15) |
+ _SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
+ /* TPL1_TP_VS_TEX_OFFSET */
+ *cmds++ = 0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
+ /* TPL1_TP_FS_TEX_OFFSET */
+ *cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
+ _SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
+ _SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_PC_PRIM_VTX_CNTL);
+ /* PC_PRIM_VTX_CNTL */
+ *cmds++ = _SET(PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE,
+ PC_DRAW_TRIANGLES) |
+ _SET(PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE,
+ PC_DRAW_TRIANGLES) |
+ _SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
+ /* GRAS_SC_WINDOW_SCISSOR_TL */
+ *cmds++ = 0x00000000;
+ /* GRAS_SC_WINDOW_SCISSOR_BR */
+ *cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
+ _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
+ /* GRAS_SC_SCREEN_SCISSOR_TL */
+ *cmds++ = 0x00000000;
+ /* GRAS_SC_SCREEN_SCISSOR_BR */
+ *cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
+ _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
+ /* GRAS_CL_VPORT_XOFFSET */
+ *cmds++ = 0x00000000;
+ /* GRAS_CL_VPORT_XSCALE */
+ *cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3f800000);
+ /* GRAS_CL_VPORT_YOFFSET */
+ *cmds++ = 0x00000000;
+ /* GRAS_CL_VPORT_YSCALE */
+ *cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3f800000);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
+ /* GRAS_CL_VPORT_ZOFFSET */
+ *cmds++ = 0x00000000;
+ /* GRAS_CL_VPORT_ZSCALE */
+ *cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3f800000);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_CLIP_CNTL);
+ /* GRAS_CL_CLIP_CNTL */
+ *cmds++ = _SET(GRAS_CL_CLIP_CNTL_CLIP_DISABLE, 1) |
+ _SET(GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE, 1) |
+ _SET(GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE, 1) |
+ _SET(GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE, 1) |
+ _SET(GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_GB_CLIP_ADJ);
+ /* GRAS_CL_GB_CLIP_ADJ */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+
+ /* oxili_generate_context_roll_packets */
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000; /* SP_VS_MEM_SIZE_REG */
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000; /* SP_FS_MEM_SIZE_REG */
+
+ /* Clear cache invalidate bit when re-loading the shader control regs */
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+ _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 1) |
+ _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+ _SET(SP_VSCTRLREG0_VSSUPERTHREADMODE, 1) |
+ _SET(SP_VSCTRLREG0_VSLENGTH, 1);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+ _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+ _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+ _SET(SP_FSCTRLREG0_FSLENGTH, 1);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000; /* SP_VS_MEM_SIZE_REG */
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000; /* SP_FS_MEM_SIZE_REG */
+
+ /* end oxili_generate_context_roll_packets */
+
+ /*
+ * Resolve using two draw calls with a dummy register
+ * write in between. This is a HLM workaround
+ * that should be removed later.
+ */
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
+ *cmds++ = 0x00000000; /* Viz query info */
+ *cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
+ PC_DI_SRC_SEL_IMMEDIATE,
+ PC_DI_INDEX_SIZE_32_BIT,
+ PC_DI_IGNORE_VISIBILITY);
+ *cmds++ = 0x00000003; /* Num indices */
+ *cmds++ = 0x00000000; /* Index 0 */
+ *cmds++ = 0x00000001; /* Index 1 */
+ *cmds++ = 0x00000002; /* Index 2 */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
+ *cmds++ = 0x00000000; /* Viz query info */
+ *cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
+ PC_DI_SRC_SEL_IMMEDIATE,
+ PC_DI_INDEX_SIZE_32_BIT,
+ PC_DI_IGNORE_VISIBILITY);
+ *cmds++ = 0x00000003; /* Num indices */
+ *cmds++ = 0x00000002; /* Index 0 */
+ *cmds++ = 0x00000001; /* Index 1 */
+ *cmds++ = 0x00000003; /* Index 2 */
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ /* Create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, shadow->gmem_save, start, cmds);
+
+ return cmds;
+}
+static void build_shader_save_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start;
+
+ /* Reserve space for boolean values used for COND_EXEC packet */
+ drawctxt->cond_execs[0].hostptr = cmd;
+ drawctxt->cond_execs[0].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ drawctxt->cond_execs[1].hostptr = cmd;
+ drawctxt->cond_execs[1].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+ drawctxt->shader_save_commands[0].hostptr = cmd;
+ drawctxt->shader_save_commands[0].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ drawctxt->shader_save_commands[1].hostptr = cmd;
+ drawctxt->shader_save_commands[1].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+ start = cmd;
+
+ /* Save vertex shader */
+
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+ *cmd++ = 0x0000FFFF;
+ *cmd++ = 3; /* EXEC_COUNT */
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ drawctxt->shader_save_commands[2].hostptr = cmd;
+ drawctxt->shader_save_commands[2].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ /*
+ From fixup:
+
+ dwords = SP_VS_CTRL_REG0.VS_LENGTH * 8
+
+ From regspec:
+ SP_VS_CTRL_REG0.VS_LENGTH [31:24]: VS length, unit = 256bits.
+ If bit31 is 1, it means overflow
+ or any long shader.
+
+ src = (HLSQ_SHADOW_BASE + 0x1000)/4
+ */
+ *cmd++ = 0; /*(dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
+ *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;
+
+ /* Save fragment shader */
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+ *cmd++ = 0x0000FFFF;
+ *cmd++ = 3; /* EXEC_COUNT */
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ drawctxt->shader_save_commands[3].hostptr = cmd;
+ drawctxt->shader_save_commands[3].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ /*
+ From fixup:
+
+ dwords = SP_FS_CTRL_REG0.FS_LENGTH * 8
+
+ From regspec:
+ SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits.
+ If bit31 is 1, it means overflow
+ or any long shader.
+
+ fs_offset = SP_FS_OBJ_OFFSET_REG.SHADEROBJOFFSETINIC * 32
+ From regspec:
+
+ SP_FS_OBJ_OFFSET_REG.SHADEROBJOFFSETINIC [31:25]:
+ First instruction of the whole shader will be stored from
+ the offset in instruction cache, unit = 256bits, a cache line.
+ It can start from 0 if no VS available.
+
+ src = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE + fs_offset)/4
+ */
+ *cmd++ = 0; /*(dwords << REG_TO_MEM_LOOP_COUNT_SHIFT) | src */
+ *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
+ + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;
+
+ /* Create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, drawctxt->shader_save, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+/*
+ * Make an IB to modify context save IBs with the correct shader instruction
+ * and constant sizes and offsets.
+ */
+
+static void build_save_fixup_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+
+ /* Flush HLSQ lazy updates */
+ *cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
+ *cmd++ = 0x7; /* HLSQ_FLUSH */
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ *cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
+ *cmd++ = 0x00000000; /* No start addr for full invalidate */
+ *cmd++ = (unsigned int)
+ UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
+ UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
+ 0; /* No end addr for full invalidate */
+
+ /* Make sure registers are flushed */
+ *cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
+ *cmd++ = 0;
+
+#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
+
+ /* Save shader sizes */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG0;
+ *cmd++ = drawctxt->shader_save_commands[2].gpuaddr;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG0;
+ *cmd++ = drawctxt->shader_save_commands[3].gpuaddr;
+
+ /* Save shader offsets */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+ *cmd++ = drawctxt->shader_save_commands[1].gpuaddr;
+
+ /* Save constant sizes */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG1;
+ *cmd++ = drawctxt->constant_save_commands[1].gpuaddr;
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG1;
+ *cmd++ = drawctxt->constant_save_commands[2].gpuaddr;
+
+ /* Save FS constant offset */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+ *cmd++ = drawctxt->constant_save_commands[0].gpuaddr;
+
+
+ /* Save VS instruction store mode */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG0;
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr;
+
+ /* Save FS instruction store mode */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG0;
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr;
+#else
+
+ /* Shader save */
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000,
+ 11+REG_TO_MEM_LOOP_COUNT_SHIFT,
+ (HLSQ_SHADOW_BASE + 0x1000) / 4,
+ drawctxt->shader_save_commands[2].gpuaddr);
+
+ /* CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0x00000000) | SP_FS_CTRL_REG0 */
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
+ *cmd++ = 0x00000000; /* AND value */
+ *cmd++ = A3XX_SP_FS_CTRL_REG0; /* OR address */
+ /* CP_SCRATCH_REG2 = ( (CP_SCRATCH_REG2 & 0x7f000000) >> 21 )
+ | ((HLSQ_SHADOW_BASE+0x1000+SSIZE)/4) */
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = ((11 + REG_TO_MEM_LOOP_COUNT_SHIFT) << 24) |
+ A3XX_CP_SCRATCH_REG2;
+ *cmd++ = 0x7f000000; /* AND value */
+ *cmd++ = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE) / 4; /* OR value */
+
+ /*
+ * CP_SCRATCH_REG3 = (CP_SCRATCH_REG3 & 0x00000000) |
+ * SP_FS_OBJ_OFFSET_REG
+ */
+
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG3;
+ *cmd++ = 0x00000000; /* AND value */
+ *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG; /* OR address */
+ /*
+ * CP_SCRATCH_REG3 = ( (CP_SCRATCH_REG3 & 0xfe000000) >> 25 ) |
+ * 0x00000000
+ */
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = A3XX_CP_SCRATCH_REG3;
+ *cmd++ = 0xfe000000; /* AND value */
+ *cmd++ = 0x00000000; /* OR value */
+ /*
+ * CP_SCRATCH_REG2 = (CP_SCRATCH_REG2 & 0xffffffff) | CP_SCRATCH_REG3
+ */
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
+ *cmd++ = 0xffffffff; /* AND value */
+ *cmd++ = A3XX_CP_SCRATCH_REG3; /* OR address */
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_CP_SCRATCH_REG2;
+ *cmd++ = drawctxt->shader_save_commands[3].gpuaddr;
+
+ /* Constant save */
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+ 2 + REG_TO_MEM_LOOP_COUNT_SHIFT,
+ (HLSQ_SHADOW_BASE + 0x2000) / 4,
+ drawctxt->constant_save_commands[1].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+ 2 + REG_TO_MEM_LOOP_COUNT_SHIFT,
+ (HLSQ_SHADOW_BASE + 0x2000 + SSIZE) / 4,
+ drawctxt->constant_save_commands[2].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000,
+ 18, drawctxt->gpustate.gpuaddr & 0xfffffe00,
+ drawctxt->constant_save_commands[2].gpuaddr
+ + sizeof(unsigned int));
+
+ /* Modify constant save conditionals */
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+ 0, 0, drawctxt->cond_execs[2].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+ 0, 0, drawctxt->cond_execs[3].gpuaddr);
+
+ /* Save VS instruction store mode */
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x00000002,
+ 31, 0, drawctxt->cond_execs[0].gpuaddr);
+
+ /* Save FS instruction store mode */
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x00000002,
+ 31, 0, drawctxt->cond_execs[1].gpuaddr);
+
+#endif
+
+ create_ib1(drawctxt, drawctxt->save_fixup, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+/****************************************************************************/
+/* Functions to build context restore IBs */
+/****************************************************************************/
+
+static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = tmp_ctx.cmd;
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
+ *cmds++ = adreno_a3xx_rbbm_clock_ctl_default(adreno_dev);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+ /* HLSQ_CONTROL_0_REG */
+ *cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
+ _SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
+ _SET(HLSQ_CTRL0REG_SPSHADERRESTART, 1) |
+ _SET(HLSQ_CTRL0REG_CHUNKDISABLE, 1) |
+ _SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1);
+ /* HLSQ_CONTROL_1_REG */
+ *cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
+ _SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1);
+ /* HLSQ_CONTROL_2_REG */
+ *cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
+ /* HLSQ_CONTROL3_REG */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BUF_INFO0);
+ /* RB_MRT_BUF_INFO0 */
+ *cmds++ = _SET(RB_MRTBUFINFO_COLOR_FORMAT, RB_R8G8B8A8_UNORM) |
+ _SET(RB_MRTBUFINFO_COLOR_TILE_MODE, RB_TILINGMODE_32X32) |
+ _SET(RB_MRTBUFINFO_COLOR_BUF_PITCH,
+ (shadow->gmem_pitch * 4 * 8) / 256);
+ /* RB_MRT_BUF_BASE0 */
+ *cmds++ = _SET(RB_MRTBUFBASE_COLOR_BUF_BASE, tmp_ctx.gmem_base >> 5);
+
+ /* Texture samplers */
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
+ *cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_TP_TEX_SAMPLERS << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ *cmds++ = 0x00000240;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ /* Texture memobjs */
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 6);
+ *cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_TP_TEX_MEMOBJ << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ *cmds++ = 0x4cc06880;
+ *cmds++ = shadow->height | (shadow->width << 14);
+ *cmds++ = (shadow->pitch*4*8) << 9;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ /* Mipmap bases */
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 16);
+ *cmds++ = (224 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_TP_MIPMAP << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (14 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_TP_MIPMAP_BASE << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ *cmds++ = shadow->gmemshadow.gpuaddr;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
+ /* HLSQ_VS_CONTROL_REG */
+ *cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
+ /* HLSQ_FS_CONTROL_REG */
+ *cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
+ _SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 128) |
+ _SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 2);
+ /* HLSQ_CONST_VSPRESV_RANGE_REG */
+ *cmds++ = 0x00000000;
+ /* HLSQ_CONST_FSPRESV_RANGE_REG */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
+ /* SP_FS_LENGTH_REG */
+ *cmds++ = _SET(SP_SHADERLENGTH_LEN, 2);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
+ *cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
+ /* SP_VS_CTRL_REG0 */
+ *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+ _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
+ _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 2) |
+ _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+ _SET(SP_VSCTRLREG0_VSLENGTH, 1);
+ /* SP_VS_CTRL_REG1 */
+ *cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 8);
+ /* SP_VS_PARAM_REG */
+ *cmds++ = _SET(SP_VSPARAMREG_POSREGID, 4) |
+ _SET(SP_VSPARAMREG_PSIZEREGID, 252) |
+ _SET(SP_VSPARAMREG_TOTALVSOUTVAR, 1);
+ /* SP_VS_OUT_REG0 */
+ *cmds++ = _SET(SP_VSOUTREG_COMPMASK0, 3);
+ /* SP_VS_OUT_REG1 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG2 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG3 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG4 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG5 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG6 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OUT_REG7 */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
+ *cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
+ /* SP_VS_VPC_DST_REG0 */
+ *cmds++ = _SET(SP_VSVPCDSTREG_OUTLOC0, 8);
+ /* SP_VS_VPC_DST_REG1 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_VPC_DST_REG2 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_VPC_DST_REG3 */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OBJ_OFFSET_REG */
+ *cmds++ = 0x00000000;
+ /* SP_VS_OBJ_START_REG */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
+ *cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
+ /* SP_VS_LENGTH_REG */
+ *cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
+ /* SP_FS_CTRL_REG0 */
+ *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+ _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+ _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+ _SET(SP_FSCTRLREG0_PIXLODENABLE, 1) |
+ _SET(SP_FSCTRLREG0_FSLENGTH, 2);
+ /* SP_FS_CTRL_REG1 */
+ *cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
+ _SET(SP_FSCTRLREG1_FSINITIALOUTSTANDING, 2) |
+ _SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
+ /* SP_FS_OBJ_OFFSET_REG */
+ *cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128) |
+ _SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 126);
+ /* SP_FS_OBJ_START_REG */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
+ /* SP_FS_FLAT_SHAD_MODE_REG0 */
+ *cmds++ = 0x00000000;
+ /* SP_FS_FLAT_SHAD_MODE_REG1 */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
+ /* SP_FS_OUT_REG */
+ *cmds++ = _SET(SP_FSOUTREG_PAD0, SP_PIXEL_BASED);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
+ /* SP_FS_MRT_REG0 */
+ *cmds++ = _SET(SP_FSMRTREG_PRECISION, 1);
+ /* SP_FS_MRT_REG1 */
+ *cmds++ = 0;
+ /* SP_FS_MRT_REG2 */
+ *cmds++ = 0;
+ /* SP_FS_MRT_REG3 */
+ *cmds++ = 0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
+ *cmds++ = CP_REG(A3XX_VPC_ATTR);
+ /* VPC_ATTR */
+ *cmds++ = _SET(VPC_VPCATTR_TOTALATTR, 2) |
+ _SET(VPC_VPCATTR_THRHDASSIGN, 1) |
+ _SET(VPC_VPCATTR_LMSIZE, 1);
+ /* VPC_PACK */
+ *cmds++ = _SET(VPC_VPCPACK_NUMFPNONPOSVAR, 2) |
+ _SET(VPC_VPCPACK_NUMNONPOSVSVAR, 2);
+ /* VPC_VARYING_INTERP_MODE_0 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARYING_INTERP_MODE1 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARYING_INTERP_MODE2 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARYING_IINTERP_MODE3 */
+ *cmds++ = 0x00000000;
+ /* VPC_VARRYING_PS_REPL_MODE_0 */
+ *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+ /* VPC_VARRYING_PS_REPL_MODE_1 */
+ *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+ /* VPC_VARRYING_PS_REPL_MODE_2 */
+ *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+ /* VPC_VARRYING_PS_REPL_MODE_3 */
+ *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
+ /* SP_SP_CTRL_REG */
+ *cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1) |
+ _SET(SP_SPCTRLREG_LOMODE, 1);
+
+ /* Load vertex shader */
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
+ *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ /* (sy)end; */
+ *cmds++ = 0x00000000; *cmds++ = 0x13001000;
+ /* nop; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ /* nop; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ /* nop; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+
+ /* Load fragment shader */
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 18);
+ *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (2 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ /* (sy)(rpt1)bary.f (ei)r0.z, (r)0, r0.x; */
+ *cmds++ = 0x00002000; *cmds++ = 0x57309902;
+ /* (rpt5)nop; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000500;
+ /* sam (f32)r0.xyzw, r0.z, s#0, t#0; */
+ *cmds++ = 0x00000005; *cmds++ = 0xa0c01f00;
+ /* (sy)mov.f32f32 r1.x, r0.x; */
+ *cmds++ = 0x00000000; *cmds++ = 0x30040b00;
+ /* mov.f32f32 r1.y, r0.y; */
+ *cmds++ = 0x00000000; *cmds++ = 0x03000000;
+ /* mov.f32f32 r1.z, r0.z; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ /* mov.f32f32 r1.w, r0.w; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+ /* end; */
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
+ /* VFD_CONTROL_0 */
+ *cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 8) |
+ _SET(VFD_CTRLREG0_PACKETSIZE, 2) |
+ _SET(VFD_CTRLREG0_STRMDECINSTRCNT, 2) |
+ _SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 2);
+ /* VFD_CONTROL_1 */
+ *cmds++ = _SET(VFD_CTRLREG1_MAXSTORAGE, 2) |
+ _SET(VFD_CTRLREG1_REGID4VTX, 252) |
+ _SET(VFD_CTRLREG1_REGID4INST, 252);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
+ /* VFD_FETCH_INSTR_0_0 */
+ *cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 7) |
+ _SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 8) |
+ _SET(VFD_FETCHINSTRUCTIONS_SWITCHNEXT, 1) |
+ _SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
+ /* VFD_FETCH_INSTR_1_0 */
+ *cmds++ = _SET(VFD_BASEADDR_BASEADDR,
+ shadow->quad_vertices_restore.gpuaddr);
+ /* VFD_FETCH_INSTR_0_1 */
+ *cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
+ _SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
+ _SET(VFD_FETCHINSTRUCTIONS_INDEXDECODE, 1) |
+ _SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
+ /* VFD_FETCH_INSTR_1_1 */
+ *cmds++ = _SET(VFD_BASEADDR_BASEADDR,
+ shadow->quad_vertices_restore.gpuaddr + 16);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
+ /* VFD_DECODE_INSTR_0 */
+ *cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
+ _SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_FORMAT, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 8) |
+ _SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_SWITCHNEXT, 1);
+ /* VFD_DECODE_INSTR_1 */
+ *cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
+ _SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
+ _SET(VFD_DECODEINSTRUCTIONS_REGID, 4) |
+ _SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
+ _SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
+ /* RB_DEPTH_CONTROL */
+ *cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_LESS);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
+ /* RB_STENCIL_CONTROL */
+ *cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_ALWAYS) |
+ _SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_ALWAYS) |
+ _SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
+ /* RB_MODE_CONTROL */
+ *cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RENDERING_PASS) |
+ _SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_RENDER_CONTROL);
+ /* RB_RENDER_CONTROL */
+ *cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
+ _SET(RB_RENDERCONTROL_ALPHA_TEST_FUNC, 7);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
+ /* RB_MSAA_CONTROL */
+ *cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
+ _SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
+ /* RB_MRT_CONTROL0 */
+ *cmds++ = _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
+ /* RB_MRT_BLENDCONTROL0 */
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+ /* RB_MRT_CONTROL1 */
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
+ /* RB_MRT_BLENDCONTROL1 */
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+ /* RB_MRT_CONTROL2 */
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
+ /* RB_MRT_BLENDCONTROL2 */
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+ /* RB_MRT_CONTROL3 */
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
+ /* RB_MRT_BLENDCONTROL3 */
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
+ /* VFD_INDEX_MIN */
+ *cmds++ = 0x00000000;
+ /* VFD_INDEX_MAX */
+ *cmds++ = 340;
+ /* VFD_INDEX_OFFSET */
+ *cmds++ = 0x00000000;
+ /* TPL1_TP_VS_TEX_OFFSET */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
+ /* VFD_VS_THREADING_THRESHOLD */
+ *cmds++ = _SET(VFD_THREADINGTHRESHOLD_REGID_THRESHOLD, 15) |
+ _SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
+ /* TPL1_TP_VS_TEX_OFFSET */
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
+ /* TPL1_TP_FS_TEX_OFFSET */
+ *cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
+ _SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
+ _SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
+ /* GRAS_SC_CONTROL */
+ /*cmds++ = _SET(GRAS_SC_CONTROL_RASTER_MODE, 1);
+ *cmds++ = _SET(GRAS_SC_CONTROL_RASTER_MODE, 1) |*/
+ *cmds++ = 0x04001000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
+ /* GRAS_SU_MODE_CONTROL */
+ *cmds++ = _SET(GRAS_SU_CTRLMODE_LINEHALFWIDTH, 2);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
+ /* GRAS_SC_WINDOW_SCISSOR_TL */
+ *cmds++ = 0x00000000;
+ /* GRAS_SC_WINDOW_SCISSOR_BR */
+ *cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
+ _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
+ /* GRAS_SC_SCREEN_SCISSOR_TL */
+ *cmds++ = 0x00000000;
+ /* GRAS_SC_SCREEN_SCISSOR_BR */
+ *cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
+ _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
+ /* GRAS_CL_VPORT_XOFFSET */
+ *cmds++ = 0x00000000;
+ /* GRAS_CL_VPORT_XSCALE */
+ *cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3F800000);
+ /* GRAS_CL_VPORT_YOFFSET */
+ *cmds++ = 0x00000000;
+ /* GRAS_CL_VPORT_YSCALE */
+ *cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3F800000);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
+ /* GRAS_CL_VPORT_ZOFFSET */
+ *cmds++ = 0x00000000;
+ /* GRAS_CL_VPORT_ZSCALE */
+ *cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3F800000);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_CLIP_CNTL);
+ /* GRAS_CL_CLIP_CNTL */
+ *cmds++ = _SET(GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_IMAGE_OUTPUT_REG_0);
+ /* SP_FS_IMAGE_OUTPUT_REG_0 */
+ *cmds++ = _SET(SP_IMAGEOUTPUTREG_MRTFORMAT, SP_R8G8B8A8_UNORM);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_PC_PRIM_VTX_CNTL);
+ /* PC_PRIM_VTX_CONTROL */
+ *cmds++ = _SET(PC_PRIM_VTX_CONTROL_STRIDE_IN_VPC, 2) |
+ _SET(PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE,
+ PC_DRAW_TRIANGLES) |
+ _SET(PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE,
+ PC_DRAW_TRIANGLES) |
+ _SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);
+
+
+ /* oxili_generate_context_roll_packets */
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000; /* SP_VS_MEM_SIZE_REG */
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000; /* SP_FS_MEM_SIZE_REG */
+
+ /* Clear cache invalidate bit when re-loading the shader control regs */
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+ _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 2) |
+ _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+ _SET(SP_VSCTRLREG0_VSLENGTH, 1);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+ _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+ _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+ _SET(SP_FSCTRLREG0_FSLENGTH, 2);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000; /* SP_VS_MEM_SIZE_REG */
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000; /* SP_FS_MEM_SIZE_REG */
+
+ /* end oxili_generate_context_roll_packets */
+
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
+ *cmds++ = 0x00000000; /* Viz query info */
+ *cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_RECTLIST,
+ PC_DI_SRC_SEL_AUTO_INDEX,
+ PC_DI_INDEX_SIZE_16_BIT,
+ PC_DI_IGNORE_VISIBILITY);
+ *cmds++ = 0x00000002; /* Num indices */
+
+ /* Create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
+
+ return cmds;
+}
+
+
+static void build_regrestore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *start = tmp_ctx.cmd;
+ unsigned int *cmd = start;
+ unsigned int *lcc_start;
+
+ int i;
+
+ /* Flush HLSQ lazy updates */
+ *cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
+ *cmd++ = 0x7; /* HLSQ_FLUSH */
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ *cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
+ *cmd++ = 0x00000000; /* No start addr for full invalidate */
+ *cmd++ = (unsigned int)
+ UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
+ UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
+ 0; /* No end addr for full invalidate */
+
+ lcc_start = cmd;
+
+ /* deferred cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ???); */
+ cmd++;
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ /* Force mismatch */
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
+#else
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) {
+ cmd = reg_range(cmd, context_register_ranges[i * 2],
+ context_register_ranges[i * 2 + 1]);
+ }
+
+ lcc_start[0] = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT,
+ (cmd - lcc_start) - 1);
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ lcc_start[2] |= (0 << 24) | (4 << 16); /* Disable shadowing. */
+#else
+ lcc_start[2] |= (1 << 24) | (4 << 16);
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(global_registers); i++) {
+ *cmd++ = cp_type0_packet(global_registers[i], 1);
+ tmp_ctx.reg_values[i] = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0x00000000;
+ }
+
+ create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
+ tmp_ctx.cmd = cmd;
+}
+
+static void build_constantrestore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+ unsigned int mode = 4; /* Indirect mode */
+ unsigned int stateblock;
+ unsigned int numunits;
+ unsigned int statetype;
+
+ drawctxt->cond_execs[2].hostptr = cmd;
+ drawctxt->cond_execs[2].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ drawctxt->cond_execs[3].hostptr = cmd;
+ drawctxt->cond_execs[3].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+ *cmd++ = 4 << 16;
+ *cmd++ = 0x0;
+#endif
+ /* HLSQ full update */
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+ *cmd++ = 0x68000240; /* A3XX_HLSQ_CONTROL_0_REG */
+
+#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ /* Re-enable shadowing */
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+ *cmd++ = (4 << 16) | (1 << 24);
+ *cmd++ = 0x0;
+#endif
+
+ /* Load vertex shader constants */
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+ *cmd++ = 0x0000ffff;
+ *cmd++ = 3; /* EXEC_COUNT */
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ drawctxt->constant_load_commands[0].hostptr = cmd;
+ drawctxt->constant_load_commands[0].gpuaddr = virt2gpu(cmd,
+ &drawctxt->gpustate);
+
+ /*
+ From fixup:
+
+ mode = 4 (indirect)
+ stateblock = 4 (Vertex constants)
+ numunits = SP_VS_CTRL_REG1.VSCONSTLENGTH * 2; (256bit units)
+
+ From register spec:
+ SP_VS_CTRL_REG1.VSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
+
+ ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16);
+ */
+
+ *cmd++ = 0; /* ord1 */
+ *cmd++ = ((drawctxt->gpustate.gpuaddr) & 0xfffffffc) | 1;
+
+ /* Load fragment shader constants */
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+ *cmd++ = 0x0000ffff;
+ *cmd++ = 3; /* EXEC_COUNT */
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ drawctxt->constant_load_commands[1].hostptr = cmd;
+ drawctxt->constant_load_commands[1].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ /*
+ From fixup:
+
+ mode = 4 (indirect)
+ stateblock = 6 (Fragment constants)
+ numunits = SP_FS_CTRL_REG1.FSCONSTLENGTH * 2; (256bit units)
+
+ From register spec:
+ SP_FS_CTRL_REG1.FSCONSTLENGTH [09:00]: 0-512, unit = 128bits.
+
+ ord1 = (numunits<<22) | (stateblock<<19) | (mode<<16);
+ */
+
+ *cmd++ = 0; /* ord1 */
+ drawctxt->constant_load_commands[2].hostptr = cmd;
+ drawctxt->constant_load_commands[2].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ /*
+ From fixup:
+ base = drawctxt->gpustate.gpuaddr (ALU constant shadow base)
+ offset = SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET
+
+ From register spec:
+ SP_FS_OBJ_OFFSET_REG.CONSTOBJECTSTARTOFFSET [16:24]: Constant object
+ start offset in on chip RAM,
+ 128bit aligned
+
+ ord2 = base + offset | 1
+ Because of the base alignment we can use
+ ord2 = base | offset | 1
+ */
+ *cmd++ = 0; /* ord2 */
+
+ /* Restore VS texture memory objects */
+ stateblock = 0;
+ statetype = 1;
+ numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;
+
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MEM_OBJECTS)
+ & 0xfffffffc) | statetype;
+
+ /* Restore VS texture mipmap addresses */
+ stateblock = 1;
+ statetype = 1;
+ numunits = TEX_SIZE_MIPMAP / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP)
+ & 0xfffffffc) | statetype;
+
+ /* Restore VS texture sampler objects */
+ stateblock = 0;
+ statetype = 0;
+ numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_SAMPLER_OBJ)
+ & 0xfffffffc) | statetype;
+
+ /* Restore FS texture memory objects */
+ stateblock = 2;
+ statetype = 1;
+ numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MEM_OBJECTS)
+ & 0xfffffffc) | statetype;
+
+ /* Restore FS texture mipmap addresses */
+ stateblock = 3;
+ statetype = 1;
+ numunits = TEX_SIZE_MIPMAP / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP)
+ & 0xfffffffc) | statetype;
+
+ /* Restore FS texture sampler objects */
+ stateblock = 2;
+ statetype = 0;
+ numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_SAMPLER_OBJ)
+ & 0xfffffffc) | statetype;
+
+ create_ib1(drawctxt, drawctxt->constant_restore, start, cmd);
+ tmp_ctx.cmd = cmd;
+}
+
+static void build_shader_restore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+
+ /* Vertex shader */
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+ *cmd++ = 1;
+ *cmd++ = 3; /* EXEC_COUNT */
+
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ drawctxt->shader_load_commands[0].hostptr = cmd;
+ drawctxt->shader_load_commands[0].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ /*
+ From fixup:
+
+ mode = 4 (indirect)
+ stateblock = 4 (Vertex shader)
+ numunits = SP_VS_CTRL_REG0.VS_LENGTH
+
+ From regspec:
+ SP_VS_CTRL_REG0.VS_LENGTH [31:24]: VS length, unit = 256bits.
+ If bit31 is 1, it means overflow
+ or any long shader.
+
+ ord1 = (numunits<<22) | (stateblock<<19) | (mode<<11)
+ */
+ *cmd++ = 0; /*ord1 */
+ *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;
+
+ /* Fragment shader */
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+ *cmd++ = 1;
+ *cmd++ = 3; /* EXEC_COUNT */
+
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ drawctxt->shader_load_commands[1].hostptr = cmd;
+ drawctxt->shader_load_commands[1].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ /*
+ From fixup:
+
+ mode = 4 (indirect)
+ stateblock = 6 (Fragment shader)
+ numunits = SP_FS_CTRL_REG0.FS_LENGTH
+
+ From regspec:
+ SP_FS_CTRL_REG0.FS_LENGTH [31:24]: FS length, unit = 256bits.
+ If bit31 is 1, it means overflow
+ or any long shader.
+
+ ord1 = (numunits<<22) | (stateblock<<19) | (mode<<11)
+ */
+ *cmd++ = 0; /*ord1 */
+ *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
+ + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;
+
+ create_ib1(drawctxt, drawctxt->shader_restore, start, cmd);
+ tmp_ctx.cmd = cmd;
+}
+
+static void build_hlsqcontrol_restore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+ drawctxt->hlsqcontrol_restore_commands[0].hostptr = cmd;
+ drawctxt->hlsqcontrol_restore_commands[0].gpuaddr
+ = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+ /* Create indirect buffer command for above command sequence */
+ create_ib1(drawctxt, drawctxt->hlsqcontrol_restore, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+/* IB that modifies the shader and constant sizes and offsets in restore IBs. */
+static void build_restore_fixup_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+
+#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
+ /* Save shader sizes */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG0;
+ *cmd++ = drawctxt->shader_load_commands[0].gpuaddr;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG0;
+ *cmd++ = drawctxt->shader_load_commands[1].gpuaddr;
+
+ /* Save constant sizes */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG1;
+ *cmd++ = drawctxt->constant_load_commands[0].gpuaddr;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG1;
+ *cmd++ = drawctxt->constant_load_commands[1].gpuaddr;
+
+ /* Save constant offsets */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+ *cmd++ = drawctxt->constant_load_commands[2].gpuaddr;
+#else
+ /* Save shader sizes */
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000,
+ 30, (4 << 19) | (4 << 16),
+ drawctxt->shader_load_commands[0].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x7f000000,
+ 30, (6 << 19) | (4 << 16),
+ drawctxt->shader_load_commands[1].gpuaddr);
+
+ /* Save constant sizes */
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+ 23, (4 << 19) | (4 << 16),
+ drawctxt->constant_load_commands[0].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+ 23, (6 << 19) | (4 << 16),
+ drawctxt->constant_load_commands[1].gpuaddr);
+
+ /* Modify constant restore conditionals */
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+ 0, 0, drawctxt->cond_execs[2].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+ 0, 0, drawctxt->cond_execs[3].gpuaddr);
+
+ /* Save fragment constant shadow offset */
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000,
+ 18, (drawctxt->gpustate.gpuaddr & 0xfffffe00) | 1,
+ drawctxt->constant_load_commands[2].gpuaddr);
+#endif
+
+ /* Use mask value to avoid flushing HLSQ which would cause the HW to
+ discard all the shader data */
+
+ cmd = rmw_regtomem(cmd, A3XX_HLSQ_CONTROL_0_REG, 0x9ffffdff,
+ 0, 0, drawctxt->hlsqcontrol_restore_commands[0].gpuaddr);
+
+ create_ib1(drawctxt, drawctxt->restore_fixup, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+static int a3xx_create_gpustate_shadow(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
+
+ build_regrestore_cmds(adreno_dev, drawctxt);
+ build_constantrestore_cmds(adreno_dev, drawctxt);
+ build_hlsqcontrol_restore_cmds(adreno_dev, drawctxt);
+ build_regconstantsave_cmds(adreno_dev, drawctxt);
+ build_shader_save_cmds(adreno_dev, drawctxt);
+ build_shader_restore_cmds(adreno_dev, drawctxt);
+ build_restore_fixup_cmds(adreno_dev, drawctxt);
+ build_save_fixup_cmds(adreno_dev, drawctxt);
+
+ return 0;
+}
+
+/* create buffers for saving/restoring registers, constants, & GMEM */
+static int a3xx_create_gmem_shadow(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ int result;
+
+ calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
+ tmp_ctx.gmem_base = adreno_dev->gmem_base;
+
+ result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
+ drawctxt->base.proc_priv->pagetable,
+ drawctxt->context_gmem_shadow.size);
+
+ if (result)
+ return result;
+
+ build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
+ &tmp_ctx.cmd);
+
+ tmp_ctx.cmd = build_gmem2sys_cmds(adreno_dev, drawctxt,
+ &drawctxt->context_gmem_shadow);
+ tmp_ctx.cmd = build_sys2gmem_cmds(adreno_dev, drawctxt,
+ &drawctxt->context_gmem_shadow);
+
+ kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
+ KGSL_CACHE_OP_FLUSH);
+
+ drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;
+
+ return 0;
+}
+
+static int a3xx_drawctxt_create(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ int ret;
+
+ /*
+ * Allocate memory for the GPU state and the context commands.
+ * Despite the name, this is much more then just storage for
+ * the gpustate. This contains command space for gmem save
+ * and texture and vertex buffer storage too
+ */
+
+ ret = kgsl_allocate(&drawctxt->gpustate,
+ drawctxt->base.proc_priv->pagetable, CONTEXT_SIZE);
+
+ if (ret)
+ return ret;
+
+ kgsl_sharedmem_set(&adreno_dev->dev, &drawctxt->gpustate, 0, 0,
+ CONTEXT_SIZE);
+ tmp_ctx.cmd = drawctxt->gpustate.hostptr + CMD_OFFSET;
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+ ret = a3xx_create_gpustate_shadow(adreno_dev, drawctxt);
+ if (ret)
+ goto done;
+
+ drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
+ }
+
+ if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC))
+ ret = a3xx_create_gmem_shadow(adreno_dev, drawctxt);
+
+done:
+ if (ret)
+ kgsl_sharedmem_free(&drawctxt->gpustate);
+
+ return ret;
+}
+
+static int a3xx_drawctxt_save(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ int ret;
+
+ if (context == NULL || (context->flags & CTXT_FLAGS_BEING_DESTROYED))
+ return 0;
+
+ if (context->state == ADRENO_CONTEXT_STATE_INVALID)
+ return 0;
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+ /* Fixup self modifying IBs for save operations */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
+ if (ret)
+ return ret;
+
+ /* save registers and constants. */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->regconstant_save, 3);
+ if (ret)
+ return ret;
+
+ if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
+ /* Save shader instructions */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
+ if (ret)
+ return ret;
+
+ context->flags |= CTXT_FLAGS_SHADER_RESTORE;
+ }
+ }
+
+ if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
+ (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
+ /*
+ * Save GMEM (note: changes shader. shader must
+ * already be saved.)
+ */
+
+ kgsl_cffdump_syncmem(context->base.device,
+ &context->gpustate,
+ context->context_gmem_shadow.gmem_save[1],
+ context->context_gmem_shadow.gmem_save[2] << 2, true);
+
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->context_gmem_shadow.
+ gmem_save, 3);
+ if (ret)
+ return ret;
+
+ context->flags |= CTXT_FLAGS_GMEM_RESTORE;
+ }
+
+ return 0;
+}
+
+static int a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int cmds[5];
+ int ret = 0;
+
+ if (context == NULL) {
+ /* No context - set the default pagetable and thats it */
+ unsigned int id;
+ /*
+ * If there isn't a current context, the kgsl_mmu_setstate
+ * will use the CPU path so we don't need to give
+ * it a valid context id.
+ */
+ id = (adreno_dev->drawctxt_active != NULL)
+ ? adreno_dev->drawctxt_active->base.id
+ : KGSL_CONTEXT_INVALID;
+ kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+ id);
+ return 0;
+ }
+
+ cmds[0] = cp_nop_packet(1);
+ cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+ cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
+ cmds[3] = device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
+ cmds[4] = context->base.id;
+ ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ cmds, 5);
+ if (ret)
+ return ret;
+
+ kgsl_mmu_setstate(&device->mmu, context->base.proc_priv->pagetable,
+ context->base.id);
+
+ /*
+ * Restore GMEM. (note: changes shader.
+ * Shader must not already be restored.)
+ */
+
+ if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
+ kgsl_cffdump_syncmem(context->base.device,
+ &context->gpustate,
+ context->context_gmem_shadow.gmem_restore[1],
+ context->context_gmem_shadow.gmem_restore[2] << 2,
+ true);
+
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->context_gmem_shadow.
+ gmem_restore, 3);
+ if (ret)
+ return ret;
+ context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
+ }
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+ if (ret)
+ return ret;
+
+ /* Fixup self modifying IBs for restore operations */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->restore_fixup, 3);
+ if (ret)
+ return ret;
+
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->constant_restore, 3);
+ if (ret)
+ return ret;
+
+ if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->shader_restore, 3);
+ if (ret)
+ return ret;
+ /* Restore HLSQ_CONTROL_0 register */
+ ret = adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->hlsqcontrol_restore, 3);
+ }
+
+ return ret;
+}
+
+static const unsigned int _a3xx_pwron_fixup_fs_instructions[] = {
+ 0x00000000, 0x302CC300, 0x00000000, 0x302CC304,
+ 0x00000000, 0x302CC308, 0x00000000, 0x302CC30C,
+ 0x00000000, 0x302CC310, 0x00000000, 0x302CC314,
+ 0x00000000, 0x302CC318, 0x00000000, 0x302CC31C,
+ 0x00000000, 0x302CC320, 0x00000000, 0x302CC324,
+ 0x00000000, 0x302CC328, 0x00000000, 0x302CC32C,
+ 0x00000000, 0x302CC330, 0x00000000, 0x302CC334,
+ 0x00000000, 0x302CC338, 0x00000000, 0x302CC33C,
+ 0x00000000, 0x00000400, 0x00020000, 0x63808003,
+ 0x00060004, 0x63828007, 0x000A0008, 0x6384800B,
+ 0x000E000C, 0x6386800F, 0x00120010, 0x63888013,
+ 0x00160014, 0x638A8017, 0x001A0018, 0x638C801B,
+ 0x001E001C, 0x638E801F, 0x00220020, 0x63908023,
+ 0x00260024, 0x63928027, 0x002A0028, 0x6394802B,
+ 0x002E002C, 0x6396802F, 0x00320030, 0x63988033,
+ 0x00360034, 0x639A8037, 0x003A0038, 0x639C803B,
+ 0x003E003C, 0x639E803F, 0x00000000, 0x00000400,
+ 0x00000003, 0x80D60003, 0x00000007, 0x80D60007,
+ 0x0000000B, 0x80D6000B, 0x0000000F, 0x80D6000F,
+ 0x00000013, 0x80D60013, 0x00000017, 0x80D60017,
+ 0x0000001B, 0x80D6001B, 0x0000001F, 0x80D6001F,
+ 0x00000023, 0x80D60023, 0x00000027, 0x80D60027,
+ 0x0000002B, 0x80D6002B, 0x0000002F, 0x80D6002F,
+ 0x00000033, 0x80D60033, 0x00000037, 0x80D60037,
+ 0x0000003B, 0x80D6003B, 0x0000003F, 0x80D6003F,
+ 0x00000000, 0x03000000, 0x00000000, 0x00000000,
+};
+
+/**
+ * adreno_a3xx_pwron_fixup_init() - Initalize a special command buffer to run a
+ * post-power collapse shader workaround
+ * @adreno_dev: Pointer to a adreno_device struct
+ *
+ * Some targets require a special workaround shader to be executed after
+ * power-collapse. Construct the IB once at init time and keep it
+ * handy
+ *
+ * Returns: 0 on success or negative on error
+ */
+int adreno_a3xx_pwron_fixup_init(struct adreno_device *adreno_dev)
+{
+ unsigned int *cmds;
+ int count = ARRAY_SIZE(_a3xx_pwron_fixup_fs_instructions);
+ int ret;
+
+ /* Return if the fixup is already in place */
+ if (test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv))
+ return 0;
+
+ ret = kgsl_allocate_contiguous(&adreno_dev->pwron_fixup, PAGE_SIZE);
+
+ if (ret)
+ return ret;
+
+ adreno_dev->pwron_fixup.flags |= KGSL_MEMFLAGS_GPUREADONLY;
+
+ cmds = adreno_dev->pwron_fixup.hostptr;
+
+ *cmds++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x90000000;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmds++ = A3XX_RBBM_CLOCK_CTL;
+ *cmds++ = 0xFFFCFFFF;
+ *cmds++ = 0x00010000;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_0_REG, 1);
+ *cmds++ = 0x1E000150;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+ *cmds++ = 0x1E000150;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_0_REG, 1);
+ *cmds++ = 0x1E000150;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_1_REG, 1);
+ *cmds++ = 0x00000040;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_2_REG, 1);
+ *cmds++ = 0x80000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_3_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_VS_CONTROL_REG, 1);
+ *cmds++ = 0x00000001;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_FS_CONTROL_REG, 1);
+ *cmds++ = 0x0D001002;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CONST_VSPRESV_RANGE_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CONST_FSPRESV_RANGE_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_0_REG, 1);
+ *cmds++ = 0x00401101;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_1_REG, 1);
+ *cmds++ = 0x00000400;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_2_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_3_REG, 1);
+ *cmds++ = 0x00000001;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_4_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_5_REG, 1);
+ *cmds++ = 0x00000001;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_NDRANGE_6_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_CONTROL_0_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_CONTROL_1_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_CONST_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG, 1);
+ *cmds++ = 0x00000010;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG, 1);
+ *cmds++ = 0x00000001;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG, 1);
+ *cmds++ = 0x00000001;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_WG_OFFSET_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_SP_CTRL_REG, 1);
+ *cmds++ = 0x00040000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = 0x0000000A;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG1, 1);
+ *cmds++ = 0x00000001;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PARAM_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_4, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_5, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_6, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OUT_REG_7, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_VPC_DST_REG_3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OBJ_OFFSET_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_OBJ_START_REG, 1);
+ *cmds++ = 0x00000004;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_PARAM_REG, 1);
+ *cmds++ = 0x04008001;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_ADDR_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_LENGTH_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = 0x0DB0400A;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG1, 1);
+ *cmds++ = 0x00300402;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_OBJ_OFFSET_REG, 1);
+ *cmds++ = 0x00010000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_OBJ_START_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_PARAM_REG, 1);
+ *cmds++ = 0x04008001;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_ADDR_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_FLAT_SHAD_MODE_REG_1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_OUTPUT_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_MRT_REG_3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_IMAGE_OUTPUT_REG_3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_LENGTH_REG, 1);
+ *cmds++ = 0x0000000D;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_CLIP_CNTL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_GB_CLIP_ADJ, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_XOFFSET, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_XSCALE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_YOFFSET, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_YSCALE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_ZOFFSET, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_VPORT_ZSCALE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X4, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y4, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z4, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W4, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_X5, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Y5, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_Z5, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_CL_USER_PLANE_W5, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SU_POINT_MINMAX, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SU_POINT_SIZE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SU_POLY_OFFSET_OFFSET, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SU_POLY_OFFSET_SCALE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SU_MODE_CONTROL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SC_CONTROL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SC_SCREEN_SCISSOR_TL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SC_SCREEN_SCISSOR_BR, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SC_WINDOW_SCISSOR_BR, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_SC_WINDOW_SCISSOR_TL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_TSE_DEBUG_ECO, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_PERFCOUNTER1_SELECT, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_PERFCOUNTER2_SELECT, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_GRAS_PERFCOUNTER3_SELECT, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MODE_CONTROL, 1);
+ *cmds++ = 0x00008000;
+ *cmds++ = cp_type0_packet(A3XX_RB_RENDER_CONTROL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MSAA_CONTROL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_ALPHA_REFERENCE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_CONTROL3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_INFO3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BUF_BASE3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BLEND_CONTROL0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BLEND_CONTROL1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BLEND_CONTROL2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_MRT_BLEND_CONTROL3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_BLEND_RED, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_BLEND_GREEN, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_BLEND_BLUE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_BLEND_ALPHA, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_CLEAR_COLOR_DW0, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_CLEAR_COLOR_DW1, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_CLEAR_COLOR_DW2, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_CLEAR_COLOR_DW3, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_COPY_CONTROL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_COPY_DEST_BASE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_COPY_DEST_PITCH, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_COPY_DEST_INFO, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_DEPTH_CONTROL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_DEPTH_CLEAR, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_DEPTH_BUF_INFO, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_DEPTH_BUF_PITCH, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_STENCIL_CONTROL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_STENCIL_CLEAR, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_STENCIL_BUF_INFO, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_STENCIL_BUF_PITCH, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_STENCIL_REF_MASK, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_STENCIL_REF_MASK_BF, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_LRZ_VSC_CONTROL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_WINDOW_OFFSET, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_SAMPLE_COUNT_CONTROL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_SAMPLE_COUNT_ADDR, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_Z_CLAMP_MIN, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_Z_CLAMP_MAX, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_GMEM_BASE_ADDR, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_DEBUG_ECO_CONTROLS_ADDR, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_PERFCOUNTER1_SELECT, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_RB_FRAME_BUFFER_DIMENSION, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
+ *cmds++ = (1 << CP_LOADSTATE_DSTOFFSET_SHIFT) |
+ (0 << CP_LOADSTATE_STATESRC_SHIFT) |
+ (6 << CP_LOADSTATE_STATEBLOCKID_SHIFT) |
+ (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (1 << CP_LOADSTATE_STATETYPE_SHIFT) |
+ (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ *cmds++ = 0x00400000;
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
+ *cmds++ = (2 << CP_LOADSTATE_DSTOFFSET_SHIFT) |
+ (6 << CP_LOADSTATE_STATEBLOCKID_SHIFT) |
+ (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (1 << CP_LOADSTATE_STATETYPE_SHIFT);
+ *cmds++ = 0x00400220;
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
+ *cmds++ = (6 << CP_LOADSTATE_STATEBLOCKID_SHIFT) |
+ (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (1 << CP_LOADSTATE_STATETYPE_SHIFT);
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 2 + count);
+ *cmds++ = (6 << CP_LOADSTATE_STATEBLOCKID_SHIFT) |
+ (13 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = 0x00000000;
+
+ memcpy(cmds, _a3xx_pwron_fixup_fs_instructions, count << 2);
+
+ cmds += count;
+
+ *cmds++ = cp_type3_packet(CP_EXEC_CL, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CL_CONTROL_0_REG, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type0_packet(A3XX_HLSQ_CONTROL_0_REG, 1);
+ *cmds++ = 0x1E000150;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+ *cmds++ = 0x1E000050;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmds++ = A3XX_RBBM_CLOCK_CTL;
+ *cmds++ = 0xFFFCFFFF;
+ *cmds++ = 0x00000000;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ /*
+ * Remember the number of dwords in the command buffer for when we
+ * program the indirect buffer call in the ringbuffer
+ */
+ adreno_dev->pwron_fixup_dwords =
+ (cmds - (unsigned int *) adreno_dev->pwron_fixup.hostptr);
+
+ /* Mark the flag in ->priv to show that we have the fix */
+ set_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv);
+ return 0;
+}
+
+/*
+ * a3xx_rb_init() - Initialize ringbuffer
+ * @adreno_dev: Pointer to adreno device
+ * @rb: Pointer to the ringbuffer of device
+ *
+ * Submit commands for ME initialization, common function shared between
+ * a3xx and a4xx devices
+ */
+int a3xx_rb_init(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ unsigned int *cmds, cmds_gpu;
+ cmds = adreno_ringbuffer_allocspace(rb, NULL, 18);
+ if (cmds == NULL)
+ return -ENOMEM;
+
+ cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * (rb->wptr - 18);
+
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ cp_type3_packet(CP_ME_INIT, 17));
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x000003f7);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000080);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000100);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000180);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00006600);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000150);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x0000014e);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000154);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000001);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ /* Protected mode control - turned off for A3XX/A4XX */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, 0x00000000);
+
+ adreno_ringbuffer_submit(rb);
+
+ return 0;
+}
+
+static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ const char *err = "";
+
+ switch (bit) {
+ case A3XX_INT_RBBM_AHB_ERROR: {
+ unsigned int reg;
+
+ kgsl_regread(device, A3XX_RBBM_AHB_ERROR_STATUS, ®);
+
+ /*
+ * Return the word address of the erroring register so that it
+ * matches the register specification
+ */
+
+ KGSL_DRV_CRIT(device,
+ "RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
+ reg & (1 << 28) ? "WRITE" : "READ",
+ (reg & 0xFFFFF) >> 2, (reg >> 20) & 0x3,
+ (reg >> 24) & 0x3);
+
+ /* Clear the error */
+ kgsl_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
+ goto done;
+ }
+ case A3XX_INT_RBBM_REG_TIMEOUT:
+ err = "RBBM: AHB register timeout";
+ break;
+ case A3XX_INT_RBBM_ME_MS_TIMEOUT:
+ err = "RBBM: ME master split timeout";
+ break;
+ case A3XX_INT_RBBM_PFP_MS_TIMEOUT:
+ err = "RBBM: PFP master split timeout";
+ break;
+ case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
+ err = "RBBM: ATB bus oveflow";
+ break;
+ case A3XX_INT_VFD_ERROR:
+ err = "VFD: Out of bounds access";
+ break;
+ case A3XX_INT_CP_T0_PACKET_IN_IB:
+ err = "ringbuffer TO packet in IB interrupt";
+ break;
+ case A3XX_INT_CP_OPCODE_ERROR:
+ err = "ringbuffer opcode error interrupt";
+ break;
+ case A3XX_INT_CP_RESERVED_BIT_ERROR:
+ err = "ringbuffer reserved bit error interrupt";
+ break;
+ case A3XX_INT_CP_HW_FAULT:
+ err = "ringbuffer hardware fault";
+ break;
+ case A3XX_INT_CP_REG_PROTECT_FAULT:
+ err = "ringbuffer protected mode error interrupt";
+ break;
+ case A3XX_INT_CP_AHB_ERROR_HALT:
+ err = "ringbuffer AHB error interrupt";
+ break;
+ case A3XX_INT_MISC_HANG_DETECT:
+ err = "MISC: GPU hang detected";
+ break;
+ case A3XX_INT_UCHE_OOB_ACCESS:
+ err = "UCHE: Out of bounds access";
+ break;
+ default:
+ return;
+ }
+ KGSL_DRV_CRIT(device, "%s\n", err);
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+done:
+ /* Trigger a fault in the dispatcher - this will effect a restart */
+ adreno_dispatcher_irq_fault(device);
+}
+
+/*
+ * a3xx_cp_callback() - CP interrupt handler
+ * @adreno_dev: Adreno device pointer
+ * @irq: irq number
+ *
+ * Handle the cp interrupt generated by GPU, common function between a3xx and
+ * a4xx devices
+ */
+static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ device->pwrctrl.irq_last = 1;
+ queue_work(device->work_queue, &device->ts_expired_ws);
+
+ adreno_dispatcher_schedule(device);
+}
+
+
+static int a3xx_perfcounter_enable_pwr(struct kgsl_device *device,
+ unsigned int counter)
+{
+ unsigned int in, out;
+
+ if (counter > 1)
+ return -EINVAL;
+
+ kgsl_regread(device, A3XX_RBBM_RBBM_CTL, &in);
+
+ if (counter == 0)
+ out = in | RBBM_RBBM_CTL_RESET_PWR_CTR0;
+ else
+ out = in | RBBM_RBBM_CTL_RESET_PWR_CTR1;
+
+ kgsl_regwrite(device, A3XX_RBBM_RBBM_CTL, out);
+
+ if (counter == 0)
+ out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR0;
+ else
+ out = in | RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
+
+ kgsl_regwrite(device, A3XX_RBBM_RBBM_CTL, out);
+
+ return 0;
+}
+
+static int a3xx_perfcounter_enable_vbif(struct kgsl_device *device,
+ unsigned int counter,
+ unsigned int countable)
+{
+ unsigned int in, out, bit, sel;
+
+ if (counter > 1 || countable > 0x7f)
+ return -EINVAL;
+
+ kgsl_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
+ kgsl_regread(device, A3XX_VBIF_PERF_CNT_SEL, &sel);
+
+ if (counter == 0) {
+ bit = VBIF_PERF_CNT_0;
+ sel = (sel & ~VBIF_PERF_CNT_0_SEL_MASK) | countable;
+ } else {
+ bit = VBIF_PERF_CNT_1;
+ sel = (sel & ~VBIF_PERF_CNT_1_SEL_MASK)
+ | (countable << VBIF_PERF_CNT_1_SEL);
+ }
+
+ out = in | bit;
+
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_SEL, sel);
+
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, bit);
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);
+
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
+ return 0;
+}
+
+static int a3xx_perfcounter_enable_vbif_pwr(struct kgsl_device *device,
+ unsigned int counter)
+{
+ unsigned int in, out, bit;
+
+ if (counter > 2)
+ return -EINVAL;
+
+ kgsl_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
+ if (counter == 0)
+ bit = VBIF_PERF_PWR_CNT_0;
+ else if (counter == 1)
+ bit = VBIF_PERF_PWR_CNT_1;
+ else
+ bit = VBIF_PERF_PWR_CNT_2;
+
+ out = in | bit;
+
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, bit);
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_CLR, 0);
+
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
+ return 0;
+}
+
+/*
+ * a3xx_perfcounter_enable - Configure a performance counter for a countable
+ * @adreno_dev - Adreno device to configure
+ * @group - Desired performance counter group
+ * @counter - Desired performance counter in the group
+ * @countable - Desired countable
+ *
+ * Physically set up a counter within a group with the desired countable
+ * Return 0 on success else error code
+ */
+
+static int a3xx_perfcounter_enable(struct adreno_device *adreno_dev,
+ unsigned int group, unsigned int counter, unsigned int countable)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int val = 0;
+ struct adreno_perfcount_register *reg;
+
+ /* Special cases */
+ if (group == KGSL_PERFCOUNTER_GROUP_PWR)
+ return a3xx_perfcounter_enable_pwr(device, counter);
+ else if (group == KGSL_PERFCOUNTER_GROUP_VBIF)
+ return a3xx_perfcounter_enable_vbif(device, counter,
+ countable);
+ else if (group == KGSL_PERFCOUNTER_GROUP_VBIF_PWR)
+ return a3xx_perfcounter_enable_vbif_pwr(device, counter);
+
+ if (group >= adreno_dev->gpudev->perfcounters->group_count)
+ return -EINVAL;
+
+ if ((0 == adreno_dev->gpudev->perfcounters->groups[group].reg_count) ||
+ (counter >=
+ adreno_dev->gpudev->perfcounters->groups[group].reg_count))
+ return -EINVAL;
+
+ reg = &(adreno_dev->gpudev->perfcounters->groups[group].regs[counter]);
+
+ /* Select the desired perfcounter */
+ kgsl_regwrite(device, reg->select, countable);
+
+ if (reg->load_bit < 32) {
+ kgsl_regread(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, &val);
+ val |= (1 << reg->load_bit);
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD0, val);
+ } else {
+ kgsl_regread(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, &val);
+ val |= (1 << (reg->load_bit - 32));
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_LOAD_CMD1, val);
+ }
+ return 0;
+}
+
+static uint64_t a3xx_perfcounter_read_pwr(struct adreno_device *adreno_dev,
+ unsigned int counter)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct adreno_perfcount_register *reg;
+ unsigned int in, out, lo = 0, hi = 0;
+ unsigned int enable_bit;
+
+ if (counter > 1)
+ return 0;
+ if (0 == counter)
+ enable_bit = RBBM_RBBM_CTL_ENABLE_PWR_CTR0;
+ else
+ enable_bit = RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
+ /* freeze counter */
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_RBBM_CTL, &in);
+ out = (in & ~enable_bit);
+ adreno_writereg(adreno_dev, ADRENO_REG_RBBM_RBBM_CTL, out);
+
+ reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_PWR].regs[counter];
+ kgsl_regread(device, reg->offset, &lo);
+ kgsl_regread(device, reg->offset + 1, &hi);
+
+ /* restore the counter control value */
+ adreno_writereg(adreno_dev, ADRENO_REG_RBBM_RBBM_CTL, in);
+
+ return (((uint64_t) hi) << 32) | lo;
+}
+
+static uint64_t a3xx_perfcounter_read_vbif(struct adreno_device *adreno_dev,
+ unsigned int counter)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_perfcount_register *reg;
+ unsigned int in, out, lo = 0, hi = 0;
+
+ if (counter > 1)
+ return 0;
+
+ /* freeze counter */
+ kgsl_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
+ if (counter == 0)
+ out = (in & ~VBIF_PERF_CNT_0);
+ else
+ out = (in & ~VBIF_PERF_CNT_1);
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
+
+ reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF].regs[counter];
+ kgsl_regread(device, reg->offset, &lo);
+ kgsl_regread(device, reg->offset + 1, &hi);
+
+ /* restore the perfcounter value */
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_EN, in);
+
+ return (((uint64_t) hi) << 32) | lo;
+}
+
+static uint64_t a3xx_perfcounter_read_vbif_pwr(struct adreno_device *adreno_dev,
+ unsigned int counter)
+{
+ struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_perfcount_register *reg;
+ unsigned int in, out, lo = 0, hi = 0;
+
+ if (counter > 2)
+ return 0;
+
+ /* freeze counter */
+ kgsl_regread(device, A3XX_VBIF_PERF_CNT_EN, &in);
+ if (0 == counter)
+ out = (in & ~VBIF_PERF_PWR_CNT_0);
+ else
+ out = (in & ~VBIF_PERF_PWR_CNT_2);
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_EN, out);
+
+ reg = &counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs[counter];
+ kgsl_regread(device, reg->offset, &lo);
+ kgsl_regread(device, reg->offset + 1, &hi);
+ /* restore the perfcounter value */
+ kgsl_regwrite(device, A3XX_VBIF_PERF_CNT_EN, in);
+
+ return (((uint64_t) hi) << 32) | lo;
+}
+
+static uint64_t a3xx_perfcounter_read(struct adreno_device *adreno_dev,
+ unsigned int group, unsigned int counter)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_perfcount_register *reg;
+ unsigned int lo = 0, hi = 0;
+ unsigned int offset;
+ unsigned int in, out;
+
+ if (group == KGSL_PERFCOUNTER_GROUP_VBIF_PWR)
+ return a3xx_perfcounter_read_vbif_pwr(adreno_dev, counter);
+
+ if (group == KGSL_PERFCOUNTER_GROUP_VBIF)
+ return a3xx_perfcounter_read_vbif(adreno_dev, counter);
+
+ if (group == KGSL_PERFCOUNTER_GROUP_PWR)
+ return a3xx_perfcounter_read_pwr(adreno_dev, counter);
+
+ if (group >= adreno_dev->gpudev->perfcounters->group_count)
+ return 0;
+
+ if ((0 == adreno_dev->gpudev->perfcounters->groups[group].reg_count) ||
+ (counter >=
+ adreno_dev->gpudev->perfcounters->groups[group].reg_count))
+ return 0;
+
+ reg = &(adreno_dev->gpudev->perfcounters->groups[group].regs[counter]);
+
+ /* Freeze the counter */
+ kgsl_regread(device, A3XX_RBBM_PERFCTR_CTL, &in);
+ out = in & ~RBBM_PERFCTR_CTL_ENABLE;
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, out);
+
+ offset = reg->offset;
+ /* Read the values */
+ kgsl_regread(device, offset, &lo);
+ kgsl_regread(device, offset + 1, &hi);
+
+ /* Re-Enable the counter */
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, in);
+ return (((uint64_t) hi) << 32) | lo;
+}
+
+#define A3XX_IRQ_CALLBACK(_c) { .func = _c }
+
+#define A3XX_INT_MASK \
+ ((1 << A3XX_INT_RBBM_AHB_ERROR) | \
+ (1 << A3XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
+ (1 << A3XX_INT_CP_T0_PACKET_IN_IB) | \
+ (1 << A3XX_INT_CP_OPCODE_ERROR) | \
+ (1 << A3XX_INT_CP_RESERVED_BIT_ERROR) | \
+ (1 << A3XX_INT_CP_HW_FAULT) | \
+ (1 << A3XX_INT_CP_IB1_INT) | \
+ (1 << A3XX_INT_CP_IB2_INT) | \
+ (1 << A3XX_INT_CP_RB_INT) | \
+ (1 << A3XX_INT_CP_REG_PROTECT_FAULT) | \
+ (1 << A3XX_INT_CP_AHB_ERROR_HALT) | \
+ (1 << A3XX_INT_UCHE_OOB_ACCESS))
+
+static struct {
+ void (*func)(struct adreno_device *, int);
+} a3xx_irq_funcs[] = {
+ A3XX_IRQ_CALLBACK(NULL), /* 0 - RBBM_GPU_IDLE */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 1 - RBBM_AHB_ERROR */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 2 - RBBM_REG_TIMEOUT */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 3 - RBBM_ME_MS_TIMEOUT */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 4 - RBBM_PFP_MS_TIMEOUT */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 5 - RBBM_ATB_BUS_OVERFLOW */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 6 - RBBM_VFD_ERROR */
+ A3XX_IRQ_CALLBACK(NULL), /* 7 - CP_SW */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 8 - CP_T0_PACKET_IN_IB */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 9 - CP_OPCODE_ERROR */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 10 - CP_RESERVED_BIT_ERROR */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 11 - CP_HW_FAULT */
+ A3XX_IRQ_CALLBACK(NULL), /* 12 - CP_DMA */
+ A3XX_IRQ_CALLBACK(a3xx_cp_callback), /* 13 - CP_IB2_INT */
+ A3XX_IRQ_CALLBACK(a3xx_cp_callback), /* 14 - CP_IB1_INT */
+ A3XX_IRQ_CALLBACK(a3xx_cp_callback), /* 15 - CP_RB_INT */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 16 - CP_REG_PROTECT_FAULT */
+ A3XX_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
+ A3XX_IRQ_CALLBACK(NULL), /* 18 - CP_VS_DONE_TS */
+ A3XX_IRQ_CALLBACK(NULL), /* 19 - CP_PS_DONE_TS */
+ A3XX_IRQ_CALLBACK(NULL), /* 20 - CP_CACHE_FLUSH_TS */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 21 - CP_AHB_ERROR_FAULT */
+ A3XX_IRQ_CALLBACK(NULL), /* 22 - Unused */
+ A3XX_IRQ_CALLBACK(NULL), /* 23 - Unused */
+ A3XX_IRQ_CALLBACK(NULL), /* 24 - MISC_HANG_DETECT */
+ A3XX_IRQ_CALLBACK(a3xx_err_callback), /* 25 - UCHE_OOB_ACCESS */
+ /* 26 to 31 - Unused */
+};
+
+/*
+ * a3xx_irq_handler() - Interrupt handler function
+ * @adreno_dev: Pointer to adreno device
+ *
+ * Interrupt handler for adreno device, this function is common between
+ * a3xx and a4xx devices
+ */
+irqreturn_t a3xx_irq_handler(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int status, tmp;
+ int i;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
+
+ for (tmp = status, i = 0; tmp && i < ARRAY_SIZE(a3xx_irq_funcs); i++) {
+ if (tmp & 1) {
+ if (a3xx_irq_funcs[i].func != NULL) {
+ a3xx_irq_funcs[i].func(adreno_dev, i);
+ ret = IRQ_HANDLED;
+ } else {
+ KGSL_DRV_CRIT(device,
+ "Unhandled interrupt bit %x\n", i);
+ }
+ }
+
+ tmp >>= 1;
+ }
+
+ trace_kgsl_a3xx_irq_status(device, status);
+
+ if (status)
+ adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
+ status);
+ return ret;
+}
+
+/*
+ * a3xx_irq_control() - Function called to enable/disable interrupts
+ * @adreno_dev: Pointer to device whose interrupts are enabled/disabled
+ * @state: When set interrupts are enabled else disabled
+ *
+ * This function is common for a3xx and a4xx adreno devices
+ */
+void a3xx_irq_control(struct adreno_device *adreno_dev, int state)
+{
+ if (state)
+ adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK,
+ A3XX_INT_MASK);
+ else
+ adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_0_MASK, 0);
+}
+
+/*
+ * a3xx_irq_pending() - Checks if interrupt is generated by h/w
+ * @adreno_dev: Pointer to device whose interrupts are checked
+ *
+ * Returns true if interrupts are pending from device else 0. This
+ * function is shared by both a3xx and a4xx devices.
+ */
+unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev)
+{
+ unsigned int status;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
+
+ return (status & A3XX_INT_MASK) ? 1 : 0;
+}
+
+/*
+ * a3xx_busy_cycles() - Returns number of gpu cycles
+ * @adreno_dev: Pointer to device ehose cycles are checked
+ *
+ * Returns number of busy clycles since the last time this function is called
+ * Function is common between a3xx and a4xx devices
+ */
+unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
+{
+ unsigned int val;
+ unsigned int ret = 0;
+
+ /* Read the value */
+ adreno_readreg(adreno_dev, ADRENO_REG_RBBM_PERFCTR_PWR_1_LO, &val);
+
+ /* Return 0 for the first read */
+ if (adreno_dev->gpu_cycles != 0) {
+ if (val < adreno_dev->gpu_cycles)
+ ret = (0xFFFFFFFF - adreno_dev->gpu_cycles) + val;
+ else
+ ret = val - adreno_dev->gpu_cycles;
+ }
+
+ adreno_dev->gpu_cycles = val;
+ return ret;
+}
+
+/* VBIF registers start after 0x3000 so use 0x0 as end of list marker */
+static const struct adreno_vbif_data a305_vbif[] = {
+ /* Set up 16 deep read/write request queues */
+ { A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
+ { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+ { A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
+ /* Enable WR-REQ */
+ { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
+ /* Set up round robin arbitration between both AXI ports */
+ { A3XX_VBIF_ARB_CTL, 0x00000030 },
+ /* Set up AOOO */
+ { A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
+ { A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
+ {0, 0},
+};
+
+static const struct adreno_vbif_data a305b_vbif[] = {
+ { A3XX_VBIF_IN_RD_LIM_CONF0, 0x00181818 },
+ { A3XX_VBIF_IN_WR_LIM_CONF0, 0x00181818 },
+ { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000018 },
+ { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000018 },
+ { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000303 },
+ { A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
+ {0, 0},
+};
+
+static const struct adreno_vbif_data a305c_vbif[] = {
+ { A3XX_VBIF_IN_RD_LIM_CONF0, 0x00101010 },
+ { A3XX_VBIF_IN_WR_LIM_CONF0, 0x00101010 },
+ { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000010 },
+ { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000010 },
+ { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x00000101 },
+ { A3XX_VBIF_ARB_CTL, 0x00000010 },
+ /* Set up AOOO */
+ { A3XX_VBIF_OUT_AXI_AOOO_EN, 0x00000007 },
+ { A3XX_VBIF_OUT_AXI_AOOO, 0x00070007 },
+ {0, 0},
+};
+
+static const struct adreno_vbif_data a320_vbif[] = {
+ /* Set up 16 deep read/write request queues */
+ { A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
+ { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+ { A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
+ /* Enable WR-REQ */
+ { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
+ /* Set up round robin arbitration between both AXI ports */
+ { A3XX_VBIF_ARB_CTL, 0x00000030 },
+ /* Set up AOOO */
+ { A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
+ { A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
+ /* Enable 1K sort */
+ { A3XX_VBIF_ABIT_SORT, 0x000000FF },
+ { A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
+ {0, 0},
+};
+
+static const struct adreno_vbif_data a330_vbif[] = {
+ /* Set up 16 deep read/write request queues */
+ { A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818 },
+ { A3XX_VBIF_IN_RD_LIM_CONF1, 0x00001818 },
+ { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00001818 },
+ { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00001818 },
+ { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+ { A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818 },
+ { A3XX_VBIF_IN_WR_LIM_CONF1, 0x00001818 },
+ /* Enable WR-REQ */
+ { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003F },
+ /* Set up round robin arbitration between both AXI ports */
+ { A3XX_VBIF_ARB_CTL, 0x00000030 },
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB */
+ { A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001 },
+ /* Set up AOOO */
+ { A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003F },
+ { A3XX_VBIF_OUT_AXI_AOOO, 0x003F003F },
+ /* Enable 1K sort */
+ { A3XX_VBIF_ABIT_SORT, 0x0001003F },
+ { A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
+ /* Disable VBIF clock gating. This is to enable AXI running
+ * higher frequency than GPU.
+ */
+ { A3XX_VBIF_CLKON, 1 },
+ {0, 0},
+};
+
+/*
+ * Most of the VBIF registers on 8974v2 have the correct values at power on, so
+ * we won't modify those if we don't need to
+ */
+static const struct adreno_vbif_data a330v2_vbif[] = {
+ /* Enable 1k sort */
+ { A3XX_VBIF_ABIT_SORT, 0x0001003F },
+ { A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
+ /* Enable WR-REQ */
+ { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003F },
+ { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB */
+ { A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003 },
+ {0, 0},
+};
+
+const struct adreno_vbif_platform a3xx_vbif_platforms[] = {
+ { adreno_is_a305, a305_vbif },
+ { adreno_is_a305c, a305c_vbif },
+ { adreno_is_a320, a320_vbif },
+ /* A330v2 needs to be ahead of A330 so the right device matches */
+ { adreno_is_a330v2, a330v2_vbif },
+ { adreno_is_a330, a330_vbif },
+ { adreno_is_a305b, a305b_vbif },
+};
+
+/*
+ * Define the available perfcounter groups - these get used by
+ * adreno_perfcounter_get and adreno_perfcounter_put
+ */
+
+static struct adreno_perfcount_register a3xx_perfcounters_cp[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_CP_0_LO,
+ 0, A3XX_CP_PERFCOUNTER_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_rbbm[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_0_LO,
+ 1, A3XX_RBBM_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RBBM_1_LO,
+ 2, A3XX_RBBM_PERFCOUNTER1_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_pc[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_0_LO,
+ 3, A3XX_PC_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_1_LO,
+ 4, A3XX_PC_PERFCOUNTER1_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_2_LO,
+ 5, A3XX_PC_PERFCOUNTER2_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PC_3_LO,
+ 6, A3XX_PC_PERFCOUNTER3_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_vfd[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_0_LO,
+ 7, A3XX_VFD_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VFD_1_LO,
+ 8, A3XX_VFD_PERFCOUNTER1_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_hlsq[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_0_LO,
+ 9, A3XX_HLSQ_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_1_LO,
+ 10, A3XX_HLSQ_PERFCOUNTER1_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_2_LO,
+ 11, A3XX_HLSQ_PERFCOUNTER2_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_3_LO,
+ 12, A3XX_HLSQ_PERFCOUNTER3_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_4_LO,
+ 13, A3XX_HLSQ_PERFCOUNTER4_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_HLSQ_5_LO,
+ 14, A3XX_HLSQ_PERFCOUNTER5_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_vpc[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_0_LO,
+ 15, A3XX_VPC_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_VPC_1_LO,
+ 16, A3XX_VPC_PERFCOUNTER1_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_tse[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_0_LO,
+ 17, A3XX_GRAS_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TSE_1_LO,
+ 18, A3XX_GRAS_PERFCOUNTER1_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_ras[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_0_LO,
+ 19, A3XX_GRAS_PERFCOUNTER2_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RAS_1_LO,
+ 20, A3XX_GRAS_PERFCOUNTER3_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_uche[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_0_LO,
+ 21, A3XX_UCHE_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_1_LO,
+ 22, A3XX_UCHE_PERFCOUNTER1_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_2_LO,
+ 23, A3XX_UCHE_PERFCOUNTER2_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_3_LO,
+ 24, A3XX_UCHE_PERFCOUNTER3_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_4_LO,
+ 25, A3XX_UCHE_PERFCOUNTER4_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_UCHE_5_LO,
+ 26, A3XX_UCHE_PERFCOUNTER5_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_tp[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_0_LO,
+ 27, A3XX_TP_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_1_LO,
+ 28, A3XX_TP_PERFCOUNTER1_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_2_LO,
+ 29, A3XX_TP_PERFCOUNTER2_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_3_LO,
+ 30, A3XX_TP_PERFCOUNTER3_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_4_LO,
+ 31, A3XX_TP_PERFCOUNTER4_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_TP_5_LO,
+ 32, A3XX_TP_PERFCOUNTER5_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_sp[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_0_LO,
+ 33, A3XX_SP_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_1_LO,
+ 34, A3XX_SP_PERFCOUNTER1_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_2_LO,
+ 35, A3XX_SP_PERFCOUNTER2_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_3_LO,
+ 36, A3XX_SP_PERFCOUNTER3_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_4_LO,
+ 37, A3XX_SP_PERFCOUNTER4_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_5_LO,
+ 38, A3XX_SP_PERFCOUNTER5_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_6_LO,
+ 39, A3XX_SP_PERFCOUNTER6_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_SP_7_LO,
+ 40, A3XX_SP_PERFCOUNTER7_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_0_LO,
+ 41, A3XX_RB_PERFCOUNTER0_SELECT },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_RB_1_LO,
+ 42, A3XX_RB_PERFCOUNTER1_SELECT },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_0_LO,
+ -1, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_1_LO,
+ -1, 0 },
+};
+
+static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT0_LO, -1, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_CNT1_LO, -1, 0 },
+};
+static struct adreno_perfcount_register a3xx_perfcounters_vbif_pwr[] = {
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT0_LO, -1, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT1_LO, -1, 0 },
+ { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_VBIF_PERF_PWR_CNT2_LO, -1, 0 },
+};
+
+static struct adreno_perfcount_group a3xx_perfcounter_groups[] = {
+ ADRENO_PERFCOUNTER_GROUP(a3xx, cp),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, rbbm),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, pc),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, vfd),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, hlsq),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, vpc),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, tse),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, ras),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, uche),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, tp),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, sp),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, rb),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, pwr),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, vbif),
+ ADRENO_PERFCOUNTER_GROUP(a3xx, vbif_pwr),
+};
+
+static struct adreno_perfcounters a3xx_perfcounters = {
+ a3xx_perfcounter_groups,
+ ARRAY_SIZE(a3xx_perfcounter_groups),
+};
+
+/*
+ * a3xx_perfcounter_close() - Return counters that were initialized in
+ * a3xx_perfcounter_init
+ * @adreno_dev: The device for which counters were initialized
+ */
+static void a3xx_perfcounter_close(struct adreno_device *adreno_dev)
+{
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_FS_FULL_ALU_INSTRUCTIONS,
+ PERFCOUNTER_FLAG_KERNEL);
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_FS_CFLOW_INSTRUCTIONS,
+ PERFCOUNTER_FLAG_KERNEL);
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP0_ICL1_MISSES,
+ PERFCOUNTER_FLAG_KERNEL);
+ adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_ALU_ACTIVE_CYCLES,
+ PERFCOUNTER_FLAG_KERNEL);
+}
+
+static int a3xx_perfcounter_init(struct adreno_device *adreno_dev)
+{
+ int ret;
+ /* SP[3] counter is broken on a330 so disable it if a330 device */
+ if (adreno_is_a330(adreno_dev))
+ a3xx_perfcounters_sp[3].countable = KGSL_PERFCOUNTER_BROKEN;
+
+ /*
+ * Set SP to count SP_ALU_ACTIVE_CYCLES, it includes
+ * all ALU instruction execution regardless precision or shader ID.
+ * Set SP to count SP0_ICL1_MISSES, It counts
+ * USP L1 instruction miss request.
+ * Set SP to count SP_FS_FULL_ALU_INSTRUCTIONS, it
+ * counts USP flow control instruction execution.
+ * we will use this to augment our hang detection
+ */
+ if (adreno_dev->fast_hang_detect) {
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_SP,
+ SP_ALU_ACTIVE_CYCLES, &ft_detect_regs[6],
+ PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
+ ft_detect_regs[7] = ft_detect_regs[6] + 1;
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_SP,
+ SP0_ICL1_MISSES, &ft_detect_regs[8],
+ PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
+ ft_detect_regs[9] = ft_detect_regs[8] + 1;
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_SP,
+ SP_FS_CFLOW_INSTRUCTIONS, &ft_detect_regs[10],
+ PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
+ ft_detect_regs[11] = ft_detect_regs[10] + 1;
+ }
+
+ ret = adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_SP,
+ SP_FS_FULL_ALU_INSTRUCTIONS, NULL, PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
+
+ /* Reserve and start countable 1 in the PWR perfcounter group */
+ ret = adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
+ NULL, PERFCOUNTER_FLAG_KERNEL);
+ if (ret)
+ goto err;
+
+ return ret;
+
+err:
+ a3xx_perfcounter_close(adreno_dev);
+ return ret;
+}
+
+/**
+ * a3xx_protect_init() - Initializes register protection on a3xx
+ * @device: Pointer to the device structure
+ * Performs register writes to enable protected access to sensitive
+ * registers
+ */
+static void a3xx_protect_init(struct kgsl_device *device)
+{
+ /* enable access protection to privileged registers */
+ kgsl_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);
+
+ /* RBBM registers */
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);
+
+ /* CP registers */
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);
+
+ /* RB registers */
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);
+
+ /* VBIF registers */
+ kgsl_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
+}
+
+static void a3xx_start(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ adreno_vbif_start(device, a3xx_vbif_platforms,
+ ARRAY_SIZE(a3xx_vbif_platforms));
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ kgsl_regwrite(device, A3XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Tune the hystersis counters for SP and CP idle detection */
+ kgsl_regwrite(device, A3XX_RBBM_SP_HYST_CNT, 0x10);
+ kgsl_regwrite(device, A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ /* Enable the RBBM error reporting bits. This lets us get
+ useful information on failure */
+
+ kgsl_regwrite(device, A3XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting */
+ kgsl_regwrite(device, A3XX_RBBM_AHB_CTL1, 0xA6FFFFFF);
+
+ /* Turn on the power counters */
+ kgsl_regwrite(device, A3XX_RBBM_RBBM_CTL, 0x00030000);
+
+ /* Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang */
+
+ kgsl_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
+ (1 << 16) | 0xFFF);
+
+ /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0). */
+ kgsl_regwrite(device, A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
+
+ /* Enable Clock gating */
+ kgsl_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+ adreno_a3xx_rbbm_clock_ctl_default(adreno_dev));
+
+ if (adreno_is_a330v2(adreno_dev))
+ kgsl_regwrite(device, A3XX_RBBM_GPR0_CTL,
+ A330v2_RBBM_GPR0_CTL_DEFAULT);
+ else if (adreno_is_a330(adreno_dev))
+ kgsl_regwrite(device, A3XX_RBBM_GPR0_CTL,
+ A330_RBBM_GPR0_CTL_DEFAULT);
+
+ /* Set the OCMEM base address for A330 */
+ if (adreno_is_a330(adreno_dev) ||
+ adreno_is_a305b(adreno_dev)) {
+ kgsl_regwrite(device, A3XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(adreno_dev->ocmem_base >> 14));
+ }
+ /* Turn on protection */
+ a3xx_protect_init(device);
+
+ /* Turn on performance counters */
+ kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);
+
+ /* Turn on the GPU busy counter and let it run free */
+
+ adreno_dev->gpu_cycles = 0;
+}
+
+/**
+ * a3xx_coresight_enable() - Enables debugging through coresight
+ * debug bus for adreno a3xx devices.
+ * @device: Pointer to GPU device structure
+ */
+int a3xx_coresight_enable(struct kgsl_device *device)
+{
+ mutex_lock(&device->mutex);
+ if (!kgsl_active_count_get(device)) {
+ kgsl_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL, 0x0001093F);
+ kgsl_regwrite(device, A3XX_RBBM_DEBUG_BUS_STB_CTL0,
+ 0x00000000);
+ kgsl_regwrite(device, A3XX_RBBM_DEBUG_BUS_STB_CTL1,
+ 0xFFFFFFFE);
+ kgsl_regwrite(device, A3XX_RBBM_INT_TRACE_BUS_CTL,
+ 0x00201111);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_BUS_CTL,
+ 0x89100010);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_STOP_CNT,
+ 0x00017fff);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_START_CNT,
+ 0x0001000f);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_PERIOD_CNT ,
+ 0x0001ffff);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_CMD,
+ 0x00000001);
+ kgsl_active_count_put(device);
+ }
+ mutex_unlock(&device->mutex);
+ return 0;
+}
+
+/**
+ * a3xx_coresight_disable() - Disables debugging through coresight
+ * debug bus for adreno a3xx devices.
+ * @device: Pointer to GPU device structure
+ */
+void a3xx_coresight_disable(struct kgsl_device *device)
+{
+ mutex_lock(&device->mutex);
+ if (!kgsl_active_count_get(device)) {
+ kgsl_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL, 0x0);
+ kgsl_regwrite(device, A3XX_RBBM_DEBUG_BUS_STB_CTL0, 0x0);
+ kgsl_regwrite(device, A3XX_RBBM_DEBUG_BUS_STB_CTL1, 0x0);
+ kgsl_regwrite(device, A3XX_RBBM_INT_TRACE_BUS_CTL, 0x0);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_BUS_CTL, 0x0);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_STOP_CNT, 0x0);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_START_CNT, 0x0);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_PERIOD_CNT , 0x0);
+ kgsl_regwrite(device, A3XX_RBBM_EXT_TRACE_CMD, 0x0);
+ kgsl_active_count_put(device);
+ }
+ mutex_unlock(&device->mutex);
+}
+
+static void a3xx_coresight_write_reg(struct kgsl_device *device,
+ unsigned int wordoffset, unsigned int val)
+{
+ mutex_lock(&device->mutex);
+ if (!kgsl_active_count_get(device)) {
+ kgsl_regwrite(device, wordoffset, val);
+ kgsl_active_count_put(device);
+ }
+ mutex_unlock(&device->mutex);
+}
+
+void a3xx_coresight_config_debug_reg(struct kgsl_device *device,
+ int debug_reg, unsigned int val)
+{
+ switch (debug_reg) {
+
+ case DEBUG_BUS_CTL:
+ a3xx_coresight_write_reg(device, A3XX_RBBM_DEBUG_BUS_CTL, val);
+ break;
+
+ case TRACE_STOP_CNT:
+ a3xx_coresight_write_reg(device, A3XX_RBBM_EXT_TRACE_STOP_CNT,
+ val);
+ break;
+
+ case TRACE_START_CNT:
+ a3xx_coresight_write_reg(device, A3XX_RBBM_EXT_TRACE_START_CNT,
+ val);
+ break;
+
+ case TRACE_PERIOD_CNT:
+ a3xx_coresight_write_reg(device, A3XX_RBBM_EXT_TRACE_PERIOD_CNT,
+ val);
+ break;
+
+ case TRACE_CMD:
+ a3xx_coresight_write_reg(device, A3XX_RBBM_EXT_TRACE_CMD, val);
+ break;
+
+ case TRACE_BUS_CTL:
+ a3xx_coresight_write_reg(device, A3XX_RBBM_EXT_TRACE_BUS_CTL,
+ val);
+ break;
+ }
+
+}
+
+/*
+ * a3xx_soft_reset() - Soft reset GPU
+ * @adreno_dev: Pointer to adreno device
+ *
+ * Soft reset the GPU by doing a AHB write of value 1 to RBBM_SW_RESET
+ * register. This is used when we want to reset the GPU without
+ * turning off GFX power rail. The reset when asserted resets
+ * all the HW logic, restores GPU registers to default state and
+ * flushes out pending VBIF transactions.
+ */
+static void a3xx_soft_reset(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int reg;
+
+ kgsl_regwrite(device, A3XX_RBBM_SW_RESET_CMD, 1);
+ /*
+ * Do a dummy read to get a brief read cycle delay for the reset to take
+ * effect
+ */
+ kgsl_regread(device, A3XX_RBBM_SW_RESET_CMD, ®);
+ kgsl_regwrite(device, A3XX_RBBM_SW_RESET_CMD, 0);
+}
+
+/* Defined in adreno_a3xx_snapshot.c */
+void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
+ int *remain, int hang);
+
+static void a3xx_postmortem_dump(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int r1, r2, r3, rbbm_status;
+ unsigned int cp_stat, rb_count;
+
+ kgsl_regread(device, REG_RBBM_STATUS, &rbbm_status);
+ KGSL_LOG_DUMP(device, "RBBM: STATUS = %08X\n", rbbm_status);
+
+ {
+ struct log_field lines[] = {
+ {rbbm_status & BIT(0), "HI busy "},
+ {rbbm_status & BIT(1), "CP ME busy "},
+ {rbbm_status & BIT(2), "CP PFP busy "},
+ {rbbm_status & BIT(14), "CP NRT busy "},
+ {rbbm_status & BIT(15), "VBIF busy "},
+ {rbbm_status & BIT(16), "TSE busy "},
+ {rbbm_status & BIT(17), "RAS busy "},
+ {rbbm_status & BIT(18), "RB busy "},
+ {rbbm_status & BIT(19), "PC DCALL bsy"},
+ {rbbm_status & BIT(20), "PC VSD busy "},
+ {rbbm_status & BIT(21), "VFD busy "},
+ {rbbm_status & BIT(22), "VPC busy "},
+ {rbbm_status & BIT(23), "UCHE busy "},
+ {rbbm_status & BIT(24), "SP busy "},
+ {rbbm_status & BIT(25), "TPL1 busy "},
+ {rbbm_status & BIT(26), "MARB busy "},
+ {rbbm_status & BIT(27), "VSC busy "},
+ {rbbm_status & BIT(28), "ARB busy "},
+ {rbbm_status & BIT(29), "HLSQ busy "},
+ {rbbm_status & BIT(30), "GPU bsy noHC"},
+ {rbbm_status & BIT(31), "GPU busy "},
+ };
+ adreno_dump_fields(device, " STATUS=", lines,
+ ARRAY_SIZE(lines));
+ }
+
+ kgsl_regread(device, REG_CP_RB_BASE, &r1);
+ kgsl_regread(device, REG_CP_RB_CNTL, &r2);
+ rb_count = 2 << (r2 & (BIT(6) - 1));
+ kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
+ KGSL_LOG_DUMP(device,
+ "CP_RB: BASE = %08X | CNTL = %08X | RPTR_ADDR = %08X"
+ "| rb_count = %08X\n", r1, r2, r3, rb_count);
+
+ kgsl_regread(device, REG_CP_RB_RPTR, &r1);
+ kgsl_regread(device, REG_CP_RB_WPTR, &r2);
+ kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3);
+ KGSL_LOG_DUMP(device,
+ "CP_RB: BASE = %08X | CNTL = %08X | RPTR_ADDR = %08X"
+ "| rb_count = %08X\n", r1, r2, r3, rb_count);
+
+ kgsl_regread(device, REG_CP_RB_RPTR, &r1);
+ kgsl_regread(device, REG_CP_RB_WPTR, &r2);
+ kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3);
+ KGSL_LOG_DUMP(device,
+ " RPTR = %08X | WPTR = %08X | RPTR_WR = %08X"
+ "\n", r1, r2, r3);
+
+ kgsl_regread(device, REG_CP_IB1_BASE, &r1);
+ kgsl_regread(device, REG_CP_IB1_BUFSZ, &r2);
+ KGSL_LOG_DUMP(device, "CP_IB1: BASE = %08X | BUFSZ = %d\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_ME_CNTL, &r1);
+ kgsl_regread(device, REG_CP_ME_STATUS, &r2);
+ KGSL_LOG_DUMP(device, "CP_ME: CNTL = %08X | STATUS = %08X\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_STAT, &cp_stat);
+ KGSL_LOG_DUMP(device, "CP_STAT = %08X\n", cp_stat);
+#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(0), "WR_BSY 0"},
+ {cp_stat & BIT(1), "RD_RQ_BSY 1"},
+ {cp_stat & BIT(2), "RD_RTN_BSY 2"},
+ };
+ adreno_dump_fields(device, " MIU=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(5), "RING_BUSY 5"},
+ {cp_stat & BIT(6), "NDRCTS_BSY 6"},
+ {cp_stat & BIT(7), "NDRCT2_BSY 7"},
+ {cp_stat & BIT(9), "ST_BUSY 9"},
+ {cp_stat & BIT(10), "BUSY 10"},
+ };
+ adreno_dump_fields(device, " CSF=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(11), "RNG_Q_BSY 11"},
+ {cp_stat & BIT(12), "NDRCTS_Q_B12"},
+ {cp_stat & BIT(13), "NDRCT2_Q_B13"},
+ {cp_stat & BIT(16), "ST_QUEUE_B16"},
+ {cp_stat & BIT(17), "PFP_BUSY 17"},
+ };
+ adreno_dump_fields(device, " RING=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(3), "RBIU_BUSY 3"},
+ {cp_stat & BIT(4), "RCIU_BUSY 4"},
+ {cp_stat & BIT(8), "EVENT_BUSY 8"},
+ {cp_stat & BIT(18), "MQ_RG_BSY 18"},
+ {cp_stat & BIT(19), "MQ_NDRS_BS19"},
+ {cp_stat & BIT(20), "MQ_NDR2_BS20"},
+ {cp_stat & BIT(21), "MIU_WC_STL21"},
+ {cp_stat & BIT(22), "CP_NRT_BSY22"},
+ {cp_stat & BIT(23), "3D_BUSY 23"},
+ {cp_stat & BIT(26), "ME_BUSY 26"},
+ {cp_stat & BIT(27), "RB_FFO_BSY27"},
+ {cp_stat & BIT(28), "CF_FFO_BSY28"},
+ {cp_stat & BIT(29), "PS_FFO_BSY29"},
+ {cp_stat & BIT(30), "VS_FFO_BSY30"},
+ {cp_stat & BIT(31), "CP_BUSY 31"},
+ };
+ adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns));
+ }
+#endif
+
+ kgsl_regread(device, A3XX_RBBM_INT_0_STATUS, &r1);
+ KGSL_LOG_DUMP(device, "MSTR_INT_SGNL = %08X\n", r1);
+ {
+ struct log_field ints[] = {
+ {r1 & BIT(0), "RBBM_GPU_IDLE 0"},
+ {r1 & BIT(1), "RBBM_AHB_ERROR 1"},
+ {r1 & BIT(2), "RBBM_REG_TIMEOUT 2"},
+ {r1 & BIT(3), "RBBM_ME_MS_TIMEOUT 3"},
+ {r1 & BIT(4), "RBBM_PFP_MS_TIMEOUT 4"},
+ {r1 & BIT(5), "RBBM_ATB_BUS_OVERFLOW 5"},
+ {r1 & BIT(6), "VFD_ERROR 6"},
+ {r1 & BIT(7), "CP_SW_INT 7"},
+ {r1 & BIT(8), "CP_T0_PACKET_IN_IB 8"},
+ {r1 & BIT(9), "CP_OPCODE_ERROR 9"},
+ {r1 & BIT(10), "CP_RESERVED_BIT_ERROR 10"},
+ {r1 & BIT(11), "CP_HW_FAULT 11"},
+ {r1 & BIT(12), "CP_DMA 12"},
+ {r1 & BIT(13), "CP_IB2_INT 13"},
+ {r1 & BIT(14), "CP_IB1_INT 14"},
+ {r1 & BIT(15), "CP_RB_INT 15"},
+ {r1 & BIT(16), "CP_REG_PROTECT_FAULT 16"},
+ {r1 & BIT(17), "CP_RB_DONE_TS 17"},
+ {r1 & BIT(18), "CP_VS_DONE_TS 18"},
+ {r1 & BIT(19), "CP_PS_DONE_TS 19"},
+ {r1 & BIT(20), "CACHE_FLUSH_TS 20"},
+ {r1 & BIT(21), "CP_AHB_ERROR_HALT 21"},
+ {r1 & BIT(24), "MISC_HANG_DETECT 24"},
+ {r1 & BIT(25), "UCHE_OOB_ACCESS 25"},
+ };
+ adreno_dump_fields(device, "INT_SGNL=", ints, ARRAY_SIZE(ints));
+ }
+}
+
+/* Register offset defines for A3XX */
+static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_DEBUG, REG_CP_DEBUG),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_WADDR, REG_CP_ME_RAM_WADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_DATA, REG_CP_ME_RAM_DATA),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_DATA, A3XX_CP_PFP_UCODE_DATA),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_ADDR, A3XX_CP_PFP_UCODE_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A3XX_CP_WFI_PEND_CTR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, REG_CP_RB_BASE),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR, REG_CP_RB_RPTR_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, REG_CP_RB_RPTR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, REG_CP_RB_WPTR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_CTRL, A3XX_CP_PROTECT_CTRL),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, REG_CP_ME_CNTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, REG_CP_RB_CNTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, REG_CP_IB1_BASE),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, REG_CP_IB1_BUFSZ),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, REG_CP_IB2_BASE),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, REG_CP_IB2_BUFSZ),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_TIMESTAMP, REG_CP_TIMESTAMP),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_RADDR, REG_CP_ME_RAM_RADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_ADDR, REG_SCRATCH_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_UMSK, REG_SCRATCH_UMSK),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A3XX_RBBM_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A3XX_RBBM_PERFCTR_CTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
+ A3XX_RBBM_PERFCTR_LOAD_CMD0),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
+ A3XX_RBBM_PERFCTR_LOAD_CMD1),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
+ A3XX_RBBM_PERFCTR_PWR_1_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A3XX_RBBM_INT_0_MASK),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A3XX_RBBM_INT_0_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_AHB_ERROR_STATUS,
+ A3XX_RBBM_AHB_ERROR_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_AHB_CMD, A3XX_RBBM_AHB_CMD),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
+ A3XX_RBBM_INT_CLEAR_CMD),
+ ADRENO_REG_DEFINE(ADRENO_REG_VPC_DEBUG_RAM_SEL,
+ A3XX_VPC_VPC_DEBUG_RAM_SEL),
+ ADRENO_REG_DEFINE(ADRENO_REG_VPC_DEBUG_RAM_READ,
+ A3XX_VPC_VPC_DEBUG_RAM_READ),
+ ADRENO_REG_DEFINE(ADRENO_REG_VSC_PIPE_DATA_ADDRESS_0,
+ A3XX_VSC_PIPE_DATA_ADDRESS_0),
+ ADRENO_REG_DEFINE(ADRENO_REG_VSC_PIPE_DATA_LENGTH_7,
+ A3XX_VSC_PIPE_DATA_LENGTH_7),
+ ADRENO_REG_DEFINE(ADRENO_REG_VSC_SIZE_ADDRESS, A3XX_VSC_SIZE_ADDRESS),
+ ADRENO_REG_DEFINE(ADRENO_REG_VFD_CONTROL_0, A3XX_VFD_CONTROL_0),
+ ADRENO_REG_DEFINE(ADRENO_REG_VFD_FETCH_INSTR_0_0,
+ A3XX_VFD_FETCH_INSTR_0_0),
+ ADRENO_REG_DEFINE(ADRENO_REG_VFD_FETCH_INSTR_1_F,
+ A3XX_VFD_FETCH_INSTR_1_F),
+ ADRENO_REG_DEFINE(ADRENO_REG_VFD_INDEX_MAX, A3XX_VFD_INDEX_MAX),
+ ADRENO_REG_DEFINE(ADRENO_REG_SP_VS_PVT_MEM_ADDR_REG,
+ A3XX_SP_VS_PVT_MEM_ADDR_REG),
+ ADRENO_REG_DEFINE(ADRENO_REG_SP_FS_PVT_MEM_ADDR_REG,
+ A3XX_SP_FS_PVT_MEM_ADDR_REG),
+ ADRENO_REG_DEFINE(ADRENO_REG_SP_VS_OBJ_START_REG,
+ A3XX_SP_VS_OBJ_START_REG),
+ ADRENO_REG_DEFINE(ADRENO_REG_SP_FS_OBJ_START_REG,
+ A3XX_SP_FS_OBJ_START_REG),
+ ADRENO_REG_DEFINE(ADRENO_REG_PA_SC_AA_CONFIG, REG_PA_SC_AA_CONFIG),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PM_OVERRIDE2, REG_RBBM_PM_OVERRIDE2),
+ ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_REG2, REG_SCRATCH_REG2),
+ ADRENO_REG_DEFINE(ADRENO_REG_SQ_GPR_MANAGEMENT, REG_SQ_GPR_MANAGEMENT),
+ ADRENO_REG_DEFINE(ADRENO_REG_SQ_INST_STORE_MANAGMENT,
+ REG_SQ_INST_STORE_MANAGMENT),
+ ADRENO_REG_DEFINE(ADRENO_REG_TC_CNTL_STATUS, REG_TC_CNTL_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_TP0_CHICKEN, REG_TP0_CHICKEN),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_RBBM_CTL, A3XX_RBBM_RBBM_CTL),
+};
+
+const struct adreno_reg_offsets a3xx_reg_offsets = {
+ .offsets = a3xx_register_offsets,
+ .offset_0 = ADRENO_REG_REGISTER_MAX,
+};
+
+struct adreno_gpudev adreno_a3xx_gpudev = {
+ .reg_offsets = &a3xx_reg_offsets,
+ .perfcounters = &a3xx_perfcounters,
+
+ .ctxt_create = a3xx_drawctxt_create,
+ .ctxt_save = a3xx_drawctxt_save,
+ .ctxt_restore = a3xx_drawctxt_restore,
+ .ctxt_draw_workaround = NULL,
+ .rb_init = a3xx_rb_init,
+ .perfcounter_init = a3xx_perfcounter_init,
+ .perfcounter_close = a3xx_perfcounter_close,
+ .irq_control = a3xx_irq_control,
+ .irq_handler = a3xx_irq_handler,
+ .irq_pending = a3xx_irq_pending,
+ .busy_cycles = a3xx_busy_cycles,
+ .start = a3xx_start,
+ .snapshot = a3xx_snapshot,
+ .perfcounter_enable = a3xx_perfcounter_enable,
+ .perfcounter_read = a3xx_perfcounter_read,
+ .coresight_enable = a3xx_coresight_enable,
+ .coresight_disable = a3xx_coresight_disable,
+ .coresight_config_debug_reg = a3xx_coresight_config_debug_reg,
+ .postmortem_dump = a3xx_postmortem_dump,
+ .soft_reset = a3xx_soft_reset,
+};
diff --git a/drivers/gpu/msm2/adreno_a3xx.h b/drivers/gpu/msm2/adreno_a3xx.h
new file mode 100644
index 0000000..fbd28e2
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a3xx.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A3XX_H
+#define __A3XX_H
+
+void a3xx_err_callback(struct adreno_device *adreno_dev, int bit);
+irqreturn_t a3xx_irq_handler(struct adreno_device *adreno_dev);
+void a3xx_irq_control(struct adreno_device *adreno_dev, int state);
+unsigned int a3xx_irq_pending(struct adreno_device *adreno_dev);
+unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev);
+
+int a3xx_rb_init(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb);
+
+#endif /*__A3XX_H */
diff --git a/drivers/gpu/msm2/adreno_a3xx_snapshot.c b/drivers/gpu/msm2/adreno_a3xx_snapshot.c
new file mode 100644
index 0000000..ba0ef6a
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a3xx_snapshot.c
@@ -0,0 +1,526 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_snapshot.h"
+#include "a3xx_reg.h"
+
+#define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \
+ + sizeof(struct kgsl_snapshot_debug))
+
+/* Shader memory size in words */
+#define SHADER_MEMORY_SIZE 0x4000
+
+/**
+ * _rbbm_debug_bus_read - Helper function to read data from the RBBM
+ * debug bus.
+ * @device - GPU device to read/write registers
+ * @block_id - Debug bus block to read from
+ * @index - Index in the debug bus block to read
+ * @ret - Value of the register read
+ */
+static void _rbbm_debug_bus_read(struct kgsl_device *device,
+ unsigned int block_id, unsigned int index, unsigned int *val)
+{
+ unsigned int block = (block_id << 8) | 1 << 16;
+ kgsl_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL, block | index);
+ kgsl_regread(device, A3XX_RBBM_DEBUG_BUS_DATA_STATUS, val);
+}
+
+/**
+ * a3xx_snapshot_shader_memory - Helper function to dump the GPU shader
+ * memory to the snapshot buffer.
+ * @device - GPU device whose shader memory is to be dumped
+ * @snapshot - Pointer to binary snapshot data blob being made
+ * @remain - Number of remaining bytes in the snapshot blob
+ * @priv - Unused parameter
+ */
+static int a3xx_snapshot_shader_memory(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int i;
+ unsigned int *data = snapshot + sizeof(*header);
+ unsigned int shader_read_len = SHADER_MEMORY_SIZE;
+
+ if (SHADER_MEMORY_SIZE > (device->shader_mem_len >> 2))
+ shader_read_len = (device->shader_mem_len >> 2);
+
+ if (remain < DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE)) {
+ SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_SHADER_MEMORY;
+ header->size = SHADER_MEMORY_SIZE;
+
+ /* Map shader memory to kernel, for dumping */
+ if (device->shader_mem_virt == NULL)
+ device->shader_mem_virt = devm_ioremap(device->dev,
+ device->shader_mem_phys,
+ device->shader_mem_len);
+
+ if (device->shader_mem_virt == NULL) {
+ KGSL_DRV_ERR(device,
+ "Unable to map shader memory region\n");
+ return 0;
+ }
+
+ /* Now, dump shader memory to snapshot */
+ for (i = 0; i < shader_read_len; i++)
+ adreno_shadermem_regread(device, i, &data[i]);
+
+
+ return DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE);
+}
+
+#define VPC_MEMORY_BANKS 4
+#define VPC_MEMORY_SIZE 512
+
+static int a3xx_snapshot_vpc_memory(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int size = VPC_MEMORY_BANKS * VPC_MEMORY_SIZE;
+ int bank, addr, i = 0;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "VPC MEMORY");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_VPC_MEMORY;
+ header->size = size;
+
+ for (bank = 0; bank < VPC_MEMORY_BANKS; bank++) {
+ for (addr = 0; addr < VPC_MEMORY_SIZE; addr++) {
+ unsigned int val = bank | (addr << 4);
+ kgsl_regwrite(device,
+ A3XX_VPC_VPC_DEBUG_RAM_SEL, val);
+ kgsl_regread(device,
+ A3XX_VPC_VPC_DEBUG_RAM_READ, &data[i++]);
+ }
+ }
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+#define CP_MEQ_SIZE 16
+static int a3xx_snapshot_cp_meq(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(CP_MEQ_SIZE)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP MEQ DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP_MEQ;
+ header->size = CP_MEQ_SIZE;
+
+ kgsl_regwrite(device, A3XX_CP_MEQ_ADDR, 0x0);
+ for (i = 0; i < CP_MEQ_SIZE; i++)
+ kgsl_regread(device, A3XX_CP_MEQ_DATA, &data[i]);
+
+ return DEBUG_SECTION_SZ(CP_MEQ_SIZE);
+}
+
+static int a3xx_snapshot_cp_pm4_ram(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, size = adreno_dev->pm4_fw_size - 1;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP PM4 RAM DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP_PM4_RAM;
+ header->size = size;
+
+ /*
+ * Read the firmware from the GPU rather than use our cache in order to
+ * try to catch mis-programming or corruption in the hardware. We do
+ * use the cached version of the size, however, instead of trying to
+ * maintain always changing hardcoded constants
+ */
+
+ kgsl_regwrite(device, REG_CP_ME_RAM_RADDR, 0x0);
+ for (i = 0; i < size; i++)
+ kgsl_regread(device, REG_CP_ME_RAM_DATA, &data[i]);
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+static int a3xx_snapshot_cp_pfp_ram(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, size = adreno_dev->pfp_fw_size - 1;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP PFP RAM DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP_PFP_RAM;
+ header->size = size;
+
+ /*
+ * Read the firmware from the GPU rather than use our cache in order to
+ * try to catch mis-programming or corruption in the hardware. We do
+ * use the cached version of the size, however, instead of trying to
+ * maintain always changing hardcoded constants
+ */
+ kgsl_regwrite(device, A3XX_CP_PFP_UCODE_ADDR, 0x0);
+ for (i = 0; i < size; i++)
+ kgsl_regread(device, A3XX_CP_PFP_UCODE_DATA, &data[i]);
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+/* This is the ROQ buffer size on both the A305 and A320 */
+#define A320_CP_ROQ_SIZE 128
+/* This is the ROQ buffer size on the A330 */
+#define A330_CP_ROQ_SIZE 512
+
+static int a3xx_snapshot_cp_roq(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, size;
+
+ /* The size of the ROQ buffer is core dependent */
+ size = (adreno_is_a330(adreno_dev) ||
+ adreno_is_a305b(adreno_dev)) ?
+ A330_CP_ROQ_SIZE : A320_CP_ROQ_SIZE;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP ROQ DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP_ROQ;
+ header->size = size;
+
+ kgsl_regwrite(device, A3XX_CP_ROQ_ADDR, 0x0);
+ for (i = 0; i < size; i++)
+ kgsl_regread(device, A3XX_CP_ROQ_DATA, &data[i]);
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+#define A330_CP_MERCIU_QUEUE_SIZE 32
+
+static int a330_snapshot_cp_merciu(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, size;
+
+ /* The MERCIU data is two dwords per entry */
+ size = A330_CP_MERCIU_QUEUE_SIZE << 1;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP MERCIU DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP_MERCIU;
+ header->size = size;
+
+ kgsl_regwrite(device, A3XX_CP_MERCIU_ADDR, 0x0);
+
+ for (i = 0; i < A330_CP_MERCIU_QUEUE_SIZE; i++) {
+ kgsl_regread(device, A3XX_CP_MERCIU_DATA,
+ &data[(i * 2)]);
+ kgsl_regread(device, A3XX_CP_MERCIU_DATA2,
+ &data[(i * 2) + 1]);
+ }
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+struct debugbus_block {
+ unsigned int block_id;
+ unsigned int dwords;
+};
+
+static int a3xx_snapshot_debugbus_block(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ struct kgsl_snapshot_debugbus *header = snapshot;
+ struct debugbus_block *block = priv;
+ unsigned int val;
+ int i;
+ unsigned int *data = snapshot + sizeof(*header);
+ unsigned int dwords;
+ int size;
+
+ /*
+ * For A305 and A320 all debug bus regions are the same size (0x40). For
+ * A330, they can be different sizes - most are still 0x40, but some
+ * like CP are larger
+ */
+
+ dwords = (adreno_is_a330(adreno_dev) ||
+ adreno_is_a305b(adreno_dev)) ?
+ block->dwords : 0x40;
+
+ size = (dwords * sizeof(unsigned int)) + sizeof(*header);
+
+ if (remain < size) {
+ SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
+ return 0;
+ }
+
+ val = (block->block_id << 8) | (1 << 16);
+
+ header->id = block->block_id;
+ header->count = dwords;
+
+ for (i = 0; i < dwords; i++)
+ _rbbm_debug_bus_read(device, block->block_id, i, &data[i]);
+
+ return size;
+}
+
+static struct debugbus_block debugbus_blocks[] = {
+ { RBBM_BLOCK_ID_CP, 0x52, },
+ { RBBM_BLOCK_ID_RBBM, 0x40, },
+ { RBBM_BLOCK_ID_VBIF, 0x40, },
+ { RBBM_BLOCK_ID_HLSQ, 0x40, },
+ { RBBM_BLOCK_ID_UCHE, 0x40, },
+ { RBBM_BLOCK_ID_PC, 0x40, },
+ { RBBM_BLOCK_ID_VFD, 0x40, },
+ { RBBM_BLOCK_ID_VPC, 0x40, },
+ { RBBM_BLOCK_ID_TSE, 0x40, },
+ { RBBM_BLOCK_ID_RAS, 0x40, },
+ { RBBM_BLOCK_ID_VSC, 0x40, },
+ { RBBM_BLOCK_ID_SP_0, 0x40, },
+ { RBBM_BLOCK_ID_SP_1, 0x40, },
+ { RBBM_BLOCK_ID_SP_2, 0x40, },
+ { RBBM_BLOCK_ID_SP_3, 0x40, },
+ { RBBM_BLOCK_ID_TPL1_0, 0x40, },
+ { RBBM_BLOCK_ID_TPL1_1, 0x40, },
+ { RBBM_BLOCK_ID_TPL1_2, 0x40, },
+ { RBBM_BLOCK_ID_TPL1_3, 0x40, },
+ { RBBM_BLOCK_ID_RB_0, 0x40, },
+ { RBBM_BLOCK_ID_RB_1, 0x40, },
+ { RBBM_BLOCK_ID_RB_2, 0x40, },
+ { RBBM_BLOCK_ID_RB_3, 0x40, },
+ { RBBM_BLOCK_ID_MARB_0, 0x40, },
+ { RBBM_BLOCK_ID_MARB_1, 0x40, },
+ { RBBM_BLOCK_ID_MARB_2, 0x40, },
+ { RBBM_BLOCK_ID_MARB_3, 0x40, },
+};
+
+static void *a3xx_snapshot_debugbus(struct kgsl_device *device,
+ void *snapshot, int *remain)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugbus_blocks); i++) {
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUGBUS, snapshot, remain,
+ a3xx_snapshot_debugbus_block,
+ (void *) &debugbus_blocks[i]);
+ }
+
+ return snapshot;
+}
+
+static void _snapshot_a3xx_regs(struct kgsl_snapshot_registers *regs,
+ struct kgsl_snapshot_registers_list *list)
+{
+ regs[list->count].regs = (unsigned int *) a3xx_registers;
+ regs[list->count].count = a3xx_registers_count;
+ list->count++;
+}
+
+static void _snapshot_hlsq_regs(struct kgsl_snapshot_registers *regs,
+ struct kgsl_snapshot_registers_list *list,
+ struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ /*
+ * Trying to read HLSQ registers when the HLSQ block is busy
+ * will cause the device to hang. The RBBM_DEBUG_BUS has information
+ * that will tell us if the HLSQ block is busy or not. Read values
+ * from the debug bus to ensure the HLSQ block is not busy (this
+ * is hardware dependent). If the HLSQ block is busy do not
+ * dump the registers, otherwise dump the HLSQ registers.
+ */
+
+ if (adreno_is_a330(adreno_dev)) {
+ /*
+ * stall_ctxt_full status bit: RBBM_BLOCK_ID_HLSQ index 49 [27]
+ *
+ * if (!stall_context_full)
+ * then dump HLSQ registers
+ */
+ unsigned int stall_context_full = 0;
+
+ _rbbm_debug_bus_read(device, RBBM_BLOCK_ID_HLSQ, 49,
+ &stall_context_full);
+ stall_context_full &= 0x08000000;
+
+ if (stall_context_full)
+ return;
+ } else {
+ /*
+ * tpif status bits: RBBM_BLOCK_ID_HLSQ index 4 [4:0]
+ * spif status bits: RBBM_BLOCK_ID_HLSQ index 7 [5:0]
+ *
+ * if ((tpif == 0, 1, 28) && (spif == 0, 1, 10))
+ * then dump HLSQ registers
+ */
+ unsigned int next_pif = 0;
+
+ /* check tpif */
+ _rbbm_debug_bus_read(device, RBBM_BLOCK_ID_HLSQ, 4, &next_pif);
+ next_pif &= 0x1f;
+ if (next_pif != 0 && next_pif != 1 && next_pif != 28)
+ return;
+
+ /* check spif */
+ _rbbm_debug_bus_read(device, RBBM_BLOCK_ID_HLSQ, 7, &next_pif);
+ next_pif &= 0x3f;
+ if (next_pif != 0 && next_pif != 1 && next_pif != 10)
+ return;
+ }
+
+ regs[list->count].regs = (unsigned int *) a3xx_hlsq_registers;
+ regs[list->count].count = a3xx_hlsq_registers_count;
+ list->count++;
+}
+
+static void _snapshot_a330_regs(struct kgsl_snapshot_registers *regs,
+ struct kgsl_snapshot_registers_list *list)
+{
+ /* For A330, append the additional list of new registers to grab */
+ regs[list->count].regs = (unsigned int *) a330_registers;
+ regs[list->count].count = a330_registers_count;
+ list->count++;
+}
+
+/* A3XX GPU snapshot function - this is where all of the A3XX specific
+ * bits and pieces are grabbed into the snapshot memory
+ */
+
+void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
+ int *remain, int hang)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_snapshot_registers_list list;
+ struct kgsl_snapshot_registers regs[5];
+ int size;
+
+ list.registers = regs;
+ list.count = 0;
+
+ /* Disable Clock gating temporarily for the debug bus to work */
+ kgsl_regwrite(device, A3XX_RBBM_CLOCK_CTL, 0x00);
+
+ /* Store relevant registers in list to snapshot */
+ _snapshot_a3xx_regs(regs, &list);
+ _snapshot_hlsq_regs(regs, &list, adreno_dev);
+ if (adreno_is_a330(adreno_dev) || adreno_is_a305b(adreno_dev))
+ _snapshot_a330_regs(regs, &list);
+
+ /* Master set of (non debug) registers */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
+ kgsl_snapshot_dump_regs, &list);
+
+ /*
+ * CP_STATE_DEBUG indexed registers - 20 on 305 and 320 and 46 on A330
+ */
+ size = (adreno_is_a330(adreno_dev) ||
+ adreno_is_a305b(adreno_dev)) ? 0x2E : 0x14;
+
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_CP_STATE_DEBUG_INDEX,
+ REG_CP_STATE_DEBUG_DATA, 0x0, size);
+
+ /* CP_ME indexed registers */
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
+ 64, 44);
+
+ /* VPC memory */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_vpc_memory, NULL);
+
+ /* CP MEQ */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_cp_meq, NULL);
+
+ /* Shader working/shadow memory */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_shader_memory, NULL);
+
+
+ /* CP PFP and PM4 */
+ /* Reading these will hang the GPU if it isn't already hung */
+
+ if (hang) {
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_cp_pfp_ram, NULL);
+
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_cp_pm4_ram, NULL);
+ }
+
+ /* CP ROQ */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_cp_roq, NULL);
+
+ if (adreno_is_a330(adreno_dev) ||
+ adreno_is_a305b(adreno_dev)) {
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a330_snapshot_cp_merciu, NULL);
+ }
+
+ snapshot = a3xx_snapshot_debugbus(device, snapshot, remain);
+
+ /* Enable Clock gating */
+ kgsl_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+ adreno_a3xx_rbbm_clock_ctl_default(adreno_dev));
+
+ return snapshot;
+}
diff --git a/drivers/gpu/msm2/adreno_a3xx_trace.c b/drivers/gpu/msm2/adreno_a3xx_trace.c
new file mode 100644
index 0000000..325b068
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a3xx_trace.c
@@ -0,0 +1,20 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "adreno.h"
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "a3xx_reg.h"
+#include "adreno_a3xx_trace.h"
diff --git a/drivers/gpu/msm2/adreno_a3xx_trace.h b/drivers/gpu/msm2/adreno_a3xx_trace.h
new file mode 100644
index 0000000..d48faf4
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a3xx_trace.h
@@ -0,0 +1,89 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_ADRENO_A3XX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ADRENO_A3XX_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE adreno_a3xx_trace
+
+#include <linux/tracepoint.h>
+
+struct kgsl_device;
+
+/*
+ * Tracepoint for a3xx irq. Includes status info
+ */
+TRACE_EVENT(kgsl_a3xx_irq_status,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int status),
+
+ TP_ARGS(device, status),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->status = status;
+ ),
+
+ TP_printk(
+ "d_name=%s status=%s",
+ __get_str(device_name),
+ __entry->status ? __print_flags(__entry->status, "|",
+ { 1 << A3XX_INT_RBBM_AHB_ERROR, "RBBM_GPU_IDLE" },
+ { 1 << A3XX_INT_RBBM_AHB_ERROR, "RBBM_AHB_ERR" },
+ { 1 << A3XX_INT_RBBM_REG_TIMEOUT, "RBBM_REG_TIMEOUT" },
+ { 1 << A3XX_INT_RBBM_ME_MS_TIMEOUT,
+ "RBBM_ME_MS_TIMEOUT" },
+ { 1 << A3XX_INT_RBBM_PFP_MS_TIMEOUT,
+ "RBBM_PFP_MS_TIMEOUT" },
+ { 1 << A3XX_INT_RBBM_ATB_BUS_OVERFLOW,
+ "RBBM_ATB_BUS_OVERFLOW" },
+ { 1 << A3XX_INT_VFD_ERROR, "RBBM_VFD_ERROR" },
+ { 1 << A3XX_INT_CP_SW_INT, "CP_SW" },
+ { 1 << A3XX_INT_CP_T0_PACKET_IN_IB,
+ "CP_T0_PACKET_IN_IB" },
+ { 1 << A3XX_INT_CP_OPCODE_ERROR, "CP_OPCODE_ERROR" },
+ { 1 << A3XX_INT_CP_RESERVED_BIT_ERROR,
+ "CP_RESERVED_BIT_ERROR" },
+ { 1 << A3XX_INT_CP_HW_FAULT, "CP_HW_FAULT" },
+ { 1 << A3XX_INT_CP_DMA, "CP_DMA" },
+ { 1 << A3XX_INT_CP_IB2_INT, "CP_IB2_INT" },
+ { 1 << A3XX_INT_CP_IB1_INT, "CP_IB1_INT" },
+ { 1 << A3XX_INT_CP_RB_INT, "CP_RB_INT" },
+ { 1 << A3XX_INT_CP_REG_PROTECT_FAULT,
+ "CP_REG_PROTECT_FAULT" },
+ { 1 << A3XX_INT_CP_RB_DONE_TS, "CP_RB_DONE_TS" },
+ { 1 << A3XX_INT_CP_VS_DONE_TS, "CP_VS_DONE_TS" },
+ { 1 << A3XX_INT_CP_PS_DONE_TS, "CP_PS_DONE_TS" },
+ { 1 << A3XX_INT_CACHE_FLUSH_TS, "CACHE_FLUSH_TS" },
+ { 1 << A3XX_INT_CP_AHB_ERROR_HALT,
+ "CP_AHB_ERROR_HALT" },
+ { 1 << A3XX_INT_MISC_HANG_DETECT, "MISC_HANG_DETECT" },
+ { 1 << A3XX_INT_UCHE_OOB_ACCESS, "UCHE_OOB_ACCESS" })
+ : "None"
+ )
+);
+
+#endif /* _ADRENO_A3XX_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm2/adreno_a4xx.c b/drivers/gpu/msm2/adreno_a4xx.c
new file mode 100644
index 0000000..90d816b
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_a4xx.c
@@ -0,0 +1,305 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "adreno.h"
+#include "a4xx_reg.h"
+#include "adreno_a3xx.h"
+
+/*
+ * Set of registers to dump for A4XX on postmortem and snapshot.
+ * Registers in pairs - first value is the start offset, second
+ * is the stop offset (inclusive)
+ */
+
+const unsigned int a4xx_registers[] = {
+ 0x0000, 0x0002, /* RBBM_HW_VERSION - RBBM_HW_CONFIGURATION */
+ 0x0020, 0x0020, /* RBBM_CLOCK_CTL */
+ 0x0021, 0x0021, /* RBBM_SP_HYST_CNT */
+ 0x0023, 0x0024, /* RBBM_AHB_CTL0 - RBBM_AHB_CTL1 */
+ 0x0026, 0x0026, /* RBBM_RB_SUB_BLOCK_SEL_CTL */
+ 0x0028, 0x0034, /* RBBM_RAM_ACC_63_32 - RBBM_INTERFACE_HANG_MASK_CTL4 */
+ 0x0037, 0x003f, /* RBBM_INT_0_MASK - RBBM_AHB_DEBUG_CTL */
+ 0x0041, 0x0045, /* RBBM_VBIF_DEBUG_CTL - BLOCK_SW_RESET_CMD */
+ 0x0047, 0x0049, /* RBBM_RESET_CYCLES - RBBM_EXT_TRACE_BUS_CTL */
+ 0x009c, 0x0170, /* RBBM_PERFCTR_CP_0_LO - RBBM_PERFCTR_CTL */
+ 0x0174, 0x0182, /* RBBM_PERFCTR_LOAD_VALUE_LO - RBBM_CLOCK_STATUS */
+ 0x0189, 0x019f, /* RBBM_AHB_STATUS - RBBM_INTERFACE_RRDY_STATUS5 */
+
+ 0x0206, 0x0217, /* CP_IB1_BASE - CP_ME_RB_DONE_DATA */
+ 0x0219, 0x0219, /* CP_QUEUE_THRESH2 */
+ 0x021b, 0x021b, /* CP_MERCIU_SIZE */
+ 0x0228, 0x0229, /* CP_SCRATCH_UMASK - CP_SCRATCH_ADDR */
+ 0x022a, 0x022c, /* CP_PREEMPT - CP_CNTL */
+ 0x022e, 0x022e, /* CP_DEBUG */
+ 0x0231, 0x0232, /* CP_DEBUG_ECO_CONTROL - CP_DRAW_STATE_ADDR */
+ 0x0240, 0x0250, /* CP_PROTECT_REG_0 - CP_PROTECT_CTRL */
+ 0x04c0, 0x04ce, /* CP_ST_BASE - CP_STQ_AVAIL */
+ 0x04d0, 0x04d0, /* CP_MERCIU_STAT */
+ 0x04d2, 0x04dd, /* CP_WFI_PEND_CTR - CP_EVENTS_IN_FLIGHT */
+ 0x0500, 0x050b, /* CP_PERFCTR_CP_SEL_0 - CP_PERFCOMBINER_SELECT */
+ 0x0578, 0x058f, /* CP_SCRATCH_REG0 - CP_SCRATCH_REG23 */
+
+ 0x0c00, 0x0c03, /* VSC_BIN_SIZE - VSC_DEBUG_ECO_CONTROL */
+ 0x0c08, 0x0c41, /* VSC_PIPE_CONFIG_0 - VSC_PIPE_PARTIAL_POSN_1 */
+ 0x0c50, 0x0c51, /* VSC_PERFCTR_VSC_SEL_0 - VSC_PERFCTR_VSC_SEL_1 */
+
+ 0x0e64, 0x0e68, /* VPC_DEBUG_ECO_CONTROL - VPC_PERFCTR_VPC_SEL_3 */
+ 0x2140, 0x216e, /* VPC_ATTR - VPC_SO_FLUSH_WADDR_3 - ctx0 */
+ 0x2540, 0x256e, /* VPC_ATTR - VPC_SO_FLUSH_WADDR_3 - ctx1 */
+
+ 0x0f00, 0x0f0b, /* TPL1_DEBUG_ECO_CONTROL - TPL1_PERFCTR_TP_SEL_7 */
+ /* TPL1_TP_TEX_OFFSET - TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR - ctx0 */
+ 0x2380, 0x23a6,
+ /* TPL1_TP_TEX_OFFSET - TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR - ctx1 */
+ 0x2780, 0x27a6,
+
+ 0x0ec0, 0x0ecf, /* SP_VS_STATUS - SP_PERFCTR_SP_SEL_11 */
+ 0x22c0, 0x22c1, /* SP_SP_CTRL - SP_INSTR_CACHE_CTRL - ctx0 */
+ 0x22c4, 0x2360, /* SP_VS_CTRL_0 - SP_GS_LENGTH - ctx0 */
+ 0x26c0, 0x26c1, /* SP_SP_CTRL - SP_INSTR_CACHE_CTRL - ctx1 */
+ 0x26c4, 0x2760, /* SP_VS_CTRL_0 - SP_GS_LENGTH - ctx1 */
+
+ 0x0cc0, 0x0cd2, /* RB_GMEM_BASE_ADDR - RB_PERFCTR_CCU_SEL_3 */
+ 0x20a0, 0x213f, /* RB_MODE_CONTROL - RB_VPORT_Z_CLAMP_MAX_15 - ctx0 */
+ 0x24a0, 0x253f, /* RB_MODE_CONTROL - RB_VPORT_Z_CLAMP_MAX_15 - ctx1 */
+
+ 0x0e40, 0x0e4a, /* VFD_DEBUG_CONTROL - VFD_PERFCTR_VFD_SEL_7 */
+ 0x2200, 0x2204, /* VFD_CONTROL_0 - VFD_CONTROL_4 - ctx 0 */
+ 0x2208, 0x22a9, /* VFD_INDEX_OFFSET - VFD_DECODE_INSTR_31 - ctx 0 */
+ 0x2600, 0x2604, /* VFD_CONTROL_0 - VFD_CONTROL_4 - ctx 1 */
+ 0x2608, 0x26a9, /* VFD_INDEX_OFFSET - VFD_DECODE_INSTR_31 - ctx 1 */
+
+ 0x0c80, 0x0c81, /* GRAS_TSE_STATUS - GRAS_DEBUG_ECO_CONTROL */
+ 0x0c88, 0x0c8b, /* GRAS_PERFCTR_TSE_SEL_0 - GRAS_PERFCTR_TSE_SEL_3 */
+ 0x2000, 0x2004, /* GRAS_CL_CLIP_CNTL - GRAS_CL_GB_CLIP_ADJ - ctx 0 */
+ /* GRAS_CL_VPORT_XOFFSET_0 - GRAS_SC_EXTENT_WINDOW_TL - ctx 0 */
+ 0x2008, 0x209f,
+ 0x2400, 0x2404, /* GRAS_CL_CLIP_CNTL - GRAS_CL_GB_CLIP_ADJ - ctx 1 */
+ /* GRAS_CL_VPORT_XOFFSET_0 - GRAS_SC_EXTENT_WINDOW_TL - ctx 1 */
+ 0x2408, 0x249f,
+
+ 0x0e80, 0x0e84, /* UCHE_CACHE_MODE_CONTROL - UCHE_TRAP_BASE_HI */
+ 0x0e88, 0x0e95, /* UCHE_CACHE_STATUS - UCHE_PERFCTR_UCHE_SEL_7 */
+
+ 0x0e00, 0x0e00, /* HLSQ_TIMEOUT_THRESHOLD - HLSQ_TIMEOUT_THRESHOLD */
+ 0x0e04, 0x0e0e, /* HLSQ_DEBUG_ECO_CONTROL - HLSQ_PERF_PIPE_MASK */
+ 0x23c0, 0x23db, /* HLSQ_CONTROL_0 - HLSQ_UPDATE_CONTROL - ctx 0 */
+ 0x27c0, 0x27db, /* HLSQ_CONTROL_0 - HLSQ_UPDATE_CONTROL - ctx 1 */
+
+ 0x0d00, 0x0d0c, /* PC_BINNING_COMMAND - PC_DRAWCALL_SETUP_OVERRIDE */
+ 0x0d10, 0x0d17, /* PC_PERFCTR_PC_SEL_0 - PC_PERFCTR_PC_SEL_7 */
+ 0x21c0, 0x21c6, /* PC_BIN_BASE - PC_RESTART_INDEX - ctx 0 */
+ 0x21e5, 0x21e7, /* PC_GS_PARAM - PC_HS_PARAM - ctx 0 */
+ 0x25c0, 0x25c6, /* PC_BIN_BASE - PC_RESTART_INDEX - ctx 1 */
+ 0x25e5, 0x25e7, /* PC_GS_PARAM - PC_HS_PARAM - ctx 1 */
+};
+
+const unsigned int a4xx_registers_count = ARRAY_SIZE(a4xx_registers) / 2;
+
+static int a4xx_drawctxt_create(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ int ret = 0;
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+ /* This option is not supported on a4xx */
+ KGSL_DRV_ERR(device,
+ "Preambles required for A4XX draw contexts\n");
+ ret = -EPERM;
+ goto done;
+ }
+
+ if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC)) {
+ /* This option is not supported on a4xx */
+ KGSL_DRV_ERR(device,
+ "Cannot create context with gmemalloc\n");
+ ret = -EPERM;
+ }
+
+done:
+ return ret;
+}
+
+static int a4xx_drawctxt_restore(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int cmds[5];
+ int ret;
+
+ if (context == NULL) {
+ /* No context - set the default pagetable and thats it */
+ unsigned int id;
+ /*
+ * If there isn't a current context, the kgsl_mmu_setstate
+ * will use the CPU path so we don't need to give
+ * it a valid context id.
+ */
+ id = (adreno_dev->drawctxt_active != NULL)
+ ? adreno_dev->drawctxt_active->base.id
+ : KGSL_CONTEXT_INVALID;
+ kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+ id);
+ return 0;
+ }
+
+ cmds[0] = cp_nop_packet(1);
+ cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+ cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
+ cmds[3] = device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
+ cmds[4] = context->base.id;
+ ret = adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ cmds, 5);
+ if (ret)
+ return ret;
+ ret = kgsl_mmu_setstate(&device->mmu,
+ context->base.proc_priv->pagetable,
+ context->base.id);
+ return ret;
+}
+
+static const struct adreno_vbif_data a420_vbif[] = {
+ { A4XX_VBIF_ABIT_SORT, 0x0001001F },
+ { A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
+ { A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001 },
+ { A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818 },
+ { A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018 },
+ { A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818 },
+ { A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018 },
+ { A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003 },
+ {0, 0},
+};
+
+const struct adreno_vbif_platform a4xx_vbif_platforms[] = {
+ { adreno_is_a420, a420_vbif },
+};
+
+static void a4xx_start(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ adreno_vbif_start(device, a4xx_vbif_platforms,
+ ARRAY_SIZE(a4xx_vbif_platforms));
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ kgsl_regwrite(device, A4XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Tune the hystersis counters for SP and CP idle detection */
+ kgsl_regwrite(device, A4XX_RBBM_SP_HYST_CNT, 0x10);
+ kgsl_regwrite(device, A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ /*
+ * Enable the RBBM error reporting bits. This lets us get
+ * useful information on failure
+ */
+
+ kgsl_regwrite(device, A4XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting */
+ kgsl_regwrite(device, A4XX_RBBM_AHB_CTL1, 0xA6FFFFFF);
+
+ /*
+ * Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang
+ */
+
+ kgsl_regwrite(device, A4XX_RBBM_INTERFACE_HANG_INT_CTL,
+ (1 << 16) | 0xFFF);
+
+ /* Set the OCMEM base address for A4XX */
+ kgsl_regwrite(device, A4XX_RB_GMEM_BASE_ADDR,
+ (unsigned int)(adreno_dev->ocmem_base >> 14));
+}
+
+/* Register offset defines for A4XX, in order of enum adreno_regs */
+static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_DEBUG, A4XX_CP_DEBUG),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_WADDR, A4XX_CP_ME_RAM_WADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_DATA, A4XX_CP_ME_RAM_DATA),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_DATA, A4XX_CP_PFP_UCODE_DATA),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PFP_UCODE_ADDR, A4XX_CP_PFP_UCODE_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A4XX_CP_WFI_PEND_CTR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A4XX_CP_RB_BASE),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR, A4XX_CP_RB_RPTR_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A4XX_CP_RB_RPTR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A4XX_CP_RB_WPTR),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_PROTECT_CTRL, A4XX_CP_PROTECT_CTRL),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_CNTL, A4XX_CP_ME_CNTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_CNTL, A4XX_CP_RB_CNTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BASE, A4XX_CP_IB1_BASE),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB1_BUFSZ, A4XX_CP_IB1_BUFSZ),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BASE, A4XX_CP_IB2_BASE),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A4XX_CP_IB2_BUFSZ),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_TIMESTAMP, A4XX_CP_SCRATCH_REG0),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_ME_RAM_RADDR, A4XX_CP_ME_RAM_RADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_ADDR, A4XX_CP_SCRATCH_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_SCRATCH_UMSK, A4XX_CP_SCRATCH_UMASK),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A4XX_RBBM_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A4XX_RBBM_PERFCTR_CTL),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
+ A4XX_RBBM_PERFCTR_LOAD_CMD0),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
+ A4XX_RBBM_PERFCTR_LOAD_CMD1),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
+ A4XX_RBBM_PERFCTR_LOAD_CMD2),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
+ A4XX_RBBM_PERFCTR_PWR_1_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_MASK, A4XX_RBBM_INT_0_MASK),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_0_STATUS, A4XX_RBBM_INT_0_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_AHB_ERROR_STATUS,
+ A4XX_RBBM_AHB_ERROR_STATUS),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_AHB_CMD, A4XX_RBBM_AHB_CMD),
+ ADRENO_REG_DEFINE(ADRENO_REG_VPC_DEBUG_RAM_SEL,
+ A4XX_VPC_DEBUG_RAM_SEL),
+ ADRENO_REG_DEFINE(ADRENO_REG_VPC_DEBUG_RAM_READ,
+ A4XX_VPC_DEBUG_RAM_READ),
+ ADRENO_REG_DEFINE(ADRENO_REG_RBBM_INT_CLEAR_CMD,
+ A4XX_RBBM_INT_CLEAR_CMD),
+ ADRENO_REG_DEFINE(ADRENO_REG_VSC_PIPE_DATA_ADDRESS_0,
+ A4XX_VSC_PIPE_DATA_ADDRESS_0),
+ ADRENO_REG_DEFINE(ADRENO_REG_VSC_PIPE_DATA_LENGTH_7,
+ A4XX_VSC_PIPE_DATA_LENGTH_7),
+ ADRENO_REG_DEFINE(ADRENO_REG_VSC_SIZE_ADDRESS, A4XX_VSC_SIZE_ADDRESS),
+ ADRENO_REG_DEFINE(ADRENO_REG_VFD_CONTROL_0, A4XX_VFD_CONTROL_0),
+ ADRENO_REG_DEFINE(ADRENO_REG_VFD_FETCH_INSTR_0_0,
+ A4XX_VFD_FETCH_INSTR_0_0),
+ ADRENO_REG_DEFINE(ADRENO_REG_VFD_FETCH_INSTR_1_F,
+ A4XX_VFD_FETCH_INSTR_1_31),
+ ADRENO_REG_DEFINE(ADRENO_REG_SP_VS_PVT_MEM_ADDR_REG,
+ A4XX_SP_VS_PVT_MEM_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_SP_FS_PVT_MEM_ADDR_REG,
+ A4XX_SP_FS_PVT_MEM_ADDR),
+ ADRENO_REG_DEFINE(ADRENO_REG_SP_VS_OBJ_START_REG,
+ A4XX_SP_VS_OBJ_START),
+ ADRENO_REG_DEFINE(ADRENO_REG_SP_FS_OBJ_START_REG,
+ A4XX_SP_FS_OBJ_START),
+};
+
+const struct adreno_reg_offsets a4xx_reg_offsets = {
+ .offsets = a4xx_register_offsets,
+ .offset_0 = ADRENO_REG_REGISTER_MAX,
+};
+
+struct adreno_gpudev adreno_a4xx_gpudev = {
+ .reg_offsets = &a4xx_reg_offsets,
+
+ .ctxt_create = a4xx_drawctxt_create,
+ .ctxt_restore = a4xx_drawctxt_restore,
+ .rb_init = a3xx_rb_init,
+ .irq_control = a3xx_irq_control,
+ .irq_handler = a3xx_irq_handler,
+ .irq_pending = a3xx_irq_pending,
+ .busy_cycles = a3xx_busy_cycles,
+ .start = a4xx_start,
+};
diff --git a/drivers/gpu/msm2/adreno_coresight.c b/drivers/gpu/msm2/adreno_coresight.c
new file mode 100644
index 0000000..1b827ff
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_coresight.c
@@ -0,0 +1,219 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/of_coresight.h>
+#include <linux/coresight.h>
+#include <linux/memory_alloc.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+#include "adreno.h"
+
+struct coresight_attr {
+ struct device_attribute attr;
+ int regname;
+};
+
+#define CORESIGHT_CREATE_REG_ATTR(_attrname, _regname) \
+ struct coresight_attr coresight_attr_##_attrname = \
+ { __ATTR(_attrname, S_IRUGO | S_IWUSR, gfx_show_reg, gfx_store_reg),\
+ _regname}
+
+/**
+ * adreno_coresight_enable() - Generic function to enable coresight debugging
+ * @csdev: Pointer to coresight's device struct
+ *
+ * This is a generic function to enable coresight debug bus on adreno
+ * devices. This should be used in all cases of enabling
+ * coresight debug bus for adreno devices. This function in turn calls
+ * the adreno device specific function through gpudev hook.
+ * This function is registered as the coresight enable function
+ * with coresight driver. It should only be called through coresight driver
+ * as that would ensure that the necessary setup required to be done
+ * on coresight driver's part is also done.
+ */
+int adreno_coresight_enable(struct coresight_device *csdev)
+{
+ struct kgsl_device *device = dev_get_drvdata(csdev->dev.parent);
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Check if coresight compatible device, return error otherwise */
+ if (adreno_dev->gpudev->coresight_enable)
+ return adreno_dev->gpudev->coresight_enable(device);
+ else
+ return -ENODEV;
+}
+
+/**
+ * adreno_coresight_disable() - Generic function to disable coresight debugging
+ * @csdev: Pointer to coresight's device struct
+ *
+ * This is a generic function to disable coresight debug bus on adreno
+ * devices. This should be used in all cases of disabling
+ * coresight debug bus for adreno devices. This function in turn calls
+ * the adreno device specific function through the gpudev hook.
+ * This function is registered as the coresight disable function
+ * with coresight driver. It should only be called through coresight driver
+ * as that would ensure that the necessary setup required to be done on
+ * coresight driver's part is also done.
+ */
+void adreno_coresight_disable(struct coresight_device *csdev)
+{
+ struct kgsl_device *device = dev_get_drvdata(csdev->dev.parent);
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Check if coresight compatible device, bail otherwise */
+ if (adreno_dev->gpudev->coresight_disable)
+ return adreno_dev->gpudev->coresight_disable(device);
+}
+
+static const struct coresight_ops_source adreno_coresight_ops_source = {
+ .enable = adreno_coresight_enable,
+ .disable = adreno_coresight_disable,
+};
+
+static const struct coresight_ops adreno_coresight_cs_ops = {
+ .source_ops = &adreno_coresight_ops_source,
+};
+
+void adreno_coresight_remove(struct platform_device *pdev)
+{
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+ coresight_unregister(pdata->csdev);
+}
+
+static ssize_t coresight_read_reg(struct kgsl_device *device,
+ unsigned int offset, char *buf)
+{
+ unsigned int regval = 0;
+
+ mutex_lock(&device->mutex);
+ if (!kgsl_active_count_get(device)) {
+ kgsl_regread(device, offset, ®val);
+ kgsl_active_count_put(device);
+ }
+ mutex_unlock(&device->mutex);
+ return snprintf(buf, PAGE_SIZE, "0x%X", regval);
+}
+
+static inline unsigned int coresight_convert_reg(const char *buf)
+{
+ long regval = 0;
+ int rv = 0;
+
+ rv = kstrtoul(buf, 16, ®val);
+ if (!rv)
+ return (unsigned int)regval;
+ else
+ return rv;
+}
+
+static ssize_t gfx_show_reg(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct kgsl_device *device = dev_get_drvdata(dev->parent);
+ struct coresight_attr *csight_attr = container_of(attr,
+ struct coresight_attr, attr);
+ return coresight_read_reg(device, csight_attr->regname, buf);
+}
+
+static ssize_t gfx_store_reg(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct kgsl_device *device = dev_get_drvdata(dev->parent);
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct coresight_attr *csight_attr = container_of(attr,
+ struct coresight_attr, attr);
+ unsigned int regval = 0;
+
+ regval = coresight_convert_reg(buf);
+
+ if (adreno_dev->gpudev->coresight_config_debug_reg)
+ adreno_dev->gpudev->coresight_config_debug_reg(device,
+ csight_attr->regname, regval);
+ return size;
+}
+
+CORESIGHT_CREATE_REG_ATTR(config_debug_bus, DEBUG_BUS_CTL);
+CORESIGHT_CREATE_REG_ATTR(config_trace_stop_cnt, TRACE_STOP_CNT);
+CORESIGHT_CREATE_REG_ATTR(config_trace_start_cnt, TRACE_START_CNT);
+CORESIGHT_CREATE_REG_ATTR(config_trace_period_cnt, TRACE_PERIOD_CNT);
+CORESIGHT_CREATE_REG_ATTR(config_trace_cmd, TRACE_CMD);
+CORESIGHT_CREATE_REG_ATTR(config_trace_bus_ctl, TRACE_BUS_CTL);
+
+static struct attribute *gfx_attrs[] = {
+ &coresight_attr_config_debug_bus.attr.attr,
+ &coresight_attr_config_trace_start_cnt.attr.attr,
+ &coresight_attr_config_trace_stop_cnt.attr.attr,
+ &coresight_attr_config_trace_period_cnt.attr.attr,
+ &coresight_attr_config_trace_cmd.attr.attr,
+ &coresight_attr_config_trace_bus_ctl.attr.attr,
+ NULL,
+};
+
+static struct attribute_group gfx_attr_grp = {
+ .attrs = gfx_attrs,
+};
+
+static const struct attribute_group *gfx_attr_grps[] = {
+ &gfx_attr_grp,
+ NULL,
+};
+
+int adreno_coresight_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct coresight_desc *desc;
+
+ if (IS_ERR_OR_NULL(pdata->coresight_pdata))
+ return -ENODATA;
+
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+
+ desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS;
+ desc->ops = &adreno_coresight_cs_ops;
+ desc->pdata = pdata->coresight_pdata;
+ desc->dev = &pdev->dev;
+ desc->owner = THIS_MODULE;
+ desc->groups = gfx_attr_grps;
+ pdata->csdev = coresight_register(desc);
+ if (IS_ERR(pdata->csdev)) {
+ ret = PTR_ERR(pdata->csdev);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ devm_kfree(dev, desc);
+ return ret;
+}
+
diff --git a/drivers/gpu/msm2/adreno_cp_parser.c b/drivers/gpu/msm2/adreno_cp_parser.c
new file mode 100644
index 0000000..944c50f
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_cp_parser.c
@@ -0,0 +1,823 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_snapshot.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "a2xx_reg.h"
+#include "a3xx_reg.h"
+#include "adreno_cp_parser.h"
+
+#define MAX_IB_OBJS 1000
+
+/*
+ * This structure keeps track of type0 writes to VSC_PIPE_DATA_ADDRESS_x and
+ * VSC_PIPE_DATA_LENGTH_x. When a draw initator is called these registers
+ * point to buffers that we need to freeze for a snapshot
+ */
+
+struct ib_vsc_pipe {
+ unsigned int base;
+ unsigned int size;
+};
+
+/*
+ * This struct keeps track of type0 writes to VFD_FETCH_INSTR_0_X and
+ * VFD_FETCH_INSTR_1_X registers. When a draw initator is called the addresses
+ * and sizes in these registers point to VBOs that we need to freeze for a
+ * snapshot
+ */
+
+struct ib_vbo {
+ unsigned int base;
+ unsigned int stride;
+};
+
+/* List of variables used when parsing an IB */
+struct ib_parser_variables {
+ struct ib_vsc_pipe vsc_pipe[8];
+ /*
+ * This is the cached value of type0 writes to the VSC_SIZE_ADDRESS
+ * which contains the buffer address of the visiblity stream size
+ * buffer during a binning pass
+ */
+ unsigned int vsc_size_address;
+ struct ib_vbo vbo[16];
+ /* This is the cached value of type0 writes to VFD_INDEX_MAX. */
+ unsigned int vfd_index_max;
+ /*
+ * This is the cached value of type0 writes to VFD_CONTROL_0 which
+ * tells us how many VBOs are active when the draw initator is called
+ */
+ unsigned int vfd_control_0;
+ /* Cached value of type0 writes to SP_VS_PVT_MEM_ADDR and
+ * SP_FS_PVT_MEM_ADDR. This is a buffer that contains private
+ * stack information for the shader
+ */
+ unsigned int sp_vs_pvt_mem_addr;
+ unsigned int sp_fs_pvt_mem_addr;
+ /* Cached value of SP_VS_OBJ_START_REG and SP_FS_OBJ_START_REG. */
+ unsigned int sp_vs_obj_start_reg;
+ unsigned int sp_fs_obj_start_reg;
+};
+
+/*
+ * Used for locating shader objects. This array holds the unit size of shader
+ * objects based on type and block of shader. The type can be 0 or 1 hence there
+ * are 2 columns and block can be 0-7 hence 7 rows.
+ */
+static int load_state_unit_sizes[7][2] = {
+ { 2, 4 },
+ { 0, 1 },
+ { 2, 4 },
+ { 0, 1 },
+ { 8, 2 },
+ { 8, 2 },
+ { 8, 2 },
+};
+
+/*
+ * adreno_ib_merge_range() - Increases the address range tracked by an ib
+ * object
+ * @ib_obj: The ib object
+ * @gpuaddr: The start address which is to be merged
+ * @size: Size of the merging address
+ */
+static void adreno_ib_merge_range(struct adreno_ib_object *ib_obj,
+ unsigned int gpuaddr, unsigned int size)
+{
+ unsigned int addr_end1 = ib_obj->gpuaddr + ib_obj->size;
+ unsigned int addr_end2 = gpuaddr + size;
+ if (gpuaddr < ib_obj->gpuaddr)
+ ib_obj->gpuaddr = gpuaddr;
+ if (addr_end2 > addr_end1)
+ ib_obj->size = addr_end2 - ib_obj->gpuaddr;
+ else
+ ib_obj->size = addr_end1 - ib_obj->gpuaddr;
+}
+
+/*
+ * adreno_ib_check_overlap() - Checks if an address range overlap
+ * @gpuaddr: The start address range to check for overlap
+ * @size: Size of the address range
+ * @ib_obj_list: The list of address ranges to check for overlap
+ *
+ * Checks if an address range overlaps with a list of address ranges
+ * Returns the entry from list which overlaps else NULL
+ */
+static struct adreno_ib_object *adreno_ib_check_overlap(unsigned int gpuaddr,
+ unsigned int size, struct adreno_ib_object_list *ib_obj_list)
+{
+ struct adreno_ib_object *ib_obj;
+ int i;
+
+ for (i = 0; i < ib_obj_list->num_objs; i++) {
+ ib_obj = &(ib_obj_list->obj_list[i]);
+ if (kgsl_addr_range_overlap(ib_obj->gpuaddr, ib_obj->size,
+ gpuaddr, size))
+ /* regions overlap */
+ return ib_obj;
+ }
+ return NULL;
+}
+
+/*
+ * adreno_ib_add_range() - Add a gpuaddress range to list
+ * @device: Device on which the gpuaddress range is valid
+ * @ptbase: Pagtebale base on which the gpuaddress is mapped
+ * @size: Size of the address range in concern
+ * @type: The type of address range
+ * @ib_obj_list: List of the address ranges in which the given range is to be
+ * added
+ *
+ * Add a gpuaddress range as an ib object to a given list after checking if it
+ * overlaps with another entry on the list. If it conflicts then change the
+ * existing entry to incorporate this range
+ *
+ * Returns 0 on success else error code
+ */
+static int adreno_ib_add_range(struct kgsl_device *device,
+ phys_addr_t ptbase,
+ unsigned int gpuaddr,
+ unsigned int size, int type,
+ struct adreno_ib_object_list *ib_obj_list)
+{
+ struct adreno_ib_object *ib_obj;
+ struct kgsl_mem_entry *entry;
+
+ entry = kgsl_get_mem_entry(device, ptbase, gpuaddr, size);
+ if (!entry)
+ /*
+ * Do not fail if gpuaddr not found, we can continue
+ * to search for other objects even if few objects are
+ * not found
+ */
+ return 0;
+
+ if (!size) {
+ size = entry->memdesc.size;
+ gpuaddr = entry->memdesc.gpuaddr;
+ }
+
+ ib_obj = adreno_ib_check_overlap(gpuaddr, size, ib_obj_list);
+ if (ib_obj) {
+ adreno_ib_merge_range(ib_obj, gpuaddr, size);
+ } else {
+ if (MAX_IB_OBJS == ib_obj_list->num_objs) {
+ KGSL_DRV_ERR(device,
+ "Max objects reached %d\n", ib_obj_list->num_objs);
+ return -ENOMEM;
+ }
+ adreno_ib_init_ib_obj(gpuaddr, size, type, entry,
+ &(ib_obj_list->obj_list[ib_obj_list->num_objs]));
+ ib_obj_list->num_objs++;
+ }
+ return 0;
+}
+
+/*
+ * ib_save_mip_addresses() - Find mip addresses
+ * @device: Device on which the IB is running
+ * @pkt: Pointer to the packet in IB
+ * @ptbase: The pagetable on which IB is mapped
+ * @ib_obj_list: List in which any objects found are added
+ *
+ * Returns 0 on success else error code
+ */
+static int ib_save_mip_addresses(struct kgsl_device *device, unsigned int *pkt,
+ phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list)
+{
+ int ret = 0;
+ int num_levels = (pkt[1] >> 22) & 0x03FF;
+ int i;
+ unsigned int *hostptr;
+ struct kgsl_mem_entry *ent;
+ unsigned int block, type;
+ int unitsize = 0;
+
+ block = (pkt[1] >> 19) & 0x07;
+ type = pkt[2] & 0x03;
+
+ if (type == 0)
+ unitsize = load_state_unit_sizes[block][0];
+ else
+ unitsize = load_state_unit_sizes[block][1];
+
+ if (3 == block && 1 == type) {
+ ent = kgsl_get_mem_entry(device, ptbase, pkt[2] & 0xFFFFFFFC,
+ (num_levels * unitsize) << 2);
+ if (!ent)
+ return -EINVAL;
+
+ hostptr = (unsigned int *)kgsl_gpuaddr_to_vaddr(&ent->memdesc,
+ pkt[2] & 0xFFFFFFFC);
+ if (!hostptr) {
+ kgsl_mem_entry_put(ent);
+ return -EINVAL;
+ }
+ for (i = 0; i < num_levels; i++) {
+ ret = adreno_ib_add_range(device, ptbase, hostptr[i],
+ 0, SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+ if (ret < 0)
+ break;
+ }
+ kgsl_memdesc_unmap(&ent->memdesc);
+ kgsl_mem_entry_put(ent);
+ }
+ return ret;
+}
+
+/*
+ * ib_parse_load_state() - Parse load state packet
+ * @device: Device on which the IB is running
+ * @pkt: Pointer to the packet in IB
+ * @ptbase: The pagetable on which IB is mapped
+ * @ib_obj_list: List in which any objects found are added
+ * @ib_parse_vars: VAriable list that store temporary addressses
+ *
+ * Parse load state packet found in an IB and add any memory object found to
+ * a list
+ * Returns 0 on success else error code
+ */
+static int ib_parse_load_state(struct kgsl_device *device, unsigned int *pkt,
+ phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list,
+ struct ib_parser_variables *ib_parse_vars)
+{
+ unsigned int block, source, type;
+ int ret = 0;
+ int unitsize = 0;
+
+ /*
+ * The object here is to find indirect shaders i.e - shaders loaded from
+ * GPU memory instead of directly in the command. These should be added
+ * to the list of memory objects to dump. So look at the load state
+ * if the block is indirect (source = 4). If so then add the memory
+ * address to the list. The size of the object differs depending on the
+ * type per the load_state_unit_sizes array above.
+ */
+
+ if (type3_pkt_size(pkt[0]) < 2)
+ return 0;
+
+ /*
+ * pkt[1] 18:16 - source
+ * pkt[1] 21:19 - state block
+ * pkt[1] 31:22 - size in units
+ * pkt[2] 0:1 - type
+ * pkt[2] 31:2 - GPU memory address
+ */
+
+ block = (pkt[1] >> 19) & 0x07;
+ source = (pkt[1] >> 16) & 0x07;
+ type = pkt[2] & 0x03;
+
+ if (source == 4) {
+ if (type == 0)
+ unitsize = load_state_unit_sizes[block][0];
+ else
+ unitsize = load_state_unit_sizes[block][1];
+
+ /* Freeze the GPU buffer containing the shader */
+
+ ret = adreno_ib_add_range(device, ptbase, pkt[2] & 0xFFFFFFFC,
+ (((pkt[1] >> 22) & 0x03FF) * unitsize) << 2,
+ SNAPSHOT_GPU_OBJECT_SHADER,
+ ib_obj_list);
+ if (ret < 0)
+ return ret;
+ }
+ /* get the mip addresses */
+ ret = ib_save_mip_addresses(device, pkt, ptbase, ib_obj_list);
+ return ret;
+}
+
+/*
+ * This opcode sets the base addresses for the visibilty stream buffer and the
+ * visiblity stream size buffer.
+ */
+
+static int ib_parse_set_bin_data(struct kgsl_device *device, unsigned int *pkt,
+ phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list,
+ struct ib_parser_variables *ib_parse_vars)
+{
+ int ret = 0;
+
+ if (type3_pkt_size(pkt[0]) < 2)
+ return 0;
+
+ /* Visiblity stream buffer */
+ ret = adreno_ib_add_range(device, ptbase, pkt[1], 0,
+ SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+ if (ret < 0)
+ return ret;
+
+ /* visiblity stream size buffer (fixed size 8 dwords) */
+ ret = adreno_ib_add_range(device, ptbase, pkt[2], 32,
+ SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+
+ return ret;
+}
+
+/*
+ * This opcode writes to GPU memory - if the buffer is written to, there is a
+ * good chance that it would be valuable to capture in the snapshot, so mark all
+ * buffers that are written to as frozen
+ */
+
+static int ib_parse_mem_write(struct kgsl_device *device, unsigned int *pkt,
+ phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list,
+ struct ib_parser_variables *ib_parse_vars)
+{
+ int ret = 0;
+
+ if (type3_pkt_size(pkt[0]) < 1)
+ return 0;
+
+ /*
+ * The address is where the data in the rest of this packet is written
+ * to, but since that might be an offset into the larger buffer we need
+ * to get the whole thing. Pass a size of 0 tocapture the entire buffer.
+ */
+
+ ret = adreno_ib_add_range(device, ptbase, pkt[1] & 0xFFFFFFFC, 0,
+ SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+/*
+ * ib_add_type0_entries() - Add memory objects to list
+ * @device: The device on which the IB will execute
+ * @ptbase: The ptbase on which IB is mapped
+ * @ib_obj_list: The list of gpu objects
+ * @ib_parse_vars: addresses ranges found in type0 packets
+ *
+ * Add memory objects to given list that are found in type0 packets
+ * Returns 0 on success else 0
+ */
+static int ib_add_type0_entries(struct kgsl_device *device,
+ phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list,
+ struct ib_parser_variables *ib_parse_vars)
+{
+ int ret = 0;
+ int i;
+ /* First up the visiblity stream buffer */
+
+ for (i = 0; i < ARRAY_SIZE(ib_parse_vars->vsc_pipe); i++) {
+ if (ib_parse_vars->vsc_pipe[i].base != 0 &&
+ ib_parse_vars->vsc_pipe[i].size != 0) {
+ ret = adreno_ib_add_range(device, ptbase,
+ ib_parse_vars->vsc_pipe[i].base,
+ ib_parse_vars->vsc_pipe[i].size,
+ SNAPSHOT_GPU_OBJECT_GENERIC,
+ ib_obj_list);
+ if (ret < 0)
+ return ret;
+ ib_parse_vars->vsc_pipe[i].size = 0;
+ ib_parse_vars->vsc_pipe[i].base = 0;
+ }
+ }
+
+ /* Next the visibility stream size buffer */
+
+ if (ib_parse_vars->vsc_size_address) {
+ ret = adreno_ib_add_range(device, ptbase,
+ ib_parse_vars->vsc_size_address, 32,
+ SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+ if (ret < 0)
+ return ret;
+ ib_parse_vars->vsc_size_address = 0;
+ }
+
+ /* Next private shader buffer memory */
+ if (ib_parse_vars->sp_vs_pvt_mem_addr) {
+ ret = adreno_ib_add_range(device, ptbase,
+ ib_parse_vars->sp_vs_pvt_mem_addr, 8192,
+ SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+ if (ret < 0)
+ return ret;
+
+ ib_parse_vars->sp_vs_pvt_mem_addr = 0;
+ }
+
+ if (ib_parse_vars->sp_fs_pvt_mem_addr) {
+ ret = adreno_ib_add_range(device, ptbase,
+ ib_parse_vars->sp_fs_pvt_mem_addr, 8192,
+ SNAPSHOT_GPU_OBJECT_GENERIC,
+ ib_obj_list);
+ if (ret < 0)
+ return ret;
+
+ ib_parse_vars->sp_fs_pvt_mem_addr = 0;
+ }
+
+ if (ib_parse_vars->sp_vs_obj_start_reg) {
+ ret = adreno_ib_add_range(device, ptbase,
+ ib_parse_vars->sp_vs_obj_start_reg & 0xFFFFFFE0,
+ 0, SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+ if (ret < 0)
+ return -ret;
+ ib_parse_vars->sp_vs_obj_start_reg = 0;
+ }
+
+ if (ib_parse_vars->sp_fs_obj_start_reg) {
+ ret = adreno_ib_add_range(device, ptbase,
+ ib_parse_vars->sp_fs_obj_start_reg & 0xFFFFFFE0,
+ 0, SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+ if (ret < 0)
+ return ret;
+ ib_parse_vars->sp_fs_obj_start_reg = 0;
+ }
+
+ /* Finally: VBOs */
+
+ /* The number of active VBOs is stored in VFD_CONTROL_O[31:27] */
+ for (i = 0; i < (ib_parse_vars->vfd_control_0) >> 27; i++) {
+ int size;
+
+ /*
+ * The size of the VBO is the stride stored in
+ * VFD_FETCH_INSTR_0_X.BUFSTRIDE * VFD_INDEX_MAX. The base
+ * is stored in VFD_FETCH_INSTR_1_X
+ */
+
+ if (ib_parse_vars->vbo[i].base != 0) {
+ size = ib_parse_vars->vbo[i].stride *
+ ib_parse_vars->vfd_index_max;
+
+ ret = adreno_ib_add_range(device, ptbase,
+ ib_parse_vars->vbo[i].base,
+ 0, SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+ if (ret < 0)
+ return ret;
+ }
+
+ ib_parse_vars->vbo[i].base = 0;
+ ib_parse_vars->vbo[i].stride = 0;
+ }
+
+ ib_parse_vars->vfd_control_0 = 0;
+ ib_parse_vars->vfd_index_max = 0;
+
+ return ret;
+}
+
+/*
+ * The DRAW_INDX opcode sends a draw initator which starts a draw operation in
+ * the GPU, so this is the point where all the registers and buffers become
+ * "valid". The DRAW_INDX may also have an index buffer pointer that should be
+ * frozen with the others
+ */
+
+static int ib_parse_draw_indx(struct kgsl_device *device, unsigned int *pkt,
+ phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list,
+ struct ib_parser_variables *ib_parse_vars)
+{
+ int ret = 0;
+
+ if (type3_pkt_size(pkt[0]) < 3)
+ return 0;
+
+ /* DRAW_IDX may have a index buffer pointer */
+
+ if (type3_pkt_size(pkt[0]) > 3) {
+ ret = adreno_ib_add_range(device, ptbase, pkt[4], pkt[5],
+ SNAPSHOT_GPU_OBJECT_GENERIC, ib_obj_list);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * All of the type0 writes are valid at a draw initiator, so freeze
+ * the various buffers that we are tracking
+ */
+ ret = ib_add_type0_entries(device, ptbase, ib_obj_list,
+ ib_parse_vars);
+ return ret;
+}
+
+/*
+ * Parse all the type3 opcode packets that may contain important information,
+ * such as additional GPU buffers to grab or a draw initator
+ */
+
+static int ib_parse_type3(struct kgsl_device *device, unsigned int *ptr,
+ phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list,
+ struct ib_parser_variables *ib_parse_vars)
+{
+ int opcode = cp_type3_opcode(*ptr);
+
+ if (opcode == CP_LOAD_STATE)
+ return ib_parse_load_state(device, ptr, ptbase, ib_obj_list,
+ ib_parse_vars);
+ else if (opcode == CP_SET_BIN_DATA)
+ return ib_parse_set_bin_data(device, ptr, ptbase, ib_obj_list,
+ ib_parse_vars);
+ else if (opcode == CP_MEM_WRITE)
+ return ib_parse_mem_write(device, ptr, ptbase, ib_obj_list,
+ ib_parse_vars);
+ else if (opcode == CP_DRAW_INDX)
+ return ib_parse_draw_indx(device, ptr, ptbase, ib_obj_list,
+ ib_parse_vars);
+
+ return 0;
+}
+
+/*
+ * Parse type0 packets found in the stream. Some of the registers that are
+ * written are clues for GPU buffers that we need to freeze. Register writes
+ * are considred valid when a draw initator is called, so just cache the values
+ * here and freeze them when a CP_DRAW_INDX is seen. This protects against
+ * needlessly caching buffers that won't be used during a draw call
+ */
+
+static void ib_parse_type0(struct kgsl_device *device, unsigned int *ptr,
+ phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list,
+ struct ib_parser_variables *ib_parse_vars)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int size = type0_pkt_size(*ptr);
+ int offset = type0_pkt_offset(*ptr);
+ int i;
+
+ for (i = 0; i < size; i++, offset++) {
+
+ /* Visiblity stream buffer */
+
+ if (offset >= adreno_getreg(adreno_dev,
+ ADRENO_REG_VSC_PIPE_DATA_ADDRESS_0) &&
+ offset <= adreno_getreg(adreno_dev,
+ ADRENO_REG_VSC_PIPE_DATA_LENGTH_7)) {
+ int index = offset - adreno_getreg(adreno_dev,
+ ADRENO_REG_VSC_PIPE_DATA_ADDRESS_0);
+
+ /* Each bank of address and length registers are
+ * interleaved with an empty register:
+ *
+ * address 0
+ * length 0
+ * empty
+ * address 1
+ * length 1
+ * empty
+ * ...
+ */
+
+ if ((index % 3) == 0)
+ ib_parse_vars->vsc_pipe[index / 3].base =
+ ptr[i + 1];
+ else if ((index % 3) == 1)
+ ib_parse_vars->vsc_pipe[index / 3].size =
+ ptr[i + 1];
+ } else if ((offset >= adreno_getreg(adreno_dev,
+ ADRENO_REG_VFD_FETCH_INSTR_0_0)) &&
+ (offset <= adreno_getreg(adreno_dev,
+ ADRENO_REG_VFD_FETCH_INSTR_1_F))) {
+ int index = offset -
+ adreno_getreg(adreno_dev,
+ ADRENO_REG_VFD_FETCH_INSTR_0_0);
+
+ /*
+ * FETCH_INSTR_0_X and FETCH_INSTR_1_X banks are
+ * interleaved as above but without the empty register
+ * in between
+ */
+
+ if ((index % 2) == 0)
+ ib_parse_vars->vbo[index >> 1].stride =
+ (ptr[i + 1] >> 7) & 0x1FF;
+ else
+ ib_parse_vars->vbo[index >> 1].base =
+ ptr[i + 1];
+ } else {
+ /*
+ * Cache various support registers for calculating
+ * buffer sizes
+ */
+
+ if (offset ==
+ adreno_getreg(adreno_dev,
+ ADRENO_REG_VFD_CONTROL_0))
+ ib_parse_vars->vfd_control_0 = ptr[i + 1];
+ else if (offset ==
+ adreno_getreg(adreno_dev,
+ ADRENO_REG_VFD_INDEX_MAX))
+ ib_parse_vars->vfd_index_max = ptr[i + 1];
+ else if (offset ==
+ adreno_getreg(adreno_dev,
+ ADRENO_REG_VSC_SIZE_ADDRESS))
+ ib_parse_vars->vsc_size_address = ptr[i + 1];
+ else if (offset == adreno_getreg(adreno_dev,
+ ADRENO_REG_SP_VS_PVT_MEM_ADDR_REG))
+ ib_parse_vars->sp_vs_pvt_mem_addr = ptr[i + 1];
+ else if (offset == adreno_getreg(adreno_dev,
+ ADRENO_REG_SP_FS_PVT_MEM_ADDR_REG))
+ ib_parse_vars->sp_fs_pvt_mem_addr = ptr[i + 1];
+ else if (offset == adreno_getreg(adreno_dev,
+ ADRENO_REG_SP_VS_OBJ_START_REG))
+ ib_parse_vars->sp_vs_obj_start_reg = ptr[i + 1];
+ else if (offset == adreno_getreg(adreno_dev,
+ ADRENO_REG_SP_FS_OBJ_START_REG))
+ ib_parse_vars->sp_fs_obj_start_reg = ptr[i + 1];
+ }
+ }
+ ib_add_type0_entries(device, ptbase, ib_obj_list,
+ ib_parse_vars);
+}
+
+/*
+ * adreno_ib_find_objs() - Find all IB objects in a given IB
+ * @device: The device pointer on which the IB executes
+ * @ptbase: The pagetable base in which in the IBis mapped and so are the
+ * objects in it
+ * @gpuaddr: The gpu address of the IB
+ * @dwords: Size of ib in dwords
+ * @ib_obj_list: The list in which the IB and the objects in it are added.
+ *
+ * Finds all IB objects in a given IB and puts then in a list. Can be called
+ * recursively for the IB2's in the IB1's
+ * Returns 0 on success else error code
+ */
+static int adreno_ib_find_objs(struct kgsl_device *device,
+ phys_addr_t ptbase,
+ unsigned int gpuaddr, unsigned int dwords,
+ struct adreno_ib_object_list *ib_obj_list)
+{
+ int ret = 0;
+ int rem = dwords;
+ int i;
+ struct ib_parser_variables ib_parse_vars;
+ unsigned int *src;
+ struct adreno_ib_object *ib_obj;
+ struct kgsl_mem_entry *entry;
+
+ /* check that this IB is not already on list */
+ for (i = 0; i < ib_obj_list->num_objs; i++) {
+ ib_obj = &(ib_obj_list->obj_list[i]);
+ if ((ib_obj->gpuaddr <= gpuaddr) &&
+ ((ib_obj->gpuaddr + ib_obj->size) >=
+ (gpuaddr + (dwords << 2))))
+ return 0;
+ }
+
+ entry = kgsl_get_mem_entry(device, ptbase, gpuaddr, (dwords << 2));
+ if (!entry)
+ return -EINVAL;
+
+ src = (unsigned int *)kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr);
+ if (!src) {
+ kgsl_mem_entry_put(entry);
+ return -EINVAL;
+ }
+
+ memset(&ib_parse_vars, 0, sizeof(struct ib_parser_variables));
+
+ ret = adreno_ib_add_range(device, ptbase, gpuaddr, dwords << 2,
+ SNAPSHOT_GPU_OBJECT_IB, ib_obj_list);
+ if (ret)
+ goto done;
+
+ for (i = 0; rem > 0; rem--, i++) {
+ int pktsize;
+
+ /*
+ * If the packet isn't a type 1 or a type 3, then don't bother
+ * parsing it - it is likely corrupted
+ */
+ if (!pkt_is_type0(src[i]) && !pkt_is_type3(src[i]))
+ break;
+
+ pktsize = type3_pkt_size(src[i]);
+
+ if (!pktsize || (pktsize + 1) > rem)
+ break;
+
+ if (pkt_is_type3(src[i])) {
+ if (adreno_cmd_is_ib(src[i])) {
+ unsigned int gpuaddrib2 = src[i + 1];
+ unsigned int size = src[i + 2];
+
+ ret = adreno_ib_find_objs(
+ device, ptbase,
+ gpuaddrib2, size,
+ ib_obj_list);
+ if (ret < 0)
+ goto done;
+ } else {
+ ret = ib_parse_type3(device, &src[i], ptbase,
+ ib_obj_list,
+ &ib_parse_vars);
+ /*
+ * If the parse function failed (probably
+ * because of a bad decode) then bail out and
+ * just capture the binary IB data
+ */
+
+ if (ret < 0)
+ goto done;
+ }
+ } else if (pkt_is_type0(src[i])) {
+ ib_parse_type0(device, &src[i], ptbase, ib_obj_list,
+ &ib_parse_vars);
+ }
+
+ i += pktsize;
+ rem -= pktsize;
+ }
+ /*
+ * If any type objects got missed because we did not come across draw
+ * indx packets then catch them here. This works better for the replay
+ * tool and also if the draw indx packet is in an IB2 and these setups
+ * are in IB1 then these objects are definitely valid and should be
+ * dumped
+ */
+ ret = ib_add_type0_entries(device, ptbase, ib_obj_list,
+ &ib_parse_vars);
+done:
+ kgsl_memdesc_unmap(&entry->memdesc);
+ kgsl_mem_entry_put(entry);
+ return ret;
+}
+
+
+/*
+ * adreno_ib_create_object_list() - Find all the memory objects in IB
+ * @device: The device pointer on which the IB executes
+ * @ptbase: The pagetable base in which in the IBis mapped and so are the
+ * objects in it
+ * @gpuaddr: The gpu address of the IB
+ * @dwords: Size of ib in dwords
+ * @ib_obj_list: The list in which the IB and the objects in it are added.
+ *
+ * Find all the memory objects that an IB needs for execution and place
+ * them in a list including the IB.
+ * Returns the ib object list else error code in pointer.
+ */
+int adreno_ib_create_object_list(struct kgsl_device *device,
+ phys_addr_t ptbase,
+ unsigned int gpuaddr, unsigned int dwords,
+ struct adreno_ib_object_list **out_ib_obj_list)
+{
+ int ret = 0;
+ struct adreno_ib_object_list *ib_obj_list;
+
+ if (!out_ib_obj_list)
+ return -EINVAL;
+
+ ib_obj_list = kzalloc(sizeof(*ib_obj_list), GFP_KERNEL);
+ if (!ib_obj_list)
+ return -ENOMEM;
+
+ ib_obj_list->obj_list = vmalloc(MAX_IB_OBJS *
+ sizeof(struct adreno_ib_object));
+
+ if (!ib_obj_list->obj_list) {
+ kfree(ib_obj_list);
+ return -ENOMEM;
+ }
+
+ ret = adreno_ib_find_objs(device, ptbase, gpuaddr, dwords,
+ ib_obj_list);
+
+ if (ret)
+ adreno_ib_destroy_obj_list(ib_obj_list);
+ else
+ *out_ib_obj_list = ib_obj_list;
+
+ return ret;
+}
+
+/*
+ * adreno_ib_destroy_obj_list() - Destroy an ib object list
+ * @ib_obj_list: List to destroy
+ *
+ * Free up all resources used by an ib_obj_list
+ */
+void adreno_ib_destroy_obj_list(struct adreno_ib_object_list *ib_obj_list)
+{
+ int i;
+
+ if (!ib_obj_list)
+ return;
+
+ for (i = 0; i < ib_obj_list->num_objs; i++) {
+ if (ib_obj_list->obj_list[i].entry)
+ kgsl_mem_entry_put(ib_obj_list->obj_list[i].entry);
+ }
+ vfree(ib_obj_list->obj_list);
+ kfree(ib_obj_list);
+}
diff --git a/drivers/gpu/msm2/adreno_cp_parser.h b/drivers/gpu/msm2/adreno_cp_parser.h
new file mode 100644
index 0000000..fb2886c
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_cp_parser.h
@@ -0,0 +1,68 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ADRENO_IB_PARSER__
+#define __ADRENO_IB_PARSER__
+
+/*
+ * struct adreno_ib_object - Structure containing information about an
+ * address range found in an IB
+ * @gpuaddr: The starting gpuaddress of the range
+ * @size: Size of the range
+ * @snapshot_obj_type - Type of range used in snapshot
+ * @entry: The memory entry in which this range is found
+ */
+struct adreno_ib_object {
+ unsigned int gpuaddr;
+ unsigned int size;
+ int snapshot_obj_type;
+ struct kgsl_mem_entry *entry;
+};
+
+/*
+ * struct adreno_ib_object_list - List of address ranges found in IB
+ * @obj_list: The address range list
+ * @num_objs: Number of objects in list
+ */
+struct adreno_ib_object_list {
+ struct adreno_ib_object *obj_list;
+ int num_objs;
+};
+
+/*
+ * adreno_ib_init_ib_obj() - Create an ib object structure and initialize it
+ * with gpuaddress and size
+ * @gpuaddr: gpuaddr with which to initialize the object with
+ * @size: Size in bytes with which the object is initialized
+ * @ib_type: The IB type used by snapshot
+ *
+ * Returns the object pointer on success else error code in the pointer
+ */
+static inline void adreno_ib_init_ib_obj(unsigned int gpuaddr,
+ unsigned int size, int obj_type,
+ struct kgsl_mem_entry *entry,
+ struct adreno_ib_object *ib_obj)
+{
+ ib_obj->gpuaddr = gpuaddr;
+ ib_obj->size = size;
+ ib_obj->snapshot_obj_type = obj_type;
+ ib_obj->entry = entry;
+}
+
+int adreno_ib_create_object_list(
+ struct kgsl_device *device, phys_addr_t ptbase,
+ unsigned int gpuaddr, unsigned int dwords,
+ struct adreno_ib_object_list **out_ib_obj_list);
+
+void adreno_ib_destroy_obj_list(struct adreno_ib_object_list *ib_obj_list);
+
+#endif
diff --git a/drivers/gpu/msm2/adreno_debugfs.c b/drivers/gpu/msm2/adreno_debugfs.c
new file mode 100644
index 0000000..12804a3
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_debugfs.c
@@ -0,0 +1,96 @@
+/* Copyright (c) 2002,2008-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_cffdump.h"
+
+#include "a2xx_reg.h"
+
+unsigned int kgsl_cff_dump_enable;
+
+DEFINE_SIMPLE_ATTRIBUTE(kgsl_cff_dump_enable_fops, kgsl_cff_dump_enable_get,
+ kgsl_cff_dump_enable_set, "%llu\n");
+
+static int _active_count_get(void *data, u64 *val)
+{
+ struct kgsl_device *device = data;
+ unsigned int i = atomic_read(&device->active_cnt);
+
+ *val = (u64) i;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(_active_count_fops, _active_count_get, NULL, "%llu\n");
+
+typedef void (*reg_read_init_t)(struct kgsl_device *device);
+typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
+ unsigned int *vals, int linec);
+
+void adreno_debugfs_init(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (!device->d_debugfs || IS_ERR(device->d_debugfs))
+ return;
+
+ debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
+ &kgsl_cff_dump_enable_fops);
+ debugfs_create_u32("wait_timeout", 0644, device->d_debugfs,
+ &adreno_dev->wait_timeout);
+ debugfs_create_u32("ib_check", 0644, device->d_debugfs,
+ &adreno_dev->ib_check_level);
+ /* By Default enable fast hang detection */
+ adreno_dev->fast_hang_detect = 1;
+ debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs,
+ &adreno_dev->fast_hang_detect);
+ /*
+ * FT policy can be set to any of the options below.
+ * KGSL_FT_OFF -> BIT(0) Set to turn off FT
+ * KGSL_FT_REPLAY -> BIT(1) Set to enable replay
+ * KGSL_FT_SKIPIB -> BIT(2) Set to skip IB
+ * KGSL_FT_SKIPFRAME -> BIT(3) Set to skip frame
+ * KGSL_FT_DISABLE -> BIT(4) Set to disable FT for faulting context
+ * by default set FT policy to KGSL_FT_DEFAULT_POLICY
+ */
+ adreno_dev->ft_policy = KGSL_FT_DEFAULT_POLICY;
+ debugfs_create_u32("ft_policy", 0644, device->d_debugfs,
+ &adreno_dev->ft_policy);
+ /* By default enable long IB detection */
+ adreno_dev->long_ib_detect = 1;
+ debugfs_create_u32("long_ib_detect", 0644, device->d_debugfs,
+ &adreno_dev->long_ib_detect);
+
+ /*
+ * FT pagefault policy can be set to any of the options below.
+ * KGSL_FT_PAGEFAULT_INT_ENABLE -> BIT(0) set to enable pagefault INT
+ * KGSL_FT_PAGEFAULT_GPUHALT_ENABLE -> BIT(1) Set to enable GPU HALT on
+ * pagefaults. This stalls the GPU on a pagefault on IOMMU v1 HW.
+ * KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE -> BIT(2) Set to log only one
+ * pagefault per page.
+ * KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT -> BIT(3) Set to log only one
+ * pagefault per INT.
+ */
+ adreno_dev->ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY;
+ debugfs_create_u32("ft_pagefault_policy", 0644, device->d_debugfs,
+ &adreno_dev->ft_pf_policy);
+
+ debugfs_create_file("active_cnt", 0444, device->d_debugfs, device,
+ &_active_count_fops);
+}
diff --git a/drivers/gpu/msm2/adreno_dispatch.c b/drivers/gpu/msm2/adreno_dispatch.c
new file mode 100644
index 0000000..3bcc7a3
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_dispatch.c
@@ -0,0 +1,1697 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "adreno_ringbuffer.h"
+#include "adreno_trace.h"
+
+#define ADRENO_DISPATCHER_ACTIVE 0
+#define ADRENO_DISPATCHER_PAUSE 1
+
+#define ADRENO_DISPATCHER_SOFT_FAULT 1
+#define ADRENO_DISPATCHER_HARD_FAULT 2
+#define ADRENO_DISPATCHER_TIMEOUT_FAULT 3
+
+#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+
+/* Number of commands that can be queued in a context before it sleeps */
+static unsigned int _context_cmdqueue_size = 50;
+
+/* Number of milliseconds to wait for the context queue to clear */
+static unsigned int _context_queue_wait = 10000;
+
+/* Number of command batches sent at a time from a single context */
+static unsigned int _context_cmdbatch_burst = 5;
+
+/* Number of command batches inflight in the ringbuffer at any time */
+static unsigned int _dispatcher_inflight = 15;
+
+/* Command batch timeout (in milliseconds) */
+static unsigned int _cmdbatch_timeout = 2000;
+
+/* Interval for reading and comparing fault detection registers */
+static unsigned int _fault_timer_interval = 50;
+
+/* Local array for the current set of fault detect registers */
+static unsigned int fault_detect_regs[FT_DETECT_REGS_COUNT];
+
+/* The last retired global timestamp read during fault detect */
+static unsigned int fault_detect_ts;
+
+/**
+ * fault_detect_read() - Read the set of fault detect registers
+ * @device: Pointer to the KGSL device struct
+ *
+ * Read the set of fault detect registers and store them in the local array.
+ * This is for the initial values that are compared later with
+ * fault_detect_read_compare
+ */
+static void fault_detect_read(struct kgsl_device *device)
+{
+ int i;
+
+ fault_detect_ts = kgsl_readtimestamp(device, NULL,
+ KGSL_TIMESTAMP_RETIRED);
+
+ for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
+ if (ft_detect_regs[i] == 0)
+ continue;
+ kgsl_regread(device, ft_detect_regs[i],
+ &fault_detect_regs[i]);
+ }
+}
+
+/*
+ * Check to see if the device is idle and that the global timestamp is up to
+ * date
+ */
+static inline bool _isidle(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int ts;
+
+ ts = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+
+ if (adreno_isidle(device) == true &&
+ (ts >= adreno_dev->ringbuffer.global_ts))
+ return true;
+
+ return false;
+}
+
+/**
+ * fault_detect_read_compare() - Read the fault detect registers and compare
+ * them to the current value
+ * @device: Pointer to the KGSL device struct
+ *
+ * Read the set of fault detect registers and compare them to the current set
+ * of registers. Return 1 if any of the register values changed
+ */
+static int fault_detect_read_compare(struct kgsl_device *device)
+{
+ int i, ret = 0;
+ unsigned int ts;
+
+ /* Check to see if the device is idle - if so report no hang */
+ if (_isidle(device) == true)
+ ret = 1;
+
+ for (i = 0; i < FT_DETECT_REGS_COUNT; i++) {
+ unsigned int val;
+
+ if (ft_detect_regs[i] == 0)
+ continue;
+ kgsl_regread(device, ft_detect_regs[i], &val);
+ if (val != fault_detect_regs[i])
+ ret = 1;
+ fault_detect_regs[i] = val;
+ }
+
+ ts = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+ if (ts != fault_detect_ts)
+ ret = 1;
+
+ fault_detect_ts = ts;
+
+ return ret;
+}
+
+/**
+ * adreno_context_get_cmdbatch() - Get a new command from a context queue
+ * @drawctxt: Pointer to the adreno draw context
+ *
+ * Dequeue a new command batch from the context list
+ */
+static inline struct kgsl_cmdbatch *adreno_context_get_cmdbatch(
+ struct adreno_context *drawctxt)
+{
+ struct kgsl_cmdbatch *cmdbatch = NULL;
+
+ mutex_lock(&drawctxt->mutex);
+ if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+ cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+ /*
+ * Don't dequeue a cmdbatch that is still waiting for other
+ * events
+ */
+ if (kgsl_cmdbatch_sync_pending(cmdbatch)) {
+ cmdbatch = ERR_PTR(-EAGAIN);
+ goto done;
+ }
+
+ drawctxt->cmdqueue_head =
+ CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
+ ADRENO_CONTEXT_CMDQUEUE_SIZE);
+ drawctxt->queued--;
+ }
+
+done:
+ mutex_unlock(&drawctxt->mutex);
+
+ return cmdbatch;
+}
+
+/**
+ * adreno_context_requeue_cmdbatch() - Put a command back on the context queue
+ * @drawctxt: Pointer to the adreno draw context
+ * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ *
+ * Failure to submit a command to the ringbuffer isn't the fault of the command
+ * being submitted so if a failure happens, push it back on the head of the the
+ * context queue to be reconsidered again unless the context got detached.
+ */
+static inline int adreno_dispatcher_requeue_cmdbatch(
+ struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+{
+ unsigned int prev;
+ mutex_lock(&drawctxt->mutex);
+
+ if (kgsl_context_detached(&drawctxt->base) ||
+ drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ mutex_unlock(&drawctxt->mutex);
+ /* get rid of this cmdbatch since the context is bad */
+ kgsl_cmdbatch_destroy(cmdbatch);
+ return -EINVAL;
+ }
+
+ prev = drawctxt->cmdqueue_head - 1;
+
+ if (prev < 0)
+ prev = ADRENO_CONTEXT_CMDQUEUE_SIZE - 1;
+
+ /*
+ * The maximum queue size always needs to be one less then the size of
+ * the ringbuffer queue so there is "room" to put the cmdbatch back in
+ */
+
+ BUG_ON(prev == drawctxt->cmdqueue_tail);
+
+ drawctxt->cmdqueue[prev] = cmdbatch;
+ drawctxt->queued++;
+
+ /* Reset the command queue head to reflect the newly requeued change */
+ drawctxt->cmdqueue_head = prev;
+ mutex_unlock(&drawctxt->mutex);
+ return 0;
+}
+
+/**
+ * dispatcher_queue_context() - Queue a context in the dispatcher pending list
+ * @dispatcher: Pointer to the adreno dispatcher struct
+ * @drawctxt: Pointer to the adreno draw context
+ *
+ * Add a context to the dispatcher pending list.
+ */
+static void dispatcher_queue_context(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ /* Refuse to queue a detached context */
+ if (kgsl_context_detached(&drawctxt->base))
+ return;
+
+ spin_lock(&dispatcher->plist_lock);
+
+
+ if (plist_node_empty(&drawctxt->pending)) {
+ /* Get a reference to the context while it sits on the list */
+ if (_kgsl_context_get(&drawctxt->base)) {
+ trace_dispatch_queue_context(drawctxt);
+ plist_add(&drawctxt->pending, &dispatcher->pending);
+ }
+ }
+
+ spin_unlock(&dispatcher->plist_lock);
+}
+
+/**
+ * sendcmd() - Send a command batch to the GPU hardware
+ * @dispatcher: Pointer to the adreno dispatcher struct
+ * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ *
+ * Send a KGSL command batch to the GPU hardware
+ */
+static int sendcmd(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ mutex_lock(&device->mutex);
+
+ dispatcher->inflight++;
+
+ if (dispatcher->inflight == 1) {
+ /* Time to make the donuts. Turn on the GPU */
+ ret = kgsl_active_count_get(device);
+ if (ret) {
+ dispatcher->inflight--;
+ mutex_unlock(&device->mutex);
+ return ret;
+ }
+ }
+
+ ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch);
+
+ /*
+ * On the first command, if the submission was successful, then read the
+ * fault registers. If it failed then turn off the GPU. Sad face.
+ */
+
+ if (dispatcher->inflight == 1) {
+ if (ret == 0)
+ fault_detect_read(device);
+ else
+ kgsl_active_count_put(device);
+ }
+
+ mutex_unlock(&device->mutex);
+
+ if (ret) {
+ dispatcher->inflight--;
+ KGSL_DRV_ERR(device,
+ "Unable to submit command to the ringbuffer %d\n", ret);
+ return ret;
+ }
+
+ trace_adreno_cmdbatch_submitted(cmdbatch, dispatcher->inflight);
+
+ dispatcher->cmdqueue[dispatcher->tail] = cmdbatch;
+ dispatcher->tail = (dispatcher->tail + 1) %
+ ADRENO_DISPATCH_CMDQUEUE_SIZE;
+
+ /*
+ * If this is the first command in the pipe then the GPU will
+ * immediately start executing it so we can start the expiry timeout on
+ * the command batch here. Subsequent command batches will have their
+ * timer started when the previous command batch is retired
+ */
+ if (dispatcher->inflight == 1) {
+ cmdbatch->expires = jiffies +
+ msecs_to_jiffies(_cmdbatch_timeout);
+ mod_timer(&dispatcher->timer, cmdbatch->expires);
+
+ /* Start the fault detection timer */
+ if (adreno_dev->fast_hang_detect)
+ mod_timer(&dispatcher->fault_timer,
+ jiffies +
+ msecs_to_jiffies(_fault_timer_interval));
+ }
+
+ return 0;
+}
+
+/**
+ * dispatcher_context_sendcmds() - Send commands from a context to the GPU
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno context to dispatch commands from
+ *
+ * Dequeue and send a burst of commands from the specified context to the GPU
+ * Returns postive if the context needs to be put back on the pending queue
+ * 0 if the context is empty or detached and negative on error
+ */
+static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int count = 0;
+ int requeued = 0;
+
+ /*
+ * Each context can send a specific number of command batches per cycle
+ */
+ for ( ; count < _context_cmdbatch_burst &&
+ dispatcher->inflight < _dispatcher_inflight; count++) {
+ int ret;
+ struct kgsl_cmdbatch *cmdbatch =
+ adreno_context_get_cmdbatch(drawctxt);
+
+ if (cmdbatch == NULL)
+ break;
+
+ /*
+ * adreno_context_get_cmdbatch returns -EAGAIN if the current
+ * cmdbatch has pending sync points so no more to do here.
+ * When the sync points are satisfied then the context will get
+ * reqeueued
+ */
+
+ if (IS_ERR(cmdbatch) && PTR_ERR(cmdbatch) == -EAGAIN) {
+ requeued = 1;
+ break;
+ }
+
+ /*
+ * If this is a synchronization submission then there are no
+ * commands to submit. Discard it and get the next item from
+ * the queue. Decrement count so this packet doesn't count
+ * against the burst for the context
+ */
+
+ if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
+ count--;
+ kgsl_cmdbatch_destroy(cmdbatch);
+ continue;
+ }
+
+ ret = sendcmd(adreno_dev, cmdbatch);
+
+ /*
+ * There are various reasons why we can't submit a command (no
+ * memory for the commands, full ringbuffer, etc) but none of
+ * these are actually the current command's fault. Requeue it
+ * back on the context and let it come back around again if
+ * conditions improve
+ */
+ if (ret) {
+ requeued = adreno_dispatcher_requeue_cmdbatch(drawctxt,
+ cmdbatch) ? 0 : 1;
+ break;
+ }
+
+ count++;
+ }
+
+ /*
+ * If the context successfully submitted commands there will be room
+ * in the context queue so wake up any snoozing threads that want to
+ * submit commands
+ */
+
+ if (count)
+ wake_up_interruptible_all(&drawctxt->wq);
+
+ /*
+ * Return positive if the context submitted commands or if we figured
+ * out that we need to requeue due to a pending sync or error.
+ */
+
+ return (count || requeued) ? 1 : 0;
+}
+
+/**
+ * _adreno_dispatcher_issuecmds() - Issue commmands from pending contexts
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Issue as many commands as possible (up to inflight) from the pending contexts
+ * This function assumes the dispatcher mutex has been locked.
+ */
+static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ struct adreno_context *drawctxt, *next;
+ struct plist_head requeue;
+ int ret;
+
+ /* Leave early if the dispatcher isn't in a happy state */
+ if ((dispatcher->state != ADRENO_DISPATCHER_ACTIVE) ||
+ adreno_gpu_fault(adreno_dev) != 0)
+ return 0;
+
+ plist_head_init(&requeue);
+
+ /* Try to fill the ringbuffer as much as possible */
+ while (dispatcher->inflight < _dispatcher_inflight) {
+
+ /* Stop doing things if the dispatcher is paused or faulted */
+ if ((dispatcher->state != ADRENO_DISPATCHER_ACTIVE) ||
+ adreno_gpu_fault(adreno_dev) != 0)
+ break;
+
+ spin_lock(&dispatcher->plist_lock);
+
+ if (plist_head_empty(&dispatcher->pending)) {
+ spin_unlock(&dispatcher->plist_lock);
+ break;
+ }
+
+ /* Get the next entry on the list */
+ drawctxt = plist_first_entry(&dispatcher->pending,
+ struct adreno_context, pending);
+
+ plist_del(&drawctxt->pending, &dispatcher->pending);
+
+ spin_unlock(&dispatcher->plist_lock);
+
+ if (kgsl_context_detached(&drawctxt->base) ||
+ drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ kgsl_context_put(&drawctxt->base);
+ continue;
+ }
+
+ ret = dispatcher_context_sendcmds(adreno_dev, drawctxt);
+
+ if (ret > 0) {
+ spin_lock(&dispatcher->plist_lock);
+
+ /*
+ * Check to seen if the context had been requeued while
+ * we were processing it (probably by another thread
+ * pushing commands). If it has then we don't need to
+ * bother with it but do a put to make sure the
+ * reference counting stays accurate. If the node is
+ * empty then we will put it on the requeue list and not
+ * touch the refcount since we already hold it from the
+ * first time it went on the list.
+ */
+
+ if (plist_node_empty(&drawctxt->pending))
+ plist_add(&drawctxt->pending, &requeue);
+ else
+ kgsl_context_put(&drawctxt->base);
+
+ spin_unlock(&dispatcher->plist_lock);
+ } else {
+ /*
+ * If the context doesn't need be requeued put back the
+ * refcount
+ */
+
+ kgsl_context_put(&drawctxt->base);
+ }
+ }
+
+ /* Put all the requeued contexts back on the master list */
+
+ spin_lock(&dispatcher->plist_lock);
+
+ plist_for_each_entry_safe(drawctxt, next, &requeue, pending) {
+ plist_del(&drawctxt->pending, &requeue);
+ plist_add(&drawctxt->pending, &dispatcher->pending);
+ }
+
+ spin_unlock(&dispatcher->plist_lock);
+
+ return 0;
+}
+
+/**
+ * adreno_dispatcher_issuecmds() - Issue commmands from pending contexts
+ * @adreno_dev: Pointer to the adreno device struct
+ *
+ * Lock the dispatcher and call _adreno_dispatcher_issueibcmds
+ */
+int adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ mutex_lock(&dispatcher->mutex);
+ ret = _adreno_dispatcher_issuecmds(adreno_dev);
+ mutex_unlock(&dispatcher->mutex);
+
+ return ret;
+}
+
+static int _check_context_queue(struct adreno_context *drawctxt)
+{
+ int ret;
+
+ mutex_lock(&drawctxt->mutex);
+
+ /*
+ * Wake up if there is room in the context or if the whole thing got
+ * invalidated while we were asleep
+ */
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ ret = 1;
+ else
+ ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+
+ mutex_unlock(&drawctxt->mutex);
+
+ return ret;
+}
+
+/**
+ * get_timestamp() - Return the next timestamp for the context
+ * @drawctxt - Pointer to an adreno draw context struct
+ * @cmdbatch - Pointer to a command batch
+ * @timestamp - Pointer to a timestamp value possibly passed from the user
+ *
+ * Assign a timestamp based on the settings of the draw context and the command
+ * batch.
+ */
+static int get_timestamp(struct adreno_context *drawctxt,
+ struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
+{
+ /* Synchronization commands don't get a timestamp */
+ if (cmdbatch->flags & KGSL_CONTEXT_SYNC) {
+ *timestamp = 0;
+ return 0;
+ }
+
+ if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
+ /*
+ * User specified timestamps need to be greater than the last
+ * issued timestamp in the context
+ */
+ if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
+ return -ERANGE;
+
+ drawctxt->timestamp = *timestamp;
+ } else
+ drawctxt->timestamp++;
+
+ *timestamp = drawctxt->timestamp;
+ return 0;
+}
+
+/**
+ * adreno_dispatcher_queue_cmd() - Queue a new command in the context
+ * @adreno_dev: Pointer to the adreno device struct
+ * @drawctxt: Pointer to the adreno draw context
+ * @cmdbatch: Pointer to the command batch being submitted
+ * @timestamp: Pointer to the requested timestamp
+ *
+ * Queue a command in the context - if there isn't any room in the queue, then
+ * block until there is
+ */
+int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp)
+{
+ int ret;
+
+ mutex_lock(&drawctxt->mutex);
+
+ if (drawctxt->flags & CTXT_FLAGS_BEING_DESTROYED) {
+ mutex_unlock(&drawctxt->mutex);
+ return -EINVAL;
+ }
+
+ /*
+ * After skipping to the end of the frame we need to force the preamble
+ * to run (if it exists) regardless of the context state.
+ */
+
+ if (drawctxt->flags & CTXT_FLAGS_FORCE_PREAMBLE) {
+ cmdbatch->priv |= CMDBATCH_FLAG_FORCE_PREAMBLE;
+ drawctxt->flags &= ~CTXT_FLAGS_FORCE_PREAMBLE;
+ }
+
+ /*
+ * If we are waiting for the end of frame and it hasn't appeared yet,
+ * then mark the command batch as skipped. It will still progress
+ * through the pipeline but it won't actually send any commands
+ */
+
+ if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
+ cmdbatch->priv |= CMDBATCH_FLAG_SKIP;
+
+ /*
+ * If this command batch represents the EOF then clear the way
+ * for the dispatcher to continue submitting
+ */
+
+ if (cmdbatch->flags & KGSL_CONTEXT_END_OF_FRAME) {
+ drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
+
+ /*
+ * Force the preamble on the next command to ensure that
+ * the state is correct
+ */
+
+ drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
+ }
+ }
+
+ /* Wait for room in the context queue */
+
+ while (drawctxt->queued >= _context_cmdqueue_size) {
+ trace_adreno_context_sleep(drawctxt);
+ mutex_unlock(&drawctxt->mutex);
+
+ ret = wait_event_interruptible_timeout(drawctxt->wq,
+ _check_context_queue(drawctxt),
+ msecs_to_jiffies(_context_queue_wait));
+
+ mutex_lock(&drawctxt->mutex);
+ trace_adreno_context_wake(drawctxt);
+
+ if (ret <= 0) {
+ mutex_unlock(&drawctxt->mutex);
+ return (ret == 0) ? -ETIMEDOUT : (int) ret;
+ }
+ }
+ /*
+ * Account for the possiblity that the context got invalidated
+ * while we were sleeping
+ */
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ mutex_unlock(&drawctxt->mutex);
+ return -EDEADLK;
+ }
+ if (kgsl_context_detached(&drawctxt->base)) {
+ mutex_unlock(&drawctxt->mutex);
+ return -EINVAL;
+ }
+
+ ret = get_timestamp(drawctxt, cmdbatch, timestamp);
+ if (ret) {
+ mutex_unlock(&drawctxt->mutex);
+ return ret;
+ }
+
+ cmdbatch->timestamp = *timestamp;
+
+ /* The batch fault policy is the current system fault policy */
+ cmdbatch->fault_policy = adreno_dev->ft_policy;
+
+ /*
+ * Set the fault tolerance policy for the command batch - assuming the
+ * context hsn't disabled FT use the current device policy
+ */
+
+ if (drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
+ set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
+ else
+ cmdbatch->fault_policy = adreno_dev->ft_policy;
+
+ /* Put the command into the queue */
+ drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
+ drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
+ ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+ drawctxt->queued++;
+ trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /* Add the context to the dispatcher pending list */
+ dispatcher_queue_context(adreno_dev, drawctxt);
+
+ /*
+ * Only issue commands if inflight is less than burst -this prevents us
+ * from sitting around waiting for the mutex on a busy system - the work
+ * loop will schedule it for us. Inflight is mutex protected but the
+ * worse that can happen is that it will go to 0 after we check and if
+ * it goes to 0 it is because the work loop decremented it and the work
+ * queue will try to schedule new commands anyway.
+ */
+
+ if (adreno_dev->dispatcher.inflight < _context_cmdbatch_burst)
+ adreno_dispatcher_issuecmds(adreno_dev);
+
+ return 0;
+}
+
+/*
+ * If an IB inside of the command batch has a gpuaddr that matches the base
+ * passed in then zero the size which effectively skips it when it is submitted
+ * in the ringbuffer.
+ */
+static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, unsigned int base)
+{
+ int i;
+
+ for (i = 0; i < cmdbatch->ibcount; i++) {
+ if (cmdbatch->ibdesc[i].gpuaddr == base) {
+ cmdbatch->ibdesc[i].sizedwords = 0;
+ if (base)
+ return;
+ }
+ }
+}
+
+static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_cmdbatch **replay, int count)
+{
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ int skip = 1;
+ int i;
+
+ for (i = 0; i < count; i++) {
+
+ /*
+ * Only operate on command batches that belong to the
+ * faulting context
+ */
+
+ if (replay[i]->context->id != cmdbatch->context->id)
+ continue;
+
+ /*
+ * Skip all the command batches in this context until
+ * the EOF flag is seen. If the EOF flag is seen then
+ * force the preamble for the next command.
+ */
+
+ if (skip) {
+ set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
+
+ if (replay[i]->flags & KGSL_CONTEXT_END_OF_FRAME)
+ skip = 0;
+ } else {
+ set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ return;
+ }
+ }
+
+ /*
+ * If the EOF flag hasn't been seen yet then set the flag in the
+ * drawctxt to keep looking for it
+ */
+
+ if (skip && drawctxt)
+ drawctxt->flags |= CTXT_FLAGS_SKIP_EOF;
+
+ /*
+ * If we did see the EOF flag then force the preamble on for the
+ * next command issued on this context
+ */
+
+ if (!skip && drawctxt)
+ drawctxt->flags |= CTXT_FLAGS_FORCE_PREAMBLE;
+}
+
+static void remove_invalidated_cmdbatches(struct kgsl_device *device,
+ struct kgsl_cmdbatch **replay, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct kgsl_cmdbatch *cmd = replay[i];
+ struct adreno_context *drawctxt;
+
+ if (cmd == NULL)
+ continue;
+
+ drawctxt = ADRENO_CONTEXT(cmd->context);
+
+ if (kgsl_context_detached(cmd->context) ||
+ drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
+ replay[i] = NULL;
+
+ mutex_lock(&device->mutex);
+ kgsl_cancel_events_timestamp(device, cmd->context,
+ cmd->timestamp);
+ mutex_unlock(&device->mutex);
+
+ kgsl_cmdbatch_destroy(cmd);
+ }
+ }
+}
+
+static char _pidname[TASK_COMM_LEN];
+
+static inline const char *_kgsl_context_comm(struct kgsl_context *context)
+{
+ struct task_struct *task = NULL;
+
+ if (context)
+ task = find_task_by_vpid(context->pid);
+
+ if (task)
+ get_task_comm(_pidname, task);
+ else
+ snprintf(_pidname, TASK_COMM_LEN, "unknown");
+
+ return _pidname;
+}
+
+#define pr_fault(_d, _c, fmt, args...) \
+ dev_err((_d)->dev, "%s[%d]: " fmt, \
+ _kgsl_context_comm((_c)->context), \
+ (_c)->context->pid, ##args)
+
+
+static void adreno_fault_header(struct kgsl_device *device,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int status, base, rptr, wptr, ib1base, ib2base, ib1sz, ib2sz;
+
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS),
+ &status);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_BASE),
+ &base);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_RPTR),
+ &rptr);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_WPTR),
+ &wptr);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BASE),
+ &ib1base);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ),
+ &ib1sz);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BASE),
+ &ib2base);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ),
+ &ib2sz);
+
+ trace_adreno_gpu_fault(cmdbatch->context->id, cmdbatch->timestamp,
+ status, rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
+
+ pr_fault(device, cmdbatch,
+ "gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %8.8x/%4.4x ib2 %8.8x/%4.4x\n",
+ cmdbatch->context->id, cmdbatch->timestamp, status,
+ rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
+}
+
+static int dispatcher_do_fault(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ unsigned int ptr;
+ unsigned int reg, base;
+ struct kgsl_cmdbatch **replay = NULL;
+ struct kgsl_cmdbatch *cmdbatch;
+ int ret, i, count = 0;
+ int fault, first = 0;
+ bool pagefault = false;
+
+ fault = atomic_xchg(&dispatcher->fault, 0);
+ if (fault == 0)
+ return 0;
+ /*
+ * Return early if no command inflight - can happen on
+ * false hang detects
+ */
+ if (dispatcher->inflight == 0) {
+ KGSL_DRV_WARN(device,
+ "dispatcher_do_fault with 0 inflight commands\n");
+ return 0;
+ }
+
+ /* Turn off all the timers */
+ del_timer_sync(&dispatcher->timer);
+ del_timer_sync(&dispatcher->fault_timer);
+
+ mutex_lock(&device->mutex);
+
+ cmdbatch = dispatcher->cmdqueue[dispatcher->head];
+
+ trace_adreno_cmdbatch_fault(cmdbatch, fault);
+
+ /*
+ * If the fault was due to a timeout then stop the CP to ensure we don't
+ * get activity while we are trying to dump the state of the system
+ */
+
+ if (fault & ADRENO_TIMEOUT_FAULT) {
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, ®);
+ reg |= (1 << 27) | (1 << 28);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
+
+ /* Skip the PM dump for a timeout because it confuses people */
+ set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+ }
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &base);
+
+ /*
+ * Dump the postmortem and snapshot information if this is the first
+ * detected fault for the oldest active command batch
+ */
+
+ if (!test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy)) {
+ adreno_fault_header(device, cmdbatch);
+
+ if (device->pm_dump_enable)
+ kgsl_postmortem_dump(device, 0);
+
+ kgsl_device_snapshot(device, 1);
+ }
+
+ mutex_unlock(&device->mutex);
+
+ /* Allocate memory to store the inflight commands */
+ replay = kzalloc(sizeof(*replay) * dispatcher->inflight, GFP_KERNEL);
+
+ if (replay == NULL) {
+ unsigned int ptr = dispatcher->head;
+
+ while (ptr != dispatcher->tail) {
+ struct kgsl_context *context =
+ dispatcher->cmdqueue[ptr]->context;
+
+ adreno_drawctxt_invalidate(device, context);
+ kgsl_cmdbatch_destroy(dispatcher->cmdqueue[ptr]);
+
+ ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ }
+
+ /*
+ * Set the replay count to zero - this will ensure that the
+ * hardware gets reset but nothing else goes played
+ */
+
+ count = 0;
+ goto replay;
+ }
+
+ /* Copy the inflight command batches into the temporary storage */
+ ptr = dispatcher->head;
+
+ while (ptr != dispatcher->tail) {
+ replay[count++] = dispatcher->cmdqueue[ptr];
+ ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ }
+
+ /*
+ * For the purposes of replay, we assume that the oldest command batch
+ * that hasn't retired a timestamp is "hung".
+ */
+
+ cmdbatch = replay[0];
+
+ /*
+ * If FT is disabled for this cmdbatch invalidate immediately
+ */
+
+ if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) ||
+ test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) {
+ pr_fault(device, cmdbatch, "gpu skipped ctx %d ts %d\n",
+ cmdbatch->context->id, cmdbatch->timestamp);
+
+ adreno_drawctxt_invalidate(device, cmdbatch->context);
+ }
+
+ /*
+ * Set a flag so we don't print another PM dump if the cmdbatch fails
+ * again on replay
+ */
+
+ set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+
+ /*
+ * A hardware fault generally means something was deterministically
+ * wrong with the command batch - no point in trying to replay it
+ * Clear the replay bit and move on to the next policy level
+ */
+
+ if (fault & ADRENO_HARD_FAULT)
+ clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
+
+ /*
+ * A timeout fault means the IB timed out - clear the policy and
+ * invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
+ * because we won't see this cmdbatch again
+ */
+
+ if (fault & ADRENO_TIMEOUT_FAULT)
+ bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+
+ /*
+ * If the context had a GPU page fault then it is likely it would fault
+ * again if replayed
+ */
+
+ if (test_bit(KGSL_CONTEXT_PAGEFAULT, &cmdbatch->context->priv)) {
+ /* we'll need to resume the mmu later... */
+ pagefault = true;
+ clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
+ clear_bit(KGSL_CONTEXT_PAGEFAULT, &cmdbatch->context->priv);
+ }
+
+ /*
+ * Execute the fault tolerance policy. Each command batch stores the
+ * current fault policy that was set when it was queued.
+ * As the options are tried in descending priority
+ * (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
+ * from the cmdbatch policy so the next thing can be tried if the
+ * change comes around again
+ */
+
+ /* Replay the hanging command batch again */
+ if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
+ set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
+ goto replay;
+ }
+
+ /*
+ * Skip the last IB1 that was played but replay everything else.
+ * Note that the last IB1 might not be in the "hung" command batch
+ * because the CP may have caused a page-fault while it was prefetching
+ * the next IB1/IB2. walk all outstanding commands and zap the
+ * supposedly bad IB1 where ever it lurks.
+ */
+
+ if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB));
+ set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery);
+
+ for (i = 0; i < count; i++) {
+ if (replay[i] != NULL &&
+ replay[i]->context->id == cmdbatch->context->id)
+ cmdbatch_skip_ib(replay[i], base);
+ }
+
+ goto replay;
+ }
+
+ if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdbatch,
+ BIT(KGSL_FT_SKIPFRAME));
+ set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery);
+
+ /*
+ * Skip all the pending command batches for this context until
+ * the EOF frame is seen
+ */
+ cmdbatch_skip_frame(cmdbatch, replay, count);
+ goto replay;
+ }
+
+ /* If we get here then all the policies failed */
+
+ pr_fault(device, cmdbatch, "gpu failed ctx %d ts %d\n",
+ cmdbatch->context->id, cmdbatch->timestamp);
+
+ /* Invalidate the context */
+ adreno_drawctxt_invalidate(device, cmdbatch->context);
+
+
+replay:
+ /* Reset the dispatcher queue */
+ dispatcher->inflight = 0;
+ dispatcher->head = dispatcher->tail = 0;
+
+ /* Reset the GPU */
+ mutex_lock(&device->mutex);
+
+ ret = adreno_reset(device);
+ mutex_unlock(&device->mutex);
+ /* if any other fault got in until reset then ignore */
+ fault = atomic_xchg(&dispatcher->fault, 0);
+
+ /* If adreno_reset() fails then what hope do we have for the future? */
+ BUG_ON(ret);
+
+ /* Remove any pending command batches that have been invalidated */
+ remove_invalidated_cmdbatches(device, replay, count);
+
+ /* Replay the pending command buffers */
+ for (i = 0; i < count; i++) {
+
+ int ret;
+
+ if (replay[i] == NULL)
+ continue;
+
+ /*
+ * Force the preamble on the first command (if applicable) to
+ * avoid any strange stage issues
+ */
+
+ if (first == 0) {
+ set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ first = 1;
+ }
+
+ /*
+ * Force each command batch to wait for idle - this avoids weird
+ * CP parse issues
+ */
+
+ set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv);
+
+ ret = sendcmd(adreno_dev, replay[i]);
+
+ /*
+ * If sending the command fails, then try to recover by
+ * invalidating the context
+ */
+
+ if (ret) {
+ pr_fault(device, replay[i],
+ "gpu reset failed ctx %d ts %d\n",
+ replay[i]->context->id, replay[i]->timestamp);
+
+ adreno_drawctxt_invalidate(device, replay[i]->context);
+ remove_invalidated_cmdbatches(device, &replay[i],
+ count - i);
+ }
+ }
+
+ mutex_lock(&device->mutex);
+ kgsl_active_count_put(device);
+ mutex_unlock(&device->mutex);
+
+ kfree(replay);
+
+ return 1;
+}
+
+static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
+ unsigned int consumed, unsigned int retired)
+{
+ return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
+ (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
+}
+
+static void _print_recovery(struct kgsl_device *device,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ static struct {
+ unsigned int mask;
+ const char *str;
+ } flags[] = { ADRENO_FT_TYPES };
+
+ int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG);
+ char *result = "unknown";
+
+ for (i = 0; i < ARRAY_SIZE(flags); i++) {
+ if (flags[i].mask == BIT(nr)) {
+ result = (char *) flags[i].str;
+ break;
+ }
+ }
+
+ pr_fault(device, cmdbatch,
+ "gpu %s ctx %d ts %d policy %lX\n",
+ result, cmdbatch->context->id, cmdbatch->timestamp,
+ cmdbatch->fault_recovery);
+}
+
+/**
+ * adreno_dispatcher_work() - Master work handler for the dispatcher
+ * @work: Pointer to the work struct for the current work queue
+ *
+ * Process expired commands and send new ones.
+ */
+static void adreno_dispatcher_work(struct work_struct *work)
+{
+ struct adreno_dispatcher *dispatcher =
+ container_of(work, struct adreno_dispatcher, work);
+ struct adreno_device *adreno_dev =
+ container_of(dispatcher, struct adreno_device, dispatcher);
+ struct kgsl_device *device = &adreno_dev->dev;
+ int count = 0;
+ int fault_handled = 0;
+
+ mutex_lock(&dispatcher->mutex);
+
+ while (dispatcher->head != dispatcher->tail) {
+ uint32_t consumed, retired = 0;
+ struct kgsl_cmdbatch *cmdbatch =
+ dispatcher->cmdqueue[dispatcher->head];
+ struct adreno_context *drawctxt;
+ BUG_ON(cmdbatch == NULL);
+
+ drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+
+ /*
+ * First try to expire the timestamp. This happens if the
+ * context is valid and the timestamp expired normally or if the
+ * context was destroyed before the command batch was finished
+ * in the GPU. Either way retire the command batch advance the
+ * pointers and continue processing the queue
+ */
+
+ retired = kgsl_readtimestamp(device, cmdbatch->context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ if ((timestamp_cmp(cmdbatch->timestamp, retired) <= 0)) {
+
+ /*
+ * If the cmdbatch in question had faulted announce its
+ * successful completion to the world
+ */
+
+ if (cmdbatch->fault_recovery != 0)
+ _print_recovery(device, cmdbatch);
+
+ trace_adreno_cmdbatch_retired(cmdbatch,
+ dispatcher->inflight - 1);
+
+ /* Reduce the number of inflight command batches */
+ dispatcher->inflight--;
+
+ /* Zero the old entry*/
+ dispatcher->cmdqueue[dispatcher->head] = NULL;
+
+ /* Advance the buffer head */
+ dispatcher->head = CMDQUEUE_NEXT(dispatcher->head,
+ ADRENO_DISPATCH_CMDQUEUE_SIZE);
+
+ /* Destroy the retired command batch */
+ kgsl_cmdbatch_destroy(cmdbatch);
+
+ /* Update the expire time for the next command batch */
+
+ if (dispatcher->inflight > 0) {
+ cmdbatch =
+ dispatcher->cmdqueue[dispatcher->head];
+ cmdbatch->expires = jiffies +
+ msecs_to_jiffies(_cmdbatch_timeout);
+ }
+
+ count++;
+ continue;
+ }
+
+ /*
+ * If we got a fault from the interrupt handler, this command
+ * is to blame. Invalidate it, reset and replay
+ */
+
+ if (dispatcher_do_fault(device))
+ goto done;
+ fault_handled = 1;
+
+ /* Get the last consumed timestamp */
+ consumed = kgsl_readtimestamp(device, cmdbatch->context,
+ KGSL_TIMESTAMP_CONSUMED);
+
+ /*
+ * Break here if fault detection is disabled for the context or
+ * if the long running IB detection is disaled device wide
+ * Long running command buffers will be allowed to run to
+ * completion - but badly behaving command buffers (infinite
+ * shaders etc) can end up running forever.
+ */
+
+ if (!adreno_dev->long_ib_detect ||
+ drawctxt->flags & CTXT_FLAGS_NO_FAULT_TOLERANCE)
+ break;
+
+ /*
+ * The last line of defense is to check if the command batch has
+ * timed out. If we get this far but the timeout hasn't expired
+ * yet then the GPU is still ticking away
+ */
+
+ if (time_is_after_jiffies(cmdbatch->expires))
+ break;
+
+ /* Boom goes the dynamite */
+
+ pr_fault(device, cmdbatch,
+ "gpu timeout ctx %d ts %d\n",
+ cmdbatch->context->id, cmdbatch->timestamp);
+
+ adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
+
+ dispatcher_do_fault(device);
+ fault_handled = 1;
+ break;
+ }
+
+ /*
+ * Call the dispatcher fault routine here so the fault bit gets cleared
+ * when no commands are in dispatcher but fault bit is set. This can
+ * happen on false hang detects
+ */
+ if (!fault_handled && dispatcher_do_fault(device))
+ goto done;
+ /*
+ * Decrement the active count to 0 - this will allow the system to go
+ * into suspend even if there are queued command batches
+ */
+
+ mutex_lock(&device->mutex);
+ if (count && dispatcher->inflight == 0) {
+ del_timer_sync(&dispatcher->fault_timer);
+ kgsl_active_count_put(device);
+ /* Queue back up the event processor to catch stragglers */
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ }
+ mutex_unlock(&device->mutex);
+
+ /* Dispatch new commands if we have the room */
+ if (dispatcher->inflight < _dispatcher_inflight)
+ _adreno_dispatcher_issuecmds(adreno_dev);
+
+done:
+ /* Either update the timer for the next command batch or disable it */
+ if (dispatcher->inflight) {
+ struct kgsl_cmdbatch *cmdbatch
+ = dispatcher->cmdqueue[dispatcher->head];
+
+ /* Update the timeout timer for the next command batch */
+ mod_timer(&dispatcher->timer, cmdbatch->expires);
+ }
+
+ /* Before leaving update the pwrscale information */
+ mutex_lock(&device->mutex);
+ kgsl_pwrscale_idle(device);
+ mutex_unlock(&device->mutex);
+
+ mutex_unlock(&dispatcher->mutex);
+}
+
+void adreno_dispatcher_schedule(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ queue_work(device->work_queue, &dispatcher->work);
+}
+
+/**
+ * adreno_dispatcher_queue_context() - schedule a drawctxt in the dispatcher
+ * device: pointer to the KGSL device
+ * drawctxt: pointer to the drawctxt to schedule
+ *
+ * Put a draw context on the dispatcher pending queue and schedule the
+ * dispatcher. This is used to reschedule changes that might have been blocked
+ * for sync points or other concerns
+ */
+void adreno_dispatcher_queue_context(struct kgsl_device *device,
+ struct adreno_context *drawctxt)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ dispatcher_queue_context(adreno_dev, drawctxt);
+ adreno_dispatcher_schedule(device);
+}
+
+/*
+ * This is called on a regular basis while command batches are inflight. Fault
+ * detection registers are read and compared to the existing values - if they
+ * changed then the GPU is still running. If they are the same between
+ * subsequent calls then the GPU may have faulted
+ */
+
+void adreno_dispatcher_fault_timer(unsigned long data)
+{
+ struct adreno_device *adreno_dev = (struct adreno_device *) data;
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ BUG_ON(atomic_read(&device->active_cnt) == 0);
+
+ /* Leave if the user decided to turn off fast hang detection */
+ if (adreno_dev->fast_hang_detect == 0)
+ return;
+
+ if (adreno_gpu_fault(adreno_dev)) {
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+
+ /*
+ * Read the fault registers - if it returns 0 then they haven't changed
+ * so mark the dispatcher as faulted and schedule the work loop.
+ */
+
+ if (!fault_detect_read_compare(device)) {
+ adreno_set_gpu_fault(adreno_dev, ADRENO_SOFT_FAULT);
+ adreno_dispatcher_schedule(device);
+ } else {
+ mod_timer(&dispatcher->fault_timer,
+ jiffies + msecs_to_jiffies(_fault_timer_interval));
+ }
+}
+
+/*
+ * This is called when the timer expires - it either means the GPU is hung or
+ * the IB is taking too long to execute
+ */
+void adreno_dispatcher_timer(unsigned long data)
+{
+ struct adreno_device *adreno_dev = (struct adreno_device *) data;
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ adreno_dispatcher_schedule(device);
+}
+/**
+ * adreno_dispatcher_irq_fault() - Trigger a fault in the dispatcher
+ * @device: Pointer to the KGSL device
+ *
+ * Called from an interrupt context this will trigger a fault in the
+ * dispatcher for the oldest pending command batch
+ */
+void adreno_dispatcher_irq_fault(struct kgsl_device *device)
+{
+ adreno_set_gpu_fault(ADRENO_DEVICE(device), ADRENO_HARD_FAULT);
+ adreno_dispatcher_schedule(device);
+}
+
+/**
+ * adreno_dispatcher_pause() - stop the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Pause the dispather so it doesn't accept any new commands
+ */
+void adreno_dispatcher_pause(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ /*
+ * This will probably get called while holding other mutexes so don't
+ * take the dispatcher mutex. The biggest penalty is that another
+ * command might be submitted while we are in here but thats okay
+ * because whoever is waiting for the drain will just have another
+ * command batch to wait for
+ */
+
+ dispatcher->state = ADRENO_DISPATCHER_PAUSE;
+}
+
+/**
+ * adreno_dispatcher_resume() - resume the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Set the dispatcher active so it can start accepting commands again
+ */
+void adreno_dispatcher_resume(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+ adreno_dispatcher_schedule(&adreno_dev->dev);
+}
+
+/**
+ * adreno_dispatcher_start() - activate the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Set the disaptcher active and start the loop once to get things going
+ */
+void adreno_dispatcher_start(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+
+ /* Schedule the work loop to get things going */
+ adreno_dispatcher_schedule(&adreno_dev->dev);
+}
+
+/**
+ * adreno_dispatcher_stop() - stop the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Stop the dispatcher and close all the timers
+ */
+void adreno_dispatcher_stop(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ del_timer_sync(&dispatcher->timer);
+ del_timer_sync(&dispatcher->fault_timer);
+
+ dispatcher->state = ADRENO_DISPATCHER_PAUSE;
+}
+
+/**
+ * adreno_dispatcher_close() - close the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Close the dispatcher and free all the oustanding commands and memory
+ */
+void adreno_dispatcher_close(struct adreno_device *adreno_dev)
+{
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ mutex_lock(&dispatcher->mutex);
+ del_timer_sync(&dispatcher->timer);
+ del_timer_sync(&dispatcher->fault_timer);
+
+ while (dispatcher->head != dispatcher->tail) {
+ kgsl_cmdbatch_destroy(dispatcher->cmdqueue[dispatcher->head]);
+ dispatcher->head = (dispatcher->head + 1)
+ % ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ }
+
+ mutex_unlock(&dispatcher->mutex);
+
+ kobject_put(&dispatcher->kobj);
+}
+
+struct dispatcher_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct adreno_dispatcher *,
+ struct dispatcher_attribute *, char *);
+ ssize_t (*store)(struct adreno_dispatcher *,
+ struct dispatcher_attribute *, const char *buf,
+ size_t count);
+ unsigned int max;
+ unsigned int *value;
+};
+
+#define DISPATCHER_UINT_ATTR(_name, _mode, _max, _value) \
+ struct dispatcher_attribute dispatcher_attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .show = _show_uint, \
+ .store = _store_uint, \
+ .max = _max, \
+ .value = &(_value), \
+ }
+
+#define to_dispatcher_attr(_a) \
+ container_of((_a), struct dispatcher_attribute, attr)
+#define to_dispatcher(k) container_of(k, struct adreno_dispatcher, kobj)
+
+static ssize_t _store_uint(struct adreno_dispatcher *dispatcher,
+ struct dispatcher_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ int ret = kstrtoul(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (!val || (attr->max && (val > attr->max)))
+ return -EINVAL;
+
+ *((unsigned int *) attr->value) = val;
+ return size;
+}
+
+static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
+ struct dispatcher_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ *((unsigned int *) attr->value));
+}
+
+static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
+ _dispatcher_inflight);
+/*
+ * Our code that "puts back" a command from the context is much cleaner
+ * if we are sure that there will always be enough room in the
+ * ringbuffer so restrict the maximum size of the context queue to
+ * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
+ */
+static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
+ ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
+static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
+ _context_cmdbatch_burst);
+static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0, _cmdbatch_timeout);
+static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
+static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
+ _fault_timer_interval);
+
+static struct attribute *dispatcher_attrs[] = {
+ &dispatcher_attr_inflight.attr,
+ &dispatcher_attr_context_cmdqueue_size.attr,
+ &dispatcher_attr_context_burst_count.attr,
+ &dispatcher_attr_cmdbatch_timeout.attr,
+ &dispatcher_attr_context_queue_wait.attr,
+ &dispatcher_attr_fault_detect_interval.attr,
+ NULL,
+};
+
+static ssize_t dispatcher_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
+ struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (pattr->show)
+ ret = pattr->show(dispatcher, pattr, buf);
+
+ return ret;
+}
+
+static ssize_t dispatcher_sysfs_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adreno_dispatcher *dispatcher = to_dispatcher(kobj);
+ struct dispatcher_attribute *pattr = to_dispatcher_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (pattr->store)
+ ret = pattr->store(dispatcher, pattr, buf, count);
+
+ return ret;
+}
+
+static void dispatcher_sysfs_release(struct kobject *kobj)
+{
+}
+
+static const struct sysfs_ops dispatcher_sysfs_ops = {
+ .show = dispatcher_sysfs_show,
+ .store = dispatcher_sysfs_store
+};
+
+static struct kobj_type ktype_dispatcher = {
+ .sysfs_ops = &dispatcher_sysfs_ops,
+ .default_attrs = dispatcher_attrs,
+ .release = dispatcher_sysfs_release
+};
+
+/**
+ * adreno_dispatcher_init() - Initialize the dispatcher
+ * @adreno_dev: pointer to the adreno device structure
+ *
+ * Initialize the dispatcher
+ */
+int adreno_dispatcher_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ int ret;
+
+ memset(dispatcher, 0, sizeof(*dispatcher));
+
+ mutex_init(&dispatcher->mutex);
+
+ setup_timer(&dispatcher->timer, adreno_dispatcher_timer,
+ (unsigned long) adreno_dev);
+
+ setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
+ (unsigned long) adreno_dev);
+
+ INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
+
+ plist_head_init(&dispatcher->pending);
+ spin_lock_init(&dispatcher->plist_lock);
+
+ dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+
+ ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
+ &device->dev->kobj, "dispatch");
+
+ return ret;
+}
diff --git a/drivers/gpu/msm2/adreno_drawctxt.c b/drivers/gpu/msm2/adreno_drawctxt.c
new file mode 100644
index 0000000..b666c47
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_drawctxt.c
@@ -0,0 +1,680 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/msm_kgsl.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "adreno.h"
+#include "adreno_trace.h"
+
+#define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF
+
+/* quad for copying GMEM to context shadow */
+#define QUAD_LEN 12
+#define QUAD_RESTORE_LEN 14
+
+static unsigned int gmem_copy_quad[QUAD_LEN] = {
+ 0x00000000, 0x00000000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x3f800000
+};
+
+static unsigned int gmem_restore_quad[QUAD_RESTORE_LEN] = {
+ 0x00000000, 0x3f800000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x3f800000, 0x00000000, 0x00000000,
+ 0x3f800000, 0x00000000, 0x00000000,
+ 0x3f800000, 0x3f800000,
+};
+
+#define TEXCOORD_LEN 8
+
+static unsigned int gmem_copy_texcoord[TEXCOORD_LEN] = {
+ 0x00000000, 0x3f800000,
+ 0x3f800000, 0x3f800000,
+ 0x00000000, 0x00000000,
+ 0x3f800000, 0x00000000
+};
+
+/*
+ * Helper functions
+ * These are global helper functions used by the GPUs during context switch
+ */
+
+/**
+ * uint2float - convert a uint to IEEE754 single precision float
+ * @ uintval - value to convert
+ */
+
+unsigned int uint2float(unsigned int uintval)
+{
+ unsigned int exp, frac = 0;
+
+ if (uintval == 0)
+ return 0;
+
+ exp = ilog2(uintval);
+
+ /* Calculate fraction */
+ if (23 > exp)
+ frac = (uintval & (~(1 << exp))) << (23 - exp);
+
+ /* Exp is biased by 127 and shifted 23 bits */
+ exp = (exp + 127) << 23;
+
+ return exp | frac;
+}
+
+static void set_gmem_copy_quad(struct gmem_shadow_t *shadow)
+{
+ /* set vertex buffer values */
+ gmem_copy_quad[1] = uint2float(shadow->height);
+ gmem_copy_quad[3] = uint2float(shadow->width);
+ gmem_copy_quad[4] = uint2float(shadow->height);
+ gmem_copy_quad[9] = uint2float(shadow->width);
+
+ gmem_restore_quad[5] = uint2float(shadow->height);
+ gmem_restore_quad[7] = uint2float(shadow->width);
+
+ memcpy(shadow->quad_vertices.hostptr, gmem_copy_quad, QUAD_LEN << 2);
+ memcpy(shadow->quad_vertices_restore.hostptr, gmem_restore_quad,
+ QUAD_RESTORE_LEN << 2);
+
+ memcpy(shadow->quad_texcoords.hostptr, gmem_copy_texcoord,
+ TEXCOORD_LEN << 2);
+}
+
+/**
+ * build_quad_vtxbuff - Create a quad for saving/restoring GMEM
+ * @ context - Pointer to the context being created
+ * @ shadow - Pointer to the GMEM shadow structure
+ * @ incmd - Pointer to pointer to the temporary command buffer
+ */
+
+/* quad for saving/restoring gmem */
+void build_quad_vtxbuff(struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow, unsigned int **incmd)
+{
+ unsigned int *cmd = *incmd;
+
+ /* quad vertex buffer location (in GPU space) */
+ shadow->quad_vertices.hostptr = cmd;
+ shadow->quad_vertices.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+
+ cmd += QUAD_LEN;
+
+ /* Used by A3XX, but define for both to make the code easier */
+ shadow->quad_vertices_restore.hostptr = cmd;
+ shadow->quad_vertices_restore.gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+
+ cmd += QUAD_RESTORE_LEN;
+
+ /* tex coord buffer location (in GPU space) */
+ shadow->quad_texcoords.hostptr = cmd;
+ shadow->quad_texcoords.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+
+ cmd += TEXCOORD_LEN;
+
+ set_gmem_copy_quad(shadow);
+ *incmd = cmd;
+}
+
+static void wait_callback(struct kgsl_device *device, void *priv, u32 id,
+ u32 timestamp, u32 type)
+{
+ struct adreno_context *drawctxt = priv;
+ wake_up_interruptible_all(&drawctxt->waiting);
+}
+
+#define adreno_wait_event_interruptible_timeout(wq, condition, timeout, io) \
+({ \
+ long __ret = timeout; \
+ if (io) \
+ __wait_io_event_interruptible_timeout(wq, condition, __ret); \
+ else \
+ __wait_event_interruptible_timeout(wq, condition, __ret); \
+ __ret; \
+})
+
+#define adreno_wait_event_interruptible(wq, condition, io) \
+({ \
+ long __ret; \
+ if (io) \
+ __wait_io_event_interruptible(wq, condition, __ret); \
+ else \
+ __wait_event_interruptible(wq, condition, __ret); \
+ __ret; \
+})
+
+static int _check_context_timestamp(struct kgsl_device *device,
+ struct adreno_context *drawctxt, unsigned int timestamp)
+{
+ int ret = 0;
+
+ /* Bail if the drawctxt has been invalidated or destroyed */
+ if (kgsl_context_detached(&drawctxt->base) ||
+ drawctxt->state != ADRENO_CONTEXT_STATE_ACTIVE)
+ return 1;
+
+ mutex_lock(&device->mutex);
+ ret = kgsl_check_timestamp(device, &drawctxt->base, timestamp);
+ mutex_unlock(&device->mutex);
+
+ return ret;
+}
+
+/**
+ * adreno_drawctxt_wait() - sleep until a timestamp expires
+ * @adreno_dev: pointer to the adreno_device struct
+ * @drawctxt: Pointer to the draw context to sleep for
+ * @timetamp: Timestamp to wait on
+ * @timeout: Number of jiffies to wait (0 for infinite)
+ *
+ * Register an event to wait for a timestamp on a context and sleep until it
+ * has past. Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
+ * on success
+ */
+int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
+ struct kgsl_context *context,
+ uint32_t timestamp, unsigned int timeout)
+{
+ static unsigned int io_cnt;
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ int ret, io;
+
+ if (kgsl_context_detached(context))
+ return -EINVAL;
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ return -EDEADLK;
+
+ /* Needs to hold the device mutex */
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ trace_adreno_drawctxt_wait_start(context->id, timestamp);
+
+ ret = kgsl_add_event(device, context->id, timestamp,
+ wait_callback, drawctxt, NULL);
+ if (ret)
+ goto done;
+
+ /*
+ * For proper power accounting sometimes we need to call
+ * io_wait_interruptible_timeout and sometimes we need to call
+ * plain old wait_interruptible_timeout. We call the regular
+ * timeout N times out of 100, where N is a number specified by
+ * the current power level
+ */
+
+ io_cnt = (io_cnt + 1) % 100;
+ io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
+ ? 0 : 1;
+
+ mutex_unlock(&device->mutex);
+
+ if (timeout) {
+ ret = (int) adreno_wait_event_interruptible_timeout(
+ drawctxt->waiting,
+ _check_context_timestamp(device, drawctxt, timestamp),
+ msecs_to_jiffies(timeout), io);
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = 0;
+ } else {
+ ret = (int) adreno_wait_event_interruptible(drawctxt->waiting,
+ _check_context_timestamp(device, drawctxt, timestamp),
+ io);
+ }
+
+ mutex_lock(&device->mutex);
+
+ /* -EDEADLK if the context was invalidated while we were waiting */
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ ret = -EDEADLK;
+
+
+ /* Return -EINVAL if the context was detached while we were waiting */
+ if (kgsl_context_detached(context))
+ ret = -EINVAL;
+
+done:
+ trace_adreno_drawctxt_wait_done(context->id, timestamp, ret);
+ return ret;
+}
+
+static void global_wait_callback(struct kgsl_device *device, void *priv, u32 id,
+ u32 timestamp, u32 type)
+{
+ struct adreno_context *drawctxt = priv;
+
+ wake_up_interruptible_all(&drawctxt->waiting);
+ kgsl_context_put(&drawctxt->base);
+}
+
+static int _check_global_timestamp(struct kgsl_device *device,
+ unsigned int timestamp)
+{
+ int ret;
+
+ mutex_lock(&device->mutex);
+ ret = kgsl_check_timestamp(device, NULL, timestamp);
+ mutex_unlock(&device->mutex);
+
+ return ret;
+}
+
+int adreno_drawctxt_wait_global(struct adreno_device *adreno_dev,
+ struct kgsl_context *context,
+ uint32_t timestamp, unsigned int timeout)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ int ret;
+
+ /* Needs to hold the device mutex */
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (!_kgsl_context_get(context)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ trace_adreno_drawctxt_wait_start(KGSL_MEMSTORE_GLOBAL, timestamp);
+
+ ret = kgsl_add_event(device, KGSL_MEMSTORE_GLOBAL, timestamp,
+ global_wait_callback, drawctxt, NULL);
+ if (ret) {
+ kgsl_context_put(context);
+ goto done;
+ }
+
+ mutex_unlock(&device->mutex);
+
+ if (timeout) {
+ ret = (int) wait_event_timeout(drawctxt->waiting,
+ _check_global_timestamp(device, timestamp),
+ msecs_to_jiffies(timeout));
+
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ else if (ret > 0)
+ ret = 0;
+ } else {
+ wait_event(drawctxt->waiting,
+ _check_global_timestamp(device, timestamp));
+ }
+
+ mutex_lock(&device->mutex);
+
+ if (ret)
+ kgsl_cancel_events_timestamp(device, NULL, timestamp);
+
+done:
+ trace_adreno_drawctxt_wait_done(KGSL_MEMSTORE_GLOBAL, timestamp, ret);
+ return ret;
+}
+
+/**
+ * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
+ * @device: Pointer to the KGSL device structure for the GPU
+ * @context: Pointer to the KGSL context structure
+ *
+ * Invalidate the context and remove all queued commands and cancel any pending
+ * waiters
+ */
+void adreno_drawctxt_invalidate(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+
+ drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;
+
+ /* Clear the pending queue */
+ mutex_lock(&drawctxt->mutex);
+
+ /*
+ * set the timestamp to the last value since the context is invalidated
+ * and we want the pending events for this context to go away
+ */
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
+ drawctxt->timestamp);
+
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
+ drawctxt->timestamp);
+
+ while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+ struct kgsl_cmdbatch *cmdbatch =
+ drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+ drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
+ ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+ mutex_unlock(&drawctxt->mutex);
+
+ mutex_lock(&device->mutex);
+ kgsl_cancel_events_timestamp(device, context,
+ cmdbatch->timestamp);
+ mutex_unlock(&device->mutex);
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+ mutex_lock(&drawctxt->mutex);
+ }
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /* Give the bad news to everybody waiting around */
+ wake_up_interruptible_all(&drawctxt->waiting);
+ wake_up_interruptible_all(&drawctxt->wq);
+}
+
+/**
+ * adreno_drawctxt_create - create a new adreno draw context
+ * @dev_priv: the owner of the context
+ * @flags: flags for the context (passed from user space)
+ *
+ * Create and return a new draw context for the 3D core.
+ */
+struct kgsl_context *
+adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
+ uint32_t *flags)
+{
+ struct adreno_context *drawctxt;
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int ret;
+
+ drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
+ if (drawctxt == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ret = kgsl_context_init(dev_priv, &drawctxt->base);
+ if (ret != 0) {
+ kfree(drawctxt);
+ return ERR_PTR(ret);
+ }
+
+ drawctxt->bin_base_offset = 0;
+ drawctxt->timestamp = 0;
+
+ *flags &= (KGSL_CONTEXT_PREAMBLE |
+ KGSL_CONTEXT_NO_GMEM_ALLOC |
+ KGSL_CONTEXT_PER_CONTEXT_TS |
+ KGSL_CONTEXT_USER_GENERATED_TS |
+ KGSL_CONTEXT_NO_FAULT_TOLERANCE |
+ KGSL_CONTEXT_TYPE_MASK);
+
+ /* Always enable per-context timestamps */
+ *flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
+ drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
+
+ if (*flags & KGSL_CONTEXT_PREAMBLE)
+ drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
+
+ if (*flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
+ drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
+
+ if (*flags & KGSL_CONTEXT_USER_GENERATED_TS)
+ drawctxt->flags |= CTXT_FLAGS_USER_GENERATED_TS;
+
+ mutex_init(&drawctxt->mutex);
+ init_waitqueue_head(&drawctxt->wq);
+ init_waitqueue_head(&drawctxt->waiting);
+
+ /*
+ * Set up the plist node for the dispatcher. For now all contexts have
+ * the same priority, but later the priority will be set at create time
+ * by the user
+ */
+
+ plist_node_init(&drawctxt->pending, ADRENO_CONTEXT_DEFAULT_PRIORITY);
+
+ if (*flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ drawctxt->flags |= CTXT_FLAGS_NO_FAULT_TOLERANCE;
+
+ drawctxt->type =
+ (*flags & KGSL_CONTEXT_TYPE_MASK) >> KGSL_CONTEXT_TYPE_SHIFT;
+
+ ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
+ if (ret)
+ goto err;
+
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
+ 0);
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
+ 0);
+
+ return &drawctxt->base;
+err:
+ kgsl_context_detach(&drawctxt->base);
+ return ERR_PTR(ret);
+}
+
+/**
+ * adreno_drawctxt_sched() - Schedule a previously blocked context
+ * @device: pointer to a KGSL device
+ * @drawctxt: drawctxt to rechedule
+ *
+ * This function is called by the core when it knows that a previously blocked
+ * context has been unblocked. The default adreno response is to reschedule the
+ * context on the dispatcher
+ */
+void adreno_drawctxt_sched(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ adreno_dispatcher_queue_context(device, ADRENO_CONTEXT(context));
+}
+
+/**
+ * adreno_drawctxt_detach(): detach a context from the GPU
+ * @context: Generic KGSL context container for the context
+ *
+ */
+int adreno_drawctxt_detach(struct kgsl_context *context)
+{
+ struct kgsl_device *device;
+ struct adreno_device *adreno_dev;
+ struct adreno_context *drawctxt;
+ int ret;
+
+ if (context == NULL)
+ return 0;
+
+ device = context->device;
+ adreno_dev = ADRENO_DEVICE(device);
+ drawctxt = ADRENO_CONTEXT(context);
+
+ /* deactivate context */
+ if (adreno_dev->drawctxt_active == drawctxt) {
+ /* no need to save GMEM or shader, the context is
+ * being destroyed.
+ */
+ drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
+ CTXT_FLAGS_SHADER_SAVE |
+ CTXT_FLAGS_GMEM_SHADOW |
+ CTXT_FLAGS_STATE_SHADOW);
+
+ drawctxt->flags |= CTXT_FLAGS_BEING_DESTROYED;
+
+ adreno_drawctxt_switch(adreno_dev, NULL, 0);
+ }
+
+ mutex_lock(&drawctxt->mutex);
+
+ while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
+ struct kgsl_cmdbatch *cmdbatch =
+ drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+
+ drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
+ ADRENO_CONTEXT_CMDQUEUE_SIZE;
+
+ mutex_unlock(&drawctxt->mutex);
+
+ /*
+ * Don't hold the drawctxt mutex while the cmdbatch is being
+ * destroyed because the cmdbatch destroy takes the device
+ * mutex and the world falls in on itself
+ */
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+ mutex_lock(&drawctxt->mutex);
+ }
+
+ mutex_unlock(&drawctxt->mutex);
+ /*
+ * internal_timestamp is set in adreno_ringbuffer_addcmds,
+ * which holds the device mutex. The entire context destroy
+ * process requires the device mutex as well. But lets
+ * make sure we notice if the locking changes.
+ */
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ /* Wait for the last global timestamp to pass before continuing */
+ ret = adreno_drawctxt_wait_global(adreno_dev, context,
+ drawctxt->internal_timestamp, 10 * 1000);
+
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
+ drawctxt->timestamp);
+
+ kgsl_sharedmem_writel(device, &device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
+ drawctxt->timestamp);
+
+ kgsl_sharedmem_free(&drawctxt->gpustate);
+ kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
+
+ /* wake threads waiting to submit commands from this context */
+ wake_up_interruptible_all(&drawctxt->waiting);
+ wake_up_interruptible_all(&drawctxt->wq);
+
+ return ret;
+}
+
+
+void adreno_drawctxt_destroy(struct kgsl_context *context)
+{
+ struct adreno_context *drawctxt;
+ if (context == NULL)
+ return;
+
+ drawctxt = ADRENO_CONTEXT(context);
+ kfree(drawctxt);
+}
+
+/**
+ * adreno_drawctxt_set_bin_base_offset - set bin base offset for the context
+ * @device - KGSL device that owns the context
+ * @context- Generic KGSL context container for the context
+ * @offset - Offset to set
+ *
+ * Set the bin base offset for A2XX devices. Not valid for A3XX devices.
+ */
+
+void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int offset)
+{
+ struct adreno_context *drawctxt;
+
+ if (context == NULL)
+ return;
+ drawctxt = ADRENO_CONTEXT(context);
+ drawctxt->bin_base_offset = offset;
+}
+
+/**
+ * adreno_drawctxt_switch - switch the current draw context
+ * @adreno_dev - The 3D device that owns the context
+ * @drawctxt - the 3D context to switch to
+ * @flags - Flags to accompany the switch (from user space)
+ *
+ * Switch the current draw context
+ */
+
+int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ unsigned int flags)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ int ret = 0;
+
+ if (drawctxt) {
+ if (flags & KGSL_CONTEXT_SAVE_GMEM)
+ /* Set the flag in context so that the save is done
+ * when this context is switched out. */
+ drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
+ else
+ /* Remove GMEM saving flag from the context */
+ drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
+ }
+
+ /* already current? */
+ if (adreno_dev->drawctxt_active == drawctxt) {
+ if (adreno_dev->gpudev->ctxt_draw_workaround &&
+ adreno_is_a225(adreno_dev))
+ ret = adreno_dev->gpudev->ctxt_draw_workaround(
+ adreno_dev, drawctxt);
+ return ret;
+ }
+
+ trace_adreno_drawctxt_switch(adreno_dev->drawctxt_active,
+ drawctxt, flags);
+
+ /* Save the old context */
+ if (adreno_dev->gpudev->ctxt_save) {
+ ret = adreno_dev->gpudev->ctxt_save(adreno_dev,
+ adreno_dev->drawctxt_active);
+
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Error in GPU context %d save: %d\n",
+ adreno_dev->drawctxt_active->base.id, ret);
+ return ret;
+ }
+ }
+
+ /* Put the old instance of the active drawctxt */
+ if (adreno_dev->drawctxt_active)
+ kgsl_context_put(&adreno_dev->drawctxt_active->base);
+
+ /* Get a refcount to the new instance */
+ if (drawctxt) {
+ if (!_kgsl_context_get(&drawctxt->base))
+ return -EINVAL;
+ }
+
+ /* Set the new context */
+ ret = adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Error in GPU context %d restore: %d\n",
+ drawctxt->base.id, ret);
+ return ret;
+ }
+
+ adreno_dev->drawctxt_active = drawctxt;
+ return 0;
+}
diff --git a/drivers/gpu/msm2/adreno_drawctxt.h b/drivers/gpu/msm2/adreno_drawctxt.h
new file mode 100644
index 0000000..0f65d4b
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_drawctxt.h
@@ -0,0 +1,270 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_DRAWCTXT_H
+#define __ADRENO_DRAWCTXT_H
+
+#include "adreno_pm4types.h"
+#include "a2xx_reg.h"
+
+/* Flags */
+
+#define CTXT_FLAGS_NOT_IN_USE 0x00000000
+#define CTXT_FLAGS_IN_USE BIT(0)
+
+/* state shadow memory allocated */
+#define CTXT_FLAGS_STATE_SHADOW BIT(1)
+
+/* gmem shadow memory allocated */
+#define CTXT_FLAGS_GMEM_SHADOW BIT(2)
+/* gmem must be copied to shadow */
+#define CTXT_FLAGS_GMEM_SAVE BIT(3)
+/* gmem can be restored from shadow */
+#define CTXT_FLAGS_GMEM_RESTORE BIT(4)
+/* preamble packed in cmdbuffer for context switching */
+#define CTXT_FLAGS_PREAMBLE BIT(5)
+/* shader must be copied to shadow */
+#define CTXT_FLAGS_SHADER_SAVE BIT(6)
+/* shader can be restored from shadow */
+#define CTXT_FLAGS_SHADER_RESTORE BIT(7)
+/* Context has caused a GPU hang */
+#define CTXT_FLAGS_GPU_HANG BIT(8)
+/* Specifies there is no need to save GMEM */
+#define CTXT_FLAGS_NOGMEMALLOC BIT(9)
+/* Trash state for context */
+#define CTXT_FLAGS_TRASHSTATE BIT(10)
+/* per context timestamps enabled */
+#define CTXT_FLAGS_PER_CONTEXT_TS BIT(11)
+/* Context has caused a GPU hang and fault tolerance successful */
+#define CTXT_FLAGS_GPU_HANG_FT BIT(12)
+/* Context is being destroyed so dont save it */
+#define CTXT_FLAGS_BEING_DESTROYED BIT(13)
+/* User mode generated timestamps enabled */
+#define CTXT_FLAGS_USER_GENERATED_TS BIT(14)
+/* Context skip till EOF */
+#define CTXT_FLAGS_SKIP_EOF BIT(15)
+/* Context no fault tolerance */
+#define CTXT_FLAGS_NO_FAULT_TOLERANCE BIT(16)
+/* Force the preamble for the next submission */
+#define CTXT_FLAGS_FORCE_PREAMBLE BIT(17)
+
+/* Symbolic table for the adreno draw context type */
+#define ADRENO_DRAWCTXT_TYPES \
+ { KGSL_CONTEXT_TYPE_ANY, "any" }, \
+ { KGSL_CONTEXT_TYPE_GL, "GL" }, \
+ { KGSL_CONTEXT_TYPE_CL, "CL" }, \
+ { KGSL_CONTEXT_TYPE_C2D, "C2D" }, \
+ { KGSL_CONTEXT_TYPE_RS, "RS" }
+
+#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
+
+#define ADRENO_CONTEXT_DEFAULT_PRIORITY 1
+
+#define ADRENO_CONTEXT_STATE_ACTIVE 0
+#define ADRENO_CONTEXT_STATE_INVALID 1
+
+struct kgsl_device;
+struct adreno_device;
+struct kgsl_device_private;
+struct kgsl_context;
+
+/* draw context */
+struct gmem_shadow_t {
+ struct kgsl_memdesc gmemshadow; /* Shadow buffer address */
+
+ /*
+ * 256 KB GMEM surface = 4 bytes-per-pixel x 256 pixels/row x
+ * 256 rows. Width & height must be multiples of 32 in case tiled
+ * textures are used
+ */
+
+ enum COLORFORMATX format; /* Unused on A3XX */
+ unsigned int size; /* Size of surface used to store GMEM */
+ unsigned int width; /* Width of surface used to store GMEM */
+ unsigned int height; /* Height of surface used to store GMEM */
+ unsigned int pitch; /* Pitch of surface used to store GMEM */
+ unsigned int gmem_pitch; /* Pitch value used for GMEM */
+ unsigned int *gmem_save_commands; /* Unused on A3XX */
+ unsigned int *gmem_restore_commands; /* Unused on A3XX */
+ unsigned int gmem_save[3];
+ unsigned int gmem_restore[3];
+ struct kgsl_memdesc quad_vertices;
+ struct kgsl_memdesc quad_texcoords;
+ struct kgsl_memdesc quad_vertices_restore;
+};
+
+/**
+ * struct adreno_context - Adreno GPU draw context
+ * @id: Unique integer ID of the context
+ * @timestamp: Last issued context-specific timestamp
+ * @internal_timestamp: Global timestamp of the last issued command
+ * NOTE: guarded by device->mutex, not drawctxt->mutex!
+ * @state: Current state of the context
+ * @flags: Bitfield controlling behavior of the context
+ * @type: Context type (GL, CL, RS)
+ * @mutex: Mutex to protect the cmdqueue
+ * @pagetable: Pointer to the GPU pagetable for the context
+ * @gpustate: Pointer to the GPU scratch memory for context save/restore
+ * @reg_restore: Command buffer for restoring context registers
+ * @shader_save: Command buffer for saving shaders
+ * @shader_restore: Command buffer to restore shaders
+ * @context_gmem_shadow: GMEM shadow structure for save/restore
+ * @reg_save: A2XX command buffer to save context registers
+ * @shader_fixup: A2XX command buffer to "fix" shaders on restore
+ * @chicken_restore: A2XX command buffer to "fix" register restore
+ * @bin_base_offset: Saved value of the A2XX BIN_BASE_OFFSET register
+ * @regconstant_save: A3XX command buffer to save some registers
+ * @constant_retore: A3XX command buffer to restore some registers
+ * @hslqcontrol_restore: A3XX command buffer to restore HSLSQ registers
+ * @save_fixup: A3XX command buffer to "fix" register save
+ * @restore_fixup: A3XX cmmand buffer to restore register save fixes
+ * @shader_load_commands: A3XX GPU memory descriptor for shader load IB
+ * @shader_save_commands: A3XX GPU memory descriptor for shader save IB
+ * @constantr_save_commands: A3XX GPU memory descriptor for constant save IB
+ * @constant_load_commands: A3XX GPU memory descriptor for constant load IB
+ * @cond_execs: A3XX GPU memory descriptor for conditional exec IB
+ * @hlsq_restore_commands: A3XX GPU memory descriptor for HLSQ restore IB
+ * @cmdqueue: Queue of command batches waiting to be dispatched for this context
+ * @cmdqueue_head: Head of the cmdqueue queue
+ * @cmdqueue_tail: Tail of the cmdqueue queue
+ * @pending: Priority list node for the dispatcher list of pending contexts
+ * @wq: Workqueue structure for contexts to sleep pending room in the queue
+ * @waiting: Workqueue structure for contexts waiting for a timestamp or event
+ * @queued: Number of commands queued in the cmdqueue
+ */
+struct adreno_context {
+ struct kgsl_context base;
+ unsigned int ib_gpu_time_used;
+ unsigned int timestamp;
+ unsigned int internal_timestamp;
+ int state;
+ uint32_t flags;
+ unsigned int type;
+ struct mutex mutex;
+ struct kgsl_memdesc gpustate;
+ unsigned int reg_restore[3];
+ unsigned int shader_save[3];
+ unsigned int shader_restore[3];
+
+ struct gmem_shadow_t context_gmem_shadow;
+
+ /* A2XX specific items */
+ unsigned int reg_save[3];
+ unsigned int shader_fixup[3];
+ unsigned int chicken_restore[3];
+ unsigned int bin_base_offset;
+
+ /* A3XX specific items */
+ unsigned int regconstant_save[3];
+ unsigned int constant_restore[3];
+ unsigned int hlsqcontrol_restore[3];
+ unsigned int save_fixup[3];
+ unsigned int restore_fixup[3];
+ struct kgsl_memdesc shader_load_commands[2];
+ struct kgsl_memdesc shader_save_commands[4];
+ struct kgsl_memdesc constant_save_commands[3];
+ struct kgsl_memdesc constant_load_commands[3];
+ struct kgsl_memdesc cond_execs[4];
+ struct kgsl_memdesc hlsqcontrol_restore_commands[1];
+
+ /* Dispatcher */
+ struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ int cmdqueue_head;
+ int cmdqueue_tail;
+
+ struct plist_node pending;
+ wait_queue_head_t wq;
+ wait_queue_head_t waiting;
+
+ int queued;
+};
+
+
+struct kgsl_context *adreno_drawctxt_create(struct kgsl_device_private *,
+ uint32_t *flags);
+
+int adreno_drawctxt_detach(struct kgsl_context *context);
+
+void adreno_drawctxt_destroy(struct kgsl_context *context);
+
+void adreno_drawctxt_sched(struct kgsl_device *device,
+ struct kgsl_context *context);
+
+int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ unsigned int flags);
+void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int offset);
+
+int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
+ struct kgsl_context *context,
+ uint32_t timestamp, unsigned int timeout);
+
+void adreno_drawctxt_invalidate(struct kgsl_device *device,
+ struct kgsl_context *context);
+
+/* GPU context switch helper functions */
+
+void build_quad_vtxbuff(struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow, unsigned int **incmd);
+
+unsigned int uint2float(unsigned int);
+
+static inline unsigned int virt2gpu(unsigned int *cmd,
+ struct kgsl_memdesc *memdesc)
+{
+ return memdesc->gpuaddr + ((char *) cmd - (char *) memdesc->hostptr);
+}
+
+static inline void create_ib1(struct adreno_context *drawctxt,
+ unsigned int *cmd,
+ unsigned int *start,
+ unsigned int *end)
+{
+ cmd[0] = CP_HDR_INDIRECT_BUFFER_PFD;
+ cmd[1] = virt2gpu(start, &drawctxt->gpustate);
+ cmd[2] = end - start;
+}
+
+
+static inline unsigned int *reg_range(unsigned int *cmd, unsigned int start,
+ unsigned int end)
+{
+ *cmd++ = CP_REG(start); /* h/w regs, start addr */
+ *cmd++ = end - start + 1; /* count */
+ return cmd;
+}
+
+static inline void calc_gmemsize(struct gmem_shadow_t *shadow, int gmem_size)
+{
+ int w = 64, h = 64;
+
+ shadow->format = COLORX_8_8_8_8;
+
+ /* convert from bytes to 32-bit words */
+ gmem_size = (gmem_size + 3) / 4;
+
+ while ((w * h) < gmem_size) {
+ if (w < h)
+ w *= 2;
+ else
+ h *= 2;
+ }
+
+ shadow->pitch = shadow->width = w;
+ shadow->height = h;
+ shadow->gmem_pitch = shadow->pitch;
+ shadow->size = shadow->pitch * shadow->height * 4;
+}
+
+#endif /* __ADRENO_DRAWCTXT_H */
diff --git a/drivers/gpu/msm2/adreno_pm4types.h b/drivers/gpu/msm2/adreno_pm4types.h
new file mode 100644
index 0000000..e6ec91d
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_pm4types.h
@@ -0,0 +1,247 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_PM4TYPES_H
+#define __ADRENO_PM4TYPES_H
+
+
+#define CP_PKT_MASK 0xc0000000
+
+#define CP_TYPE0_PKT ((unsigned int)0 << 30)
+#define CP_TYPE1_PKT ((unsigned int)1 << 30)
+#define CP_TYPE2_PKT ((unsigned int)2 << 30)
+#define CP_TYPE3_PKT ((unsigned int)3 << 30)
+
+
+/* type3 packets */
+/* initialize CP's micro-engine */
+#define CP_ME_INIT 0x48
+
+/* skip N 32-bit words to get to the next packet */
+#define CP_NOP 0x10
+
+/* indirect buffer dispatch. same as IB, but init is pipelined */
+#define CP_INDIRECT_BUFFER_PFD 0x37
+
+/* wait for the IDLE state of the engine */
+#define CP_WAIT_FOR_IDLE 0x26
+
+/* wait until a register or memory location is a specific value */
+#define CP_WAIT_REG_MEM 0x3c
+
+/* wait until a register location is equal to a specific value */
+#define CP_WAIT_REG_EQ 0x52
+
+/* wait until a register location is >= a specific value */
+#define CP_WAT_REG_GTE 0x53
+
+/* wait until a read completes */
+#define CP_WAIT_UNTIL_READ 0x5c
+
+/* wait until all base/size writes from an IB_PFD packet have completed */
+#define CP_WAIT_IB_PFD_COMPLETE 0x5d
+
+/* register read/modify/write */
+#define CP_REG_RMW 0x21
+
+/* Set binning configuration registers */
+#define CP_SET_BIN_DATA 0x2f
+
+/* reads register in chip and writes to memory */
+#define CP_REG_TO_MEM 0x3e
+
+/* write N 32-bit words to memory */
+#define CP_MEM_WRITE 0x3d
+
+/* write CP_PROG_COUNTER value to memory */
+#define CP_MEM_WRITE_CNTR 0x4f
+
+/* conditional execution of a sequence of packets */
+#define CP_COND_EXEC 0x44
+
+/* conditional write to memory or register */
+#define CP_COND_WRITE 0x45
+
+/* generate an event that creates a write to memory when completed */
+#define CP_EVENT_WRITE 0x46
+
+/* generate a VS|PS_done event */
+#define CP_EVENT_WRITE_SHD 0x58
+
+/* generate a cache flush done event */
+#define CP_EVENT_WRITE_CFL 0x59
+
+/* generate a z_pass done event */
+#define CP_EVENT_WRITE_ZPD 0x5b
+
+
+/* initiate fetch of index buffer and draw */
+#define CP_DRAW_INDX 0x22
+
+/* draw using supplied indices in packet */
+#define CP_DRAW_INDX_2 0x36
+
+/* initiate fetch of index buffer and binIDs and draw */
+#define CP_DRAW_INDX_BIN 0x34
+
+/* initiate fetch of bin IDs and draw using supplied indices */
+#define CP_DRAW_INDX_2_BIN 0x35
+
+
+/* begin/end initiator for viz query extent processing */
+#define CP_VIZ_QUERY 0x23
+
+/* fetch state sub-blocks and initiate shader code DMAs */
+#define CP_SET_STATE 0x25
+
+/* load constant into chip and to memory */
+#define CP_SET_CONSTANT 0x2d
+
+/* load sequencer instruction memory (pointer-based) */
+#define CP_IM_LOAD 0x27
+
+/* load sequencer instruction memory (code embedded in packet) */
+#define CP_IM_LOAD_IMMEDIATE 0x2b
+
+/* load constants from a location in memory */
+#define CP_LOAD_CONSTANT_CONTEXT 0x2e
+
+/* (A2x) sets binning configuration registers */
+#define CP_SET_BIN_DATA 0x2f
+
+/* selective invalidation of state pointers */
+#define CP_INVALIDATE_STATE 0x3b
+
+
+/* dynamically changes shader instruction memory partition */
+#define CP_SET_SHADER_BASES 0x4A
+
+/* sets the 64-bit BIN_MASK register in the PFP */
+#define CP_SET_BIN_MASK 0x50
+
+/* sets the 64-bit BIN_SELECT register in the PFP */
+#define CP_SET_BIN_SELECT 0x51
+
+
+/* updates the current context, if needed */
+#define CP_CONTEXT_UPDATE 0x5e
+
+/* generate interrupt from the command stream */
+#define CP_INTERRUPT 0x40
+
+
+/* copy sequencer instruction memory to system memory */
+#define CP_IM_STORE 0x2c
+
+/* test 2 memory locations to dword values specified */
+#define CP_TEST_TWO_MEMS 0x71
+
+/* PFP waits until the FIFO between the PFP and the ME is empty */
+#define CP_WAIT_FOR_ME 0x13
+
+/*
+ * for a20x
+ * program an offset that will added to the BIN_BASE value of
+ * the 3D_DRAW_INDX_BIN packet
+ */
+#define CP_SET_BIN_BASE_OFFSET 0x4B
+
+/*
+ * for a22x
+ * sets draw initiator flags register in PFP, gets bitwise-ORed into
+ * every draw initiator
+ */
+#define CP_SET_DRAW_INIT_FLAGS 0x4B
+
+#define CP_SET_PROTECTED_MODE 0x5f /* sets the register protection mode */
+
+/*
+ * for a3xx
+ */
+
+#define CP_LOAD_STATE 0x30 /* load high level sequencer command */
+
+/* Conditionally load a IB based on a flag */
+#define CP_COND_INDIRECT_BUFFER_PFE 0x3A /* prefetch enabled */
+#define CP_COND_INDIRECT_BUFFER_PFD 0x32 /* prefetch disabled */
+
+/* Load a buffer with pre-fetch enabled */
+#define CP_INDIRECT_BUFFER_PFE 0x3F
+
+#define CP_EXEC_CL 0x31
+
+#define CP_LOADSTATE_DSTOFFSET_SHIFT 0x00000000
+#define CP_LOADSTATE_STATESRC_SHIFT 0x00000010
+#define CP_LOADSTATE_STATEBLOCKID_SHIFT 0x00000013
+#define CP_LOADSTATE_NUMOFUNITS_SHIFT 0x00000016
+#define CP_LOADSTATE_STATETYPE_SHIFT 0x00000000
+#define CP_LOADSTATE_EXTSRCADDR_SHIFT 0x00000002
+
+/* packet header building macros */
+#define cp_type0_packet(regindx, cnt) \
+ (CP_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
+
+#define cp_type0_packet_for_sameregister(regindx, cnt) \
+ ((CP_TYPE0_PKT | (((cnt)-1) << 16) | ((1 << 15) | \
+ ((regindx) & 0x7FFF)))
+
+#define cp_type1_packet(reg0, reg1) \
+ (CP_TYPE1_PKT | ((reg1) << 12) | (reg0))
+
+#define cp_type3_packet(opcode, cnt) \
+ (CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
+
+#define cp_predicated_type3_packet(opcode, cnt) \
+ (CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
+
+#define cp_nop_packet(cnt) \
+ (CP_TYPE3_PKT | (((cnt)-1) << 16) | (CP_NOP << 8))
+
+#define pkt_is_type0(pkt) (((pkt) & 0XC0000000) == CP_TYPE0_PKT)
+
+#define type0_pkt_size(pkt) ((((pkt) >> 16) & 0x3FFF) + 1)
+#define type0_pkt_offset(pkt) ((pkt) & 0x7FFF)
+
+/*
+ * Check both for the type3 opcode and make sure that the reserved bits [1:7]
+ * and 15 are 0
+ */
+
+#define pkt_is_type3(pkt) \
+ ((((pkt) & 0xC0000000) == CP_TYPE3_PKT) && \
+ (((pkt) & 0x80FE) == 0))
+
+#define cp_type3_opcode(pkt) (((pkt) >> 8) & 0xFF)
+#define type3_pkt_size(pkt) ((((pkt) >> 16) & 0x3FFF) + 1)
+
+/* packet headers */
+#define CP_HDR_ME_INIT cp_type3_packet(CP_ME_INIT, 18)
+#define CP_HDR_INDIRECT_BUFFER_PFD cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2)
+#define CP_HDR_INDIRECT_BUFFER_PFE cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2)
+
+/* dword base address of the GFX decode space */
+#define SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))
+
+/* gmem command buffer length */
+#define CP_REG(reg) ((0x4 << 16) | (SUBBLOCK_OFFSET(reg)))
+
+
+/* Return 1 if the command is an indirect buffer of any kind */
+static inline int adreno_cmd_is_ib(unsigned int cmd)
+{
+ return (cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2) ||
+ cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2) ||
+ cmd == cp_type3_packet(CP_COND_INDIRECT_BUFFER_PFE, 2) ||
+ cmd == cp_type3_packet(CP_COND_INDIRECT_BUFFER_PFD, 2));
+}
+
+#endif /* __ADRENO_PM4TYPES_H */
diff --git a/drivers/gpu/msm2/adreno_postmortem.c b/drivers/gpu/msm2/adreno_postmortem.c
new file mode 100644
index 0000000..33aa095
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_postmortem.c
@@ -0,0 +1,632 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_ringbuffer.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_pwrctrl.h"
+#include "adreno_trace.h"
+
+#include "a2xx_reg.h"
+#include "a3xx_reg.h"
+
+#define INVALID_RB_CMD 0xaaaaaaaa
+#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100
+
+struct pm_id_name {
+ enum adreno_regs id;
+ char name[9];
+};
+
+static const struct pm_id_name pm0_types[] = {
+ {ADRENO_REG_PA_SC_AA_CONFIG, "RPASCAAC"},
+ {ADRENO_REG_RBBM_PM_OVERRIDE2, "RRBBPMO2"},
+ {ADRENO_REG_SCRATCH_REG2, "RSCRTRG2"},
+ {ADRENO_REG_SQ_GPR_MANAGEMENT, "RSQGPRMN"},
+ {ADRENO_REG_SQ_INST_STORE_MANAGMENT, "RSQINSTS"},
+ {ADRENO_REG_TC_CNTL_STATUS, "RTCCNTLS"},
+ {ADRENO_REG_TP0_CHICKEN, "RTP0CHCK"},
+ {ADRENO_REG_CP_TIMESTAMP, "CP_TM_ST"},
+};
+
+static const struct pm_id_name pm3_types[] = {
+ {CP_COND_EXEC, "CND_EXEC"},
+ {CP_CONTEXT_UPDATE, "CX__UPDT"},
+ {CP_DRAW_INDX, "DRW_NDX_"},
+ {CP_DRAW_INDX_BIN, "DRW_NDXB"},
+ {CP_EVENT_WRITE, "EVENT_WT"},
+ {CP_IM_LOAD, "IN__LOAD"},
+ {CP_IM_LOAD_IMMEDIATE, "IM_LOADI"},
+ {CP_IM_STORE, "IM_STORE"},
+ {CP_INDIRECT_BUFFER_PFE, "IND_BUF_"},
+ {CP_INDIRECT_BUFFER_PFD, "IND_BUFP"},
+ {CP_INTERRUPT, "PM4_INTR"},
+ {CP_INVALIDATE_STATE, "INV_STAT"},
+ {CP_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
+ {CP_ME_INIT, "ME__INIT"},
+ {CP_NOP, "PM4__NOP"},
+ {CP_REG_RMW, "REG__RMW"},
+ {CP_REG_TO_MEM, "REG2_MEM"},
+ {CP_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
+ {CP_SET_CONSTANT, "ST_CONST"},
+ {CP_SET_PROTECTED_MODE, "ST_PRT_M"},
+ {CP_SET_SHADER_BASES, "ST_SHD_B"},
+ {CP_WAIT_FOR_IDLE, "WAIT4IDL"},
+ {CP_WAIT_FOR_ME, "WAIT4ME"},
+ {CP_WAIT_REG_EQ, "WAITRGEQ"},
+};
+
+static const struct pm_id_name pm3_nop_values[] = {
+ {KGSL_CONTEXT_TO_MEM_IDENTIFIER, "CTX_SWCH"},
+ {KGSL_CMD_IDENTIFIER, "CMD__EXT"},
+ {KGSL_CMD_INTERNAL_IDENTIFIER, "CMD__INT"},
+ {KGSL_START_OF_IB_IDENTIFIER, "IB_START"},
+ {KGSL_END_OF_IB_IDENTIFIER, "IB___END"},
+};
+
+static uint32_t adreno_is_pm4_len(uint32_t word)
+{
+ if (word == INVALID_RB_CMD)
+ return 0;
+
+ return (word >> 16) & 0x3FFF;
+}
+
+static bool adreno_is_pm4_type(struct kgsl_device *device, uint32_t word)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int i;
+
+ if (word == INVALID_RB_CMD)
+ return 1;
+
+ if (adreno_is_pm4_len(word) > 16)
+ return 0;
+
+ if ((word & (3<<30)) == CP_TYPE0_PKT) {
+ for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
+ if ((word & 0x7FFF) == adreno_getreg(adreno_dev,
+ pm0_types[i].id))
+ return 1;
+ }
+ return 0;
+ }
+ if ((word & (3<<30)) == CP_TYPE3_PKT) {
+ for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
+ if ((word & 0xFFFF) == (pm3_types[i].id << 8))
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+static const char *adreno_pm4_name(uint32_t word)
+{
+ int i;
+
+ if (word == INVALID_RB_CMD)
+ return "--------";
+
+ if ((word & (3<<30)) == CP_TYPE0_PKT) {
+ for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
+ if ((word & 0x7FFF) == pm0_types[i].id)
+ return pm0_types[i].name;
+ }
+ return "????????";
+ }
+ if ((word & (3<<30)) == CP_TYPE3_PKT) {
+ for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
+ if ((word & 0xFFFF) == (pm3_types[i].id << 8))
+ return pm3_types[i].name;
+ }
+ return "????????";
+ }
+ return "????????";
+}
+
+static bool adreno_is_pm3_nop_value(uint32_t word)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pm3_nop_values); ++i) {
+ if (word == pm3_nop_values[i].id)
+ return 1;
+ }
+ return 0;
+}
+
+static const char *adreno_pm3_nop_name(uint32_t word)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pm3_nop_values); ++i) {
+ if (word == pm3_nop_values[i].id)
+ return pm3_nop_values[i].name;
+ }
+ return "????????";
+}
+
+static void adreno_dump_regs(struct kgsl_device *device,
+ const int *registers, int size)
+{
+ int range = 0, offset = 0;
+
+ for (range = 0; range < size; range++) {
+ /* start and end are in dword offsets */
+ int start = registers[range * 2];
+ int end = registers[range * 2 + 1];
+
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+ int linelen, i;
+
+ for (offset = start; offset <= end; offset += linelen) {
+ unsigned int regvals[32/4];
+ linelen = min(end+1-offset, 32/4);
+
+ for (i = 0; i < linelen; ++i)
+ kgsl_regread(device, offset+i, regvals+i);
+
+ hex_dump_to_buffer(regvals, linelen*4, 32, 4,
+ linebuf, sizeof(linebuf), 0);
+ KGSL_LOG_DUMP(device,
+ "REG: %5.5X: %s\n", offset, linebuf);
+ }
+ }
+}
+
+static void dump_ib(struct kgsl_device *device, char *buffId,
+ phys_addr_t pt_base, uint32_t base_offset, uint32_t ib_base,
+ uint32_t ib_size, bool dump)
+{
+ struct kgsl_mem_entry *ent = NULL;
+ uint8_t *base_addr = adreno_convertaddr(device, pt_base,
+ ib_base, ib_size*sizeof(uint32_t), &ent);
+
+ if (base_addr && dump)
+ print_hex_dump(KERN_ERR, buffId, DUMP_PREFIX_OFFSET,
+ 32, 4, base_addr, ib_size*4, 0);
+ else
+ KGSL_LOG_DUMP(device, "%s base:%8.8X ib_size:%d "
+ "offset:%5.5X%s\n",
+ buffId, ib_base, ib_size*4, base_offset,
+ base_addr ? "" : " [Invalid]");
+ if (ent) {
+ kgsl_memdesc_unmap(&ent->memdesc);
+ kgsl_mem_entry_put(ent);
+ }
+}
+
+#define IB_LIST_SIZE 64
+struct ib_list {
+ int count;
+ uint32_t bases[IB_LIST_SIZE];
+ uint32_t sizes[IB_LIST_SIZE];
+ uint32_t offsets[IB_LIST_SIZE];
+};
+
+static void dump_ib1(struct kgsl_device *device, phys_addr_t pt_base,
+ uint32_t base_offset,
+ uint32_t ib1_base, uint32_t ib1_size,
+ struct ib_list *ib_list, bool dump)
+{
+ int i, j;
+ uint32_t value;
+ uint32_t *ib1_addr;
+ struct kgsl_mem_entry *ent = NULL;
+
+ dump_ib(device, "IB1:", pt_base, base_offset, ib1_base,
+ ib1_size, dump);
+
+ /* fetch virtual address for given IB base */
+ ib1_addr = (uint32_t *)adreno_convertaddr(device, pt_base,
+ ib1_base, ib1_size*sizeof(uint32_t), &ent);
+ if (!ib1_addr)
+ return;
+
+ for (i = 0; i+3 < ib1_size; ) {
+ value = ib1_addr[i++];
+ if (adreno_cmd_is_ib(value)) {
+ uint32_t ib2_base = ib1_addr[i++];
+ uint32_t ib2_size = ib1_addr[i++];
+
+ /* find previous match */
+ for (j = 0; j < ib_list->count; ++j)
+ if (ib_list->sizes[j] == ib2_size
+ && ib_list->bases[j] == ib2_base)
+ break;
+
+ if (j < ib_list->count || ib_list->count
+ >= IB_LIST_SIZE)
+ continue;
+
+ /* store match */
+ ib_list->sizes[ib_list->count] = ib2_size;
+ ib_list->bases[ib_list->count] = ib2_base;
+ ib_list->offsets[ib_list->count] = i<<2;
+ ++ib_list->count;
+ }
+ }
+ if (ent) {
+ kgsl_memdesc_unmap(&ent->memdesc);
+ kgsl_mem_entry_put(ent);
+ }
+}
+
+static void adreno_dump_rb_buffer(struct kgsl_device *device, const void *buf,
+ size_t len, char *linebuf, size_t linebuflen, int *argp)
+{
+ const u32 *ptr4 = buf;
+ const int ngroups = len;
+ int lx = 0, j;
+ bool nxsp = 1;
+
+ for (j = 0; j < ngroups; j++) {
+ if (*argp < 0) {
+ lx += scnprintf(linebuf + lx, linebuflen - lx, " <");
+ *argp = -*argp;
+ } else if (nxsp)
+ lx += scnprintf(linebuf + lx, linebuflen - lx, " ");
+ else
+ nxsp = 1;
+ if (!*argp && adreno_is_pm4_type(device, ptr4[j])) {
+ lx += scnprintf(linebuf + lx, linebuflen - lx,
+ "%s", adreno_pm4_name(ptr4[j]));
+ *argp = -(adreno_is_pm4_len(ptr4[j])+1);
+ } else {
+ if (adreno_is_pm3_nop_value(ptr4[j]))
+ lx += scnprintf(linebuf + lx, linebuflen - lx,
+ "%s", adreno_pm3_nop_name(ptr4[j]));
+ else
+ lx += scnprintf(linebuf + lx, linebuflen - lx,
+ "%8.8X", ptr4[j]);
+
+ if (*argp > 1)
+ --*argp;
+ else if (*argp == 1) {
+ *argp = 0;
+ nxsp = 0;
+ lx += scnprintf(linebuf + lx, linebuflen - lx,
+ "> ");
+ }
+ }
+ }
+ linebuf[lx] = '\0';
+}
+
+static bool adreno_rb_use_hex(void)
+{
+#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+void adreno_dump_rb(struct kgsl_device *device, const void *buf,
+ size_t len, int start, int size)
+{
+ const uint32_t *ptr = buf;
+ int i, remaining, args = 0;
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+ const int rowsize = 8;
+
+ len >>= 2;
+ remaining = len;
+ for (i = 0; i < len; i += rowsize) {
+ int linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+
+ if (adreno_rb_use_hex())
+ hex_dump_to_buffer(ptr+i, linelen*4, rowsize*4, 4,
+ linebuf, sizeof(linebuf), 0);
+ else
+ adreno_dump_rb_buffer(device, ptr+i, linelen, linebuf,
+ sizeof(linebuf), &args);
+ KGSL_LOG_DUMP(device,
+ "RB: %4.4X:%s\n", (start+i)%size, linebuf);
+ }
+}
+
+static int adreno_dump_fields_line(struct kgsl_device *device,
+ const char *start, char *str, int slen,
+ const struct log_field **lines,
+ int num)
+{
+ const struct log_field *l = *lines;
+ int sptr, count = 0;
+
+ sptr = snprintf(str, slen, "%s", start);
+
+ for ( ; num && sptr < slen; num--, l++) {
+ int ilen = strlen(l->display);
+
+ if (!l->show)
+ continue;
+
+ if (count)
+ ilen += strlen(" | ");
+
+ if (ilen > (slen - sptr))
+ break;
+
+ if (count++)
+ sptr += snprintf(str + sptr, slen - sptr, " | ");
+
+ sptr += snprintf(str + sptr, slen - sptr, "%s", l->display);
+ }
+
+ KGSL_LOG_DUMP(device, "%s\n", str);
+
+ *lines = l;
+ return num;
+}
+
+void adreno_dump_fields(struct kgsl_device *device,
+ const char *start, const struct log_field *lines,
+ int num)
+{
+ char lb[90];
+ const char *sstr = start;
+
+ lb[sizeof(lb) - 1] = '\0';
+
+ while (num) {
+ int ret = adreno_dump_fields_line(device, sstr, lb,
+ sizeof(lb) - 1, &lines, num);
+
+ if (ret == num)
+ break;
+
+ num = ret;
+ sstr = " ";
+ }
+}
+EXPORT_SYMBOL(adreno_dump_fields);
+
+int adreno_dump(struct kgsl_device *device, int manual)
+{
+ unsigned int cp_ib1_base;
+ unsigned int cp_ib2_base;
+ phys_addr_t pt_base, cur_pt_base;
+ unsigned int cp_rb_base, cp_rb_ctrl, rb_count;
+ unsigned int cp_rb_wptr, cp_rb_rptr;
+ unsigned int i;
+ int result = 0;
+ uint32_t *rb_copy;
+ const uint32_t *rb_vaddr;
+ int num_item = 0;
+ int read_idx, write_idx;
+ unsigned int ts_processed = 0xdeaddead;
+ struct kgsl_context *context;
+ unsigned int context_id;
+
+ static struct ib_list ib_list;
+
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ int num_iommu_units = 0;
+
+ mb();
+
+ if (adreno_dev->gpudev->postmortem_dump)
+ adreno_dev->gpudev->postmortem_dump(adreno_dev);
+
+ pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
+ cur_pt_base = pt_base;
+
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_BASE),
+ &cp_rb_base);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_CNTL),
+ &cp_rb_ctrl);
+ rb_count = 2 << (cp_rb_ctrl & (BIT(6) - 1));
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_RPTR),
+ &cp_rb_rptr);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_WPTR),
+ &cp_rb_wptr);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BASE),
+ &cp_ib1_base);
+ kgsl_regread(device,
+ adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BASE),
+ &cp_ib2_base);
+
+ kgsl_sharedmem_readl(&device->memstore,
+ (unsigned int *) &context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+
+ context = kgsl_context_get(device, context_id);
+
+ if (context) {
+ ts_processed = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
+ KGSL_LOG_DUMP(device, "FT CTXT: %d TIMESTM RTRD: %08X\n",
+ context->id, ts_processed);
+ } else
+ KGSL_LOG_DUMP(device, "BAD CTXT: %d\n", context_id);
+
+ kgsl_context_put(context);
+
+ num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
+ cp_rb_rptr);
+ if (num_item <= 0)
+ KGSL_LOG_POSTMORTEM_WRITE(device, "Ringbuffer is Empty.\n");
+
+ rb_copy = vmalloc(rb_count<<2);
+ if (!rb_copy) {
+ KGSL_LOG_POSTMORTEM_WRITE(device,
+ "vmalloc(%d) failed\n", rb_count << 2);
+ result = -ENOMEM;
+ goto end;
+ }
+
+ KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x rb_size:%d num_item:%d\n",
+ cp_rb_base, rb_count<<2, num_item);
+
+ if (adreno_dev->ringbuffer.buffer_desc.gpuaddr != cp_rb_base)
+ KGSL_LOG_POSTMORTEM_WRITE(device,
+ "rb address mismatch, should be 0x%08x\n",
+ adreno_dev->ringbuffer.buffer_desc.gpuaddr);
+
+ rb_vaddr = adreno_dev->ringbuffer.buffer_desc.hostptr;
+ if (!rb_vaddr) {
+ KGSL_LOG_POSTMORTEM_WRITE(device,
+ "rb has no kernel mapping!\n");
+ goto error_vfree;
+ }
+
+ read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
+ if (read_idx < 0)
+ read_idx += rb_count;
+ write_idx = (int)cp_rb_wptr + 16;
+ if (write_idx > rb_count)
+ write_idx -= rb_count;
+ num_item += NUM_DWORDS_OF_RINGBUFFER_HISTORY+16;
+ if (num_item > rb_count)
+ num_item = rb_count;
+ if (write_idx >= read_idx)
+ memcpy(rb_copy, rb_vaddr+read_idx, num_item<<2);
+ else {
+ int part1_c = rb_count-read_idx;
+ memcpy(rb_copy, rb_vaddr+read_idx, part1_c<<2);
+ memcpy(rb_copy+part1_c, rb_vaddr, (num_item-part1_c)<<2);
+ }
+
+ /* extract the latest ib commands from the buffer */
+ ib_list.count = 0;
+ i = 0;
+ /* get the register mapped array in case we are using IOMMU */
+ num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
+ for (read_idx = 0; read_idx < num_item; ) {
+ uint32_t this_cmd = rb_copy[read_idx++];
+ if (adreno_cmd_is_ib(this_cmd)) {
+ uint32_t ib_addr = rb_copy[read_idx++];
+ uint32_t ib_size = rb_copy[read_idx++];
+ dump_ib1(device, cur_pt_base, (read_idx-3)<<2, ib_addr,
+ ib_size, &ib_list, 0);
+ for (; i < ib_list.count; ++i)
+ dump_ib(device, "IB2:", cur_pt_base,
+ ib_list.offsets[i],
+ ib_list.bases[i],
+ ib_list.sizes[i], 0);
+ } else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1) ||
+ (num_iommu_units && this_cmd ==
+ kgsl_mmu_get_reg_gpuaddr(&device->mmu, 0,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TTBR0)) ||
+ (num_iommu_units && this_cmd == cp_type0_packet(
+ kgsl_mmu_get_reg_ahbaddr(
+ &device->mmu, 0,
+ KGSL_IOMMU_CONTEXT_USER,
+ KGSL_IOMMU_CTX_TTBR0), 1))) {
+ KGSL_LOG_DUMP(device,
+ "Current pagetable: %x\t pagetable base: %pa\n",
+ kgsl_mmu_get_ptname_from_ptbase(&device->mmu,
+ cur_pt_base),
+ &cur_pt_base);
+
+ /* Set cur_pt_base to the new pagetable base */
+ cur_pt_base = rb_copy[read_idx++];
+
+ KGSL_LOG_DUMP(device,
+ "New pagetable: %x\t pagetable base: %pa\n",
+ kgsl_mmu_get_ptname_from_ptbase(&device->mmu,
+ cur_pt_base),
+ &cur_pt_base);
+ }
+ }
+
+ /* Restore cur_pt_base back to the pt_base of
+ the process in whose context the GPU hung */
+ cur_pt_base = pt_base;
+
+ read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
+ if (read_idx < 0)
+ read_idx += rb_count;
+ KGSL_LOG_DUMP(device,
+ "RB: addr=%8.8x window:%4.4x-%4.4x, start:%4.4x\n",
+ cp_rb_base, cp_rb_rptr, cp_rb_wptr, read_idx);
+ adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count);
+
+ if (device->pm_ib_enabled) {
+ for (read_idx = NUM_DWORDS_OF_RINGBUFFER_HISTORY;
+ read_idx >= 0; --read_idx) {
+ uint32_t this_cmd = rb_copy[read_idx];
+ if (adreno_cmd_is_ib(this_cmd)) {
+ uint32_t ib_addr = rb_copy[read_idx+1];
+ uint32_t ib_size = rb_copy[read_idx+2];
+ if (ib_size && cp_ib1_base == ib_addr) {
+ KGSL_LOG_DUMP(device,
+ "IB1: base:%8.8X "
+ "count:%d\n", ib_addr, ib_size);
+ dump_ib(device, "IB1: ", cur_pt_base,
+ read_idx<<2, ib_addr, ib_size,
+ 1);
+ }
+ }
+ }
+ for (i = 0; i < ib_list.count; ++i) {
+ uint32_t ib_size = ib_list.sizes[i];
+ uint32_t ib_offset = ib_list.offsets[i];
+ if (ib_size && cp_ib2_base == ib_list.bases[i]) {
+ KGSL_LOG_DUMP(device,
+ "IB2: base:%8.8X count:%d\n",
+ cp_ib2_base, ib_size);
+ dump_ib(device, "IB2: ", cur_pt_base, ib_offset,
+ ib_list.bases[i], ib_size, 1);
+ }
+ }
+ }
+
+ /* Dump the registers if the user asked for it */
+ if (device->pm_regs_enabled) {
+ if (adreno_is_a20x(adreno_dev))
+ adreno_dump_regs(device, a200_registers,
+ a200_registers_count);
+ else if (adreno_is_a22x(adreno_dev))
+ adreno_dump_regs(device, a220_registers,
+ a220_registers_count);
+ else if (adreno_is_a225(adreno_dev))
+ adreno_dump_regs(device, a225_registers,
+ a225_registers_count);
+ else if (adreno_is_a3xx(adreno_dev)) {
+ adreno_dump_regs(device, a3xx_registers,
+ a3xx_registers_count);
+
+ if (adreno_is_a330(adreno_dev) ||
+ adreno_is_a305b(adreno_dev))
+ adreno_dump_regs(device, a330_registers,
+ a330_registers_count);
+ }
+ }
+
+error_vfree:
+ vfree(rb_copy);
+end:
+ /* Restart the dispatcher after a manually triggered dump */
+ if (manual)
+ adreno_dispatcher_start(adreno_dev);
+
+ return result;
+}
diff --git a/drivers/gpu/msm2/adreno_ringbuffer.c b/drivers/gpu/msm2/adreno_ringbuffer.c
new file mode 100644
index 0000000..2fe2c4c
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_ringbuffer.c
@@ -0,0 +1,1166 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/log2.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_ringbuffer.h"
+
+#include "a2xx_reg.h"
+#include "a3xx_reg.h"
+
+#define GSL_RB_NOP_SIZEDWORDS 2
+
+/*
+ * CP DEBUG settings for all cores:
+ * DYNAMIC_CLK_DISABLE [27] - turn off the dynamic clock control
+ * PROG_END_PTR_ENABLE [25] - Allow 128 bit writes to the VBIF
+ */
+
+#define CP_DEBUG_DEFAULT ((1 << 27) | (1 << 25))
+
+void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+ BUG_ON(rb->wptr == 0);
+
+ /* Let the pwrscale policy know that new commands have
+ been submitted. */
+ kgsl_pwrscale_busy(rb->device);
+
+ /*synchronize memory before informing the hardware of the
+ *new commands.
+ */
+ mb();
+
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
+}
+
+static int
+adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb,
+ struct adreno_context *context,
+ unsigned int numcmds, int wptr_ahead)
+{
+ int nopcount;
+ unsigned int freecmds;
+ unsigned int *cmds;
+ uint cmds_gpu;
+ unsigned long wait_time;
+ unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+ unsigned long wait_time_part;
+ unsigned int rptr;
+
+ /* if wptr ahead, fill the remaining with NOPs */
+ if (wptr_ahead) {
+ /* -1 for header */
+ nopcount = rb->sizedwords - rb->wptr - 1;
+
+ cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+ cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;
+
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ cp_nop_packet(nopcount));
+
+ /* Make sure that rptr is not 0 before submitting
+ * commands at the end of ringbuffer. We do not
+ * want the rptr and wptr to become equal when
+ * the ringbuffer is not empty */
+ do {
+ rptr = adreno_get_rptr(rb);
+ } while (!rptr);
+
+ rb->wptr = 0;
+ }
+
+ wait_time = jiffies + wait_timeout;
+ wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
+ /* wait for space in ringbuffer */
+ while (1) {
+ rptr = adreno_get_rptr(rb);
+
+ freecmds = rptr - rb->wptr;
+
+ if (freecmds == 0 || freecmds > numcmds)
+ break;
+
+ if (time_after(jiffies, wait_time)) {
+ KGSL_DRV_ERR(rb->device,
+ "Timed out while waiting for freespace in ringbuffer "
+ "rptr: 0x%x, wptr: 0x%x\n", rptr, rb->wptr);
+ return -ETIMEDOUT;
+ }
+
+ }
+ return 0;
+}
+
+unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+ struct adreno_context *context,
+ unsigned int numcmds)
+{
+ unsigned int *ptr = NULL;
+ int ret = 0;
+ unsigned int rptr;
+ BUG_ON(numcmds >= rb->sizedwords);
+
+ rptr = adreno_get_rptr(rb);
+ /* check for available space */
+ if (rb->wptr >= rptr) {
+ /* wptr ahead or equal to rptr */
+ /* reserve dwords for nop packet */
+ if ((rb->wptr + numcmds) > (rb->sizedwords -
+ GSL_RB_NOP_SIZEDWORDS))
+ ret = adreno_ringbuffer_waitspace(rb, context,
+ numcmds, 1);
+ } else {
+ /* wptr behind rptr */
+ if ((rb->wptr + numcmds) >= rptr)
+ ret = adreno_ringbuffer_waitspace(rb, context,
+ numcmds, 0);
+ /* check for remaining space */
+ /* reserve dwords for nop packet */
+ if (!ret && (rb->wptr + numcmds) > (rb->sizedwords -
+ GSL_RB_NOP_SIZEDWORDS))
+ ret = adreno_ringbuffer_waitspace(rb, context,
+ numcmds, 1);
+ }
+
+ if (!ret) {
+ ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+ rb->wptr += numcmds;
+ } else
+ ptr = ERR_PTR(ret);
+
+ return ptr;
+}
+
+static int _load_firmware(struct kgsl_device *device, const char *fwfile,
+ void **data, int *len)
+{
+ const struct firmware *fw = NULL;
+ int ret;
+
+ ret = request_firmware(&fw, fwfile, device->dev);
+
+ if (ret) {
+ KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
+ fwfile, ret);
+ return ret;
+ }
+
+ *data = kmalloc(fw->size, GFP_KERNEL);
+
+ if (*data) {
+ memcpy(*data, fw->data, fw->size);
+ *len = fw->size;
+ } else
+ KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);
+
+ release_firmware(fw);
+ return (*data != NULL) ? 0 : -ENOMEM;
+}
+
+int adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int ret = 0;
+
+ if (adreno_dev->pm4_fw == NULL) {
+ int len;
+ void *ptr;
+
+ ret = _load_firmware(device, adreno_dev->pm4_fwfile,
+ &ptr, &len);
+
+ if (ret)
+ goto err;
+
+ /* PM4 size is 3 dword aligned plus 1 dword of version */
+ if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
+ KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
+ ret = -EINVAL;
+ kfree(ptr);
+ goto err;
+ }
+
+ adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
+ adreno_dev->pm4_fw = ptr;
+ adreno_dev->pm4_fw_version = adreno_dev->pm4_fw[1];
+ }
+
+err:
+ return ret;
+}
+
+/**
+ * adreno_ringbuffer_load_pm4_ucode() - Load pm4 ucode
+ * @device: Pointer to a KGSL device
+ * @start: Starting index in pm4 ucode to load
+ * @addr: Address to load the pm4 ucode
+ *
+ * Load the pm4 ucode from @start at @addr.
+ */
+int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device,
+ unsigned int start, unsigned int addr)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int i;
+
+ if (adreno_dev->pm4_fw == NULL) {
+ int ret = adreno_ringbuffer_read_pm4_ucode(device);
+ if (ret)
+ return ret;
+ }
+
+ KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
+ adreno_dev->pm4_fw_version);
+
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_DEBUG, CP_DEBUG_DEFAULT);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_RAM_WADDR, addr);
+ for (i = 1; i < adreno_dev->pm4_fw_size; i++)
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_RAM_DATA,
+ adreno_dev->pm4_fw[i]);
+
+ return 0;
+}
+
+int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int ret = 0;
+
+ if (adreno_dev->pfp_fw == NULL) {
+ int len;
+ void *ptr;
+
+ ret = _load_firmware(device, adreno_dev->pfp_fwfile,
+ &ptr, &len);
+ if (ret)
+ goto err;
+
+ /* PFP size shold be dword aligned */
+ if (len % sizeof(uint32_t) != 0) {
+ KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
+ ret = -EINVAL;
+ kfree(ptr);
+ goto err;
+ }
+
+ adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
+ adreno_dev->pfp_fw = ptr;
+ adreno_dev->pfp_fw_version = adreno_dev->pfp_fw[5];
+ }
+
+err:
+ return ret;
+}
+
+/**
+ * adreno_ringbuffer_load_pfp_ucode() - Load pfp ucode
+ * @device: Pointer to a KGSL device
+ * @start: Starting index in pfp ucode to load
+ * @addr: Address to load the pfp ucode
+ *
+ * Load the pfp ucode from @start at @addr.
+ */
+int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device,
+ unsigned int start, unsigned int addr)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int i;
+
+ if (adreno_dev->pfp_fw == NULL) {
+ int ret = adreno_ringbuffer_read_pfp_ucode(device);
+ if (ret)
+ return ret;
+ }
+
+ KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
+ adreno_dev->pfp_fw_version);
+
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_ADDR, addr);
+ for (i = 1; i < adreno_dev->pfp_fw_size; i++)
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_DATA,
+ adreno_dev->pfp_fw[i]);
+
+ return 0;
+}
+
+/**
+ * _ringbuffer_start_common() - Ringbuffer start
+ * @rb: Pointer to adreno ringbuffer
+ *
+ * Setup ringbuffer for GPU.
+ */
+int _ringbuffer_start_common(struct adreno_ringbuffer *rb)
+{
+ int status;
+ union reg_cp_rb_cntl cp_rb_cntl;
+ unsigned int rb_cntl;
+ struct kgsl_device *device = rb->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (rb->flags & KGSL_FLAGS_STARTED)
+ return 0;
+
+ kgsl_sharedmem_set(rb->device, &rb->memptrs_desc, 0, 0,
+ sizeof(struct kgsl_rbmemptrs));
+
+ kgsl_sharedmem_set(rb->device, &rb->buffer_desc, 0, 0xAA,
+ (rb->sizedwords << 2));
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ kgsl_regwrite(device, REG_CP_RB_WPTR_BASE,
+ (rb->memptrs_desc.gpuaddr
+ + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
+
+ /* setup WPTR delay */
+ kgsl_regwrite(device, REG_CP_RB_WPTR_DELAY,
+ 0 /*0x70000010 */);
+ }
+
+ /*setup REG_CP_RB_CNTL */
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_CNTL, &rb_cntl);
+ cp_rb_cntl.val = rb_cntl;
+
+ /*
+ * The size of the ringbuffer in the hardware is the log2
+ * representation of the size in quadwords (sizedwords / 2)
+ */
+ cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
+
+ /*
+ * Specify the quadwords to read before updating mem RPTR.
+ * Like above, pass the log2 representation of the blocksize
+ * in quadwords.
+ */
+ cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ /* WPTR polling */
+ cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
+ }
+
+ /* mem RPTR writebacks */
+ cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;
+
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL, cp_rb_cntl.val);
+
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
+ rb->buffer_desc.gpuaddr);
+
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR,
+ rb->memptrs_desc.gpuaddr +
+ GSL_RB_MEMPTRS_RPTR_OFFSET);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ /* explicitly clear all cp interrupts */
+ kgsl_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
+ }
+
+ /* setup scratch/timestamp */
+ adreno_writereg(adreno_dev, ADRENO_REG_SCRATCH_ADDR,
+ device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ soptimestamp));
+
+ adreno_writereg(adreno_dev, ADRENO_REG_SCRATCH_UMSK,
+ GSL_RB_MEMPTRS_SCRATCH_MASK);
+
+ /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
+ if (adreno_is_a305(adreno_dev) || adreno_is_a305c(adreno_dev) ||
+ adreno_is_a320(adreno_dev))
+ kgsl_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000E0602);
+ else if (adreno_is_a330(adreno_dev) || adreno_is_a305b(adreno_dev))
+ kgsl_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x003E2008);
+
+ rb->wptr = 0;
+
+ /* clear ME_HALT to start micro engine */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0);
+
+ /* ME init is GPU specific, so jump into the sub-function */
+ status = adreno_dev->gpudev->rb_init(adreno_dev, rb);
+ if (status)
+ return status;
+
+ /* idle device to validate ME INIT */
+ status = adreno_idle(device);
+
+ if (status == 0)
+ rb->flags |= KGSL_FLAGS_STARTED;
+
+ return status;
+}
+
+/**
+ * adreno_ringbuffer_warm_start() - Ringbuffer warm start
+ * @rb: Pointer to adreno ringbuffer
+ *
+ * Start the ringbuffer but load only jump tables part of the
+ * microcode.
+ */
+int adreno_ringbuffer_warm_start(struct adreno_ringbuffer *rb)
+{
+ int status;
+ struct kgsl_device *device = rb->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* load the CP ucode */
+ status = adreno_ringbuffer_load_pm4_ucode(device,
+ adreno_dev->pm4_jt_idx, adreno_dev->pm4_jt_addr);
+ if (status != 0)
+ return status;
+
+ /* load the prefetch parser ucode */
+ status = adreno_ringbuffer_load_pfp_ucode(device,
+ adreno_dev->pfp_jt_idx, adreno_dev->pfp_jt_addr);
+ if (status != 0)
+ return status;
+
+ return _ringbuffer_start_common(rb);
+}
+
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb)
+{
+ int status;
+
+ if (rb->flags & KGSL_FLAGS_STARTED)
+ return 0;
+
+ /* load the CP ucode */
+ status = adreno_ringbuffer_load_pm4_ucode(rb->device, 1, 0);
+ if (status != 0)
+ return status;
+
+ /* load the prefetch parser ucode */
+ status = adreno_ringbuffer_load_pfp_ucode(rb->device, 1, 0);
+ if (status != 0)
+ return status;
+
+ return _ringbuffer_start_common(rb);
+}
+
+void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
+{
+ struct kgsl_device *device = rb->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (rb->flags & KGSL_FLAGS_STARTED) {
+ if (adreno_is_a200(adreno_dev))
+ kgsl_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
+
+ rb->flags &= ~KGSL_FLAGS_STARTED;
+ }
+}
+
+int adreno_ringbuffer_init(struct kgsl_device *device)
+{
+ int status;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ rb->device = device;
+ /*
+ * It is silly to convert this to words and then back to bytes
+ * immediately below, but most of the rest of the code deals
+ * in words, so we might as well only do the math once
+ */
+ rb->sizedwords = KGSL_RB_SIZE >> 2;
+
+ rb->buffer_desc.flags = KGSL_MEMFLAGS_GPUREADONLY;
+ /* allocate memory for ringbuffer */
+ status = kgsl_allocate_contiguous(&rb->buffer_desc,
+ (rb->sizedwords << 2));
+
+ if (status != 0) {
+ adreno_ringbuffer_close(rb);
+ return status;
+ }
+
+ /* allocate memory for polling and timestamps */
+ /* This really can be at 4 byte alignment boundry but for using MMU
+ * we need to make it at page boundary */
+ status = kgsl_allocate_contiguous(&rb->memptrs_desc,
+ sizeof(struct kgsl_rbmemptrs));
+
+ if (status != 0) {
+ adreno_ringbuffer_close(rb);
+ return status;
+ }
+
+ /* overlay structure on memptrs memory */
+ rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;
+
+ rb->global_ts = 0;
+
+ return 0;
+}
+
+void adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+
+ kgsl_sharedmem_free(&rb->buffer_desc);
+ kgsl_sharedmem_free(&rb->memptrs_desc);
+
+ kfree(adreno_dev->pfp_fw);
+ kfree(adreno_dev->pm4_fw);
+
+ adreno_dev->pfp_fw = NULL;
+ adreno_dev->pm4_fw = NULL;
+
+ memset(rb, 0, sizeof(struct adreno_ringbuffer));
+}
+
+static int
+adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
+ struct adreno_context *drawctxt,
+ unsigned int flags, unsigned int *cmds,
+ int sizedwords, uint32_t timestamp)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+ unsigned int *ringcmds;
+ unsigned int total_sizedwords = sizedwords;
+ unsigned int i;
+ unsigned int rcmd_gpu;
+ unsigned int context_id;
+ unsigned int gpuaddr = rb->device->memstore.gpuaddr;
+
+ if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base))
+ return -EINVAL;
+
+ rb->global_ts++;
+
+ /* If this is a internal IB, use the global timestamp for it */
+ if (!drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+ timestamp = rb->global_ts;
+ context_id = KGSL_MEMSTORE_GLOBAL;
+ } else {
+ context_id = drawctxt->base.id;
+ }
+
+ /*
+ * Note that we cannot safely take drawctxt->mutex here without
+ * potential mutex inversion with device->mutex which is held
+ * here. As a result, any other code that accesses this variable
+ * must also use device->mutex.
+ */
+ if (drawctxt)
+ drawctxt->internal_timestamp = rb->global_ts;
+
+ /* reserve space to temporarily turn off protected mode
+ * error checking if needed
+ */
+ total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
+ /* 2 dwords to store the start of command sequence */
+ total_sizedwords += 2;
+ /* internal ib command identifier for the ringbuffer */
+ total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
+
+ /* Add two dwords for the CP_INTERRUPT */
+ total_sizedwords += drawctxt ? 2 : 0;
+
+ /* context rollover */
+ if (adreno_is_a3xx(adreno_dev))
+ total_sizedwords += 3;
+
+ /* For HLSQ updates below */
+ if (adreno_is_a4xx(adreno_dev) || adreno_is_a3xx(adreno_dev))
+ total_sizedwords += 4;
+
+ if (adreno_is_a2xx(adreno_dev))
+ total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
+
+ total_sizedwords += 3; /* sop timestamp */
+ total_sizedwords += 4; /* eop timestamp */
+
+ if (drawctxt) {
+ total_sizedwords += 3; /* global timestamp without cache
+ * flush for non-zero context */
+ }
+
+ if (adreno_is_a20x(adreno_dev))
+ total_sizedwords += 2; /* CACHE_FLUSH */
+
+ if (flags & KGSL_CMD_FLAGS_WFI)
+ total_sizedwords += 2; /* WFI */
+
+ /* Add space for the power on shader fixup if we need it */
+ if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP)
+ total_sizedwords += 5;
+
+ ringcmds = adreno_ringbuffer_allocspace(rb, drawctxt, total_sizedwords);
+
+ if (IS_ERR(ringcmds))
+ return PTR_ERR(ringcmds);
+ if (ringcmds == NULL)
+ return -ENOSPC;
+
+ rcmd_gpu = rb->buffer_desc.gpuaddr
+ + sizeof(uint)*(rb->wptr-total_sizedwords);
+
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
+
+ if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ KGSL_CMD_INTERNAL_IDENTIFIER);
+ }
+
+ if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ KGSL_PWRON_FIXUP_IDENTIFIER);
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ CP_HDR_INDIRECT_BUFFER_PFD);
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ adreno_dev->pwron_fixup.gpuaddr);
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ adreno_dev->pwron_fixup_dwords);
+ }
+
+ /* start-of-pipeline timestamp */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
+
+ if (flags & KGSL_CMD_FLAGS_PMODE) {
+ /* disable protected mode error checking */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0);
+ }
+
+ for (i = 0; i < sizedwords; i++) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, *cmds);
+ cmds++;
+ }
+
+ if (flags & KGSL_CMD_FLAGS_PMODE) {
+ /* re-enable protected mode error checking */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 1);
+ }
+
+ /* HW Workaround for MMU Page fault
+ * due to memory getting free early before
+ * GPU completes it.
+ */
+ if (adreno_is_a2xx(adreno_dev)) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00);
+ }
+
+ if (adreno_is_a3xx(adreno_dev) || adreno_is_a4xx(adreno_dev)) {
+ /*
+ * Flush HLSQ lazy updates to make sure there are no
+ * resources pending for indirect loads after the timestamp
+ */
+
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_EVENT_WRITE, 1));
+ GSL_RB_WRITE(rb->device, ringcmds,
+ rcmd_gpu, 0x07); /* HLSQ_FLUSH */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00);
+ }
+
+ /*
+ * end-of-pipeline timestamp. If per context timestamps is not
+ * enabled, then context_id will be KGSL_MEMSTORE_GLOBAL so all
+ * eop timestamps will work out.
+ */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_EVENT_WRITE, 3));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, timestamp);
+
+ if (drawctxt) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp)));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, rb->global_ts);
+ }
+
+ if (adreno_is_a20x(adreno_dev)) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_EVENT_WRITE, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, CACHE_FLUSH);
+ }
+
+ if (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_INTERRUPT, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ CP_INT_CNTL__RB_INT_MASK);
+ }
+
+ if (adreno_is_a3xx(adreno_dev)) {
+ /* Dummy set-constant to trigger context rollover */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_CONSTANT, 2));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ (0x4<<16)|(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0);
+ }
+
+ if (flags & KGSL_CMD_FLAGS_WFI) {
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0x00000000);
+ }
+
+ adreno_ringbuffer_submit(rb);
+
+ return 0;
+}
+
+unsigned int
+adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+ struct adreno_context *drawctxt,
+ unsigned int flags,
+ unsigned int *cmds,
+ int sizedwords)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE;
+
+ return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds,
+ sizedwords, 0);
+}
+
+static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
+ int sizedwords);
+
+static bool
+_handle_type3(struct kgsl_device_private *dev_priv, uint *hostaddr)
+{
+ unsigned int opcode = cp_type3_opcode(*hostaddr);
+ switch (opcode) {
+ case CP_INDIRECT_BUFFER_PFD:
+ case CP_INDIRECT_BUFFER_PFE:
+ case CP_COND_INDIRECT_BUFFER_PFE:
+ case CP_COND_INDIRECT_BUFFER_PFD:
+ return _parse_ibs(dev_priv, hostaddr[1], hostaddr[2]);
+ case CP_NOP:
+ case CP_WAIT_FOR_IDLE:
+ case CP_WAIT_REG_MEM:
+ case CP_WAIT_REG_EQ:
+ case CP_WAT_REG_GTE:
+ case CP_WAIT_UNTIL_READ:
+ case CP_WAIT_IB_PFD_COMPLETE:
+ case CP_REG_RMW:
+ case CP_REG_TO_MEM:
+ case CP_MEM_WRITE:
+ case CP_MEM_WRITE_CNTR:
+ case CP_COND_EXEC:
+ case CP_COND_WRITE:
+ case CP_EVENT_WRITE:
+ case CP_EVENT_WRITE_SHD:
+ case CP_EVENT_WRITE_CFL:
+ case CP_EVENT_WRITE_ZPD:
+ case CP_DRAW_INDX:
+ case CP_DRAW_INDX_2:
+ case CP_DRAW_INDX_BIN:
+ case CP_DRAW_INDX_2_BIN:
+ case CP_VIZ_QUERY:
+ case CP_SET_STATE:
+ case CP_SET_CONSTANT:
+ case CP_IM_LOAD:
+ case CP_IM_LOAD_IMMEDIATE:
+ case CP_LOAD_CONSTANT_CONTEXT:
+ case CP_INVALIDATE_STATE:
+ case CP_SET_SHADER_BASES:
+ case CP_SET_BIN_MASK:
+ case CP_SET_BIN_SELECT:
+ case CP_SET_BIN_BASE_OFFSET:
+ case CP_SET_BIN_DATA:
+ case CP_CONTEXT_UPDATE:
+ case CP_INTERRUPT:
+ case CP_IM_STORE:
+ case CP_LOAD_STATE:
+ break;
+ /* these shouldn't come from userspace */
+ case CP_ME_INIT:
+ case CP_SET_PROTECTED_MODE:
+ default:
+ KGSL_CMD_ERR(dev_priv->device, "bad CP opcode %0x\n", opcode);
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static bool
+_handle_type0(struct kgsl_device_private *dev_priv, uint *hostaddr)
+{
+ unsigned int reg = type0_pkt_offset(*hostaddr);
+ unsigned int cnt = type0_pkt_size(*hostaddr);
+ if (reg < 0x0192 || (reg + cnt) >= 0x8000) {
+ KGSL_CMD_ERR(dev_priv->device, "bad type0 reg: 0x%0x cnt: %d\n",
+ reg, cnt);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Traverse IBs and dump them to test vector. Detect swap by inspecting
+ * register writes, keeping note of the current state, and dump
+ * framebuffer config to test vector
+ */
+static bool _parse_ibs(struct kgsl_device_private *dev_priv,
+ uint gpuaddr, int sizedwords)
+{
+ static uint level; /* recursion level */
+ bool ret = false;
+ uint *hostaddr, *hoststart;
+ int dwords_left = sizedwords; /* dwords left in the current command
+ buffer */
+ struct kgsl_mem_entry *entry;
+
+ entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
+ gpuaddr, sizedwords * sizeof(uint));
+ if (entry == NULL) {
+ KGSL_CMD_ERR(dev_priv->device,
+ "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
+ return false;
+ }
+
+ hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr);
+ if (hostaddr == NULL) {
+ KGSL_CMD_ERR(dev_priv->device,
+ "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
+ return false;
+ }
+
+ hoststart = hostaddr;
+
+ level++;
+
+ KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
+ gpuaddr, sizedwords, hostaddr);
+
+ mb();
+ while (dwords_left > 0) {
+ bool cur_ret = true;
+ int count = 0; /* dword count including packet header */
+
+ switch (*hostaddr >> 30) {
+ case 0x0: /* type-0 */
+ count = (*hostaddr >> 16)+2;
+ cur_ret = _handle_type0(dev_priv, hostaddr);
+ break;
+ case 0x1: /* type-1 */
+ count = 2;
+ break;
+ case 0x3: /* type-3 */
+ count = ((*hostaddr >> 16) & 0x3fff) + 2;
+ cur_ret = _handle_type3(dev_priv, hostaddr);
+ break;
+ default:
+ KGSL_CMD_ERR(dev_priv->device, "unexpected type: "
+ "type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
+ *hostaddr >> 30, *hostaddr, hostaddr,
+ gpuaddr+4*(sizedwords-dwords_left));
+ cur_ret = false;
+ count = dwords_left;
+ break;
+ }
+
+ if (!cur_ret) {
+ KGSL_CMD_ERR(dev_priv->device,
+ "bad sub-type: #:%d/%d, v:0x%08x"
+ " @ 0x%p[gb:0x%08x], level:%d\n",
+ sizedwords-dwords_left, sizedwords, *hostaddr,
+ hostaddr, gpuaddr+4*(sizedwords-dwords_left),
+ level);
+
+ if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
+ >= 2)
+ print_hex_dump(KERN_ERR,
+ level == 1 ? "IB1:" : "IB2:",
+ DUMP_PREFIX_OFFSET, 32, 4, hoststart,
+ sizedwords*4, 0);
+ goto done;
+ }
+
+ /* jump to next packet */
+ dwords_left -= count;
+ hostaddr += count;
+ if (dwords_left < 0) {
+ KGSL_CMD_ERR(dev_priv->device,
+ "bad count: c:%d, #:%d/%d, "
+ "v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
+ count, sizedwords-(dwords_left+count),
+ sizedwords, *(hostaddr-count), hostaddr-count,
+ gpuaddr+4*(sizedwords-(dwords_left+count)),
+ level);
+ if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
+ >= 2)
+ print_hex_dump(KERN_ERR,
+ level == 1 ? "IB1:" : "IB2:",
+ DUMP_PREFIX_OFFSET, 32, 4, hoststart,
+ sizedwords*4, 0);
+ goto done;
+ }
+ }
+
+ ret = true;
+done:
+ if (!ret)
+ KGSL_DRV_ERR(dev_priv->device,
+ "parsing failed: gpuaddr:0x%08x, "
+ "host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);
+
+ level--;
+
+ return ret;
+}
+
+/**
+ * _ringbuffer_verify_ib() - parse an IB and verify that it is correct
+ * @dev_priv: Pointer to the process struct
+ * @ibdesc: Pointer to the IB descriptor
+ *
+ * This function only gets called if debugging is enabled - it walks the IB and
+ * does additional level parsing and verification above and beyond what KGSL
+ * core does
+ */
+static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
+ struct kgsl_ibdesc *ibdesc)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Check that the size of the IBs is under the allowable limit */
+ if (ibdesc->sizedwords == 0 || ibdesc->sizedwords > 0xFFFFF) {
+ KGSL_DRV_ERR(device, "Invalid IB size 0x%X\n",
+ ibdesc->sizedwords);
+ return false;
+ }
+
+ if (unlikely(adreno_dev->ib_check_level >= 1) &&
+ !_parse_ibs(dev_priv, ibdesc->gpuaddr, ibdesc->sizedwords)) {
+ KGSL_DRV_ERR(device, "Could not verify the IBs\n");
+ return false;
+ }
+
+ return true;
+}
+
+int
+adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ int i, ret;
+
+ if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
+ return -EDEADLK;
+
+ /* Verify the IBs before they get queued */
+
+ for (i = 0; i < cmdbatch->ibcount; i++) {
+ if (!_ringbuffer_verify_ib(dev_priv, &cmdbatch->ibdesc[i]))
+ return -EINVAL;
+ }
+
+ /* For now everybody has the same priority */
+ cmdbatch->priority = ADRENO_CONTEXT_DEFAULT_PRIORITY;
+
+ /* Queue the command in the ringbuffer */
+ ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
+ timestamp);
+
+ if (ret)
+ KGSL_DRV_ERR(device, "adreno_dispatcher_queue_cmd returned %d\n",
+ ret);
+ else {
+ /*
+ * only call trace_gpu_job_enqueue for actual commands - dummy
+ * sync command batches won't get scheduled on the GPU
+ */
+
+ if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC)) {
+ const char *str = "3D";
+ if (drawctxt->type == KGSL_CONTEXT_TYPE_CL ||
+ drawctxt->type == KGSL_CONTEXT_TYPE_RS)
+ str = "compute";
+
+ kgsl_trace_gpu_job_enqueue(drawctxt->base.id,
+ cmdbatch->timestamp, str);
+ }
+ }
+
+ return ret;
+}
+
+/* adreno_rindbuffer_submitcmd - submit userspace IBs to the GPU */
+int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_ibdesc *ibdesc;
+ unsigned int numibs;
+ unsigned int *link;
+ unsigned int *cmds;
+ unsigned int i;
+ struct kgsl_context *context;
+ struct adreno_context *drawctxt;
+ unsigned int start_index = 0;
+ int flags = KGSL_CMD_FLAGS_NONE;
+ int ret;
+
+ context = cmdbatch->context;
+ drawctxt = ADRENO_CONTEXT(context);
+
+ ibdesc = cmdbatch->ibdesc;
+ numibs = cmdbatch->ibcount;
+
+ /*When preamble is enabled, the preamble buffer with state restoration
+ commands are stored in the first node of the IB chain. We can skip that
+ if a context switch hasn't occured */
+
+ if ((drawctxt->flags & CTXT_FLAGS_PREAMBLE) &&
+ !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
+ (adreno_dev->drawctxt_active == drawctxt))
+ start_index = 1;
+
+ /*
+ * In skip mode don't issue the draw IBs but keep all the other
+ * accoutrements of a submision (including the interrupt) to keep
+ * the accounting sane. Set start_index and numibs to 0 to just
+ * generate the start and end markers and skip everything else
+ */
+
+ if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) {
+ start_index = 0;
+ numibs = 0;
+ }
+
+ cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
+ GFP_KERNEL);
+ if (!link) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (!start_index) {
+ *cmds++ = cp_nop_packet(1);
+ *cmds++ = KGSL_START_OF_IB_IDENTIFIER;
+ } else {
+ *cmds++ = cp_nop_packet(4);
+ *cmds++ = KGSL_START_OF_IB_IDENTIFIER;
+ *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+ *cmds++ = ibdesc[0].gpuaddr;
+ *cmds++ = ibdesc[0].sizedwords;
+ }
+ for (i = start_index; i < numibs; i++) {
+
+ /*
+ * Skip 0 sized IBs - these are presumed to have been removed
+ * from consideration by the FT policy
+ */
+
+ if (ibdesc[i].sizedwords == 0)
+ *cmds++ = cp_nop_packet(2);
+ else
+ *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+
+ *cmds++ = ibdesc[i].gpuaddr;
+ *cmds++ = ibdesc[i].sizedwords;
+ }
+
+ *cmds++ = cp_nop_packet(1);
+ *cmds++ = KGSL_END_OF_IB_IDENTIFIER;
+
+ ret = kgsl_setstate(&device->mmu, context->id,
+ kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
+ device->id));
+
+ if (ret)
+ goto done;
+
+ ret = adreno_drawctxt_switch(adreno_dev, drawctxt, cmdbatch->flags);
+
+ /*
+ * In the unlikely event of an error in the drawctxt switch,
+ * treat it like a hang
+ */
+ if (ret)
+ goto done;
+
+ if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
+ flags = KGSL_CMD_FLAGS_WFI;
+
+ /*
+ * For some targets, we need to execute a dummy shader operation after a
+ * power collapse
+ */
+
+ if (test_and_clear_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv) &&
+ test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv))
+ flags |= KGSL_CMD_FLAGS_PWRON_FIXUP;
+
+ ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
+ drawctxt,
+ flags,
+ &link[0], (cmds - link),
+ cmdbatch->timestamp);
+
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+ /*
+ * insert wait for idle after every IB1
+ * this is conservative but works reliably and is ok
+ * even for performance simulations
+ */
+ adreno_idle(device);
+#endif
+
+done:
+ device->pwrctrl.irq_last = 0;
+ kgsl_trace_issueibcmds(device, context->id, cmdbatch,
+ cmdbatch->timestamp, cmdbatch->flags, ret,
+ drawctxt ? drawctxt->type : 0);
+
+ kfree(link);
+ return ret;
+}
diff --git a/drivers/gpu/msm2/adreno_ringbuffer.h b/drivers/gpu/msm2/adreno_ringbuffer.h
new file mode 100644
index 0000000..3aa0101
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_ringbuffer.h
@@ -0,0 +1,157 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_RINGBUFFER_H
+#define __ADRENO_RINGBUFFER_H
+
+/*
+ * Adreno ringbuffer sizes in bytes - these are converted to
+ * the appropriate log2 values in the code
+ */
+
+#define KGSL_RB_SIZE (32 * 1024)
+#define KGSL_RB_BLKSIZE 16
+
+/* CP timestamp register */
+#define REG_CP_TIMESTAMP REG_SCRATCH_REG0
+
+
+struct kgsl_device;
+struct kgsl_device_private;
+
+#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
+struct kgsl_rbmemptrs {
+ int rptr;
+ int wptr_poll;
+};
+
+#define GSL_RB_MEMPTRS_RPTR_OFFSET \
+ (offsetof(struct kgsl_rbmemptrs, rptr))
+
+#define GSL_RB_MEMPTRS_WPTRPOLL_OFFSET \
+ (offsetof(struct kgsl_rbmemptrs, wptr_poll))
+
+struct adreno_ringbuffer {
+ struct kgsl_device *device;
+ uint32_t flags;
+
+ struct kgsl_memdesc buffer_desc;
+
+ struct kgsl_memdesc memptrs_desc;
+ struct kgsl_rbmemptrs *memptrs;
+
+ /*ringbuffer size */
+ unsigned int sizedwords;
+
+ unsigned int wptr; /* write pointer offset in dwords from baseaddr */
+
+ unsigned int global_ts;
+};
+
+
+#define GSL_RB_WRITE(device, ring, gpuaddr, data) \
+ do { \
+ *ring = data; \
+ wmb(); \
+ kgsl_cffdump_setmem(device, gpuaddr, data, 4); \
+ ring++; \
+ gpuaddr += sizeof(uint); \
+ } while (0)
+
+/* enable timestamp (...scratch0) memory shadowing */
+#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x1
+
+/* mem rptr */
+#define GSL_RB_CNTL_NO_UPDATE 0x0 /* enable */
+
+/**
+ * adreno_get_rptr - Get the current ringbuffer read pointer
+ * @rb - the ringbuffer
+ *
+ * Get the current read pointer, which is written by the GPU.
+ */
+static inline unsigned int
+adreno_get_rptr(struct adreno_ringbuffer *rb)
+{
+ unsigned int result = rb->memptrs->rptr;
+ rmb();
+ return result;
+}
+
+#define GSL_RB_CNTL_POLL_EN 0x0 /* disable */
+
+/*
+ * protected mode error checking below register address 0x800
+ * note: if CP_INTERRUPT packet is used then checking needs
+ * to change to below register address 0x7C8
+ */
+#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2
+
+int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp);
+
+int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch);
+
+int adreno_ringbuffer_init(struct kgsl_device *device);
+
+int adreno_ringbuffer_warm_start(struct adreno_ringbuffer *rb);
+
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb);
+
+void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
+
+void adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
+
+unsigned int adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+ struct adreno_context *drawctxt,
+ unsigned int flags,
+ unsigned int *cmdaddr,
+ int sizedwords);
+
+void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb);
+
+void kgsl_cp_intrcallback(struct kgsl_device *device);
+
+unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+ struct adreno_context *context,
+ unsigned int numcmds);
+
+int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device);
+
+int adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device);
+
+static inline int adreno_ringbuffer_count(struct adreno_ringbuffer *rb,
+ unsigned int rptr)
+{
+ if (rb->wptr >= rptr)
+ return rb->wptr - rptr;
+ return rb->wptr + rb->sizedwords - rptr;
+}
+
+/* Increment a value by 4 bytes with wrap-around based on size */
+static inline unsigned int adreno_ringbuffer_inc_wrapped(unsigned int val,
+ unsigned int size)
+{
+ return (val + sizeof(unsigned int)) % size;
+}
+
+/* Decrement a value by 4 bytes with wrap-around based on size */
+static inline unsigned int adreno_ringbuffer_dec_wrapped(unsigned int val,
+ unsigned int size)
+{
+ return (val + size - sizeof(unsigned int)) % size;
+}
+
+#endif /* __ADRENO_RINGBUFFER_H */
diff --git a/drivers/gpu/msm2/adreno_snapshot.c b/drivers/gpu/msm2/adreno_snapshot.c
new file mode 100644
index 0000000..fcb1349
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_snapshot.c
@@ -0,0 +1,620 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_snapshot.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "a2xx_reg.h"
+#include "a3xx_reg.h"
+#include "adreno_cp_parser.h"
+
+/* Number of dwords of ringbuffer history to record */
+#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100
+
+/* Maintain a list of the objects we see during parsing */
+
+#define SNAPSHOT_OBJ_BUFSIZE 64
+
+#define SNAPSHOT_OBJ_TYPE_IB 0
+
+/* Keep track of how many bytes are frozen after a snapshot and tell the user */
+static int snapshot_frozen_objsize;
+
+static struct kgsl_snapshot_obj {
+ int type;
+ uint32_t gpuaddr;
+ phys_addr_t ptbase;
+ void *ptr;
+ int dwords;
+ struct kgsl_mem_entry *entry;
+} objbuf[SNAPSHOT_OBJ_BUFSIZE];
+
+/* Pointer to the next open entry in the object list */
+static int objbufptr;
+
+/* Push a new buffer object onto the list */
+static void push_object(struct kgsl_device *device, int type,
+ phys_addr_t ptbase,
+ uint32_t gpuaddr, int dwords)
+{
+ int index;
+ void *ptr;
+ struct kgsl_mem_entry *entry = NULL;
+
+ /*
+ * Sometimes IBs can be reused in the same dump. Because we parse from
+ * oldest to newest, if we come across an IB that has already been used,
+ * assume that it has been reused and update the list with the newest
+ * size.
+ */
+
+ for (index = 0; index < objbufptr; index++) {
+ if (objbuf[index].gpuaddr == gpuaddr &&
+ objbuf[index].ptbase == ptbase) {
+ objbuf[index].dwords = dwords;
+ return;
+ }
+ }
+
+ if (objbufptr == SNAPSHOT_OBJ_BUFSIZE) {
+ KGSL_DRV_ERR(device, "snapshot: too many snapshot objects\n");
+ return;
+ }
+
+ /*
+ * adreno_convertaddr verifies that the IB size is valid - at least in
+ * the context of it being smaller then the allocated memory space
+ */
+ ptr = adreno_convertaddr(device, ptbase, gpuaddr, dwords << 2, &entry);
+
+ if (ptr == NULL) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Can't find GPU address for %x\n", gpuaddr);
+ return;
+ }
+
+ /* Put it on the list of things to parse */
+ objbuf[objbufptr].type = type;
+ objbuf[objbufptr].gpuaddr = gpuaddr;
+ objbuf[objbufptr].ptbase = ptbase;
+ objbuf[objbufptr].dwords = dwords;
+ objbuf[objbufptr].entry = entry;
+ objbuf[objbufptr++].ptr = ptr;
+}
+
+/*
+ * Return a 1 if the specified object is already on the list of buffers
+ * to be dumped
+ */
+
+static int find_object(int type, unsigned int gpuaddr, phys_addr_t ptbase)
+{
+ int index;
+
+ for (index = 0; index < objbufptr; index++) {
+ if (objbuf[index].gpuaddr == gpuaddr &&
+ objbuf[index].ptbase == ptbase &&
+ objbuf[index].type == type)
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * snapshot_freeze_obj_list() - Take a list of ib objects and freeze their
+ * memory for snapshot
+ * @device: Device being snapshotted
+ * @ptbase: The pagetable base of the process to which IB belongs
+ * @ib_obj_list: List of the IB objects
+ *
+ * Returns 0 on success else error code
+ */
+static int snapshot_freeze_obj_list(struct kgsl_device *device,
+ phys_addr_t ptbase, struct adreno_ib_object_list *ib_obj_list)
+{
+ int ret = 0;
+ struct adreno_ib_object *ib_objs;
+ unsigned int ib2base;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int i;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BASE, &ib2base);
+
+ for (i = 0; i < ib_obj_list->num_objs; i++) {
+ int temp_ret;
+ int index;
+ int freeze = 1;
+
+ ib_objs = &(ib_obj_list->obj_list[i]);
+ /* Make sure this object is not going to be saved statically */
+ for (index = 0; index < objbufptr; index++) {
+ if ((objbuf[index].gpuaddr <= ib_objs->gpuaddr) &&
+ ((objbuf[index].gpuaddr +
+ (objbuf[index].dwords << 2)) >=
+ (ib_objs->gpuaddr + ib_objs->size)) &&
+ (objbuf[index].ptbase == ptbase)) {
+ freeze = 0;
+ break;
+ }
+ }
+
+ if (freeze) {
+ /* Save current IB2 statically */
+ if (ib2base == ib_objs->gpuaddr) {
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB,
+ ptbase, ib_objs->gpuaddr, ib_objs->size >> 2);
+ } else {
+ temp_ret = kgsl_snapshot_get_object(device,
+ ptbase, ib_objs->gpuaddr, ib_objs->size,
+ ib_objs->snapshot_obj_type);
+ if (temp_ret < 0) {
+ if (ret >= 0)
+ ret = temp_ret;
+ } else {
+ snapshot_frozen_objsize += temp_ret;
+ }
+ }
+ }
+ }
+ return ret;
+}
+
+/*
+ * We want to store the last executed IB1 and IB2 in the static region to ensure
+ * that we get at least some information out of the snapshot even if we can't
+ * access the dynamic data from the sysfs file. Push all other IBs on the
+ * dynamic list
+ */
+static inline int parse_ib(struct kgsl_device *device, phys_addr_t ptbase,
+ unsigned int gpuaddr, unsigned int dwords)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int ib1base;
+ int ret = 0;
+ struct adreno_ib_object_list *ib_obj_list;
+
+ /*
+ * Check the IB address - if it is either the last executed IB1
+ * then push it into the static blob otherwise put it in the dynamic
+ * list
+ */
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ib1base);
+
+ if (gpuaddr == ib1base) {
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
+ gpuaddr, dwords);
+ goto done;
+ }
+
+ if (kgsl_snapshot_have_object(device, ptbase, gpuaddr, dwords << 2))
+ goto done;
+
+ ret = adreno_ib_create_object_list(device, ptbase,
+ gpuaddr, dwords, &ib_obj_list);
+ if (ret)
+ goto done;
+
+ ret = kgsl_snapshot_add_ib_obj_list(device, ptbase, ib_obj_list);
+
+ if (ret)
+ adreno_ib_destroy_obj_list(ib_obj_list);
+done:
+ return ret;
+}
+
+/* Snapshot the ringbuffer memory */
+static int snapshot_rb(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_rb *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned int rptr, *rbptr, ibbase;
+ phys_addr_t ptbase;
+ int index, size, i;
+ int parse_ibs = 0, ib_parse_start;
+
+ /* Get the physical address of the MMU pagetable */
+ ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
+
+ /* Get the current read pointers for the RB */
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &rptr);
+
+ /* Address of the last processed IB */
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ibbase);
+
+ /*
+ * Figure out the window of ringbuffer data to dump. First we need to
+ * find where the last processed IB ws submitted. Start walking back
+ * from the rptr
+ */
+
+ index = rptr;
+ rbptr = rb->buffer_desc.hostptr;
+
+ do {
+ index--;
+
+ if (index < 0) {
+ index = rb->sizedwords - 3;
+
+ /* We wrapped without finding what we wanted */
+ if (index < rb->wptr) {
+ index = rb->wptr;
+ break;
+ }
+ }
+
+ if (adreno_cmd_is_ib(rbptr[index]) &&
+ rbptr[index + 1] == ibbase)
+ break;
+ } while (index != rb->wptr);
+
+ /*
+ * index points at the last submitted IB. We can only trust that the
+ * memory between the context switch and the hanging IB is valid, so
+ * the next step is to find the context switch before the submission
+ */
+
+ while (index != rb->wptr) {
+ index--;
+
+ if (index < 0) {
+ index = rb->sizedwords - 2;
+
+ /*
+ * Wrapped without finding the context switch. This is
+ * harmless - we should still have enough data to dump a
+ * valid state
+ */
+
+ if (index < rb->wptr) {
+ index = rb->wptr;
+ break;
+ }
+ }
+
+ /* Break if the current packet is a context switch identifier */
+ if ((rbptr[index] == cp_nop_packet(1)) &&
+ (rbptr[index + 1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER))
+ break;
+ }
+
+ /*
+ * Index represents the start of the window of interest. We will try
+ * to dump all buffers between here and the rptr
+ */
+
+ ib_parse_start = index;
+
+ /*
+ * Dump the entire ringbuffer - the parser can choose how much of it to
+ * process
+ */
+
+ size = (rb->sizedwords << 2);
+
+ if (remain < size + sizeof(*header)) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Not enough memory for the rb section");
+ return 0;
+ }
+
+ /* Write the sub-header for the section */
+ header->start = rb->wptr;
+ header->end = rb->wptr;
+ header->wptr = rb->wptr;
+ header->rbsize = rb->sizedwords;
+ header->count = rb->sizedwords;
+
+ /*
+ * Loop through the RB, copying the data and looking for indirect
+ * buffers and MMU pagetable changes
+ */
+
+ index = rb->wptr;
+ for (i = 0; i < rb->sizedwords; i++) {
+ *data = rbptr[index];
+
+ /*
+ * Only parse IBs between the start and the rptr or the next
+ * context switch, whichever comes first
+ */
+
+ if (parse_ibs == 0 && index == ib_parse_start)
+ parse_ibs = 1;
+ else if (index == rptr || adreno_rb_ctxtswitch(&rbptr[index]))
+ parse_ibs = 0;
+
+ if (parse_ibs && adreno_cmd_is_ib(rbptr[index])) {
+ unsigned int ibaddr = rbptr[index + 1];
+ unsigned int ibsize = rbptr[index + 2];
+
+ /*
+ * This will return non NULL if the IB happens to be
+ * part of the context memory (i.e - context switch
+ * command buffers)
+ */
+
+ struct kgsl_memdesc *memdesc =
+ adreno_find_ctxtmem(device, ptbase, ibaddr,
+ ibsize << 2);
+
+ /* IOMMU uses a NOP IB placed in setsate memory */
+ if (NULL == memdesc)
+ if (kgsl_gpuaddr_in_memdesc(
+ &device->mmu.setstate_memory,
+ ibaddr, ibsize << 2))
+ memdesc = &device->mmu.setstate_memory;
+ /*
+ * The IB from CP_IB1_BASE and the IBs for legacy
+ * context switch go into the snapshot all
+ * others get marked at GPU objects
+ */
+
+ if (memdesc != NULL)
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB,
+ ptbase, ibaddr, ibsize);
+ else
+ parse_ib(device, ptbase, ibaddr, ibsize);
+ }
+
+ index = index + 1;
+
+ if (index == rb->sizedwords)
+ index = 0;
+
+ data++;
+ }
+
+ /* Return the size of the section */
+ return size + sizeof(*header);
+}
+
+static int snapshot_capture_mem_list(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_replay_mem_list *header = snapshot;
+ struct kgsl_process_private *private = NULL;
+ struct kgsl_process_private *tmp_private;
+ phys_addr_t ptbase;
+ struct rb_node *node;
+ struct kgsl_mem_entry *entry = NULL;
+ int num_mem;
+ unsigned int *data = snapshot + sizeof(*header);
+
+ ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
+ mutex_lock(&kgsl_driver.process_mutex);
+ list_for_each_entry(tmp_private, &kgsl_driver.process_list, list) {
+ if (kgsl_mmu_pt_equal(&device->mmu, tmp_private->pagetable,
+ ptbase)) {
+ private = tmp_private;
+ break;
+ }
+ }
+ mutex_unlock(&kgsl_driver.process_mutex);
+ if (!private) {
+ KGSL_DRV_ERR(device,
+ "Failed to get pointer to process private structure\n");
+ return 0;
+ }
+ /* We need to know the number of memory objects that the process has */
+ spin_lock(&private->mem_lock);
+ for (node = rb_first(&private->mem_rb), num_mem = 0; node; ) {
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+ node = rb_next(&entry->node);
+ num_mem++;
+ }
+
+ if (remain < ((num_mem * 3 * sizeof(unsigned int)) +
+ sizeof(*header))) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Not enough memory for the mem list section");
+ spin_unlock(&private->mem_lock);
+ return 0;
+ }
+ header->num_entries = num_mem;
+ header->ptbase = (__u32)ptbase;
+ /*
+ * Walk throught the memory list and store the
+ * tuples(gpuaddr, size, memtype) in snapshot
+ */
+ for (node = rb_first(&private->mem_rb); node; ) {
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+ node = rb_next(&entry->node);
+
+ *data++ = entry->memdesc.gpuaddr;
+ *data++ = entry->memdesc.size;
+ *data++ = (entry->memdesc.priv & KGSL_MEMTYPE_MASK) >>
+ KGSL_MEMTYPE_SHIFT;
+ }
+ spin_unlock(&private->mem_lock);
+ return sizeof(*header) + (num_mem * 3 * sizeof(unsigned int));
+}
+
+/* Snapshot the memory for an indirect buffer */
+static int snapshot_ib(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_ib *header = snapshot;
+ struct kgsl_snapshot_obj *obj = priv;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int *src = obj->ptr;
+ unsigned int *dst = snapshot + sizeof(*header);
+ struct adreno_ib_object_list *ib_obj_list;
+ unsigned int ib1base;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ib1base);
+
+ if (remain < (obj->dwords << 2) + sizeof(*header)) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Not enough memory for the ib section");
+ return 0;
+ }
+
+ /* only do this for IB1 because the IB2's are part of IB1 objects */
+ if (ib1base == obj->gpuaddr) {
+ if (!adreno_ib_create_object_list(device, obj->ptbase,
+ obj->gpuaddr, obj->dwords,
+ &ib_obj_list)) {
+ /* freeze the IB objects in the IB */
+ snapshot_freeze_obj_list(device, obj->ptbase,
+ ib_obj_list);
+ adreno_ib_destroy_obj_list(ib_obj_list);
+ }
+ }
+
+ /* Write the sub-header for the section */
+ header->gpuaddr = obj->gpuaddr;
+ header->ptbase = (__u32)obj->ptbase;
+ header->size = obj->dwords;
+
+ /* Write the contents of the ib */
+ memcpy((void *)dst, (void *)src, obj->dwords << 2);
+ /* Write the contents of the ib */
+
+ return (obj->dwords << 2) + sizeof(*header);
+}
+
+/* Dump another item on the current pending list */
+static void *dump_object(struct kgsl_device *device, int obj, void *snapshot,
+ int *remain)
+{
+ switch (objbuf[obj].type) {
+ case SNAPSHOT_OBJ_TYPE_IB:
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_IB, snapshot, remain,
+ snapshot_ib, &objbuf[obj]);
+ if (objbuf[obj].entry) {
+ kgsl_memdesc_unmap(&(objbuf[obj].entry->memdesc));
+ kgsl_mem_entry_put(objbuf[obj].entry);
+ }
+ break;
+ default:
+ KGSL_DRV_ERR(device,
+ "snapshot: Invalid snapshot object type: %d\n",
+ objbuf[obj].type);
+ break;
+ }
+
+ return snapshot;
+}
+
+/* adreno_snapshot - Snapshot the Adreno GPU state
+ * @device - KGSL device to snapshot
+ * @snapshot - Pointer to the start of memory to write into
+ * @remain - A pointer to how many bytes of memory are remaining in the snapshot
+ * @hang - set if this snapshot was automatically triggered by a GPU hang
+ * This is a hook function called by kgsl_snapshot to snapshot the
+ * Adreno specific information for the GPU snapshot. In turn, this function
+ * calls the GPU specific snapshot function to get core specific information.
+ */
+
+void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
+ int hang)
+{
+ int i;
+ uint32_t ibbase, ibsize;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ phys_addr_t ptbase;
+
+ /* Reset the list of objects */
+ objbufptr = 0;
+
+ snapshot_frozen_objsize = 0;
+
+ /* Get the physical address of the MMU pagetable */
+ ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
+
+ /* Dump the ringbuffer */
+ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_RB,
+ snapshot, remain, snapshot_rb, NULL);
+
+ /*
+ * Add a section that lists (gpuaddr, size, memtype) tuples of the
+ * hanging process
+ */
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_MEMLIST, snapshot, remain,
+ snapshot_capture_mem_list, NULL);
+ /*
+ * Make sure that the last IB1 that was being executed is dumped.
+ * Since this was the last IB1 that was processed, we should have
+ * already added it to the list during the ringbuffer parse but we
+ * want to be double plus sure.
+ */
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BASE, &ibbase);
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ, &ibsize);
+
+ /*
+ * The problem is that IB size from the register is the unprocessed size
+ * of the buffer not the original size, so if we didn't catch this
+ * buffer being directly used in the RB, then we might not be able to
+ * dump the whle thing. Print a warning message so we can try to
+ * figure how often this really happens.
+ */
+
+ if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) {
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
+ ibbase, ibsize);
+ KGSL_DRV_ERR(device, "CP_IB1_BASE not found in the ringbuffer. "
+ "Dumping %x dwords of the buffer.\n", ibsize);
+ }
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BASE, &ibbase);
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ibsize);
+
+ /*
+ * Add the last parsed IB2 to the list. The IB2 should be found as we
+ * parse the objects below, but we try to add it to the list first, so
+ * it too can be parsed. Don't print an error message in this case - if
+ * the IB2 is found during parsing, the list will be updated with the
+ * correct size.
+ */
+
+ if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) {
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
+ ibbase, ibsize);
+ }
+
+ /*
+ * Go through the list of found objects and dump each one. As the IBs
+ * are parsed, more objects might be found, and objbufptr will increase
+ */
+ for (i = 0; i < objbufptr; i++)
+ snapshot = dump_object(device, i, snapshot, remain);
+
+ /* Add GPU specific sections - registers mainly, but other stuff too */
+ if (adreno_dev->gpudev->snapshot)
+ snapshot = adreno_dev->gpudev->snapshot(adreno_dev, snapshot,
+ remain, hang);
+
+ if (snapshot_frozen_objsize)
+ KGSL_DRV_ERR(device, "GPU snapshot froze %dKb of GPU buffers\n",
+ snapshot_frozen_objsize / 1024);
+
+ /*
+ * Queue a work item that will save the IB data in snapshot into
+ * static memory to prevent loss of data due to overwriting of
+ * memory
+ */
+ queue_work(device->work_queue, &device->snapshot_obj_ws);
+
+ return snapshot;
+}
diff --git a/drivers/gpu/msm2/adreno_trace.c b/drivers/gpu/msm2/adreno_trace.c
new file mode 100644
index 0000000..607ba8c
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_trace.c
@@ -0,0 +1,18 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "adreno.h"
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "adreno_trace.h"
diff --git a/drivers/gpu/msm2/adreno_trace.h b/drivers/gpu/msm2/adreno_trace.h
new file mode 100644
index 0000000..8a9046c
--- /dev/null
+++ b/drivers/gpu/msm2/adreno_trace.h
@@ -0,0 +1,259 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_ADRENO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ADRENO_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adreno_trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(adreno_cmdbatch_queued,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
+ TP_ARGS(cmdbatch, queued),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, queued)
+ ),
+ TP_fast_assign(
+ __entry->id = cmdbatch->context->id;
+ __entry->timestamp = cmdbatch->timestamp;
+ __entry->queued = queued;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u queued=%u",
+ __entry->id, __entry->timestamp, __entry->queued
+ )
+);
+
+DECLARE_EVENT_CLASS(adreno_cmdbatch_template,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+ TP_ARGS(cmdbatch, inflight),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, inflight)
+ ),
+ TP_fast_assign(
+ __entry->id = cmdbatch->context->id;
+ __entry->timestamp = cmdbatch->timestamp;
+ __entry->inflight = inflight;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u inflight=%u",
+ __entry->id, __entry->timestamp,
+ __entry->inflight
+ )
+);
+
+DEFINE_EVENT(adreno_cmdbatch_template, adreno_cmdbatch_submitted,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+ TP_ARGS(cmdbatch, inflight)
+);
+
+TRACE_EVENT(adreno_cmdbatch_retired,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight),
+ TP_ARGS(cmdbatch, inflight),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, inflight)
+ __field(unsigned int, recovery)
+ ),
+ TP_fast_assign(
+ __entry->id = cmdbatch->context->id;
+ __entry->timestamp = cmdbatch->timestamp;
+ __entry->inflight = inflight;
+ __entry->recovery = cmdbatch->fault_recovery;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u inflight=%u recovery=%s",
+ __entry->id, __entry->timestamp,
+ __entry->inflight,
+ __entry->recovery ?
+ __print_flags(__entry->recovery, "|",
+ ADRENO_FT_TYPES) : "none"
+ )
+);
+
+TRACE_EVENT(adreno_cmdbatch_fault,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault),
+ TP_ARGS(cmdbatch, fault),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, fault)
+ ),
+ TP_fast_assign(
+ __entry->id = cmdbatch->context->id;
+ __entry->timestamp = cmdbatch->timestamp;
+ __entry->fault = fault;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u type=%s",
+ __entry->id, __entry->timestamp,
+ __print_symbolic(__entry->fault,
+ { 0, "none" },
+ { ADRENO_SOFT_FAULT, "soft" },
+ { ADRENO_HARD_FAULT, "hard" },
+ { ADRENO_TIMEOUT_FAULT, "timeout" })
+ )
+);
+
+TRACE_EVENT(adreno_cmdbatch_recovery,
+ TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action),
+ TP_ARGS(cmdbatch, action),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, action)
+ ),
+ TP_fast_assign(
+ __entry->id = cmdbatch->context->id;
+ __entry->timestamp = cmdbatch->timestamp;
+ __entry->action = action;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u action=%s",
+ __entry->id, __entry->timestamp,
+ __print_symbolic(__entry->action, ADRENO_FT_TYPES)
+ )
+);
+
+DECLARE_EVENT_CLASS(adreno_drawctxt_template,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ ),
+ TP_fast_assign(
+ __entry->id = drawctxt->base.id;
+ ),
+ TP_printk("ctx=%u", __entry->id)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_context_sleep,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, adreno_context_wake,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+DEFINE_EVENT(adreno_drawctxt_template, dispatch_queue_context,
+ TP_PROTO(struct adreno_context *drawctxt),
+ TP_ARGS(drawctxt)
+);
+
+TRACE_EVENT(adreno_drawctxt_wait_start,
+ TP_PROTO(unsigned int id, unsigned int ts),
+ TP_ARGS(id, ts),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, ts)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->ts = ts;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u",
+ __entry->id, __entry->ts
+ )
+);
+
+TRACE_EVENT(adreno_drawctxt_wait_done,
+ TP_PROTO(unsigned int id, unsigned int ts, int status),
+ TP_ARGS(id, ts, status),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, ts)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->ts = ts;
+ __entry->status = status;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u status=%d",
+ __entry->id, __entry->ts, __entry->status
+ )
+);
+
+TRACE_EVENT(adreno_drawctxt_switch,
+ TP_PROTO(struct adreno_context *oldctx,
+ struct adreno_context *newctx,
+ unsigned int flags),
+ TP_ARGS(oldctx, newctx, flags),
+ TP_STRUCT__entry(
+ __field(unsigned int, oldctx)
+ __field(unsigned int, newctx)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->oldctx = oldctx ? oldctx->base.id : 0;
+ __entry->newctx = newctx ? newctx->base.id : 0;
+ ),
+ TP_printk(
+ "oldctx=%u newctx=%u flags=%X",
+ __entry->oldctx, __entry->newctx, flags
+ )
+);
+
+TRACE_EVENT(adreno_gpu_fault,
+ TP_PROTO(unsigned int ctx, unsigned int ts,
+ unsigned int status, unsigned int rptr, unsigned int wptr,
+ unsigned int ib1base, unsigned int ib1size,
+ unsigned int ib2base, unsigned int ib2size),
+ TP_ARGS(ctx, ts, status, rptr, wptr, ib1base, ib1size, ib2base,
+ ib2size),
+ TP_STRUCT__entry(
+ __field(unsigned int, ctx)
+ __field(unsigned int, ts)
+ __field(unsigned int, status)
+ __field(unsigned int, rptr)
+ __field(unsigned int, wptr)
+ __field(unsigned int, ib1base)
+ __field(unsigned int, ib1size)
+ __field(unsigned int, ib2base)
+ __field(unsigned int, ib2size)
+ ),
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->ts = ts;
+ __entry->status = status;
+ __entry->rptr = rptr;
+ __entry->wptr = wptr;
+ __entry->ib1base = ib1base;
+ __entry->ib1size = ib1size;
+ __entry->ib2base = ib2base;
+ __entry->ib2size = ib2size;
+ ),
+ TP_printk("ctx=%d ts=%d status=%X RB=%X/%X IB1=%X/%X IB2=%X/%X",
+ __entry->ctx, __entry->ts, __entry->status, __entry->wptr,
+ __entry->rptr, __entry->ib1base, __entry->ib1size,
+ __entry->ib2base, __entry->ib2size)
+);
+
+#endif /* _ADRENO_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm2/kgsl.c b/drivers/gpu/msm2/kgsl.c
new file mode 100644
index 0000000..8ca70e1
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl.c
@@ -0,0 +1,4256 @@
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/fb.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/dma-buf.h>
+
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+#include <linux/genlock.h>
+#include <linux/rbtree.h>
+#include <linux/ashmem.h>
+#include <linux/major.h>
+#include <linux/io.h>
+#include <mach/socinfo.h>
+#include <linux/mman.h>
+#include <linux/sort.h>
+#include <asm/cacheflush.h>
+
+#include "kgsl.h"
+#include "kgsl_debugfs.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_log.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_device.h"
+#include "kgsl_trace.h"
+#include "kgsl_sync.h"
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "kgsl."
+
+static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
+static char *ksgl_mmu_type;
+module_param_named(ptcount, kgsl_pagetable_count, int, 0);
+MODULE_PARM_DESC(kgsl_pagetable_count,
+"Minimum number of pagetables for KGSL to allocate at initialization time");
+module_param_named(mmutype, ksgl_mmu_type, charp, 0);
+MODULE_PARM_DESC(ksgl_mmu_type,
+"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");
+
+struct kgsl_dma_buf_meta {
+ struct dma_buf_attachment *attach;
+ struct dma_buf *dmabuf;
+ struct sg_table *table;
+};
+
+static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry);
+
+static void
+kgsl_put_process_private(struct kgsl_device *device,
+ struct kgsl_process_private *private);
+/**
+ * kgsl_trace_issueibcmds() - Call trace_issueibcmds by proxy
+ * device: KGSL device
+ * id: ID of the context submitting the command
+ * cmdbatch: Pointer to kgsl_cmdbatch describing these commands
+ * timestamp: Timestamp assigned to the command batch
+ * flags: Flags sent by the user
+ * result: Result of the submission attempt
+ * type: Type of context issuing the command
+ *
+ * Wrap the issueibcmds ftrace hook into a function that can be called from the
+ * GPU specific modules.
+ */
+void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
+ struct kgsl_cmdbatch *cmdbatch,
+ unsigned int timestamp, unsigned int flags,
+ int result, unsigned int type)
+{
+ trace_kgsl_issueibcmds(device, id, cmdbatch,
+ timestamp, flags, result, type);
+}
+EXPORT_SYMBOL(kgsl_trace_issueibcmds);
+
+/**
+ * kgsl_trace_regwrite - call regwrite ftrace function by proxy
+ * device: KGSL device
+ * offset: dword offset of the register being written
+ * value: Value of the register being written
+ *
+ * Wrap the regwrite ftrace hook into a function that can be called from the
+ * GPU specific modules.
+ */
+void kgsl_trace_regwrite(struct kgsl_device *device, unsigned int offset,
+ unsigned int value)
+{
+ trace_kgsl_regwrite(device, offset, value);
+}
+EXPORT_SYMBOL(kgsl_trace_regwrite);
+
+int kgsl_memfree_hist_init(void)
+{
+ void *base;
+
+ base = kzalloc(KGSL_MEMFREE_HIST_SIZE, GFP_KERNEL);
+ kgsl_driver.memfree_hist.base_hist_rb = base;
+ if (base == NULL)
+ return -ENOMEM;
+ kgsl_driver.memfree_hist.size = KGSL_MEMFREE_HIST_SIZE;
+ kgsl_driver.memfree_hist.wptr = base;
+ return 0;
+}
+
+void kgsl_memfree_hist_exit(void)
+{
+ kfree(kgsl_driver.memfree_hist.base_hist_rb);
+ kgsl_driver.memfree_hist.base_hist_rb = NULL;
+}
+
+void kgsl_memfree_hist_set_event(unsigned int pid, unsigned int gpuaddr,
+ unsigned int size, int flags)
+{
+ struct kgsl_memfree_hist_elem *p;
+
+ void *base = kgsl_driver.memfree_hist.base_hist_rb;
+ int rbsize = kgsl_driver.memfree_hist.size;
+
+ if (base == NULL)
+ return;
+
+ mutex_lock(&kgsl_driver.memfree_hist_mutex);
+ p = kgsl_driver.memfree_hist.wptr;
+ p->pid = pid;
+ p->gpuaddr = gpuaddr;
+ p->size = size;
+ p->flags = flags;
+
+ kgsl_driver.memfree_hist.wptr++;
+ if ((void *)kgsl_driver.memfree_hist.wptr >= base+rbsize) {
+ kgsl_driver.memfree_hist.wptr =
+ (struct kgsl_memfree_hist_elem *)base;
+ }
+ mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+}
+
+
+/* kgsl_get_mem_entry - get the mem_entry structure for the specified object
+ * @device - Pointer to the device structure
+ * @ptbase - the pagetable base of the object
+ * @gpuaddr - the GPU address of the object
+ * @size - Size of the region to search
+ *
+ * Caller must kgsl_mem_entry_put() the returned entry when finished using it.
+ */
+
+struct kgsl_mem_entry * __must_check
+kgsl_get_mem_entry(struct kgsl_device *device,
+ phys_addr_t ptbase, unsigned int gpuaddr, unsigned int size)
+{
+ struct kgsl_process_private *priv;
+ struct kgsl_mem_entry *entry;
+
+ mutex_lock(&kgsl_driver.process_mutex);
+
+ list_for_each_entry(priv, &kgsl_driver.process_list, list) {
+ if (!kgsl_mmu_pt_equal(&device->mmu, priv->pagetable, ptbase))
+ continue;
+ entry = kgsl_sharedmem_find_region(priv, gpuaddr, size);
+
+ if (entry) {
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return entry;
+ }
+ }
+ mutex_unlock(&kgsl_driver.process_mutex);
+
+ return NULL;
+}
+EXPORT_SYMBOL(kgsl_get_mem_entry);
+
+static inline struct kgsl_mem_entry *
+kgsl_mem_entry_create(void)
+{
+ struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
+ else
+ kref_init(&entry->refcount);
+
+ return entry;
+}
+
+static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
+{
+ if (meta != NULL) {
+ dma_buf_unmap_attachment(meta->attach, meta->table,
+ DMA_FROM_DEVICE);
+ dma_buf_detach(meta->dmabuf, meta->attach);
+ dma_buf_put(meta->dmabuf);
+ kfree(meta);
+ }
+}
+
+void
+kgsl_mem_entry_destroy(struct kref *kref)
+{
+ struct kgsl_mem_entry *entry = container_of(kref,
+ struct kgsl_mem_entry,
+ refcount);
+
+ /* Detach from process list */
+ kgsl_mem_entry_detach_process(entry);
+
+ if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
+ kgsl_driver.stats.mapped -= entry->memdesc.size;
+
+ /*
+ * Ion takes care of freeing the sglist for us so
+ * clear the sg before freeing the sharedmem so kgsl_sharedmem_free
+ * doesn't try to free it again
+ */
+
+ if (entry->memtype == KGSL_MEM_ENTRY_ION) {
+ entry->memdesc.sg = NULL;
+ }
+
+ kgsl_sharedmem_free(&entry->memdesc);
+
+ switch (entry->memtype) {
+ case KGSL_MEM_ENTRY_PMEM:
+ case KGSL_MEM_ENTRY_ASHMEM:
+ if (entry->priv_data)
+ fput(entry->priv_data);
+ break;
+ case KGSL_MEM_ENTRY_ION:
+ kgsl_destroy_ion(entry->priv_data);
+ break;
+ }
+
+ kfree(entry);
+}
+EXPORT_SYMBOL(kgsl_mem_entry_destroy);
+
+/**
+ * kgsl_mem_entry_track_gpuaddr - Insert a mem_entry in the address tree and
+ * assign it with a gpu address space before insertion
+ * @process: the process that owns the memory
+ * @entry: the memory entry
+ *
+ * @returns - 0 on succcess else error code
+ *
+ * Insert the kgsl_mem_entry in to the rb_tree for searching by GPU address.
+ * The assignment of gpu address and insertion into list needs to
+ * happen with the memory lock held to avoid race conditions between
+ * gpu address being selected and some other thread looking through the
+ * rb list in search of memory based on gpuaddr
+ * This function should be called with processes memory spinlock held
+ */
+static int
+kgsl_mem_entry_track_gpuaddr(struct kgsl_process_private *process,
+ struct kgsl_mem_entry *entry)
+{
+ int ret = 0;
+ struct rb_node **node;
+ struct rb_node *parent = NULL;
+
+ assert_spin_locked(&process->mem_lock);
+ /*
+ * If cpu=gpu map is used then caller needs to set the
+ * gpu address
+ */
+ if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
+ if (!entry->memdesc.gpuaddr)
+ goto done;
+ } else if (entry->memdesc.gpuaddr) {
+ WARN_ONCE(1, "gpuaddr assigned w/o holding memory lock\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
+ ret = kgsl_mmu_get_gpuaddr(process->pagetable, &entry->memdesc);
+ if (ret)
+ goto done;
+ }
+
+ node = &process->mem_rb.rb_node;
+
+ while (*node) {
+ struct kgsl_mem_entry *cur;
+
+ parent = *node;
+ cur = rb_entry(parent, struct kgsl_mem_entry, node);
+
+ if (entry->memdesc.gpuaddr < cur->memdesc.gpuaddr)
+ node = &parent->rb_left;
+ else
+ node = &parent->rb_right;
+ }
+
+ rb_link_node(&entry->node, parent, node);
+ rb_insert_color(&entry->node, &process->mem_rb);
+
+done:
+ return ret;
+}
+
+/**
+ * kgsl_mem_entry_untrack_gpuaddr() - Untrack memory that is previously tracked
+ * process - Pointer to process private to which memory belongs
+ * entry - Memory entry to untrack
+ *
+ * Function just does the opposite of kgsl_mem_entry_track_gpuaddr. Needs to be
+ * called with processes spin lock held
+ */
+static void
+kgsl_mem_entry_untrack_gpuaddr(struct kgsl_process_private *process,
+ struct kgsl_mem_entry *entry)
+{
+ assert_spin_locked(&process->mem_lock);
+ if (entry->memdesc.gpuaddr) {
+ kgsl_mmu_put_gpuaddr(process->pagetable, &entry->memdesc);
+ rb_erase(&entry->node, &entry->priv->mem_rb);
+ }
+}
+
+/**
+ * kgsl_mem_entry_attach_process - Attach a mem_entry to its owner process
+ * @entry: the memory entry
+ * @process: the owner process
+ *
+ * Attach a newly created mem_entry to its owner process so that
+ * it can be found later. The mem_entry will be added to mem_idr and have
+ * its 'id' field assigned. If the GPU address has been set, the entry
+ * will also be added to the mem_rb tree.
+ *
+ * @returns - 0 on success or error code on failure.
+ */
+static int
+kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
+ struct kgsl_process_private *process)
+{
+ int ret;
+
+ while (1) {
+ if (idr_pre_get(&process->mem_idr, GFP_KERNEL) == 0) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock(&process->mem_lock);
+ ret = idr_get_new_above(&process->mem_idr, entry, 1,
+ &entry->id);
+ spin_unlock(&process->mem_lock);
+
+ if (ret == 0)
+ break;
+ else if (ret != -EAGAIN)
+ goto err;
+ }
+ entry->priv = process;
+
+ spin_lock(&process->mem_lock);
+ ret = kgsl_mem_entry_track_gpuaddr(process, entry);
+ if (ret)
+ idr_remove(&process->mem_idr, entry->id);
+ spin_unlock(&process->mem_lock);
+ if (ret)
+ goto err;
+ /* map the memory after unlocking if gpuaddr has been assigned */
+ if (entry->memdesc.gpuaddr) {
+ ret = kgsl_mmu_map(process->pagetable, &entry->memdesc);
+ if (ret)
+ kgsl_mem_entry_detach_process(entry);
+ }
+err:
+ return ret;
+}
+
+/* Detach a memory entry from a process and unmap it from the MMU */
+
+static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
+{
+ if (entry == NULL)
+ return;
+
+ /* Unmap here so that below we can call kgsl_mmu_put_gpuaddr */
+ kgsl_mmu_unmap(entry->priv->pagetable, &entry->memdesc);
+
+ spin_lock(&entry->priv->mem_lock);
+
+ kgsl_mem_entry_untrack_gpuaddr(entry->priv, entry);
+ if (entry->id != 0)
+ idr_remove(&entry->priv->mem_idr, entry->id);
+ entry->id = 0;
+
+ entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
+ spin_unlock(&entry->priv->mem_lock);
+
+ entry->priv = NULL;
+}
+
+/**
+ * kgsl_context_init() - helper to initialize kgsl_context members
+ * @dev_priv: the owner of the context
+ * @context: the newly created context struct, should be allocated by
+ * the device specific drawctxt_create function.
+ *
+ * This is a helper function for the device specific drawctxt_create
+ * function to initialize the common members of its context struct.
+ * If this function succeeds, reference counting is active in the context
+ * struct and the caller should kgsl_context_put() it on error.
+ * If it fails, the caller should just free the context structure
+ * it passed in.
+ */
+int kgsl_context_init(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context)
+{
+ int ret = 0, id;
+ struct kgsl_device *device = dev_priv->device;
+
+ while (1) {
+ if (idr_pre_get(&device->context_idr, GFP_KERNEL) == 0) {
+ KGSL_DRV_INFO(device, "idr_pre_get: ENOMEM\n");
+ ret = -ENOMEM;
+ break;
+ }
+
+ write_lock(&device->context_lock);
+ ret = idr_get_new_above(&device->context_idr, context, 1, &id);
+ context->id = id;
+ write_unlock(&device->context_lock);
+
+ if (ret != -EAGAIN)
+ break;
+ }
+
+ if (ret)
+ goto fail;
+
+ /* MAX - 1, there is one memdesc in memstore for device info */
+ if (id >= KGSL_MEMSTORE_MAX) {
+ KGSL_DRV_INFO(device, "cannot have more than %d "
+ "ctxts due to memstore limitation\n",
+ KGSL_MEMSTORE_MAX);
+ ret = -ENOSPC;
+ goto fail_free_id;
+ }
+
+ kref_init(&context->refcount);
+ /*
+ * Get a refernce to the process private so its not destroyed, until
+ * the context is destroyed. This will also prevent the pagetable
+ * from being destroyed
+ */
+ if (!kref_get_unless_zero(&dev_priv->process_priv->refcount))
+ goto fail_free_id;
+ context->device = dev_priv->device;
+ context->dev_priv = dev_priv;
+ context->proc_priv = dev_priv->process_priv;
+ context->pid = task_tgid_nr(current);
+
+ ret = kgsl_sync_timeline_create(context);
+ if (ret)
+ goto fail_free_id;
+
+ /* Initialize the pending event list */
+ INIT_LIST_HEAD(&context->events);
+
+ /*
+ * Initialize the node that is used to maintain the master list of
+ * contexts with pending events in the device structure. Normally we
+ * wouldn't take the time to initalize a node but at event add time we
+ * call list_empty() on the node as a quick way of determining if the
+ * context is already in the master list so it needs to always be either
+ * active or in an unused but initialized state
+ */
+
+ INIT_LIST_HEAD(&context->events_list);
+ return 0;
+fail_free_id:
+ write_lock(&device->context_lock);
+ idr_remove(&dev_priv->device->context_idr, id);
+ write_unlock(&device->context_lock);
+fail:
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_context_init);
+
+/**
+ * kgsl_context_detach() - Release the "master" context reference
+ * @context: The context that will be detached
+ *
+ * This is called when a context becomes unusable, because userspace
+ * has requested for it to be destroyed. The context itself may
+ * exist a bit longer until its reference count goes to zero.
+ * Other code referencing the context can detect that it has been
+ * detached by checking the KGSL_CONTEXT_DETACHED bit in
+ * context->priv.
+ */
+int kgsl_context_detach(struct kgsl_context *context)
+{
+ struct kgsl_device *device;
+ int ret;
+
+ if (context == NULL || kgsl_context_detached(context))
+ return -EINVAL;
+
+ device = context->device;
+
+ trace_kgsl_context_detach(device, context);
+
+ /*
+ * Mark the context as detached to keep others from using
+ * the context before it gets fully removed
+ */
+ set_bit(KGSL_CONTEXT_DETACHED, &context->priv);
+
+ ret = device->ftbl->drawctxt_detach(context);
+
+ /*
+ * Cancel events after the device-specific context is
+ * detached, to avoid possibly freeing memory while
+ * it is still in use by the GPU.
+ */
+
+ kgsl_context_cancel_events(device, context);
+
+ kgsl_context_put(context);
+
+ return ret;
+}
+
+void
+kgsl_context_destroy(struct kref *kref)
+{
+ struct kgsl_context *context = container_of(kref, struct kgsl_context,
+ refcount);
+ struct kgsl_device *device = context->device;
+
+ trace_kgsl_context_destroy(device, context);
+
+ BUG_ON(!kgsl_context_detached(context));
+
+ write_lock(&device->context_lock);
+ if (context->id != KGSL_CONTEXT_INVALID) {
+ idr_remove(&device->context_idr, context->id);
+ context->id = KGSL_CONTEXT_INVALID;
+ }
+ write_unlock(&device->context_lock);
+ kgsl_sync_timeline_destroy(context);
+ kgsl_put_process_private(device,
+ context->proc_priv);
+
+ device->ftbl->drawctxt_destroy(context);
+}
+
+struct kgsl_device *kgsl_get_device(int dev_idx)
+{
+ int i;
+ struct kgsl_device *ret = NULL;
+
+ mutex_lock(&kgsl_driver.devlock);
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
+ ret = kgsl_driver.devp[i];
+ break;
+ }
+ }
+
+ mutex_unlock(&kgsl_driver.devlock);
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_get_device);
+
+static struct kgsl_device *kgsl_get_minor(int minor)
+{
+ struct kgsl_device *ret = NULL;
+
+ if (minor < 0 || minor >= KGSL_DEVICE_MAX)
+ return NULL;
+
+ mutex_lock(&kgsl_driver.devlock);
+ ret = kgsl_driver.devp[minor];
+ mutex_unlock(&kgsl_driver.devlock);
+
+ return ret;
+}
+
+int kgsl_check_timestamp(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp)
+{
+ unsigned int ts_processed;
+
+ ts_processed = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ return (timestamp_cmp(ts_processed, timestamp) >= 0);
+}
+EXPORT_SYMBOL(kgsl_check_timestamp);
+
+static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
+{
+ int status = -EINVAL;
+ struct kgsl_pwrscale_policy *policy_saved;
+
+ if (!device)
+ return -EINVAL;
+
+ KGSL_PWR_WARN(device, "suspend start\n");
+
+ mutex_lock(&device->mutex);
+ policy_saved = device->pwrscale.policy;
+ device->pwrscale.policy = NULL;
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
+
+ /* Tell the device to drain the submission queue */
+ device->ftbl->drain(device);
+
+ /* Wait for the active count to hit zero */
+ status = kgsl_active_count_wait(device, 0);
+ if (status)
+ goto end;
+
+ /*
+ * An interrupt could have snuck in and requested NAP in
+ * the meantime, make sure we're on the SUSPEND path.
+ */
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
+
+ /* Don't let the timer wake us during suspended sleep. */
+ del_timer_sync(&device->idle_timer);
+ switch (device->state) {
+ case KGSL_STATE_INIT:
+ break;
+ case KGSL_STATE_ACTIVE:
+ case KGSL_STATE_NAP:
+ case KGSL_STATE_SLEEP:
+ /* make sure power is on to stop the device */
+ kgsl_pwrctrl_enable(device);
+ /* Get the completion ready to be waited upon. */
+ INIT_COMPLETION(device->hwaccess_gate);
+ device->ftbl->suspend_context(device);
+ device->ftbl->stop(device);
+ pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
+ PM_QOS_DEFAULT_VALUE);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
+ break;
+ case KGSL_STATE_SLUMBER:
+ INIT_COMPLETION(device->hwaccess_gate);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
+ break;
+ default:
+ KGSL_PWR_ERR(device, "suspend fail, device %d\n",
+ device->id);
+ goto end;
+ }
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ device->pwrscale.policy = policy_saved;
+ status = 0;
+
+end:
+ if (status) {
+ /* On failure, re-resume normal activity */
+ if (device->ftbl->resume)
+ device->ftbl->resume(device);
+ }
+
+ mutex_unlock(&device->mutex);
+ KGSL_PWR_WARN(device, "suspend end\n");
+
+ return status;
+}
+
+static int kgsl_resume_device(struct kgsl_device *device)
+{
+ if (!device)
+ return -EINVAL;
+
+ KGSL_PWR_WARN(device, "resume start\n");
+ mutex_lock(&device->mutex);
+ if (device->state == KGSL_STATE_SUSPEND) {
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
+ complete_all(&device->hwaccess_gate);
+ } else if (device->state != KGSL_STATE_INIT) {
+ /*
+ * This is an error situation,so wait for the device
+ * to idle and then put the device to SLUMBER state.
+ * This will put the device to the right state when
+ * we resume.
+ */
+ if (device->state == KGSL_STATE_ACTIVE)
+ device->ftbl->idle(device);
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
+ kgsl_pwrctrl_sleep(device);
+ KGSL_PWR_ERR(device,
+ "resume invoked without a suspend\n");
+ }
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+
+ /* Call the GPU specific resume function */
+ if (device->ftbl->resume)
+ device->ftbl->resume(device);
+
+ mutex_unlock(&device->mutex);
+ KGSL_PWR_WARN(device, "resume end\n");
+ return 0;
+}
+
+static int kgsl_suspend(struct device *dev)
+{
+
+ pm_message_t arg = {0};
+ struct kgsl_device *device = dev_get_drvdata(dev);
+ return kgsl_suspend_device(device, arg);
+}
+
+static int kgsl_resume(struct device *dev)
+{
+ struct kgsl_device *device = dev_get_drvdata(dev);
+ return kgsl_resume_device(device);
+}
+
+static int kgsl_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int kgsl_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+const struct dev_pm_ops kgsl_pm_ops = {
+ .suspend = kgsl_suspend,
+ .resume = kgsl_resume,
+ .runtime_suspend = kgsl_runtime_suspend,
+ .runtime_resume = kgsl_runtime_resume,
+};
+EXPORT_SYMBOL(kgsl_pm_ops);
+
+int kgsl_suspend_driver(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
+ return kgsl_suspend_device(device, state);
+}
+EXPORT_SYMBOL(kgsl_suspend_driver);
+
+int kgsl_resume_driver(struct platform_device *pdev)
+{
+ struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
+ return kgsl_resume_device(device);
+}
+EXPORT_SYMBOL(kgsl_resume_driver);
+
+/**
+ * kgsl_destroy_process_private() - Cleanup function to free process private
+ * @kref: - Pointer to object being destroyed's kref struct
+ * Free struct object and all other resources attached to it.
+ * Since the function can be used when not all resources inside process
+ * private have been allocated, there is a check to (before each resource
+ * cleanup) see if the struct member being cleaned is in fact allocated or not.
+ * If the value is not NULL, resource is freed.
+ */
+static void kgsl_destroy_process_private(struct kref *kref)
+{
+
+ struct kgsl_mem_entry *entry = NULL;
+ int next = 0;
+
+
+ struct kgsl_process_private *private = container_of(kref,
+ struct kgsl_process_private, refcount);
+
+ /*
+ * Remove this process from global process list
+ * We do not acquire a lock first as it is expected that
+ * kgsl_destroy_process_private() is only going to be called
+ * through kref_put() which is only called after acquiring
+ * the lock.
+ */
+ if (!private) {
+ KGSL_CORE_ERR("Cannot destroy null process private\n");
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return;
+ }
+ list_del(&private->list);
+ mutex_unlock(&kgsl_driver.process_mutex);
+
+ if (private->kobj.state_in_sysfs)
+ kgsl_process_uninit_sysfs(private);
+ if (private->debug_root)
+ debugfs_remove_recursive(private->debug_root);
+
+ while (1) {
+ spin_lock(&private->mem_lock);
+ entry = idr_get_next(&private->mem_idr, &next);
+ spin_unlock(&private->mem_lock);
+ if (entry == NULL)
+ break;
+ kgsl_mem_entry_put(entry);
+ /*
+ * Always start back at the beginning, to
+ * ensure all entries are removed,
+ * like list_for_each_entry_safe.
+ */
+ next = 0;
+ }
+ idr_destroy(&private->mem_idr);
+ kgsl_mmu_putpagetable(private->pagetable);
+
+ kfree(private);
+ return;
+}
+
+static void
+kgsl_put_process_private(struct kgsl_device *device,
+ struct kgsl_process_private *private)
+{
+ mutex_lock(&kgsl_driver.process_mutex);
+
+ /*
+ * kref_put() returns 1 when the refcnt has reached 0 and the destroy
+ * function is called. Mutex is released in the destroy function if
+ * its called, so only release mutex if kref_put() return 0
+ */
+ if (!kref_put(&private->refcount, kgsl_destroy_process_private))
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return;
+}
+
+/*
+ * find_process_private() - Helper function to search for process private
+ * @cur_dev_priv: Pointer to device private structure which contains pointers
+ * to device and process_private structs.
+ * Returns: Pointer to the found/newly created private struct
+ */
+static struct kgsl_process_private *
+kgsl_find_process_private(struct kgsl_device_private *cur_dev_priv)
+{
+ struct kgsl_process_private *private;
+
+ /* Search in the process list */
+ mutex_lock(&kgsl_driver.process_mutex);
+ list_for_each_entry(private, &kgsl_driver.process_list, list) {
+ if (private->pid == task_tgid_nr(current)) {
+ kref_get(&private->refcount);
+ goto done;
+ }
+ }
+
+ /* no existing process private found for this dev_priv, create one */
+ private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
+ if (private == NULL) {
+ KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%d) failed\n",
+ sizeof(struct kgsl_process_private));
+ goto done;
+ }
+
+ kref_init(&private->refcount);
+
+ private->pid = task_tgid_nr(current);
+ spin_lock_init(&private->mem_lock);
+ mutex_init(&private->process_private_mutex);
+ /* Add the newly created process struct obj to the process list */
+ list_add(&private->list, &kgsl_driver.process_list);
+done:
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return private;
+}
+
+/*
+ * kgsl_get_process_private() - Used to find the process private structure
+ * @cur_dev_priv: Current device pointer
+ * Finds or creates a new porcess private structire and initializes its members
+ * Returns: Pointer to the private process struct obj found/created or
+ * NULL if pagetable creation for this process private obj failed.
+ */
+static struct kgsl_process_private *
+kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
+{
+ struct kgsl_process_private *private;
+
+ private = kgsl_find_process_private(cur_dev_priv);
+
+ mutex_lock(&private->process_private_mutex);
+
+ /*
+ * If debug root initialized then it means the rest of the fields
+ * are also initialized
+ */
+ if (private->debug_root)
+ goto done;
+
+ private->mem_rb = RB_ROOT;
+ idr_init(&private->mem_idr);
+
+ if ((!private->pagetable) && kgsl_mmu_enabled()) {
+ unsigned long pt_name;
+ struct kgsl_mmu *mmu = &cur_dev_priv->device->mmu;
+
+ pt_name = task_tgid_nr(current);
+ private->pagetable = kgsl_mmu_getpagetable(mmu, pt_name);
+ if (private->pagetable == NULL)
+ goto error;
+ }
+
+ if (kgsl_process_init_sysfs(cur_dev_priv->device, private))
+ goto error;
+ if (kgsl_process_init_debugfs(private))
+ goto error;
+
+done:
+ mutex_unlock(&private->process_private_mutex);
+ return private;
+
+error:
+ mutex_unlock(&private->process_private_mutex);
+ kgsl_put_process_private(cur_dev_priv->device, private);
+ return NULL;
+}
+
+int kgsl_close_device(struct kgsl_device *device)
+{
+ int result = 0;
+ device->open_count--;
+ if (device->open_count == 0) {
+
+ /* Wait for the active count to go to 1 */
+ kgsl_active_count_wait(device, 1);
+
+ /* Fail if the wait times out */
+ BUG_ON(atomic_read(&device->active_cnt) > 1);
+
+ result = device->ftbl->stop(device);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ /*
+ * active_cnt special case: we just stopped the device,
+ * so no need to use kgsl_active_count_put()
+ */
+ atomic_dec(&device->active_cnt);
+ } else {
+ kgsl_active_count_put(device);
+ }
+ return result;
+
+}
+EXPORT_SYMBOL(kgsl_close_device);
+
+static int kgsl_release(struct inode *inodep, struct file *filep)
+{
+ int result = 0;
+ struct kgsl_device_private *dev_priv = filep->private_data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_context *context;
+ int next = 0;
+
+ filep->private_data = NULL;
+
+ mutex_lock(&device->mutex);
+ kgsl_active_count_get(device);
+
+ while (1) {
+ read_lock(&device->context_lock);
+ context = idr_get_next(&device->context_idr, &next);
+ read_unlock(&device->context_lock);
+
+ if (context == NULL)
+ break;
+
+ if (context->dev_priv == dev_priv) {
+ /*
+ * Hold a reference to the context in case somebody
+ * tries to put it while we are detaching
+ */
+
+ if (_kgsl_context_get(context)) {
+ kgsl_context_detach(context);
+ kgsl_context_put(context);
+ }
+ }
+
+ next = next + 1;
+ }
+ /*
+ * Clean up any to-be-freed entries that belong to this
+ * process and this device. This is done after the context
+ * are destroyed to avoid possibly freeing memory while
+ * it is still in use by the GPU.
+ */
+ kgsl_cancel_events(device, dev_priv);
+
+ result = kgsl_close_device(device);
+ mutex_unlock(&device->mutex);
+ kfree(dev_priv);
+
+ kgsl_put_process_private(device, private);
+
+ pm_runtime_put(device->parentdev);
+ return result;
+}
+
+int kgsl_open_device(struct kgsl_device *device)
+{
+ int result = 0;
+ if (device->open_count == 0) {
+ /*
+ * active_cnt special case: we are starting up for the first
+ * time, so use this sequence instead of the kgsl_pwrctrl_wake()
+ * which will be called by kgsl_active_count_get().
+ */
+ atomic_inc(&device->active_cnt);
+ kgsl_sharedmem_set(device, &device->memstore, 0, 0,
+ device->memstore.size);
+
+ result = device->ftbl->init(device);
+ if (result)
+ goto err;
+
+ result = device->ftbl->start(device);
+ if (result)
+ goto err;
+ /*
+ * Make sure the gates are open, so they don't block until
+ * we start suspend or FT.
+ */
+ complete_all(&device->hwaccess_gate);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ kgsl_active_count_put(device);
+ }
+ device->open_count++;
+err:
+ if (result)
+ atomic_dec(&device->active_cnt);
+
+ return result;
+}
+EXPORT_SYMBOL(kgsl_open_device);
+
+static int kgsl_open(struct inode *inodep, struct file *filep)
+{
+ int result;
+ struct kgsl_device_private *dev_priv;
+ struct kgsl_device *device;
+ unsigned int minor = iminor(inodep);
+
+ device = kgsl_get_minor(minor);
+ BUG_ON(device == NULL);
+
+ if (filep->f_flags & O_EXCL) {
+ KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
+ return -EBUSY;
+ }
+
+ result = pm_runtime_get_sync(device->parentdev);
+ if (result < 0) {
+ KGSL_DRV_ERR(device,
+ "Runtime PM: Unable to wake up the device, rc = %d\n",
+ result);
+ return result;
+ }
+ result = 0;
+
+ dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
+ if (dev_priv == NULL) {
+ KGSL_DRV_ERR(device, "kzalloc failed(%d)\n",
+ sizeof(struct kgsl_device_private));
+ result = -ENOMEM;
+ goto err_pmruntime;
+ }
+
+ dev_priv->device = device;
+ filep->private_data = dev_priv;
+
+ mutex_lock(&device->mutex);
+
+ result = kgsl_open_device(device);
+ if (result)
+ goto err_freedevpriv;
+ mutex_unlock(&device->mutex);
+
+ /*
+ * Get file (per process) private struct. This must be done
+ * after the first start so that the global pagetable mappings
+ * are set up before we create the per-process pagetable.
+ */
+ dev_priv->process_priv = kgsl_get_process_private(dev_priv);
+ if (dev_priv->process_priv == NULL) {
+ result = -ENOMEM;
+ goto err_stop;
+ }
+
+ KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
+ device->name, kgsl_mmu_enabled() ? "on" : "off",
+ kgsl_pagetable_count);
+
+ return result;
+
+err_stop:
+ mutex_lock(&device->mutex);
+ device->open_count--;
+ if (device->open_count == 0) {
+ /* make sure power is on to stop the device */
+ kgsl_pwrctrl_enable(device);
+ result = device->ftbl->stop(device);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ atomic_dec(&device->active_cnt);
+ }
+err_freedevpriv:
+ mutex_unlock(&device->mutex);
+ filep->private_data = NULL;
+ kfree(dev_priv);
+err_pmruntime:
+ pm_runtime_put(device->parentdev);
+ return result;
+}
+
+/**
+ * kgsl_sharedmem_find_region() - Find a gpu memory allocation
+ *
+ * @private: private data for the process to check.
+ * @gpuaddr: start address of the region
+ * @size: size of the region
+ *
+ * Find a gpu allocation. Caller must kgsl_mem_entry_put()
+ * the returned entry when finished using it.
+ */
+struct kgsl_mem_entry * __must_check
+kgsl_sharedmem_find_region(struct kgsl_process_private *private,
+ unsigned int gpuaddr, size_t size)
+{
+ struct rb_node *node;
+
+ if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr))
+ return NULL;
+
+ spin_lock(&private->mem_lock);
+ node = private->mem_rb.rb_node;
+ while (node != NULL) {
+ struct kgsl_mem_entry *entry;
+
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+
+ if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size)) {
+ if (!kgsl_mem_entry_get(entry))
+ break;
+ spin_unlock(&private->mem_lock);
+ return entry;
+ }
+ if (gpuaddr < entry->memdesc.gpuaddr)
+ node = node->rb_left;
+ else if (gpuaddr >=
+ (entry->memdesc.gpuaddr + entry->memdesc.size))
+ node = node->rb_right;
+ else {
+ spin_unlock(&private->mem_lock);
+ return NULL;
+ }
+ }
+ spin_unlock(&private->mem_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_find_region);
+
+/**
+ * kgsl_sharedmem_find() - Find a gpu memory allocation
+ *
+ * @private: private data for the process to check.
+ * @gpuaddr: start address of the region
+ *
+ * Find a gpu allocation. Caller must kgsl_mem_entry_put()
+ * the returned entry when finished using it.
+ */
+static inline struct kgsl_mem_entry * __must_check
+kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
+{
+ return kgsl_sharedmem_find_region(private, gpuaddr, 1);
+}
+
+/**
+ * kgsl_sharedmem_region_empty() - Check if an addression region is empty
+ *
+ * @private: private data for the process to check.
+ * @gpuaddr: start address of the region
+ * @size: length of the region.
+ *
+ * Checks that there are no existing allocations within an address
+ * region. This function should be called with processes spin lock
+ * held.
+ */
+static int
+kgsl_sharedmem_region_empty(struct kgsl_process_private *private,
+ unsigned int gpuaddr, size_t size)
+{
+ int result = 1;
+ unsigned int gpuaddr_end = gpuaddr + size;
+
+ struct rb_node *node;
+
+ assert_spin_locked(&private->mem_lock);
+
+ if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, gpuaddr))
+ return 0;
+
+ /* don't overflow */
+ if (gpuaddr_end < gpuaddr)
+ return 0;
+
+ node = private->mem_rb.rb_node;
+ while (node != NULL) {
+ struct kgsl_mem_entry *entry;
+ unsigned int memdesc_start, memdesc_end;
+
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+
+ memdesc_start = entry->memdesc.gpuaddr;
+ memdesc_end = memdesc_start
+ + kgsl_memdesc_mmapsize(&entry->memdesc);
+
+ if (gpuaddr_end <= memdesc_start)
+ node = node->rb_left;
+ else if (memdesc_end <= gpuaddr)
+ node = node->rb_right;
+ else {
+ result = 0;
+ break;
+ }
+ }
+ return result;
+}
+
+/**
+ * kgsl_sharedmem_find_id() - find a memory entry by id
+ * @process: the owning process
+ * @id: id to find
+ *
+ * @returns - the mem_entry or NULL
+ *
+ * Caller must kgsl_mem_entry_put() the returned entry, when finished using
+ * it.
+ */
+static inline struct kgsl_mem_entry * __must_check
+kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id)
+{
+ int result = 0;
+ struct kgsl_mem_entry *entry;
+
+ spin_lock(&process->mem_lock);
+ entry = idr_find(&process->mem_idr, id);
+ if (entry)
+ result = kgsl_mem_entry_get(entry);
+ spin_unlock(&process->mem_lock);
+
+ if (!result)
+ return NULL;
+ return entry;
+}
+
+/**
+ * kgsl_mem_entry_set_pend() - Set the pending free flag of a memory entry
+ * @entry - The memory entry
+ *
+ * @returns - true if pending flag was 0 else false
+ *
+ * This function will set the pending free flag if it is previously unset. Used
+ * to prevent race condition between ioctls calling free/freememontimestamp
+ * on the same entry. Whichever thread set's the flag first will do the free.
+ */
+static inline bool kgsl_mem_entry_set_pend(struct kgsl_mem_entry *entry)
+{
+ bool ret = false;
+ spin_lock(&entry->priv->mem_lock);
+ if (entry && entry->pending_free) {
+ ret = false;
+ } else if (entry) {
+ entry->pending_free = 1;
+ ret = true;
+ }
+ spin_unlock(&entry->priv->mem_lock);
+ return ret;
+}
+
+/*call all ioctl sub functions with driver locked*/
+static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_device_getproperty *param = data;
+
+ switch (param->type) {
+ case KGSL_PROP_VERSION:
+ {
+ struct kgsl_version version;
+ if (param->sizebytes != sizeof(version)) {
+ result = -EINVAL;
+ break;
+ }
+
+ version.drv_major = KGSL_VERSION_MAJOR;
+ version.drv_minor = KGSL_VERSION_MINOR;
+ version.dev_major = dev_priv->device->ver_major;
+ version.dev_minor = dev_priv->device->ver_minor;
+
+ if (copy_to_user(param->value, &version, sizeof(version)))
+ result = -EFAULT;
+
+ break;
+ }
+ case KGSL_PROP_GPU_RESET_STAT:
+ {
+ /* Return reset status of given context and clear it */
+ uint32_t id;
+ struct kgsl_context *context;
+
+ if (param->sizebytes != sizeof(unsigned int)) {
+ result = -EINVAL;
+ break;
+ }
+ /* We expect the value passed in to contain the context id */
+ if (copy_from_user(&id, param->value,
+ sizeof(unsigned int))) {
+ result = -EFAULT;
+ break;
+ }
+ context = kgsl_context_get_owner(dev_priv, id);
+ if (!context) {
+ result = -EINVAL;
+ break;
+ }
+ /*
+ * Copy the reset status to value which also serves as
+ * the out parameter
+ */
+ if (copy_to_user(param->value, &(context->reset_status),
+ sizeof(unsigned int)))
+ result = -EFAULT;
+ else {
+ /* Clear reset status once its been queried */
+ context->reset_status = KGSL_CTX_STAT_NO_ERROR;
+ }
+
+ kgsl_context_put(context);
+ break;
+ }
+ default:
+ result = dev_priv->device->ftbl->getproperty(
+ dev_priv->device, param->type,
+ param->value, param->sizebytes);
+ }
+
+
+ return result;
+}
+
+static long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ /* The getproperty struct is reused for setproperty too */
+ struct kgsl_device_getproperty *param = data;
+
+ if (dev_priv->device->ftbl->setproperty)
+ result = dev_priv->device->ftbl->setproperty(
+ dev_priv->device, param->type,
+ param->value, param->sizebytes);
+
+ return result;
+}
+
+static long _device_waittimestamp(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int timeout)
+{
+ int result = 0;
+ struct kgsl_device *device = dev_priv->device;
+ unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
+
+ trace_kgsl_waittimestamp_entry(device, context_id,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ timestamp, timeout);
+
+ result = device->ftbl->waittimestamp(dev_priv->device,
+ context, timestamp, timeout);
+
+ trace_kgsl_waittimestamp_exit(device,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ result);
+
+ return result;
+}
+
+static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_device_waittimestamp *param = data;
+
+ return _device_waittimestamp(dev_priv, NULL,
+ param->timestamp, param->timeout);
+}
+
+static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_device_waittimestamp_ctxtid *param = data;
+ struct kgsl_context *context;
+ long result = -EINVAL;
+
+ context = kgsl_context_get_owner(dev_priv, param->context_id);
+
+ if (context)
+ result = _device_waittimestamp(dev_priv, context,
+ param->timestamp, param->timeout);
+
+ kgsl_context_put(context);
+ return result;
+}
+
+/*
+ * KGSL command batch management
+ * A command batch is a single submission from userland. The cmdbatch
+ * encapsulates everything about the submission : command buffers, flags and
+ * sync points.
+ *
+ * Sync points are events that need to expire before the
+ * cmdbatch can be queued to the hardware. For each sync point a
+ * kgsl_cmdbatch_sync_event struct is created and added to a list in the
+ * cmdbatch. There can be multiple types of events both internal ones (GPU
+ * events) and external triggers. As the events expire the struct is deleted
+ * from the list. The GPU will submit the command batch as soon as the list
+ * goes empty indicating that all the sync points have been met.
+ */
+
+/**
+ * struct kgsl_cmdbatch_sync_event
+ * @type: Syncpoint type
+ * @node: Local list node for the cmdbatch sync point list
+ * @cmdbatch: Pointer to the cmdbatch that owns the sync event
+ * @context: Pointer to the KGSL context that owns the cmdbatch
+ * @timestamp: Pending timestamp for the event
+ * @handle: Pointer to a sync fence handle
+ * @device: Pointer to the KGSL device
+ */
+struct kgsl_cmdbatch_sync_event {
+ int type;
+ struct list_head node;
+ struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_context *context;
+ unsigned int timestamp;
+ struct kgsl_sync_fence_waiter *handle;
+ struct kgsl_device *device;
+ spinlock_t lock;
+};
+
+/**
+ * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object
+ * @kref: Pointer to the kref structure for this object
+ *
+ * Actually destroy a command batch object. Called from kgsl_cmdbatch_put
+ */
+void kgsl_cmdbatch_destroy_object(struct kref *kref)
+{
+ struct kgsl_cmdbatch *cmdbatch = container_of(kref,
+ struct kgsl_cmdbatch, refcount);
+
+ kgsl_context_put(cmdbatch->context);
+ kfree(cmdbatch->ibdesc);
+
+ kfree(cmdbatch);
+}
+EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object);
+
+static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
+ struct kgsl_cmdbatch_sync_event *event)
+{
+ int sched = 0;
+
+ spin_lock(&event->cmdbatch->lock);
+ list_del(&event->node);
+ sched = list_empty(&event->cmdbatch->synclist) ? 1 : 0;
+ spin_unlock(&event->cmdbatch->lock);
+
+ /*
+ * if this is the last event in the list then tell
+ * the GPU device that the cmdbatch can be submitted
+ */
+
+ if (sched && device->ftbl->drawctxt_sched)
+ device->ftbl->drawctxt_sched(device, event->cmdbatch->context);
+}
+
+
+/*
+ * This function is called by the GPU event when the sync event timestamp
+ * expires
+ */
+static void kgsl_cmdbatch_sync_func(struct kgsl_device *device, void *priv,
+ u32 id, u32 timestamp, u32 type)
+{
+ struct kgsl_cmdbatch_sync_event *event = priv;
+
+ kgsl_cmdbatch_sync_expire(device, event);
+
+ kgsl_context_put(event->context);
+ kgsl_cmdbatch_put(event->cmdbatch);
+
+ kfree(event);
+}
+
+/**
+ * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure
+ * @cmdbatch: Pointer to the command batch object to destroy
+ *
+ * Start the process of destroying a command batch. Cancel any pending events
+ * and decrement the refcount.
+ */
+void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+{
+ struct kgsl_cmdbatch_sync_event *event, *tmp;
+ int canceled = 0;
+
+ spin_lock(&cmdbatch->lock);
+
+ /* Delete any pending sync points for this command batch */
+ list_for_each_entry_safe(event, tmp, &cmdbatch->synclist, node) {
+
+ switch (event->type) {
+ case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
+ /* Cancel the event if it still exists */
+ mutex_lock(&cmdbatch->device->mutex);
+ kgsl_cancel_event(cmdbatch->device, event->context,
+ event->timestamp, kgsl_cmdbatch_sync_func,
+ event);
+ canceled = 1;
+ mutex_unlock(&cmdbatch->device->mutex);
+ kgsl_context_put(event->context);
+ break;
+ }
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+ canceled = kgsl_sync_fence_async_cancel(event->handle);
+ break;
+ default:
+ break;
+ }
+
+ if(canceled) {
+ list_del(&event->node);
+ kfree(event);
+
+ /*
+ * Put back a instance of the cmdbatch for each pending event
+ * that we canceled
+ */
+
+ kgsl_cmdbatch_put(cmdbatch);
+ }
+ }
+ spin_unlock(&cmdbatch->lock);
+
+ kgsl_cmdbatch_put(cmdbatch);
+}
+EXPORT_SYMBOL(kgsl_cmdbatch_destroy);
+
+static void kgsl_cmdbatch_sync_fence_func(void *priv)
+{
+ struct kgsl_cmdbatch_sync_event *event = priv;
+
+ spin_lock(&event->lock);
+ kgsl_cmdbatch_sync_expire(event->device, event);
+ kgsl_cmdbatch_put(event->cmdbatch);
+ spin_unlock(&event->lock);
+ kfree(event);
+}
+
+/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
+ * @device: KGSL device
+ * @cmdbatch: KGSL cmdbatch to add the sync point to
+ * @priv: Private sructure passed by the user
+ *
+ * Add a new fence sync syncpoint to the cmdbatch.
+ */
+static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
+ struct kgsl_cmdbatch *cmdbatch, void *priv)
+{
+ struct kgsl_cmd_syncpoint_fence *sync = priv;
+ struct kgsl_cmdbatch_sync_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+
+ if (event == NULL)
+ return -ENOMEM;
+
+ kref_get(&cmdbatch->refcount);
+
+ event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
+ event->cmdbatch = cmdbatch;
+ event->device = device;
+ spin_lock_init(&event->lock);
+
+ /*
+ * Add it to the list first to account for the possiblity that the
+ * callback will happen immediately after the call to
+ * kgsl_sync_fence_async_wait
+ */
+
+ spin_lock(&cmdbatch->lock);
+ list_add(&event->node, &cmdbatch->synclist);
+ spin_unlock(&cmdbatch->lock);
+
+ /*
+ * There is a distinct race condition that can occur if the fence
+ * callback is fired before the function has a chance to return. The
+ * event struct would be freed before we could write event->handle and
+ * hilarity ensued. Protect against this by protecting the call to
+ * kgsl_sync_fence_async_wait and the kfree in the callback with a lock.
+ */
+
+ spin_lock(&event->lock);
+
+ event->handle = kgsl_sync_fence_async_wait(sync->fd,
+ kgsl_cmdbatch_sync_fence_func, event);
+
+
+ if (IS_ERR_OR_NULL(event->handle)) {
+ int ret = PTR_ERR(event->handle);
+
+ spin_lock(&cmdbatch->lock);
+ list_del(&event->node);
+ spin_unlock(&cmdbatch->lock);
+
+ kgsl_cmdbatch_put(cmdbatch);
+ spin_unlock(&event->lock);
+ kfree(event);
+
+ return ret;
+ }
+
+ spin_unlock(&event->lock);
+ return 0;
+}
+
+/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
+ * @device: KGSL device
+ * @cmdbatch: KGSL cmdbatch to add the sync point to
+ * @priv: Private sructure passed by the user
+ *
+ * Add a new sync point timestamp event to the cmdbatch.
+ */
+static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
+ struct kgsl_cmdbatch *cmdbatch, void *priv)
+{
+ struct kgsl_cmd_syncpoint_timestamp *sync = priv;
+ struct kgsl_context *context = kgsl_context_get(cmdbatch->device,
+ sync->context_id);
+ struct kgsl_cmdbatch_sync_event *event;
+ int ret = -EINVAL;
+
+ if (context == NULL)
+ return -EINVAL;
+
+ /*
+ * We allow somebody to create a sync point on their own context.
+ * This has the effect of delaying a command from submitting until the
+ * dependent command has cleared. That said we obviously can't let them
+ * create a sync point on a future timestamp.
+ */
+
+ if (context == cmdbatch->context) {
+ KGSL_DRV_ERR(device,
+ "Cannot create a sync point on your own context\n");
+ goto done;
+ }
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ kref_get(&cmdbatch->refcount);
+
+ event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
+ event->cmdbatch = cmdbatch;
+ event->context = context;
+ event->timestamp = sync->timestamp;
+
+ spin_lock(&cmdbatch->lock);
+ list_add(&event->node, &cmdbatch->synclist);
+ spin_unlock(&cmdbatch->lock);
+
+ mutex_lock(&device->mutex);
+ kgsl_active_count_get(device);
+ ret = kgsl_add_event(device, context->id, sync->timestamp,
+ kgsl_cmdbatch_sync_func, event, NULL);
+ kgsl_active_count_put(device);
+ mutex_unlock(&device->mutex);
+
+ if (ret) {
+ spin_lock(&cmdbatch->lock);
+ list_del(&event->node);
+ spin_unlock(&cmdbatch->lock);
+
+ kgsl_cmdbatch_put(cmdbatch);
+ kfree(event);
+ }
+
+done:
+ if (ret)
+ kgsl_context_put(context);
+
+ return ret;
+}
+
+/**
+ * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch
+ * @device: Pointer to the KGSL device struct for the GPU
+ * @cmdbatch: Pointer to the cmdbatch
+ * @sync: Pointer to the user-specified struct defining the syncpoint
+ *
+ * Create a new sync point in the cmdbatch based on the user specified
+ * parameters
+ */
+static int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
+ struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_cmd_syncpoint *sync)
+{
+ void *priv;
+ int ret, psize;
+ int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch,
+ void *priv);
+
+ switch (sync->type) {
+ case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
+ psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
+ func = kgsl_cmdbatch_add_sync_timestamp;
+ break;
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
+ psize = sizeof(struct kgsl_cmd_syncpoint_fence);
+ func = kgsl_cmdbatch_add_sync_fence;
+ break;
+ default:
+ KGSL_DRV_ERR(device, "Invalid sync type 0x%x\n", sync->type);
+ return -EINVAL;
+ }
+
+ if (sync->size != psize) {
+ KGSL_DRV_ERR(device, "Invalid sync size %d\n", sync->size);
+ return -EINVAL;
+ }
+
+ priv = kzalloc(sync->size, GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(priv, sync->priv, sync->size)) {
+ kfree(priv);
+ return -EFAULT;
+ }
+
+ ret = func(device, cmdbatch, priv);
+ kfree(priv);
+
+ return ret;
+}
+
+/**
+ * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * @device: Pointer to a KGSL device struct
+ * @context: Pointer to a KGSL context struct
+ * @numibs: Number of indirect buffers to make room for in the cmdbatch
+ *
+ * Allocate an new cmdbatch structure and add enough room to store the list of
+ * indirect buffers
+ */
+static struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags,
+ unsigned int numibs)
+{
+ struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
+ if (cmdbatch == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Increase the reference count on the context so it doesn't disappear
+ * during the lifetime of this command batch
+ */
+
+ if (!_kgsl_context_get(context)) {
+ kfree(cmdbatch);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!(flags & KGSL_CONTEXT_SYNC)) {
+ cmdbatch->ibdesc = kzalloc(sizeof(*cmdbatch->ibdesc) * numibs,
+ GFP_KERNEL);
+ if (cmdbatch->ibdesc == NULL) {
+ kgsl_context_put(context);
+ kfree(cmdbatch);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ kref_init(&cmdbatch->refcount);
+ INIT_LIST_HEAD(&cmdbatch->synclist);
+ spin_lock_init(&cmdbatch->lock);
+
+ cmdbatch->device = device;
+ cmdbatch->ibcount = (flags & KGSL_CONTEXT_SYNC) ? 0 : numibs;
+ cmdbatch->context = context;
+ cmdbatch->flags = flags;
+
+ return cmdbatch;
+}
+
+/**
+ * _kgsl_cmdbatch_verify() - Perform a quick sanity check on a command batch
+ * @device: Pointer to a KGSL device that owns the command batch
+ * @cmdbatch: Number of indirect buffers to make room for in the cmdbatch
+ *
+ * Do a quick sanity test on the list of indirect buffers in a command batch
+ * verifying that the size and GPU address
+ */
+static bool _kgsl_cmdbatch_verify(struct kgsl_device_private *dev_priv,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ int i;
+
+ struct kgsl_process_private *private = dev_priv->process_priv;
+
+ for (i = 0; i < cmdbatch->ibcount; i++) {
+ if (cmdbatch->ibdesc[i].sizedwords == 0) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Invalid IB: size is 0\n");
+ return false;
+ }
+
+ if (!kgsl_mmu_gpuaddr_in_range(private->pagetable,
+ cmdbatch->ibdesc[i].gpuaddr)) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Invalid IB: address 0x%X is out of range\n",
+ cmdbatch->ibdesc[i].gpuaddr);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * _kgsl_cmdbatch_create_legacy() - Create a cmdbatch from a legacy ioctl struct
+ * @device: Pointer to the KGSL device struct for the GPU
+ * @context: Pointer to the KGSL context that issued the command batch
+ * @param: Pointer to the kgsl_ringbuffer_issueibcmds struct that the user sent
+ *
+ * Create a command batch from the legacy issueibcmds format.
+ */
+static struct kgsl_cmdbatch *_kgsl_cmdbatch_create_legacy(
+ struct kgsl_device *device,
+ struct kgsl_context *context,
+ struct kgsl_ringbuffer_issueibcmds *param)
+{
+ struct kgsl_cmdbatch *cmdbatch =
+ kgsl_cmdbatch_create(device, context, param->flags, 1);
+
+ if (IS_ERR(cmdbatch))
+ return cmdbatch;
+
+ cmdbatch->ibdesc[0].gpuaddr = param->ibdesc_addr;
+ cmdbatch->ibdesc[0].sizedwords = param->numibs;
+ cmdbatch->ibcount = 1;
+ cmdbatch->flags = param->flags;
+
+ return cmdbatch;
+}
+
+/**
+ * _kgsl_cmdbatch_create() - Create a cmdbatch from a ioctl struct
+ * @device: Pointer to the KGSL device struct for the GPU
+ * @context: Pointer to the KGSL context that issued the command batch
+ * @flags: Flags passed in from the user command
+ * @cmdlist: Pointer to the list of commands from the user
+ * @numcmds: Number of commands in the list
+ * @synclist: Pointer to the list of syncpoints from the user
+ * @numsyncs: Number of syncpoints in the list
+ *
+ * Create a command batch from the standard issueibcmds format sent by the user.
+ */
+static struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int flags,
+ unsigned int cmdlist, unsigned int numcmds,
+ unsigned int synclist, unsigned int numsyncs)
+{
+ struct kgsl_cmdbatch *cmdbatch =
+ kgsl_cmdbatch_create(device, context, flags, numcmds);
+ int ret = 0;
+
+ if (IS_ERR(cmdbatch))
+ return cmdbatch;
+
+ if (!(flags & KGSL_CONTEXT_SYNC)) {
+ if (copy_from_user(cmdbatch->ibdesc, (void __user *) cmdlist,
+ sizeof(struct kgsl_ibdesc) * numcmds)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+
+ if (synclist && numsyncs) {
+ struct kgsl_cmd_syncpoint sync;
+ void __user *uptr = (void __user *) synclist;
+ int i;
+
+ for (i = 0; i < numsyncs; i++) {
+ memset(&sync, 0, sizeof(sync));
+
+ if (copy_from_user(&sync, uptr, sizeof(sync))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+
+ if (ret)
+ break;
+
+ uptr += sizeof(sync);
+ }
+ }
+
+done:
+ if (ret) {
+ kgsl_cmdbatch_destroy(cmdbatch);
+ return ERR_PTR(ret);
+ }
+
+ cmdbatch->flags = flags;
+
+ return cmdbatch;
+}
+
+static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_ringbuffer_issueibcmds *param = data;
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_context *context;
+ struct kgsl_cmdbatch *cmdbatch;
+ long result = -EINVAL;
+
+ /* The legacy functions don't support synchronization commands */
+ if (param->flags & KGSL_CONTEXT_SYNC)
+ return -EINVAL;
+
+ /* Get the context */
+ context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
+ if (context == NULL)
+ goto done;
+
+ if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
+ /*
+ * Do a quick sanity check on the number of IBs in the
+ * submission
+ */
+
+ if (param->numibs == 0 || param->numibs > 100000)
+ goto done;
+
+ cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags,
+ param->ibdesc_addr, param->numibs, 0, 0);
+ } else
+ cmdbatch = _kgsl_cmdbatch_create_legacy(device, context, param);
+
+ if (IS_ERR(cmdbatch)) {
+ result = PTR_ERR(cmdbatch);
+ goto done;
+ }
+
+ /* Run basic sanity checking on the command */
+ if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch)) {
+ KGSL_DRV_ERR(device, "Unable to verify the IBs\n");
+ goto free_cmdbatch;
+ }
+
+ result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
+ cmdbatch, ¶m->timestamp);
+
+free_cmdbatch:
+ if (result)
+ kgsl_cmdbatch_destroy(cmdbatch);
+
+done:
+ kgsl_context_put(context);
+ return result;
+}
+
+static long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_submit_commands *param = data;
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_context *context;
+ struct kgsl_cmdbatch *cmdbatch;
+
+ long result = -EINVAL;
+
+ /* The number of IBs are completely ignored for sync commands */
+ if (!(param->flags & KGSL_CONTEXT_SYNC)) {
+ if (param->numcmds == 0 || param->numcmds > 100000)
+ return -EINVAL;
+ } else if (param->numcmds != 0) {
+ KGSL_DRV_ERR(device,
+ "Commands specified with the SYNC flag. They will be ignored\n");
+ }
+
+ context = kgsl_context_get_owner(dev_priv, param->context_id);
+ if (context == NULL)
+ return -EINVAL;
+
+ cmdbatch = _kgsl_cmdbatch_create(device, context, param->flags,
+ (unsigned int) param->cmdlist, param->numcmds,
+ (unsigned int) param->synclist, param->numsyncs);
+
+ if (IS_ERR(cmdbatch)) {
+ result = PTR_ERR(cmdbatch);
+ goto done;
+ }
+
+ /* Run basic sanity checking on the command */
+ if (!_kgsl_cmdbatch_verify(dev_priv, cmdbatch)) {
+ KGSL_DRV_ERR(device, "Unable to verify the IBs\n");
+ goto free_cmdbatch;
+ }
+
+ result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
+ cmdbatch, ¶m->timestamp);
+
+free_cmdbatch:
+ if (result)
+ kgsl_cmdbatch_destroy(cmdbatch);
+
+done:
+ kgsl_context_put(context);
+ return result;
+}
+
+static long _cmdstream_readtimestamp(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, unsigned int type,
+ unsigned int *timestamp)
+{
+ *timestamp = kgsl_readtimestamp(dev_priv->device, context, type);
+
+ trace_kgsl_readtimestamp(dev_priv->device,
+ context ? context->id : KGSL_MEMSTORE_GLOBAL,
+ type, *timestamp);
+
+ return 0;
+}
+
+static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_cmdstream_readtimestamp *param = data;
+
+ return _cmdstream_readtimestamp(dev_priv, NULL,
+ param->type, ¶m->timestamp);
+}
+
+static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
+ struct kgsl_context *context;
+ long result = -EINVAL;
+
+ context = kgsl_context_get_owner(dev_priv, param->context_id);
+
+ if (context)
+ result = _cmdstream_readtimestamp(dev_priv, context,
+ param->type, ¶m->timestamp);
+
+ kgsl_context_put(context);
+ return result;
+}
+
+static void kgsl_freemem_event_cb(struct kgsl_device *device,
+ void *priv, u32 id, u32 timestamp, u32 type)
+{
+ struct kgsl_mem_entry *entry = priv;
+
+ /* Free the memory for all event types */
+ trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
+ kgsl_mem_entry_put(entry);
+}
+
+static long _cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv,
+ unsigned int gpuaddr, struct kgsl_context *context,
+ unsigned int timestamp, unsigned int type)
+{
+ int result = 0;
+ struct kgsl_mem_entry *entry = NULL;
+ struct kgsl_device *device = dev_priv->device;
+ unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
+
+ entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr);
+
+ if (!entry) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "invalid gpuaddr %08x\n", gpuaddr);
+ return -EINVAL;
+ }
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ KGSL_DRV_WARN(dev_priv->device,
+ "Cannot set pending bit for gpuaddr %08x\n", gpuaddr);
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
+ trace_kgsl_mem_timestamp_queue(device, entry, context_id,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ timestamp);
+ result = kgsl_add_event(dev_priv->device, context_id, timestamp,
+ kgsl_freemem_event_cb, entry, dev_priv);
+ kgsl_mem_entry_put(entry);
+ return result;
+}
+
+static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_cmdstream_freememontimestamp *param = data;
+
+ return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
+ NULL, param->timestamp, param->type);
+}
+
+static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
+ struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
+ struct kgsl_context *context;
+ long result = -EINVAL;
+
+ context = kgsl_context_get_owner(dev_priv, param->context_id);
+ if (context)
+ result = _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
+ context, param->timestamp, param->type);
+ kgsl_context_put(context);
+ return result;
+}
+
+static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_drawctxt_create *param = data;
+ struct kgsl_context *context = NULL;
+ struct kgsl_device *device = dev_priv->device;
+
+ context = device->ftbl->drawctxt_create(dev_priv, ¶m->flags);
+ if (IS_ERR(context)) {
+ result = PTR_ERR(context);
+ goto done;
+ }
+ trace_kgsl_context_create(dev_priv->device, context, param->flags);
+ param->drawctxt_id = context->id;
+done:
+ return result;
+}
+
+static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_drawctxt_destroy *param = data;
+ struct kgsl_context *context;
+ long result;
+
+ context = kgsl_context_get_owner(dev_priv, param->drawctxt_id);
+
+ result = kgsl_context_detach(context);
+
+ kgsl_context_put(context);
+ return result;
+}
+
+static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_sharedmem_free *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+
+ entry = kgsl_sharedmem_find(private, param->gpuaddr);
+ if (!entry) {
+ KGSL_MEM_INFO(dev_priv->device, "invalid gpuaddr %08x\n",
+ param->gpuaddr);
+ return -EINVAL;
+ }
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
+ trace_kgsl_mem_free(entry);
+
+ kgsl_memfree_hist_set_event(entry->priv->pid,
+ entry->memdesc.gpuaddr,
+ entry->memdesc.size,
+ entry->memdesc.flags);
+
+ /*
+ * First kgsl_mem_entry_put is for the reference that we took in
+ * this function when calling kgsl_sharedmem_find, second one is
+ * to free the memory since this is a free ioctl
+ */
+ kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put(entry);
+ return 0;
+}
+
+static long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_gpumem_free_id *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+
+ entry = kgsl_sharedmem_find_id(private, param->id);
+
+ if (!entry) {
+ KGSL_MEM_INFO(dev_priv->device, "invalid id %d\n", param->id);
+ return -EINVAL;
+ }
+
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
+ trace_kgsl_mem_free(entry);
+
+ /*
+ * First kgsl_mem_entry_put is for the reference that we took in
+ * this function when calling kgsl_sharedmem_find_id, second one is
+ * to free the memory since this is a free ioctl
+ */
+ kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put(entry);
+ return 0;
+}
+
+static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
+{
+ struct vm_area_struct *vma;
+
+ down_read(¤t->mm->mmap_sem);
+ vma = find_vma(current->mm, addr);
+ up_read(¤t->mm->mmap_sem);
+ if (!vma)
+ KGSL_CORE_ERR("find_vma(%x) failed\n", addr);
+
+ return vma;
+}
+
+static inline int _check_region(unsigned long start, unsigned long size,
+ uint64_t len)
+{
+ uint64_t end = ((uint64_t) start) + size;
+ return (end > len);
+}
+
+static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
+ unsigned long *vstart, struct file **filep)
+{
+ struct file *fbfile;
+ int ret = 0;
+ dev_t rdev;
+ struct fb_info *info;
+
+ *start = 0;
+ *vstart = 0;
+ *len = 0;
+ *filep = NULL;
+
+ fbfile = fget(fd);
+ if (fbfile == NULL) {
+ KGSL_CORE_ERR("fget_light failed\n");
+ return -1;
+ }
+
+ rdev = fbfile->f_dentry->d_inode->i_rdev;
+ info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
+ if (info) {
+ *start = info->fix.smem_start;
+ *len = info->fix.smem_len;
+ *vstart = (unsigned long)__va(info->fix.smem_start);
+ ret = 0;
+ } else {
+ KGSL_CORE_ERR("framebuffer minor %d not found\n",
+ MINOR(rdev));
+ ret = -1;
+ }
+
+ fput(fbfile);
+
+ return ret;
+}
+
+static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable,
+ unsigned int fd, unsigned int offset,
+ size_t size)
+{
+ int ret;
+ unsigned long phys, virt, len;
+ struct file *filep;
+
+ ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
+ if (ret)
+ return ret;
+
+ ret = -ERANGE;
+
+ if (phys == 0) {
+ KGSL_CORE_ERR("kgsl_get_phys_file returned phys=0\n");
+ goto err;
+ }
+
+ /* Make sure the length of the region, the offset and the desired
+ * size are all page aligned or bail
+ */
+ if ((len & ~PAGE_MASK) ||
+ (offset & ~PAGE_MASK) ||
+ (size & ~PAGE_MASK)) {
+ KGSL_CORE_ERR("length %lu, offset %u or size %u "
+ "is not page aligned\n",
+ len, offset, size);
+ goto err;
+ }
+
+ /* The size or offset can never be greater than the PMEM length */
+ if (offset >= len || size > len) {
+ KGSL_CORE_ERR("offset %u or size %u "
+ "exceeds pmem length %lu\n",
+ offset, size, len);
+ goto err;
+ }
+
+ /* If size is 0, then adjust it to default to the size of the region
+ * minus the offset. If size isn't zero, then make sure that it will
+ * fit inside of the region.
+ */
+ if (size == 0)
+ size = len - offset;
+
+ else if (_check_region(offset, size, len))
+ goto err;
+
+ entry->priv_data = filep;
+
+ entry->memdesc.pagetable = pagetable;
+ entry->memdesc.size = size;
+ entry->memdesc.physaddr = phys + offset;
+ entry->memdesc.hostptr = (void *) (virt + offset);
+ /* USE_CPU_MAP is not impemented for PMEM. */
+ entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
+
+ ret = memdesc_sg_phys(&entry->memdesc, phys + offset, size);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ return ret;
+}
+
+static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
+ unsigned long paddr, int size)
+{
+ int i;
+ int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
+
+ memdesc->sg = kgsl_sg_alloc(sglen);
+
+ if (memdesc->sg == NULL)
+ return -ENOMEM;
+
+ memdesc->sglen = sglen;
+ memdesc->sglen_alloc = sglen;
+
+ sg_init_table(memdesc->sg, sglen);
+
+ spin_lock(¤t->mm->page_table_lock);
+
+ for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
+ struct page *page;
+ pmd_t *ppmd;
+ pte_t *ppte;
+ pgd_t *ppgd = pgd_offset(current->mm, paddr);
+
+ if (pgd_none(*ppgd) || pgd_bad(*ppgd))
+ goto err;
+
+ ppmd = pmd_offset(pud_offset(ppgd, paddr), paddr);
+ if (pmd_none(*ppmd) || pmd_bad(*ppmd))
+ goto err;
+
+ ppte = pte_offset_map(ppmd, paddr);
+ if (ppte == NULL)
+ goto err;
+
+ page = pfn_to_page(pte_pfn(*ppte));
+ if (!page)
+ goto err;
+
+ sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
+ pte_unmap(ppte);
+ }
+
+ spin_unlock(¤t->mm->page_table_lock);
+
+ return 0;
+
+err:
+ spin_unlock(¤t->mm->page_table_lock);
+ kgsl_sg_free(memdesc->sg, sglen);
+ memdesc->sg = NULL;
+
+ return -EINVAL;
+}
+
+static int kgsl_setup_useraddr(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable,
+ unsigned long useraddr, unsigned int offset,
+ size_t size)
+{
+ struct vm_area_struct *vma;
+ unsigned int len;
+
+ down_read(¤t->mm->mmap_sem);
+ vma = find_vma(current->mm, useraddr);
+ up_read(¤t->mm->mmap_sem);
+
+ if (!vma) {
+ KGSL_CORE_ERR("find_vma(%lx) failed\n", useraddr);
+ return -EINVAL;
+ }
+
+ /* We don't necessarily start at vma->vm_start */
+ len = vma->vm_end - useraddr;
+
+ if (offset >= len)
+ return -EINVAL;
+
+ if (!KGSL_IS_PAGE_ALIGNED(useraddr) ||
+ !KGSL_IS_PAGE_ALIGNED(len)) {
+ KGSL_CORE_ERR("bad alignment: start(%lx) len(%u)\n",
+ useraddr, len);
+ return -EINVAL;
+ }
+
+ if (size == 0)
+ size = len;
+
+ /* Adjust the size of the region to account for the offset */
+ size += offset & ~PAGE_MASK;
+
+ size = ALIGN(size, PAGE_SIZE);
+
+ if (_check_region(offset & PAGE_MASK, size, len)) {
+ KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger"
+ "than region length %d\n",
+ offset & PAGE_MASK, size, len);
+ return -EINVAL;
+ }
+
+ entry->memdesc.pagetable = pagetable;
+ entry->memdesc.size = size;
+ entry->memdesc.useraddr = useraddr + (offset & PAGE_MASK);
+ if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
+ entry->memdesc.gpuaddr = entry->memdesc.useraddr;
+
+ return memdesc_sg_virt(&entry->memdesc, entry->memdesc.useraddr,
+ size);
+}
+
+#ifdef CONFIG_ASHMEM
+static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable,
+ int fd, unsigned long useraddr, size_t size)
+{
+ int ret;
+ struct vm_area_struct *vma;
+ struct file *filep, *vmfile;
+ unsigned long len;
+
+ vma = kgsl_get_vma_from_start_addr(useraddr);
+ if (vma == NULL)
+ return -EINVAL;
+
+ if (vma->vm_pgoff || vma->vm_start != useraddr) {
+ KGSL_CORE_ERR("Invalid vma region\n");
+ return -EINVAL;
+ }
+
+ len = vma->vm_end - vma->vm_start;
+
+ if (size == 0)
+ size = len;
+
+ if (size != len) {
+ KGSL_CORE_ERR("Invalid size %d for vma region %lx\n",
+ size, useraddr);
+ return -EINVAL;
+ }
+
+ ret = get_ashmem_file(fd, &filep, &vmfile, &len);
+
+ if (ret) {
+ KGSL_CORE_ERR("get_ashmem_file failed\n");
+ return ret;
+ }
+
+ if (vmfile != vma->vm_file) {
+ KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ entry->priv_data = filep;
+ entry->memdesc.pagetable = pagetable;
+ entry->memdesc.size = ALIGN(size, PAGE_SIZE);
+ entry->memdesc.useraddr = useraddr;
+ if (kgsl_memdesc_use_cpu_map(&entry->memdesc))
+ entry->memdesc.gpuaddr = entry->memdesc.useraddr;
+
+ ret = memdesc_sg_virt(&entry->memdesc, useraddr, size);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ put_ashmem_file(filep);
+ return ret;
+}
+#else
+static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable,
+ int fd, unsigned long useraddr, size_t size)
+{
+ return -EINVAL;
+}
+#endif
+
+static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable, void *data,
+ struct kgsl_device *device)
+{
+ struct scatterlist *s;
+ struct sg_table *sg_table;
+ struct kgsl_map_user_mem *param = data;
+ int fd = param->fd;
+ struct dma_buf *dmabuf;
+ struct dma_buf_attachment *attach = NULL;
+ struct kgsl_dma_buf_meta *meta;
+ int ret = 0;
+
+ if (!param->len)
+ return -EINVAL;
+
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+ if (!meta)
+ return -ENOMEM;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto out;
+ }
+
+ attach = dma_buf_attach(dmabuf, device->dev);
+ if (IS_ERR_OR_NULL(attach)) {
+ ret = PTR_ERR(attach);
+ goto out;
+ }
+
+ meta->dmabuf = dmabuf;
+ meta->attach = attach;
+
+ entry->memtype = KGSL_MEM_ENTRY_ION;
+ entry->memdesc.pagetable = pagetable;
+ entry->memdesc.size = 0;
+ /* USE_CPU_MAP is not impemented for ION. */
+ entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
+
+ sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
+
+ if (IS_ERR_OR_NULL(sg_table)) {
+ ret = PTR_ERR(sg_table);
+ goto out;
+ }
+
+ meta->table = sg_table;
+ entry->priv_data = meta;
+ entry->memdesc.sg = sg_table->sgl;
+
+ /* Calculate the size of the memdesc from the sglist */
+
+ entry->memdesc.sglen = 0;
+
+ for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) {
+ entry->memdesc.size += s->length;
+ entry->memdesc.sglen++;
+ }
+
+ entry->memdesc.size = PAGE_ALIGN(entry->memdesc.size);
+
+out:
+ if (ret) {
+ if (!IS_ERR_OR_NULL(attach))
+ dma_buf_detach(dmabuf, attach);
+
+ if (!IS_ERR_OR_NULL(dmabuf))
+ dma_buf_put(dmabuf);
+
+ kfree(meta);
+ }
+
+ return ret;
+}
+
+static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = -EINVAL;
+ struct kgsl_map_user_mem *param = data;
+ struct kgsl_mem_entry *entry = NULL;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ enum kgsl_user_mem_type memtype;
+
+ entry = kgsl_mem_entry_create();
+
+ if (entry == NULL)
+ return -ENOMEM;
+
+ if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
+ memtype = KGSL_USER_MEM_TYPE_PMEM;
+ else
+ memtype = param->memtype;
+
+ /*
+ * Mask off unknown flags from userspace. This way the caller can
+ * check if a flag is supported by looking at the returned flags.
+ * Note: CACHEMODE is ignored for this call. Caching should be
+ * determined by type of allocation being mapped.
+ */
+ param->flags &= KGSL_MEMFLAGS_GPUREADONLY
+ | KGSL_MEMTYPE_MASK
+ | KGSL_MEMALIGN_MASK
+ | KGSL_MEMFLAGS_USE_CPU_MAP;
+
+ entry->memdesc.flags = param->flags;
+ if (!kgsl_mmu_use_cpu_map(private->pagetable->mmu))
+ entry->memdesc.flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
+
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
+ entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
+
+ switch (memtype) {
+ case KGSL_USER_MEM_TYPE_PMEM:
+ if (param->fd == 0 || param->len == 0)
+ break;
+
+ result = kgsl_setup_phys_file(entry, private->pagetable,
+ param->fd, param->offset,
+ param->len);
+ entry->memtype = KGSL_MEM_ENTRY_PMEM;
+ break;
+
+ case KGSL_USER_MEM_TYPE_ADDR:
+ KGSL_DEV_ERR_ONCE(dev_priv->device, "User mem type "
+ "KGSL_USER_MEM_TYPE_ADDR is deprecated\n");
+ if (!kgsl_mmu_enabled()) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Cannot map paged memory with the "
+ "MMU disabled\n");
+ break;
+ }
+
+ if (param->hostptr == 0)
+ break;
+
+ result = kgsl_setup_useraddr(entry, private->pagetable,
+ param->hostptr,
+ param->offset, param->len);
+ entry->memtype = KGSL_MEM_ENTRY_USER;
+ break;
+
+ case KGSL_USER_MEM_TYPE_ASHMEM:
+ if (!kgsl_mmu_enabled()) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Cannot map paged memory with the "
+ "MMU disabled\n");
+ break;
+ }
+
+ if (param->hostptr == 0)
+ break;
+
+ result = kgsl_setup_ashmem(entry, private->pagetable,
+ param->fd, param->hostptr,
+ param->len);
+
+ entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
+ break;
+ case KGSL_USER_MEM_TYPE_ION:
+ result = kgsl_setup_ion(entry, private->pagetable, data,
+ dev_priv->device);
+ break;
+ default:
+ KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
+ break;
+ }
+
+ if (result)
+ goto error;
+
+ if (entry->memdesc.size >= SZ_1M)
+ kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_1M));
+ else if (entry->memdesc.size >= SZ_64K)
+ kgsl_memdesc_set_align(&entry->memdesc, ilog2(SZ_64));
+
+ /* echo back flags */
+ param->flags = entry->memdesc.flags;
+
+ result = kgsl_mem_entry_attach_process(entry, private);
+ if (result)
+ goto error_attach;
+
+ /* Adjust the returned value for a non 4k aligned offset */
+ param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
+
+ KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
+ kgsl_driver.stats.mapped_max);
+
+ kgsl_process_add_stats(private, entry->memtype, param->len);
+
+ trace_kgsl_mem_map(entry, param->fd);
+
+ return result;
+
+error_attach:
+ switch (entry->memtype) {
+ case KGSL_MEM_ENTRY_PMEM:
+ case KGSL_MEM_ENTRY_ASHMEM:
+ if (entry->priv_data)
+ fput(entry->priv_data);
+ break;
+ case KGSL_MEM_ENTRY_ION:
+ kgsl_destroy_ion(entry->priv_data);
+ break;
+ default:
+ break;
+ }
+error:
+ kfree(entry);
+ return result;
+}
+
+static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry, int op)
+{
+ int ret = 0;
+ int cacheop;
+ int mode;
+
+ /*
+ * Flush is defined as (clean | invalidate). If both bits are set, then
+ * do a flush, otherwise check for the individual bits and clean or inv
+ * as requested
+ */
+
+ if ((op & KGSL_GPUMEM_CACHE_FLUSH) == KGSL_GPUMEM_CACHE_FLUSH)
+ cacheop = KGSL_CACHE_OP_FLUSH;
+ else if (op & KGSL_GPUMEM_CACHE_CLEAN)
+ cacheop = KGSL_CACHE_OP_CLEAN;
+ else if (op & KGSL_GPUMEM_CACHE_INV)
+ cacheop = KGSL_CACHE_OP_INV;
+ else {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mode = kgsl_memdesc_get_cachemode(&entry->memdesc);
+ if (mode != KGSL_CACHEMODE_UNCACHED
+ && mode != KGSL_CACHEMODE_WRITECOMBINE) {
+ trace_kgsl_mem_sync_cache(entry, op);
+ kgsl_cache_range_op(&entry->memdesc, cacheop);
+ }
+
+done:
+ return ret;
+}
+
+/* New cache sync function - supports both directions (clean and invalidate) */
+
+static long
+kgsl_ioctl_gpumem_sync_cache(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_gpumem_sync_cache *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+ long ret;
+
+ if (param->id != 0) {
+ entry = kgsl_sharedmem_find_id(private, param->id);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n",
+ param->id);
+ return -EINVAL;
+ }
+ } else if (param->gpuaddr != 0) {
+ entry = kgsl_sharedmem_find(private, param->gpuaddr);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device,
+ "can't find gpuaddr %x\n",
+ param->gpuaddr);
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ ret = _kgsl_gpumem_sync_cache(entry, param->op);
+ kgsl_mem_entry_put(entry);
+ return ret;
+}
+
+static int mem_id_cmp(const void *_a, const void *_b)
+{
+ const unsigned int *a = _a, *b = _b;
+ int cmp = a - b;
+ return (cmp < 0) ? -1 : (cmp > 0);
+}
+
+static long
+kgsl_ioctl_gpumem_sync_cache_bulk(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int i;
+ struct kgsl_gpumem_sync_cache_bulk *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ unsigned int id, last_id = 0, *id_list = NULL, actual_count = 0;
+ struct kgsl_mem_entry **entries = NULL;
+ long ret = 0;
+ size_t op_size = 0;
+ bool full_flush = false;
+
+ if (param->id_list == NULL || param->count == 0
+ || param->count > (UINT_MAX/sizeof(unsigned int)))
+ return -EINVAL;
+
+ id_list = kzalloc(param->count * sizeof(unsigned int), GFP_KERNEL);
+ if (id_list == NULL)
+ return -ENOMEM;
+
+ entries = kzalloc(param->count * sizeof(*entries), GFP_KERNEL);
+ if (entries == NULL) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ if (copy_from_user(id_list, param->id_list,
+ param->count * sizeof(unsigned int))) {
+ ret = -EFAULT;
+ goto end;
+ }
+ /* sort the ids so we can weed out duplicates */
+ sort(id_list, param->count, sizeof(int), mem_id_cmp, NULL);
+
+ for (i = 0; i < param->count; i++) {
+ unsigned int cachemode;
+ struct kgsl_mem_entry *entry = NULL;
+
+ id = id_list[i];
+ /* skip 0 ids or duplicates */
+ if (id == last_id)
+ continue;
+
+ entry = kgsl_sharedmem_find_id(private, id);
+ if (entry == NULL)
+ continue;
+
+ /* skip uncached memory */
+ cachemode = kgsl_memdesc_get_cachemode(&entry->memdesc);
+ if (cachemode != KGSL_CACHEMODE_WRITETHROUGH &&
+ cachemode != KGSL_CACHEMODE_WRITEBACK) {
+ kgsl_mem_entry_put(entry);
+ continue;
+ }
+
+ op_size += entry->memdesc.size;
+ entries[actual_count++] = entry;
+
+ /* If we exceed the breakeven point, flush the entire cache */
+ if (kgsl_driver.full_cache_threshold != 0 &&
+ op_size >= kgsl_driver.full_cache_threshold &&
+ param->op == KGSL_GPUMEM_CACHE_FLUSH) {
+ full_flush = true;
+ break;
+ }
+ last_id = id;
+ }
+ if (full_flush) {
+ trace_kgsl_mem_sync_full_cache(actual_count, op_size,
+ param->op);
+ __cpuc_flush_kern_all();
+ }
+
+ for (i = 0; i < actual_count; i++) {
+ if (!full_flush)
+ _kgsl_gpumem_sync_cache(entries[i], param->op);
+ kgsl_mem_entry_put(entries[i]);
+ }
+end:
+ kfree(entries);
+ kfree(id_list);
+ return ret;
+}
+
+/* Legacy cache function, does a flush (clean + invalidate) */
+
+static long
+kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_sharedmem_free *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+ long ret;
+
+ entry = kgsl_sharedmem_find(private, param->gpuaddr);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device,
+ "can't find gpuaddr %x\n",
+ param->gpuaddr);
+ return -EINVAL;
+ }
+
+ ret = _kgsl_gpumem_sync_cache(entry, KGSL_GPUMEM_CACHE_FLUSH);
+ kgsl_mem_entry_put(entry);
+ return ret;
+}
+
+/*
+ * The common parts of kgsl_ioctl_gpumem_alloc and kgsl_ioctl_gpumem_alloc_id.
+ */
+int
+_gpumem_alloc(struct kgsl_device_private *dev_priv,
+ struct kgsl_mem_entry **ret_entry,
+ unsigned int size, unsigned int flags)
+{
+ int result;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry;
+ int align;
+
+ /*
+ * Mask off unknown flags from userspace. This way the caller can
+ * check if a flag is supported by looking at the returned flags.
+ */
+ flags &= KGSL_MEMFLAGS_GPUREADONLY
+ | KGSL_CACHEMODE_MASK
+ | KGSL_MEMTYPE_MASK
+ | KGSL_MEMALIGN_MASK
+ | KGSL_MEMFLAGS_USE_CPU_MAP;
+
+ /* Cap the alignment bits to the highest number we can handle */
+
+ align = (flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
+ if (align >= 32) {
+ KGSL_CORE_ERR("Alignment too big, restricting to 2^32\n");
+ flags &= ~KGSL_MEMALIGN_MASK;
+ flags |= (31 << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
+ }
+
+ entry = kgsl_mem_entry_create();
+ if (entry == NULL)
+ return -ENOMEM;
+
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
+ entry->memdesc.priv |= KGSL_MEMDESC_GUARD_PAGE;
+
+ result = kgsl_allocate_user(&entry->memdesc, private->pagetable, size,
+ flags);
+ if (result != 0)
+ goto err;
+
+ entry->memtype = KGSL_MEM_ENTRY_KERNEL;
+
+ *ret_entry = entry;
+ return result;
+err:
+ kfree(entry);
+ *ret_entry = NULL;
+ return result;
+}
+
+static long
+kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_gpumem_alloc *param = data;
+ struct kgsl_mem_entry *entry = NULL;
+ int result;
+
+ param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
+ result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
+ if (result)
+ return result;
+
+ result = kgsl_mem_entry_attach_process(entry, private);
+ if (result != 0)
+ goto err;
+
+ kgsl_process_add_stats(private, entry->memtype, param->size);
+ trace_kgsl_mem_alloc(entry);
+
+ param->gpuaddr = entry->memdesc.gpuaddr;
+ param->size = entry->memdesc.size;
+ param->flags = entry->memdesc.flags;
+ return result;
+err:
+ kgsl_sharedmem_free(&entry->memdesc);
+ kfree(entry);
+ return result;
+}
+
+static long
+kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_gpumem_alloc_id *param = data;
+ struct kgsl_mem_entry *entry = NULL;
+ int result;
+
+ if (!kgsl_mmu_use_cpu_map(private->pagetable->mmu))
+ param->flags &= ~KGSL_MEMFLAGS_USE_CPU_MAP;
+
+ result = _gpumem_alloc(dev_priv, &entry, param->size, param->flags);
+ if (result != 0)
+ goto err;
+
+ result = kgsl_mem_entry_attach_process(entry, private);
+ if (result != 0)
+ goto err;
+
+ kgsl_process_add_stats(private, entry->memtype, param->size);
+ trace_kgsl_mem_alloc(entry);
+
+ param->id = entry->id;
+ param->flags = entry->memdesc.flags;
+ param->size = entry->memdesc.size;
+ param->mmapsize = kgsl_memdesc_mmapsize(&entry->memdesc);
+ param->gpuaddr = entry->memdesc.gpuaddr;
+ return result;
+err:
+ if (entry)
+ kgsl_sharedmem_free(&entry->memdesc);
+ kfree(entry);
+ return result;
+}
+
+static long
+kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_gpumem_get_info *param = data;
+ struct kgsl_mem_entry *entry = NULL;
+ int result = 0;
+
+ if (param->id != 0) {
+ entry = kgsl_sharedmem_find_id(private, param->id);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device, "can't find id %d\n",
+ param->id);
+ return -EINVAL;
+ }
+ } else if (param->gpuaddr != 0) {
+ entry = kgsl_sharedmem_find(private, param->gpuaddr);
+ if (entry == NULL) {
+ KGSL_MEM_INFO(dev_priv->device,
+ "can't find gpuaddr %lx\n",
+ param->gpuaddr);
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+ param->gpuaddr = entry->memdesc.gpuaddr;
+ param->id = entry->id;
+ param->flags = entry->memdesc.flags;
+ param->size = entry->memdesc.size;
+ param->mmapsize = kgsl_memdesc_mmapsize(&entry->memdesc);
+ param->useraddr = entry->memdesc.useraddr;
+
+ kgsl_mem_entry_put(entry);
+ return result;
+}
+
+static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_cff_syncmem *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+
+ entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len);
+ if (!entry)
+ return -EINVAL;
+
+ kgsl_cffdump_syncmem(dev_priv->device, &entry->memdesc, param->gpuaddr,
+ param->len, true);
+
+ kgsl_mem_entry_put(entry);
+ return result;
+}
+
+static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_cff_user_event *param = data;
+
+ kgsl_cffdump_user_event(dev_priv->device, param->cff_opcode,
+ param->op1, param->op2,
+ param->op3, param->op4, param->op5);
+
+ return result;
+}
+
+#ifdef CONFIG_GENLOCK
+struct kgsl_genlock_event_priv {
+ struct genlock_handle *handle;
+ struct genlock *lock;
+};
+
+/**
+ * kgsl_genlock_event_cb - Event callback for a genlock timestamp event
+ * @device - The KGSL device that expired the timestamp
+ * @priv - private data for the event
+ * @context_id - the context id that goes with the timestamp
+ * @timestamp - the timestamp that triggered the event
+ *
+ * Release a genlock lock following the expiration of a timestamp
+ */
+
+static void kgsl_genlock_event_cb(struct kgsl_device *device,
+ void *priv, u32 context_id, u32 timestamp, u32 type)
+{
+ struct kgsl_genlock_event_priv *ev = priv;
+ int ret;
+
+ /* Signal the lock for every event type */
+ ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
+ if (ret)
+ KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);
+
+ genlock_put_handle(ev->handle);
+
+ kfree(ev);
+}
+
+/**
+ * kgsl_add_genlock-event - Create a new genlock event
+ * @device - KGSL device to create the event on
+ * @timestamp - Timestamp to trigger the event
+ * @data - User space buffer containing struct kgsl_genlock_event_priv
+ * @len - length of the userspace buffer
+ * @owner - driver instance that owns this event
+ * @returns 0 on success or error code on error
+ *
+ * Attack to a genlock handle and register an event to release the
+ * genlock lock when the timestamp expires
+ */
+
+static int kgsl_add_genlock_event(struct kgsl_device *device,
+ u32 context_id, u32 timestamp, void __user *data, int len,
+ struct kgsl_device_private *owner)
+{
+ struct kgsl_genlock_event_priv *event;
+ struct kgsl_timestamp_event_genlock priv;
+ int ret;
+
+ if (len != sizeof(priv))
+ return -EINVAL;
+
+ if (copy_from_user(&priv, data, sizeof(priv)))
+ return -EFAULT;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+
+ if (event == NULL)
+ return -ENOMEM;
+
+ event->handle = genlock_get_handle_fd(priv.handle);
+
+ if (IS_ERR(event->handle)) {
+ int ret = PTR_ERR(event->handle);
+ kfree(event);
+ return ret;
+ }
+
+ ret = kgsl_add_event(device, context_id, timestamp,
+ kgsl_genlock_event_cb, event, owner);
+ if (ret)
+ kfree(event);
+
+ return ret;
+}
+#else
+static long kgsl_add_genlock_event(struct kgsl_device *device,
+ u32 context_id, u32 timestamp, void __user *data, int len,
+ struct kgsl_device_private *owner)
+{
+ return -EINVAL;
+}
+#endif
+
+/**
+ * kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
+ * @dev_priv - pointer to the private device structure
+ * @cmd - the ioctl cmd passed from kgsl_ioctl
+ * @data - the user data buffer from kgsl_ioctl
+ * @returns 0 on success or error code on failure
+ */
+
+static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_timestamp_event *param = data;
+ int ret;
+
+ switch (param->type) {
+ case KGSL_TIMESTAMP_EVENT_GENLOCK:
+ ret = kgsl_add_genlock_event(dev_priv->device,
+ param->context_id, param->timestamp, param->priv,
+ param->len, dev_priv);
+ break;
+ case KGSL_TIMESTAMP_EVENT_FENCE:
+ ret = kgsl_add_fence_event(dev_priv->device,
+ param->context_id, param->timestamp, param->priv,
+ param->len, dev_priv);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
+ unsigned int, void *);
+
+#define KGSL_IOCTL_FUNC(_cmd, _func, _flags) \
+ [_IOC_NR((_cmd))] = \
+ { .cmd = (_cmd), .func = (_func), .flags = (_flags) }
+
+#define KGSL_IOCTL_LOCK BIT(0)
+#define KGSL_IOCTL_WAKE BIT(1)
+
+static const struct {
+ unsigned int cmd;
+ kgsl_ioctl_func_t func;
+ int flags;
+} kgsl_ioctl_funcs[] = {
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
+ kgsl_ioctl_device_getproperty,
+ KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
+ kgsl_ioctl_device_waittimestamp,
+ KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
+ kgsl_ioctl_device_waittimestamp_ctxtid,
+ KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
+ kgsl_ioctl_rb_issueibcmds, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SUBMIT_COMMANDS,
+ kgsl_ioctl_submit_commands, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
+ kgsl_ioctl_cmdstream_readtimestamp,
+ KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
+ kgsl_ioctl_cmdstream_readtimestamp_ctxtid,
+ KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
+ kgsl_ioctl_cmdstream_freememontimestamp,
+ KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
+ kgsl_ioctl_cmdstream_freememontimestamp_ctxtid,
+ KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
+ kgsl_ioctl_drawctxt_create,
+ KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
+ kgsl_ioctl_drawctxt_destroy,
+ KGSL_IOCTL_LOCK | KGSL_IOCTL_WAKE),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
+ kgsl_ioctl_map_user_mem, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
+ kgsl_ioctl_map_user_mem, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
+ kgsl_ioctl_sharedmem_free, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
+ kgsl_ioctl_sharedmem_flush_cache, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
+ kgsl_ioctl_gpumem_alloc, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
+ kgsl_ioctl_cff_syncmem, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
+ kgsl_ioctl_cff_user_event, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
+ kgsl_ioctl_timestamp_event,
+ KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
+ kgsl_ioctl_device_setproperty,
+ KGSL_IOCTL_LOCK),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC_ID,
+ kgsl_ioctl_gpumem_alloc_id, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_FREE_ID,
+ kgsl_ioctl_gpumem_free_id, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_GET_INFO,
+ kgsl_ioctl_gpumem_get_info, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE,
+ kgsl_ioctl_gpumem_sync_cache, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_SYNC_CACHE_BULK,
+ kgsl_ioctl_gpumem_sync_cache_bulk, 0),
+};
+
+static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct kgsl_device_private *dev_priv = filep->private_data;
+ unsigned int nr;
+ kgsl_ioctl_func_t func;
+ int lock, ret, use_hw;
+ char ustack[64];
+ void *uptr = NULL;
+
+ BUG_ON(dev_priv == NULL);
+
+ /* Workaround for an previously incorrectly defined ioctl code.
+ This helps ensure binary compatability */
+
+ if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
+ cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
+ else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD)
+ cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP;
+ else if (cmd == IOCTL_KGSL_TIMESTAMP_EVENT_OLD)
+ cmd = IOCTL_KGSL_TIMESTAMP_EVENT;
+
+ nr = _IOC_NR(cmd);
+
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (_IOC_SIZE(cmd) < sizeof(ustack))
+ uptr = ustack;
+ else {
+ uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+ if (uptr == NULL) {
+ KGSL_MEM_ERR(dev_priv->device,
+ "kzalloc(%d) failed\n", _IOC_SIZE(cmd));
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+
+ if (cmd & IOC_IN) {
+ if (copy_from_user(uptr, (void __user *) arg,
+ _IOC_SIZE(cmd))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ } else
+ memset(uptr, 0, _IOC_SIZE(cmd));
+ }
+
+ if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
+ kgsl_ioctl_funcs[nr].func != NULL) {
+
+ /*
+ * Make sure that nobody tried to send us a malformed ioctl code
+ * with a valid NR but bogus flags
+ */
+
+ if (kgsl_ioctl_funcs[nr].cmd != cmd) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Malformed ioctl code %08x\n", cmd);
+ ret = -ENOIOCTLCMD;
+ goto done;
+ }
+
+ func = kgsl_ioctl_funcs[nr].func;
+ lock = kgsl_ioctl_funcs[nr].flags & KGSL_IOCTL_LOCK;
+ use_hw = kgsl_ioctl_funcs[nr].flags & KGSL_IOCTL_WAKE;
+ } else {
+ func = dev_priv->device->ftbl->ioctl;
+ if (!func) {
+ KGSL_DRV_INFO(dev_priv->device,
+ "invalid ioctl code %08x\n", cmd);
+ ret = -ENOIOCTLCMD;
+ goto done;
+ }
+ lock = 1;
+ use_hw = 1;
+ }
+
+ if (lock) {
+ mutex_lock(&dev_priv->device->mutex);
+ if (use_hw) {
+ ret = kgsl_active_count_get(dev_priv->device);
+ if (ret < 0) {
+ use_hw = 0;
+ goto unlock;
+ }
+ }
+ }
+
+ ret = func(dev_priv, cmd, uptr);
+
+unlock:
+ if (lock) {
+ if (use_hw)
+ kgsl_active_count_put(dev_priv->device);
+ mutex_unlock(&dev_priv->device->mutex);
+ }
+
+ /*
+ * Still copy back on failure, but assume function took
+ * all necessary precautions sanitizing the return values.
+ */
+ if (cmd & IOC_OUT) {
+ if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
+ ret = -EFAULT;
+ }
+
+done:
+ if (_IOC_SIZE(cmd) >= sizeof(ustack))
+ kfree(uptr);
+
+ return ret;
+}
+
+static int
+kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
+{
+ struct kgsl_memdesc *memdesc = &device->memstore;
+ int result;
+ unsigned int vma_size = vma->vm_end - vma->vm_start;
+
+ /* The memstore can only be mapped as read only */
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (memdesc->size != vma_size) {
+ KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
+ vma_size, memdesc->size);
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ result = remap_pfn_range(vma, vma->vm_start,
+ device->memstore.physaddr >> PAGE_SHIFT,
+ vma_size, vma->vm_page_prot);
+ if (result != 0)
+ KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
+ result);
+
+ return result;
+}
+
+/*
+ * kgsl_gpumem_vm_open is called whenever a vma region is copied or split.
+ * Increase the refcount to make sure that the accounting stays correct
+ */
+
+static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
+{
+ struct kgsl_mem_entry *entry = vma->vm_private_data;
+ if (!kgsl_mem_entry_get(entry))
+ vma->vm_private_data = NULL;
+}
+
+static int
+kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kgsl_mem_entry *entry = vma->vm_private_data;
+
+ if (!entry)
+ return VM_FAULT_SIGBUS;
+ if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
+ return VM_FAULT_SIGBUS;
+
+ return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
+}
+
+static void
+kgsl_gpumem_vm_close(struct vm_area_struct *vma)
+{
+ struct kgsl_mem_entry *entry = vma->vm_private_data;
+
+ if (!entry)
+ return;
+
+ entry->memdesc.useraddr = 0;
+ kgsl_mem_entry_put(entry);
+}
+
+static struct vm_operations_struct kgsl_gpumem_vm_ops = {
+ .open = kgsl_gpumem_vm_open,
+ .fault = kgsl_gpumem_vm_fault,
+ .close = kgsl_gpumem_vm_close,
+};
+
+static int
+get_mmap_entry(struct kgsl_process_private *private,
+ struct kgsl_mem_entry **out_entry, unsigned long pgoff,
+ unsigned long len)
+{
+ int ret = 0;
+ struct kgsl_mem_entry *entry;
+
+ entry = kgsl_sharedmem_find_id(private, pgoff);
+ if (entry == NULL) {
+ entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
+ }
+
+ if (!entry)
+ return -EINVAL;
+
+ if (!entry->memdesc.ops ||
+ !entry->memdesc.ops->vmflags ||
+ !entry->memdesc.ops->vmfault) {
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ if (entry->memdesc.useraddr != 0) {
+ ret = -EBUSY;
+ goto err_put;
+ }
+
+ if (len != kgsl_memdesc_mmapsize(&entry->memdesc)) {
+ ret = -ERANGE;
+ goto err_put;
+ }
+
+ *out_entry = entry;
+ return 0;
+err_put:
+ kgsl_mem_entry_put(entry);
+ return ret;
+}
+
+static inline bool
+mmap_range_valid(unsigned long addr, unsigned long len)
+{
+ return (addr + len) > addr && (addr + len) < TASK_SIZE;
+}
+
+static unsigned long
+kgsl_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long ret = 0, orig_len = len;
+ unsigned long vma_offset = pgoff << PAGE_SHIFT;
+ struct kgsl_device_private *dev_priv = file->private_data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_mem_entry *entry = NULL;
+ unsigned int align;
+ unsigned int retry = 0;
+
+ if (vma_offset == device->memstore.gpuaddr)
+ return get_unmapped_area(NULL, addr, len, pgoff, flags);
+
+ ret = get_mmap_entry(private, &entry, pgoff, len);
+ if (ret)
+ return ret;
+
+ if (!kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
+ /*
+ * If we're not going to use the same mapping on the gpu,
+ * any address is fine.
+ * For MAP_FIXED, hopefully the caller knows what they're doing,
+ * but we may fail in mmap() if there is already something
+ * at the virtual address chosen.
+ */
+ ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
+ goto put;
+ }
+ if (entry->memdesc.gpuaddr != 0) {
+ KGSL_MEM_INFO(device,
+ "pgoff %lx already mapped to gpuaddr %x\n",
+ pgoff, entry->memdesc.gpuaddr);
+ ret = -EBUSY;
+ goto put;
+ }
+
+ align = kgsl_memdesc_get_align(&entry->memdesc);
+ if (align >= ilog2(SZ_1M))
+ align = ilog2(SZ_1M);
+ else if (align >= ilog2(SZ_64K))
+ align = ilog2(SZ_64K);
+ else if (align <= PAGE_SHIFT)
+ align = 0;
+
+ if (align)
+ len += 1 << align;
+
+ if (!mmap_range_valid(addr, len))
+ addr = 0;
+ do {
+ ret = get_unmapped_area(NULL, addr, len, pgoff, flags);
+ if (IS_ERR_VALUE(ret)) {
+ /*
+ * If we are really fragmented, there may not be room
+ * for the alignment padding, so try again without it.
+ */
+ if (!retry && (ret == (unsigned long)-ENOMEM)
+ && (align > PAGE_SHIFT)) {
+ align = 0;
+ addr = 0;
+ len = orig_len;
+ retry = 1;
+ continue;
+ }
+ break;
+ }
+ if (align)
+ ret = ALIGN(ret, (1 << align));
+
+ /*make sure there isn't a GPU only mapping at this address */
+ spin_lock(&private->mem_lock);
+ if (kgsl_sharedmem_region_empty(private, ret, orig_len)) {
+ int ret_val;
+ /*
+ * We found a free memory map, claim it here with
+ * memory lock held
+ */
+ entry->memdesc.gpuaddr = ret;
+ /* This should never fail */
+ ret_val = kgsl_mem_entry_track_gpuaddr(private, entry);
+ spin_unlock(&private->mem_lock);
+ BUG_ON(ret_val);
+ /* map cannot be called with lock held */
+ ret_val = kgsl_mmu_map(private->pagetable,
+ &entry->memdesc);
+ if (ret_val) {
+ spin_lock(&private->mem_lock);
+ kgsl_mem_entry_untrack_gpuaddr(private, entry);
+ spin_unlock(&private->mem_lock);
+ ret = ret_val;
+ }
+ break;
+ }
+ spin_unlock(&private->mem_lock);
+
+ trace_kgsl_mem_unmapped_area_collision(entry, addr, orig_len,
+ ret);
+
+ /*
+ * If we collided, bump the hint address so that
+ * get_umapped_area knows to look somewhere else.
+ */
+ addr = (addr == 0) ? ret + orig_len : addr + orig_len;
+
+ /*
+ * The addr hint can be set by userspace to be near
+ * the end of the address space. Make sure we search
+ * the whole address space at least once by wrapping
+ * back around once.
+ */
+ if (!retry && !mmap_range_valid(addr, len)) {
+ addr = 0;
+ retry = 1;
+ } else {
+ ret = -EBUSY;
+ }
+ } while (!(flags & MAP_FIXED) && mmap_range_valid(addr, len));
+
+ if (IS_ERR_VALUE(ret))
+ KGSL_MEM_ERR(device,
+ "pid %d pgoff %lx len %ld failed error %ld\n",
+ private->pid, pgoff, len, ret);
+put:
+ kgsl_mem_entry_put(entry);
+ return ret;
+}
+
+static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned int ret, cache;
+ unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct kgsl_device_private *dev_priv = file->private_data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+ struct kgsl_device *device = dev_priv->device;
+
+ /* Handle leagacy behavior for memstore */
+
+ if (vma_offset == device->memstore.gpuaddr)
+ return kgsl_mmap_memstore(device, vma);
+
+ /*
+ * The reference count on the entry that we get from
+ * get_mmap_entry() will be held until kgsl_gpumem_vm_close().
+ */
+ ret = get_mmap_entry(private, &entry, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start);
+ if (ret)
+ return ret;
+
+ vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
+
+ vma->vm_private_data = entry;
+
+ /* Determine user-side caching policy */
+
+ cache = kgsl_memdesc_get_cachemode(&entry->memdesc);
+
+ switch (cache) {
+ case KGSL_CACHEMODE_UNCACHED:
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ break;
+ case KGSL_CACHEMODE_WRITETHROUGH:
+ vma->vm_page_prot = pgprot_writethroughcache(vma->vm_page_prot);
+ break;
+ case KGSL_CACHEMODE_WRITEBACK:
+ vma->vm_page_prot = pgprot_writebackcache(vma->vm_page_prot);
+ break;
+ case KGSL_CACHEMODE_WRITECOMBINE:
+ default:
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ break;
+ }
+
+ vma->vm_ops = &kgsl_gpumem_vm_ops;
+
+ if (cache == KGSL_CACHEMODE_WRITEBACK
+ || cache == KGSL_CACHEMODE_WRITETHROUGH) {
+ struct scatterlist *s;
+ int i;
+ int sglen = entry->memdesc.sglen;
+ unsigned long addr = vma->vm_start;
+
+ for_each_sg(entry->memdesc.sg, s, sglen, i) {
+ int j;
+ for (j = 0; j < (sg_dma_len(s) >> PAGE_SHIFT); j++) {
+ struct page *page = sg_page(s);
+ page = nth_page(page, j);
+ vm_insert_page(vma, addr, page);
+ addr += PAGE_SIZE;
+ }
+ }
+ }
+
+ vma->vm_file = file;
+
+ entry->memdesc.useraddr = vma->vm_start;
+
+ trace_kgsl_mem_mmap(entry);
+ return 0;
+}
+
+static irqreturn_t kgsl_irq_handler(int irq, void *data)
+{
+ struct kgsl_device *device = data;
+
+ return device->ftbl->irq_handler(device);
+
+}
+
+static const struct file_operations kgsl_fops = {
+ .owner = THIS_MODULE,
+ .release = kgsl_release,
+ .open = kgsl_open,
+ .mmap = kgsl_mmap,
+ .get_unmapped_area = kgsl_get_unmapped_area,
+ .unlocked_ioctl = kgsl_ioctl,
+};
+
+struct kgsl_driver kgsl_driver = {
+ .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
+ .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
+ .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
+ .memfree_hist_mutex =
+ __MUTEX_INITIALIZER(kgsl_driver.memfree_hist_mutex),
+ /*
+ * Full cache flushes are faster than line by line on at least
+ * 8064 and 8974 once the region to be flushed is > 16mb.
+ */
+ .full_cache_threshold = SZ_16M,
+};
+EXPORT_SYMBOL(kgsl_driver);
+
+static void _unregister_device(struct kgsl_device *device)
+{
+ int minor;
+
+ mutex_lock(&kgsl_driver.devlock);
+ for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
+ if (device == kgsl_driver.devp[minor])
+ break;
+ }
+ if (minor != KGSL_DEVICE_MAX) {
+ device_destroy(kgsl_driver.class,
+ MKDEV(MAJOR(kgsl_driver.major), minor));
+ kgsl_driver.devp[minor] = NULL;
+ }
+ mutex_unlock(&kgsl_driver.devlock);
+}
+
+static int _register_device(struct kgsl_device *device)
+{
+ int minor, ret;
+ dev_t dev;
+
+ /* Find a minor for the device */
+
+ mutex_lock(&kgsl_driver.devlock);
+ for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
+ if (kgsl_driver.devp[minor] == NULL) {
+ kgsl_driver.devp[minor] = device;
+ break;
+ }
+ }
+ mutex_unlock(&kgsl_driver.devlock);
+
+ if (minor == KGSL_DEVICE_MAX) {
+ KGSL_CORE_ERR("minor devices exhausted\n");
+ return -ENODEV;
+ }
+
+ /* Create the device */
+ dev = MKDEV(MAJOR(kgsl_driver.major), minor);
+ device->dev = device_create(kgsl_driver.class,
+ device->parentdev,
+ dev, device,
+ device->name);
+
+ if (IS_ERR(device->dev)) {
+ mutex_lock(&kgsl_driver.devlock);
+ kgsl_driver.devp[minor] = NULL;
+ mutex_unlock(&kgsl_driver.devlock);
+ ret = PTR_ERR(device->dev);
+ KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
+ return ret;
+ }
+
+ dev_set_drvdata(device->parentdev, device);
+ return 0;
+}
+
+int kgsl_device_platform_probe(struct kgsl_device *device)
+{
+ int result;
+ int status = -EINVAL;
+ struct resource *res;
+ struct platform_device *pdev =
+ container_of(device->parentdev, struct platform_device, dev);
+
+ status = _register_device(device);
+ if (status)
+ return status;
+
+ /* Initialize logging first, so that failures below actually print. */
+ kgsl_device_debugfs_init(device);
+
+ status = kgsl_pwrctrl_init(device);
+ if (status)
+ goto error;
+
+ /* Get starting physical address of device registers */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ device->iomemname);
+ if (res == NULL) {
+ KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
+ status = -EINVAL;
+ goto error_pwrctrl_close;
+ }
+ if (res->start == 0 || resource_size(res) == 0) {
+ KGSL_DRV_ERR(device, "dev %d invalid register region\n",
+ device->id);
+ status = -EINVAL;
+ goto error_pwrctrl_close;
+ }
+
+ device->reg_phys = res->start;
+ device->reg_len = resource_size(res);
+
+ /*
+ * Check if a shadermemname is defined, and then get shader memory
+ * details including shader memory starting physical address
+ * and shader memory length
+ */
+ if (device->shadermemname != NULL) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ device->shadermemname);
+
+ if (res == NULL) {
+ KGSL_DRV_ERR(device,
+ "Shader memory: platform_get_resource_byname failed\n");
+ }
+
+ else {
+ device->shader_mem_phys = res->start;
+ device->shader_mem_len = resource_size(res);
+ }
+
+ if (!devm_request_mem_region(device->dev,
+ device->shader_mem_phys,
+ device->shader_mem_len,
+ device->name)) {
+ KGSL_DRV_ERR(device, "request_mem_region_failed\n");
+ }
+ }
+
+ if (!devm_request_mem_region(device->dev, device->reg_phys,
+ device->reg_len, device->name)) {
+ KGSL_DRV_ERR(device, "request_mem_region failed\n");
+ status = -ENODEV;
+ goto error_pwrctrl_close;
+ }
+
+ device->reg_virt = devm_ioremap(device->dev, device->reg_phys,
+ device->reg_len);
+
+ if (device->reg_virt == NULL) {
+ KGSL_DRV_ERR(device, "ioremap failed\n");
+ status = -ENODEV;
+ goto error_pwrctrl_close;
+ }
+ /*acquire interrupt */
+ device->pwrctrl.interrupt_num =
+ platform_get_irq_byname(pdev, device->pwrctrl.irq_name);
+
+ if (device->pwrctrl.interrupt_num <= 0) {
+ KGSL_DRV_ERR(device, "platform_get_irq_byname failed: %d\n",
+ device->pwrctrl.interrupt_num);
+ status = -EINVAL;
+ goto error_pwrctrl_close;
+ }
+
+ status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
+ kgsl_irq_handler, IRQF_TRIGGER_HIGH,
+ device->name, device);
+ if (status) {
+ KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
+ device->pwrctrl.interrupt_num, status);
+ goto error_pwrctrl_close;
+ }
+ disable_irq(device->pwrctrl.interrupt_num);
+
+ KGSL_DRV_INFO(device,
+ "dev_id %d regs phys 0x%08lx size 0x%08x virt %p\n",
+ device->id, device->reg_phys, device->reg_len,
+ device->reg_virt);
+
+ rwlock_init(&device->context_lock);
+
+ result = kgsl_drm_init(pdev);
+ if (result)
+ goto error_pwrctrl_close;
+
+
+ setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
+ status = kgsl_create_device_workqueue(device);
+ if (status)
+ goto error_pwrctrl_close;
+
+ status = kgsl_mmu_init(device);
+ if (status != 0) {
+ KGSL_DRV_ERR(device, "kgsl_mmu_init failed %d\n", status);
+ goto error_dest_work_q;
+ }
+
+ status = kgsl_allocate_contiguous(&device->memstore,
+ KGSL_MEMSTORE_SIZE);
+
+ if (status != 0) {
+ KGSL_DRV_ERR(device, "kgsl_allocate_contiguous failed %d\n",
+ status);
+ goto error_close_mmu;
+ }
+
+ pm_qos_add_request(&device->pwrctrl.pm_qos_req_dma,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ /* Initalize the snapshot engine */
+ kgsl_device_snapshot_init(device);
+
+ /* Initialize common sysfs entries */
+ kgsl_pwrctrl_init_sysfs(device);
+
+ return 0;
+
+error_close_mmu:
+ kgsl_mmu_close(device);
+error_dest_work_q:
+ destroy_workqueue(device->work_queue);
+ device->work_queue = NULL;
+error_pwrctrl_close:
+ kgsl_pwrctrl_close(device);
+error:
+ _unregister_device(device);
+ return status;
+}
+EXPORT_SYMBOL(kgsl_device_platform_probe);
+
+int kgsl_postmortem_dump(struct kgsl_device *device, int manual)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+ BUG_ON(device == NULL);
+
+ kgsl_cffdump_hang(device);
+
+ /* For a manual dump, make sure that the system is idle */
+
+ if (manual) {
+ kgsl_active_count_wait(device, 0);
+
+ if (device->state == KGSL_STATE_ACTIVE)
+ kgsl_idle(device);
+
+ }
+
+ if (device->pm_dump_enable) {
+
+ KGSL_LOG_DUMP(device,
+ "POWER: START_STOP_SLEEP_WAKE = %d\n",
+ pwr->strtstp_sleepwake);
+
+ KGSL_LOG_DUMP(device,
+ "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
+ pwr->power_flags, pwr->active_pwrlevel);
+
+ KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
+ pwr->interval_timeout);
+
+ }
+
+ /* Disable the idle timer so we don't get interrupted */
+ del_timer_sync(&device->idle_timer);
+
+ /* Force on the clocks */
+ kgsl_pwrctrl_wake(device);
+
+ /* Disable the irq */
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+ /*Call the device specific postmortem dump function*/
+ device->ftbl->postmortem_dump(device, manual);
+
+ /* On a manual trigger, turn on the interrupts and put
+ the clocks to sleep. They will recover themselves
+ on the next event. For a hang, leave things as they
+ are until fault tolerance kicks in. */
+
+ if (manual) {
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+
+ /* try to go into a sleep mode until the next event */
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
+ kgsl_pwrctrl_sleep(device);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_postmortem_dump);
+
+void kgsl_device_platform_remove(struct kgsl_device *device)
+{
+ kgsl_device_snapshot_close(device);
+
+ kgsl_pwrctrl_uninit_sysfs(device);
+
+ pm_qos_remove_request(&device->pwrctrl.pm_qos_req_dma);
+
+ idr_destroy(&device->context_idr);
+
+ kgsl_sharedmem_free(&device->memstore);
+
+ kgsl_mmu_close(device);
+
+ if (device->work_queue) {
+ destroy_workqueue(device->work_queue);
+ device->work_queue = NULL;
+ }
+ kgsl_pwrctrl_close(device);
+
+ _unregister_device(device);
+}
+EXPORT_SYMBOL(kgsl_device_platform_remove);
+
+static int __devinit
+kgsl_ptdata_init(void)
+{
+ kgsl_driver.ptpool = kgsl_mmu_ptpool_init(kgsl_pagetable_count);
+
+ if (!kgsl_driver.ptpool)
+ return -ENOMEM;
+ return 0;
+}
+
+static void kgsl_core_exit(void)
+{
+ kgsl_mmu_ptpool_destroy(kgsl_driver.ptpool);
+ kgsl_driver.ptpool = NULL;
+
+ kgsl_drm_exit();
+ kgsl_cffdump_destroy();
+ kgsl_core_debugfs_close();
+
+ /*
+ * We call kgsl_sharedmem_uninit_sysfs() and device_unregister()
+ * only if kgsl_driver.virtdev has been populated.
+ * We check at least one member of kgsl_driver.virtdev to
+ * see if it is not NULL (and thus, has been populated).
+ */
+ if (kgsl_driver.virtdev.class) {
+ kgsl_sharedmem_uninit_sysfs();
+ device_unregister(&kgsl_driver.virtdev);
+ }
+
+ if (kgsl_driver.class) {
+ class_destroy(kgsl_driver.class);
+ kgsl_driver.class = NULL;
+ }
+
+ kgsl_memfree_hist_exit();
+ unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
+}
+
+static int __init kgsl_core_init(void)
+{
+ int result = 0;
+ /* alloc major and minor device numbers */
+ result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
+ KGSL_NAME);
+ if (result < 0) {
+ KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
+ goto err;
+ }
+
+ cdev_init(&kgsl_driver.cdev, &kgsl_fops);
+ kgsl_driver.cdev.owner = THIS_MODULE;
+ kgsl_driver.cdev.ops = &kgsl_fops;
+ result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
+ KGSL_DEVICE_MAX);
+
+ if (result) {
+ KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
+ " result= %d\n", kgsl_driver.major, result);
+ goto err;
+ }
+
+ kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);
+
+ if (IS_ERR(kgsl_driver.class)) {
+ result = PTR_ERR(kgsl_driver.class);
+ KGSL_CORE_ERR("failed to create class %s", KGSL_NAME);
+ goto err;
+ }
+
+ /* Make a virtual device for managing core related things
+ in sysfs */
+ kgsl_driver.virtdev.class = kgsl_driver.class;
+ dev_set_name(&kgsl_driver.virtdev, "kgsl");
+ result = device_register(&kgsl_driver.virtdev);
+ if (result) {
+ KGSL_CORE_ERR("driver_register failed\n");
+ goto err;
+ }
+
+ /* Make kobjects in the virtual device for storing statistics */
+
+ kgsl_driver.ptkobj =
+ kobject_create_and_add("pagetables",
+ &kgsl_driver.virtdev.kobj);
+
+ kgsl_driver.prockobj =
+ kobject_create_and_add("proc",
+ &kgsl_driver.virtdev.kobj);
+
+ kgsl_core_debugfs_init();
+
+ kgsl_sharedmem_init_sysfs();
+ kgsl_cffdump_init();
+
+ INIT_LIST_HEAD(&kgsl_driver.process_list);
+
+ INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
+
+ kgsl_mmu_set_mmutype(ksgl_mmu_type);
+
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) {
+ result = kgsl_ptdata_init();
+ if (result)
+ goto err;
+ }
+
+ if (kgsl_memfree_hist_init())
+ KGSL_CORE_ERR("failed to init memfree_hist");
+
+ return 0;
+
+err:
+ kgsl_core_exit();
+ return result;
+}
+
+module_init(kgsl_core_init);
+module_exit(kgsl_core_exit);
+
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("MSM GPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/msm2/kgsl.h b/drivers/gpu/msm2/kgsl.h
new file mode 100644
index 0000000..651d597
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl.h
@@ -0,0 +1,358 @@
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_H
+#define __KGSL_H
+
+#include <linux/types.h>
+#include <linux/msm_kgsl.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mm.h>
+
+#include <mach/kgsl.h>
+
+#define KGSL_NAME "kgsl"
+
+/* The number of memstore arrays limits the number of contexts allowed.
+ * If more contexts are needed, update multiple for MEMSTORE_SIZE
+ */
+#define KGSL_MEMSTORE_SIZE ((int)(PAGE_SIZE * 2))
+#define KGSL_MEMSTORE_GLOBAL (0)
+#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \
+ sizeof(struct kgsl_devmemstore) - 1)
+
+/* Timestamp window used to detect rollovers (half of integer range) */
+#define KGSL_TIMESTAMP_WINDOW 0x80000000
+
+/*cache coherency ops */
+#define DRM_KGSL_GEM_CACHE_OP_TO_DEV 0x0001
+#define DRM_KGSL_GEM_CACHE_OP_FROM_DEV 0x0002
+
+/* The size of each entry in a page table */
+#define KGSL_PAGETABLE_ENTRY_SIZE 4
+
+/* Pagetable Virtual Address base */
+#ifndef CONFIG_MSM_KGSL_CFF_DUMP
+#define KGSL_PAGETABLE_BASE 0x10000000
+#else
+#define KGSL_PAGETABLE_BASE SZ_4M
+#endif
+
+/* Extra accounting entries needed in the pagetable */
+#define KGSL_PT_EXTRA_ENTRIES 16
+
+#define KGSL_PAGETABLE_ENTRIES(_sz) (((_sz) >> PAGE_SHIFT) + \
+ KGSL_PT_EXTRA_ENTRIES)
+
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+#define KGSL_PAGETABLE_COUNT (CONFIG_MSM_KGSL_PAGE_TABLE_COUNT)
+#else
+#define KGSL_PAGETABLE_COUNT 1
+#endif
+
+/* Casting using container_of() for structures that kgsl owns. */
+#define KGSL_CONTAINER_OF(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/* A macro for memory statistics - add the new size to the stat and if
+ the statisic is greater then _max, set _max
+*/
+
+#define KGSL_STATS_ADD(_size, _stat, _max) \
+ do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
+
+
+#define KGSL_MEMFREE_HIST_SIZE ((int)(PAGE_SIZE * 2))
+
+struct kgsl_memfree_hist_elem {
+ unsigned int pid;
+ unsigned int gpuaddr;
+ unsigned int size;
+ unsigned int flags;
+};
+
+struct kgsl_memfree_hist {
+ void *base_hist_rb;
+ unsigned int size;
+ struct kgsl_memfree_hist_elem *wptr;
+};
+
+
+struct kgsl_device;
+struct kgsl_context;
+
+struct kgsl_driver {
+ struct cdev cdev;
+ dev_t major;
+ struct class *class;
+ /* Virtual device for managing the core */
+ struct device virtdev;
+ /* Kobjects for storing pagetable and process statistics */
+ struct kobject *ptkobj;
+ struct kobject *prockobj;
+ struct kgsl_device *devp[KGSL_DEVICE_MAX];
+
+ /* Global lilst of open processes */
+ struct list_head process_list;
+ /* Global list of pagetables */
+ struct list_head pagetable_list;
+ /* Spinlock for accessing the pagetable list */
+ spinlock_t ptlock;
+ /* Mutex for accessing the process list */
+ struct mutex process_mutex;
+
+ /* Mutex for protecting the device list */
+ struct mutex devlock;
+
+ void *ptpool;
+
+ struct mutex memfree_hist_mutex;
+ struct kgsl_memfree_hist memfree_hist;
+
+ struct {
+ unsigned int vmalloc;
+ unsigned int vmalloc_max;
+ unsigned int page_alloc;
+ unsigned int page_alloc_max;
+ unsigned int coherent;
+ unsigned int coherent_max;
+ unsigned int mapped;
+ unsigned int mapped_max;
+ unsigned int histogram[16];
+ } stats;
+ unsigned int full_cache_threshold;
+};
+
+extern struct kgsl_driver kgsl_driver;
+
+struct kgsl_pagetable;
+struct kgsl_memdesc;
+struct kgsl_cmdbatch;
+
+struct kgsl_memdesc_ops {
+ int (*vmflags)(struct kgsl_memdesc *);
+ int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
+ struct vm_fault *);
+ void (*free)(struct kgsl_memdesc *memdesc);
+ int (*map_kernel)(struct kgsl_memdesc *);
+ void (*unmap_kernel)(struct kgsl_memdesc *);
+};
+
+/* Internal definitions for memdesc->priv */
+#define KGSL_MEMDESC_GUARD_PAGE BIT(0)
+/* Set if the memdesc is mapped into all pagetables */
+#define KGSL_MEMDESC_GLOBAL BIT(1)
+/* The memdesc is frozen during a snapshot */
+#define KGSL_MEMDESC_FROZEN BIT(2)
+/* The memdesc is mapped into a pagetable */
+#define KGSL_MEMDESC_MAPPED BIT(3)
+
+/* shared memory allocation */
+struct kgsl_memdesc {
+ struct kgsl_pagetable *pagetable;
+ void *hostptr; /* kernel virtual address */
+ unsigned int hostptr_count; /* number of threads using hostptr */
+ unsigned long useraddr; /* userspace address */
+ unsigned int gpuaddr;
+ phys_addr_t physaddr;
+ unsigned int size;
+ unsigned int priv; /* Internal flags and settings */
+ struct scatterlist *sg;
+ unsigned int sglen; /* Active entries in the sglist */
+ unsigned int sglen_alloc; /* Allocated entries in the sglist */
+ struct kgsl_memdesc_ops *ops;
+ unsigned int flags; /* Flags set from userspace */
+};
+
+/* List of different memory entry types */
+
+#define KGSL_MEM_ENTRY_KERNEL 0
+#define KGSL_MEM_ENTRY_PMEM 1
+#define KGSL_MEM_ENTRY_ASHMEM 2
+#define KGSL_MEM_ENTRY_USER 3
+#define KGSL_MEM_ENTRY_ION 4
+#define KGSL_MEM_ENTRY_MAX 5
+
+struct kgsl_mem_entry {
+ struct kref refcount;
+ struct kgsl_memdesc memdesc;
+ int memtype;
+ void *priv_data;
+ struct rb_node node;
+ unsigned int id;
+ unsigned int context_id;
+ /* back pointer to private structure under whose context this
+ * allocation is made */
+ struct kgsl_process_private *priv;
+ /* Initialized to 0, set to 1 when entry is marked for freeing */
+ int pending_free;
+};
+
+#ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT
+#define MMU_CONFIG 2
+#else
+#define MMU_CONFIG 1
+#endif
+
+void kgsl_mem_entry_destroy(struct kref *kref);
+int kgsl_postmortem_dump(struct kgsl_device *device, int manual);
+
+struct kgsl_mem_entry *kgsl_get_mem_entry(struct kgsl_device *device,
+ phys_addr_t ptbase, unsigned int gpuaddr, unsigned int size);
+
+struct kgsl_mem_entry *kgsl_sharedmem_find_region(
+ struct kgsl_process_private *private, unsigned int gpuaddr,
+ size_t size);
+
+void kgsl_get_memory_usage(char *str, size_t len, unsigned int memflags);
+
+void kgsl_signal_event(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp,
+ unsigned int type);
+
+void kgsl_signal_events(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int type);
+
+void kgsl_cancel_events(struct kgsl_device *device,
+ void *owner);
+
+extern const struct dev_pm_ops kgsl_pm_ops;
+
+int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
+int kgsl_resume_driver(struct platform_device *pdev);
+
+void kgsl_trace_regwrite(struct kgsl_device *device, unsigned int offset,
+ unsigned int value);
+
+void kgsl_trace_issueibcmds(struct kgsl_device *device, int id,
+ struct kgsl_cmdbatch *cmdbatch,
+ unsigned int timestamp, unsigned int flags,
+ int result, unsigned int type);
+
+int kgsl_open_device(struct kgsl_device *device);
+
+int kgsl_close_device(struct kgsl_device *device);
+
+#ifdef CONFIG_MSM_KGSL_DRM
+extern int kgsl_drm_init(struct platform_device *dev);
+extern void kgsl_drm_exit(void);
+#else
+static inline int kgsl_drm_init(struct platform_device *dev)
+{
+ return 0;
+}
+
+static inline void kgsl_drm_exit(void)
+{
+}
+#endif
+
+static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
+ unsigned int gpuaddr, unsigned int size)
+{
+ /* set a minimum size to search for */
+ if (!size)
+ size = 1;
+
+ /* don't overflow */
+ if ((gpuaddr + size) < gpuaddr)
+ return 0;
+
+ if (gpuaddr >= memdesc->gpuaddr &&
+ ((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size))) {
+ return 1;
+ }
+ return 0;
+}
+
+static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc)
+{
+ if (memdesc->ops && memdesc->ops->map_kernel)
+ memdesc->ops->map_kernel(memdesc);
+
+ return memdesc->hostptr;
+}
+
+static inline void kgsl_memdesc_unmap(struct kgsl_memdesc *memdesc)
+{
+ if (memdesc->ops && memdesc->ops->unmap_kernel)
+ memdesc->ops->unmap_kernel(memdesc);
+}
+
+static inline uint8_t *kgsl_gpuaddr_to_vaddr(struct kgsl_memdesc *memdesc,
+ unsigned int gpuaddr)
+{
+ void *hostptr = NULL;
+
+ if ((gpuaddr >= memdesc->gpuaddr) &&
+ (gpuaddr < (memdesc->gpuaddr + memdesc->size)))
+ hostptr = kgsl_memdesc_map(memdesc);
+
+ return hostptr != NULL ? hostptr + (gpuaddr - memdesc->gpuaddr) : NULL;
+}
+
+static inline int timestamp_cmp(unsigned int a, unsigned int b)
+{
+ /* check for equal */
+ if (a == b)
+ return 0;
+
+ /* check for greater-than for non-rollover case */
+ if ((a > b) && (a - b < KGSL_TIMESTAMP_WINDOW))
+ return 1;
+
+ /* check for greater-than for rollover case
+ * note that <= is required to ensure that consistent
+ * results are returned for values whose difference is
+ * equal to the window size
+ */
+ a += KGSL_TIMESTAMP_WINDOW;
+ b += KGSL_TIMESTAMP_WINDOW;
+ return ((a > b) && (a - b <= KGSL_TIMESTAMP_WINDOW)) ? 1 : -1;
+}
+
+static inline int
+kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
+{
+ return kref_get_unless_zero(&entry->refcount);
+}
+
+static inline void
+kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
+{
+ kref_put(&entry->refcount, kgsl_mem_entry_destroy);
+}
+
+/*
+ * kgsl_addr_range_overlap() - Checks if 2 ranges overlap
+ * @gpuaddr1: Start of first address range
+ * @size1: Size of first address range
+ * @gpuaddr2: Start of second address range
+ * @size2: Size of second address range
+ *
+ * Function returns true if the 2 given address ranges overlap
+ * else false
+ */
+static inline bool kgsl_addr_range_overlap(unsigned int gpuaddr1,
+ unsigned int size1,
+ unsigned int gpuaddr2, unsigned int size2)
+{
+ return !(((gpuaddr1 + size1) < gpuaddr2) ||
+ (gpuaddr1 > (gpuaddr2 + size2)));
+}
+
+#endif /* __KGSL_H */
diff --git a/drivers/gpu/msm2/kgsl_cffdump.c b/drivers/gpu/msm2/kgsl_cffdump.c
new file mode 100644
index 0000000..ca2d1ee
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_cffdump.c
@@ -0,0 +1,649 @@
+/* Copyright (c) 2010-2012,2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+#define ALIGN_CPU
+
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/sched.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_debugfs.h"
+#include "kgsl_log.h"
+#include "kgsl_sharedmem.h"
+#include "adreno_pm4types.h"
+#include "adreno.h"
+
+static struct rchan *chan;
+static struct dentry *dir;
+static int suspended;
+static size_t dropped;
+static size_t subbuf_size = 256*1024;
+static size_t n_subbufs = 64;
+
+/* forward declarations */
+static void destroy_channel(void);
+static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs);
+
+static spinlock_t cffdump_lock;
+static ulong serial_nr;
+static ulong total_bytes;
+static ulong total_syncmem;
+static long last_sec;
+
+#define MEMBUF_SIZE 64
+
+#define CFF_OP_WRITE_REG 0x00000002
+struct cff_op_write_reg {
+ unsigned char op;
+ uint addr;
+ uint value;
+} __packed;
+
+#define CFF_OP_POLL_REG 0x00000004
+struct cff_op_poll_reg {
+ unsigned char op;
+ uint addr;
+ uint value;
+ uint mask;
+} __packed;
+
+#define CFF_OP_WAIT_IRQ 0x00000005
+struct cff_op_wait_irq {
+ unsigned char op;
+} __packed;
+
+#define CFF_OP_RMW 0x0000000a
+
+#define CFF_OP_WRITE_MEM 0x0000000b
+struct cff_op_write_mem {
+ unsigned char op;
+ uint addr;
+ uint value;
+} __packed;
+
+#define CFF_OP_WRITE_MEMBUF 0x0000000c
+struct cff_op_write_membuf {
+ unsigned char op;
+ uint addr;
+ ushort count;
+ uint buffer[MEMBUF_SIZE];
+} __packed;
+
+#define CFF_OP_MEMORY_BASE 0x0000000d
+struct cff_op_memory_base {
+ unsigned char op;
+ uint base;
+ uint size;
+ uint gmemsize;
+} __packed;
+
+#define CFF_OP_HANG 0x0000000e
+struct cff_op_hang {
+ unsigned char op;
+} __packed;
+
+#define CFF_OP_EOF 0xffffffff
+struct cff_op_eof {
+ unsigned char op;
+} __packed;
+
+#define CFF_OP_VERIFY_MEM_FILE 0x00000007
+#define CFF_OP_WRITE_SURFACE_PARAMS 0x00000011
+struct cff_op_user_event {
+ unsigned char op;
+ unsigned int op1;
+ unsigned int op2;
+ unsigned int op3;
+ unsigned int op4;
+ unsigned int op5;
+} __packed;
+
+
+static void b64_encodeblock(unsigned char in[3], unsigned char out[4], int len)
+{
+ static const char tob64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno"
+ "pqrstuvwxyz0123456789+/";
+
+ out[0] = tob64[in[0] >> 2];
+ out[1] = tob64[((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4)];
+ out[2] = (unsigned char) (len > 1 ? tob64[((in[1] & 0x0f) << 2)
+ | ((in[2] & 0xc0) >> 6)] : '=');
+ out[3] = (unsigned char) (len > 2 ? tob64[in[2] & 0x3f] : '=');
+}
+
+static void b64_encode(const unsigned char *in_buf, int in_size,
+ unsigned char *out_buf, int out_bufsize, int *out_size)
+{
+ unsigned char in[3], out[4];
+ int i, len;
+
+ *out_size = 0;
+ while (in_size > 0) {
+ len = 0;
+ for (i = 0; i < 3; ++i) {
+ if (in_size-- > 0) {
+ in[i] = *in_buf++;
+ ++len;
+ } else
+ in[i] = 0;
+ }
+ if (len) {
+ b64_encodeblock(in, out, len);
+ if (out_bufsize < 4) {
+ pr_warn("kgsl: cffdump: %s: out of buffer\n",
+ __func__);
+ return;
+ }
+ for (i = 0; i < 4; ++i)
+ *out_buf++ = out[i];
+ *out_size += 4;
+ out_bufsize -= 4;
+ }
+ }
+}
+
+#define KLOG_TMPBUF_SIZE (1024)
+static void klog_printk(const char *fmt, ...)
+{
+ /* per-cpu klog formatting temporary buffer */
+ static char klog_buf[NR_CPUS][KLOG_TMPBUF_SIZE];
+
+ va_list args;
+ int len;
+ char *cbuf;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cbuf = klog_buf[smp_processor_id()];
+ va_start(args, fmt);
+ len = vsnprintf(cbuf, KLOG_TMPBUF_SIZE, fmt, args);
+ total_bytes += len;
+ va_end(args);
+ relay_write(chan, cbuf, len);
+ local_irq_restore(flags);
+}
+
+static struct cff_op_write_membuf cff_op_write_membuf;
+static void cffdump_membuf(int id, unsigned char *out_buf, int out_bufsize)
+{
+ void *data;
+ int len, out_size;
+ struct cff_op_write_mem cff_op_write_mem;
+
+ uint addr = cff_op_write_membuf.addr
+ - sizeof(uint)*cff_op_write_membuf.count;
+
+ if (!cff_op_write_membuf.count) {
+ pr_warn("kgsl: cffdump: membuf: count == 0, skipping");
+ return;
+ }
+
+ if (cff_op_write_membuf.count != 1) {
+ cff_op_write_membuf.op = CFF_OP_WRITE_MEMBUF;
+ cff_op_write_membuf.addr = addr;
+ len = sizeof(cff_op_write_membuf) -
+ sizeof(uint)*(MEMBUF_SIZE - cff_op_write_membuf.count);
+ data = &cff_op_write_membuf;
+ } else {
+ cff_op_write_mem.op = CFF_OP_WRITE_MEM;
+ cff_op_write_mem.addr = addr;
+ cff_op_write_mem.value = cff_op_write_membuf.buffer[0];
+ data = &cff_op_write_mem;
+ len = sizeof(cff_op_write_mem);
+ }
+ b64_encode(data, len, out_buf, out_bufsize, &out_size);
+ out_buf[out_size] = 0;
+ klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
+ cff_op_write_membuf.count = 0;
+ cff_op_write_membuf.addr = 0;
+}
+
+static void cffdump_printline(int id, uint opcode, uint op1, uint op2,
+ uint op3, uint op4, uint op5)
+{
+ struct cff_op_write_reg cff_op_write_reg;
+ struct cff_op_poll_reg cff_op_poll_reg;
+ struct cff_op_wait_irq cff_op_wait_irq;
+ struct cff_op_memory_base cff_op_memory_base;
+ struct cff_op_hang cff_op_hang;
+ struct cff_op_eof cff_op_eof;
+ struct cff_op_user_event cff_op_user_event;
+ unsigned char out_buf[sizeof(cff_op_write_membuf)/3*4 + 16];
+ void *data;
+ int len = 0, out_size;
+ long cur_secs;
+
+ spin_lock(&cffdump_lock);
+ if (opcode == CFF_OP_WRITE_MEM) {
+ if ((cff_op_write_membuf.addr != op1 &&
+ cff_op_write_membuf.count)
+ || (cff_op_write_membuf.count == MEMBUF_SIZE))
+ cffdump_membuf(id, out_buf, sizeof(out_buf));
+
+ cff_op_write_membuf.buffer[cff_op_write_membuf.count++] = op2;
+ cff_op_write_membuf.addr = op1 + sizeof(uint);
+ spin_unlock(&cffdump_lock);
+ return;
+ } else if (cff_op_write_membuf.count)
+ cffdump_membuf(id, out_buf, sizeof(out_buf));
+ spin_unlock(&cffdump_lock);
+
+ switch (opcode) {
+ case CFF_OP_WRITE_REG:
+ cff_op_write_reg.op = opcode;
+ cff_op_write_reg.addr = op1;
+ cff_op_write_reg.value = op2;
+ data = &cff_op_write_reg;
+ len = sizeof(cff_op_write_reg);
+ break;
+
+ case CFF_OP_POLL_REG:
+ cff_op_poll_reg.op = opcode;
+ cff_op_poll_reg.addr = op1;
+ cff_op_poll_reg.value = op2;
+ cff_op_poll_reg.mask = op3;
+ data = &cff_op_poll_reg;
+ len = sizeof(cff_op_poll_reg);
+ break;
+
+ case CFF_OP_WAIT_IRQ:
+ cff_op_wait_irq.op = opcode;
+ data = &cff_op_wait_irq;
+ len = sizeof(cff_op_wait_irq);
+ break;
+
+ case CFF_OP_MEMORY_BASE:
+ cff_op_memory_base.op = opcode;
+ cff_op_memory_base.base = op1;
+ cff_op_memory_base.size = op2;
+ cff_op_memory_base.gmemsize = op3;
+ data = &cff_op_memory_base;
+ len = sizeof(cff_op_memory_base);
+ break;
+
+ case CFF_OP_HANG:
+ cff_op_hang.op = opcode;
+ data = &cff_op_hang;
+ len = sizeof(cff_op_hang);
+ break;
+
+ case CFF_OP_EOF:
+ cff_op_eof.op = opcode;
+ data = &cff_op_eof;
+ len = sizeof(cff_op_eof);
+ break;
+
+ case CFF_OP_WRITE_SURFACE_PARAMS:
+ case CFF_OP_VERIFY_MEM_FILE:
+ cff_op_user_event.op = opcode;
+ cff_op_user_event.op1 = op1;
+ cff_op_user_event.op2 = op2;
+ cff_op_user_event.op3 = op3;
+ cff_op_user_event.op4 = op4;
+ cff_op_user_event.op5 = op5;
+ data = &cff_op_user_event;
+ len = sizeof(cff_op_user_event);
+ break;
+ }
+
+ if (len) {
+ b64_encode(data, len, out_buf, sizeof(out_buf), &out_size);
+ out_buf[out_size] = 0;
+ klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
+ } else
+ pr_warn("kgsl: cffdump: unhandled opcode: %d\n", opcode);
+
+ cur_secs = get_seconds();
+ if ((cur_secs - last_sec) > 10 || (last_sec - cur_secs) > 10) {
+ pr_info("kgsl: cffdump: total [bytes:%lu kB, syncmem:%lu kB], "
+ "seq#: %lu\n", total_bytes/1024, total_syncmem/1024,
+ serial_nr);
+ last_sec = cur_secs;
+ }
+}
+
+void kgsl_cffdump_init()
+{
+ struct dentry *debugfs_dir = kgsl_get_debugfs_dir();
+
+#ifdef ALIGN_CPU
+ cpumask_t mask;
+
+ cpumask_clear(&mask);
+ cpumask_set_cpu(0, &mask);
+ sched_setaffinity(0, &mask);
+#endif
+ if (!debugfs_dir || IS_ERR(debugfs_dir)) {
+ KGSL_CORE_ERR("Debugfs directory is bad\n");
+ return;
+ }
+
+ spin_lock_init(&cffdump_lock);
+
+ dir = debugfs_create_dir("cff", debugfs_dir);
+ if (!dir) {
+ KGSL_CORE_ERR("debugfs_create_dir failed\n");
+ return;
+ }
+
+ chan = create_channel(subbuf_size, n_subbufs);
+}
+
+void kgsl_cffdump_destroy()
+{
+ if (chan)
+ relay_flush(chan);
+ destroy_channel();
+ if (dir)
+ debugfs_remove(dir);
+}
+
+void kgsl_cffdump_open(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ if (!device->cff_dump_enable)
+ return;
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ kgsl_cffdump_memory_base(device,
+ KGSL_PAGETABLE_BASE,
+ KGSL_IOMMU_GLOBAL_MEM_BASE +
+ KGSL_IOMMU_GLOBAL_MEM_SIZE -
+ KGSL_PAGETABLE_BASE,
+ adreno_dev->gmem_size);
+ } else {
+ kgsl_cffdump_memory_base(device,
+ kgsl_mmu_get_base_addr(&device->mmu),
+ kgsl_mmu_get_ptsize(&device->mmu),
+ adreno_dev->gmem_size);
+ }
+}
+
+void kgsl_cffdump_memory_base(struct kgsl_device *device, unsigned int base,
+ unsigned int range, unsigned gmemsize)
+{
+ if (!device->cff_dump_enable)
+ return;
+ cffdump_printline(device->id, CFF_OP_MEMORY_BASE, base,
+ range, gmemsize, 0, 0);
+}
+
+void kgsl_cffdump_hang(struct kgsl_device *device)
+{
+ if (!device->cff_dump_enable)
+ return;
+ cffdump_printline(device->id, CFF_OP_HANG, 0, 0, 0, 0, 0);
+}
+
+void kgsl_cffdump_close(struct kgsl_device *device)
+{
+ if (!device->cff_dump_enable)
+ return;
+ cffdump_printline(device->id, CFF_OP_EOF, 0, 0, 0, 0, 0);
+}
+
+
+void kgsl_cffdump_user_event(struct kgsl_device *device,
+ unsigned int cff_opcode, unsigned int op1,
+ unsigned int op2, unsigned int op3,
+ unsigned int op4, unsigned int op5)
+{
+ if (!device->cff_dump_enable)
+ return;
+ cffdump_printline(-1, cff_opcode, op1, op2, op3, op4, op5);
+}
+
+void kgsl_cffdump_syncmem(struct kgsl_device *device,
+ struct kgsl_memdesc *memdesc, uint gpuaddr,
+ uint sizebytes, bool clean_cache)
+{
+ const void *src;
+
+ if (!device->cff_dump_enable)
+ return;
+
+ BUG_ON(memdesc == NULL);
+
+ total_syncmem += sizebytes;
+
+ src = (uint *)kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr);
+ if (memdesc->hostptr == NULL) {
+ KGSL_CORE_ERR(
+ "no kernel map for gpuaddr: 0x%08x, m->host: 0x%p, phys: %pa\n",
+ gpuaddr, memdesc->hostptr, &memdesc->physaddr);
+ return;
+ }
+
+ if (clean_cache) {
+ /* Ensure that this memory region is not read from the
+ * cache but fetched fresh */
+
+ mb();
+
+ kgsl_cache_range_op((struct kgsl_memdesc *)memdesc,
+ KGSL_CACHE_OP_INV);
+ }
+
+ while (sizebytes > 3) {
+ cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src,
+ 0, 0, 0);
+ gpuaddr += 4;
+ src += 4;
+ sizebytes -= 4;
+ }
+ if (sizebytes > 0)
+ cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src,
+ 0, 0, 0);
+ /* Unmap memory since kgsl_gpuaddr_to_vaddr was called */
+ kgsl_memdesc_unmap(memdesc);
+}
+
+void kgsl_cffdump_setmem(struct kgsl_device *device,
+ uint addr, uint value, uint sizebytes)
+{
+ if (!device || !device->cff_dump_enable)
+ return;
+
+ while (sizebytes > 3) {
+ /* Use 32bit memory writes as long as there's at least
+ * 4 bytes left */
+ cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value,
+ 0, 0, 0);
+ addr += 4;
+ sizebytes -= 4;
+ }
+ if (sizebytes > 0)
+ cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value,
+ 0, 0, 0);
+}
+
+void kgsl_cffdump_regwrite(struct kgsl_device *device, uint addr,
+ uint value)
+{
+ if (!device->cff_dump_enable)
+ return;
+
+ cffdump_printline(device->id, CFF_OP_WRITE_REG, addr, value,
+ 0, 0, 0);
+}
+
+void kgsl_cffdump_regpoll(struct kgsl_device *device, uint addr,
+ uint value, uint mask)
+{
+ if (!device->cff_dump_enable)
+ return;
+
+ cffdump_printline(device->id, CFF_OP_POLL_REG, addr, value,
+ mask, 0, 0);
+}
+
+void kgsl_cffdump_slavewrite(struct kgsl_device *device, uint addr, uint value)
+{
+ if (!device->cff_dump_enable)
+ return;
+
+ cffdump_printline(-1, CFF_OP_WRITE_REG, addr, value, 0, 0, 0);
+}
+
+int kgsl_cffdump_waitirq(struct kgsl_device *device)
+{
+ if (!device->cff_dump_enable)
+ return 0;
+
+ cffdump_printline(-1, CFF_OP_WAIT_IRQ, 0, 0, 0, 0, 0);
+
+ return 1;
+}
+EXPORT_SYMBOL(kgsl_cffdump_waitirq);
+
+static int subbuf_start_handler(struct rchan_buf *buf,
+ void *subbuf, void *prev_subbuf, uint prev_padding)
+{
+ pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf"
+ "=%p, prev_padding=%08x)\n", subbuf, prev_subbuf, prev_padding);
+
+ if (relay_buf_full(buf)) {
+ if (!suspended) {
+ suspended = 1;
+ pr_warn("kgsl: cffdump: relay: cpu %d buffer full!!!\n",
+ smp_processor_id());
+ }
+ dropped++;
+ return 0;
+ } else if (suspended) {
+ suspended = 0;
+ pr_warn("kgsl: cffdump: relay: cpu %d buffer no longer full.\n",
+ smp_processor_id());
+ }
+
+ subbuf_start_reserve(buf, 0);
+ return 1;
+}
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent, unsigned short mode, struct rchan_buf *buf,
+ int *is_global)
+{
+ return debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ pr_info("kgsl: cffdump: %s()\n", __func__);
+ debugfs_remove(dentry);
+ return 0;
+}
+
+/*
+ * relay callbacks
+ */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = subbuf_start_handler,
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+/**
+ * create_channel - creates channel /debug/klog/cpuXXX
+ *
+ * Creates channel along with associated produced/consumed control files
+ *
+ * Returns channel on success, NULL otherwise
+ */
+static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs)
+{
+ struct rchan *chan;
+
+ pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, "
+ "n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir);
+
+ chan = relay_open("cpu", dir, subbuf_size,
+ n_subbufs, &relay_callbacks, NULL);
+ if (!chan) {
+ KGSL_CORE_ERR("relay_open failed\n");
+ return NULL;
+ }
+
+ suspended = 0;
+ dropped = 0;
+
+ return chan;
+}
+
+/**
+ * destroy_channel - destroys channel /debug/kgsl/cff/cpuXXX
+ *
+ * Destroys channel along with associated produced/consumed control files
+ */
+static void destroy_channel(void)
+{
+ pr_info("kgsl: cffdump: relay: destroy_channel\n");
+ if (chan) {
+ relay_close(chan);
+ chan = NULL;
+ }
+}
+
+int kgsl_cff_dump_enable_set(void *data, u64 val)
+{
+ int ret = 0;
+ struct kgsl_device *device = (struct kgsl_device *)data;
+ int i;
+
+ mutex_lock(&kgsl_driver.devlock);
+ if (val) {
+ /* Check if CFF is on for some other device already */
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ if (kgsl_driver.devp[i]) {
+ struct kgsl_device *device_temp =
+ kgsl_driver.devp[i];
+ if (device_temp->cff_dump_enable &&
+ device != device_temp) {
+ KGSL_CORE_ERR(
+ "CFF is on for another device %d\n",
+ device_temp->id);
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+ }
+ if (!device->cff_dump_enable) {
+ device->cff_dump_enable = 1;
+ }
+ } else if (device->cff_dump_enable && !val) {
+ device->cff_dump_enable = 0;
+ }
+done:
+ mutex_unlock(&kgsl_driver.devlock);
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_cff_dump_enable_set);
+
+int kgsl_cff_dump_enable_get(void *data, u64 *val)
+{
+ struct kgsl_device *device = (struct kgsl_device *)data;
+ *val = device->cff_dump_enable;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_cff_dump_enable_get);
diff --git a/drivers/gpu/msm2/kgsl_cffdump.h b/drivers/gpu/msm2/kgsl_cffdump.h
new file mode 100644
index 0000000..2852e0f
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_cffdump.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2010-2011,2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_CFFDUMP_H
+#define __KGSL_CFFDUMP_H
+
+#include <linux/types.h>
+
+extern unsigned int kgsl_cff_dump_enable;
+
+static inline bool kgsl_cffdump_flags_no_memzero(void) { return true; }
+
+struct kgsl_device_private;
+
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+
+void kgsl_cffdump_init(void);
+void kgsl_cffdump_destroy(void);
+void kgsl_cffdump_open(struct kgsl_device *device);
+void kgsl_cffdump_close(struct kgsl_device *device);
+void kgsl_cffdump_syncmem(struct kgsl_device *,
+ struct kgsl_memdesc *memdesc, uint physaddr, uint sizebytes,
+ bool clean_cache);
+void kgsl_cffdump_setmem(struct kgsl_device *device, uint addr,
+ uint value, uint sizebytes);
+void kgsl_cffdump_regwrite(struct kgsl_device *device, uint addr,
+ uint value);
+void kgsl_cffdump_regpoll(struct kgsl_device *device, uint addr,
+ uint value, uint mask);
+bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
+ const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
+ bool check_only);
+void kgsl_cffdump_user_event(struct kgsl_device *device,
+ unsigned int cff_opcode, unsigned int op1,
+ unsigned int op2, unsigned int op3,
+ unsigned int op4, unsigned int op5);
+
+void kgsl_cffdump_memory_base(struct kgsl_device *device, unsigned int base,
+ unsigned int range, unsigned int gmemsize);
+
+void kgsl_cffdump_hang(struct kgsl_device *device);
+int kgsl_cff_dump_enable_set(void *data, u64 val);
+int kgsl_cff_dump_enable_get(void *data, u64 *val);
+
+#else
+
+static inline void kgsl_cffdump_init(void)
+{
+ return;
+}
+
+static inline void kgsl_cffdump_destroy(void)
+{
+ return;
+}
+
+static inline void kgsl_cffdump_open(struct kgsl_device *device)
+{
+ return;
+}
+
+static inline void kgsl_cffdump_close(struct kgsl_device *device)
+{
+ return;
+}
+
+static inline void kgsl_cffdump_syncmem(struct kgsl_device *device,
+ struct kgsl_memdesc *memdesc, uint physaddr, uint sizebytes,
+ bool clean_cache)
+{
+ return;
+}
+
+static inline void kgsl_cffdump_setmem(struct kgsl_device *device, uint addr,
+ uint value, uint sizebytes)
+{
+ return;
+}
+
+static inline void kgsl_cffdump_regwrite(struct kgsl_device *device, uint addr,
+ uint value)
+{
+ return;
+}
+
+static inline void kgsl_cffdump_regpoll(struct kgsl_device *device, uint addr,
+ uint value, uint mask)
+{
+ return;
+}
+
+static inline bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
+ const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
+ bool check_only)
+{
+ return false;
+}
+
+static inline void kgsl_cffdump_memory_base(struct kgsl_device *device,
+ unsigned int base, unsigned int range, unsigned int gmemsize)
+{
+ return;
+}
+
+static inline void kgsl_cffdump_hang(struct kgsl_device *device)
+{
+ return;
+}
+
+static inline void kgsl_cffdump_user_event(struct kgsl_device *device,
+ unsigned int cff_opcode, unsigned int op1,
+ unsigned int op2, unsigned int op3,
+ unsigned int op4, unsigned int op5)
+{
+ return;
+}
+static inline int kgsl_cff_dump_enable_set(void *data, u64 val)
+{
+ return -EINVAL;
+}
+
+static inline int kgsl_cff_dump_enable_get(void *data, u64 *val)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_MSM_KGSL_CFF_DUMP */
+
+#endif /* __KGSL_CFFDUMP_H */
diff --git a/drivers/gpu/msm2/kgsl_debugfs.c b/drivers/gpu/msm2/kgsl_debugfs.c
new file mode 100644
index 0000000..d62a222
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_debugfs.c
@@ -0,0 +1,378 @@
+/* Copyright (c) 2002,2008-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
+
+/*default log levels is error for everything*/
+#define KGSL_LOG_LEVEL_MAX 7
+
+struct dentry *kgsl_debugfs_dir;
+static struct dentry *pm_d_debugfs;
+struct dentry *proc_d_debugfs;
+
+static int pm_dump_set(void *data, u64 val)
+{
+ struct kgsl_device *device = data;
+
+ if (val) {
+ mutex_lock(&device->mutex);
+ kgsl_postmortem_dump(device, 1);
+ mutex_unlock(&device->mutex);
+ }
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(pm_dump_fops,
+ NULL,
+ pm_dump_set, "%llu\n");
+
+static int pm_regs_enabled_set(void *data, u64 val)
+{
+ struct kgsl_device *device = data;
+ device->pm_regs_enabled = val ? 1 : 0;
+ return 0;
+}
+
+static int pm_regs_enabled_get(void *data, u64 *val)
+{
+ struct kgsl_device *device = data;
+ *val = device->pm_regs_enabled;
+ return 0;
+}
+
+static int pm_ib_enabled_set(void *data, u64 val)
+{
+ struct kgsl_device *device = data;
+ device->pm_ib_enabled = val ? 1 : 0;
+ return 0;
+}
+
+static int pm_ib_enabled_get(void *data, u64 *val)
+{
+ struct kgsl_device *device = data;
+ *val = device->pm_ib_enabled;
+ return 0;
+}
+
+static int pm_enabled_set(void *data, u64 val)
+{
+ struct kgsl_device *device = data;
+ device->pm_dump_enable = val;
+ return 0;
+}
+
+static int pm_enabled_get(void *data, u64 *val)
+{
+ struct kgsl_device *device = data;
+ *val = device->pm_dump_enable;
+ return 0;
+}
+
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops,
+ pm_regs_enabled_get,
+ pm_regs_enabled_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_ib_enabled_fops,
+ pm_ib_enabled_get,
+ pm_ib_enabled_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_enabled_fops,
+ pm_enabled_get,
+ pm_enabled_set, "%llu\n");
+
+static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
+{
+ *log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
+ return 0;
+}
+
+#define KGSL_DEBUGFS_LOG(__log) \
+static int __log ## _set(void *data, u64 val) \
+{ \
+ struct kgsl_device *device = data; \
+ return kgsl_log_set(&device->__log, data, val); \
+} \
+static int __log ## _get(void *data, u64 *val) \
+{ \
+ struct kgsl_device *device = data; \
+ *val = device->__log; \
+ return 0; \
+} \
+DEFINE_SIMPLE_ATTRIBUTE(__log ## _fops, \
+__log ## _get, __log ## _set, "%llu\n"); \
+
+KGSL_DEBUGFS_LOG(drv_log);
+KGSL_DEBUGFS_LOG(cmd_log);
+KGSL_DEBUGFS_LOG(ctxt_log);
+KGSL_DEBUGFS_LOG(mem_log);
+KGSL_DEBUGFS_LOG(pwr_log);
+
+static int memfree_hist_print(struct seq_file *s, void *unused)
+{
+ void *base = kgsl_driver.memfree_hist.base_hist_rb;
+
+ struct kgsl_memfree_hist_elem *wptr = kgsl_driver.memfree_hist.wptr;
+ struct kgsl_memfree_hist_elem *p;
+ char str[16];
+
+ seq_printf(s, "%8s %8s %8s %11s\n",
+ "pid", "gpuaddr", "size", "flags");
+
+ mutex_lock(&kgsl_driver.memfree_hist_mutex);
+ p = wptr;
+ for (;;) {
+ kgsl_get_memory_usage(str, sizeof(str), p->flags);
+ /*
+ * if the ring buffer is not filled up yet
+ * all its empty elems have size==0
+ * just skip them ...
+ */
+ if (p->size)
+ seq_printf(s, "%8d %08x %8d %11s\n",
+ p->pid, p->gpuaddr, p->size, str);
+ p++;
+ if ((void *)p >= base + kgsl_driver.memfree_hist.size)
+ p = (struct kgsl_memfree_hist_elem *) base;
+
+ if (p == kgsl_driver.memfree_hist.wptr)
+ break;
+ }
+ mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+ return 0;
+}
+
+static int memfree_hist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, memfree_hist_print, inode->i_private);
+}
+
+static const struct file_operations memfree_hist_fops = {
+ .open = memfree_hist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void kgsl_device_debugfs_init(struct kgsl_device *device)
+{
+ if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
+ device->d_debugfs = debugfs_create_dir(device->name,
+ kgsl_debugfs_dir);
+
+ if (!device->d_debugfs || IS_ERR(device->d_debugfs))
+ return;
+
+ debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
+ &cmd_log_fops);
+ debugfs_create_file("log_level_ctxt", 0644, device->d_debugfs, device,
+ &ctxt_log_fops);
+ debugfs_create_file("log_level_drv", 0644, device->d_debugfs, device,
+ &drv_log_fops);
+ debugfs_create_file("log_level_mem", 0644, device->d_debugfs, device,
+ &mem_log_fops);
+ debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
+ &pwr_log_fops);
+ debugfs_create_file("memfree_history", 0444, device->d_debugfs, device,
+ &memfree_hist_fops);
+
+ /* Create postmortem dump control files */
+
+ pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
+
+ if (IS_ERR(pm_d_debugfs))
+ return;
+
+ debugfs_create_file("dump", 0600, pm_d_debugfs, device,
+ &pm_dump_fops);
+ debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device,
+ &pm_regs_enabled_fops);
+ debugfs_create_file("ib_enabled", 0644, pm_d_debugfs, device,
+ &pm_ib_enabled_fops);
+ debugfs_create_file("enable", 0644, pm_d_debugfs, device,
+ &pm_enabled_fops);
+
+}
+
+static const char * const memtype_strings[] = {
+ "gpumem",
+ "pmem",
+ "ashmem",
+ "usermap",
+ "ion",
+};
+
+static const char *memtype_str(int memtype)
+{
+ if (memtype < ARRAY_SIZE(memtype_strings))
+ return memtype_strings[memtype];
+ return "unknown";
+}
+
+static char get_alignflag(const struct kgsl_memdesc *m)
+{
+ int align = kgsl_memdesc_get_align(m);
+ if (align >= ilog2(SZ_1M))
+ return 'L';
+ else if (align >= ilog2(SZ_64K))
+ return 'l';
+ return '-';
+}
+
+static char get_cacheflag(const struct kgsl_memdesc *m)
+{
+ static const char table[] = {
+ [KGSL_CACHEMODE_WRITECOMBINE] = '-',
+ [KGSL_CACHEMODE_UNCACHED] = 'u',
+ [KGSL_CACHEMODE_WRITEBACK] = 'b',
+ [KGSL_CACHEMODE_WRITETHROUGH] = 't',
+ };
+ return table[kgsl_memdesc_get_cachemode(m)];
+}
+
+static void print_mem_entry(struct seq_file *s, struct kgsl_mem_entry *entry)
+{
+ char flags[6];
+ char usage[16];
+ struct kgsl_memdesc *m = &entry->memdesc;
+
+ flags[0] = kgsl_memdesc_is_global(m) ? 'g' : '-';
+ flags[1] = m->flags & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
+ flags[2] = get_alignflag(m);
+ flags[3] = get_cacheflag(m);
+ flags[4] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-';
+ flags[5] = '\0';
+
+ kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
+
+ seq_printf(s, "%08x %08lx %8d %5d %5s %10s %16s %5d\n",
+ m->gpuaddr, m->useraddr, m->size, entry->id, flags,
+ memtype_str(entry->memtype), usage, m->sglen);
+}
+
+static int process_mem_print(struct seq_file *s, void *unused)
+{
+ struct kgsl_mem_entry *entry;
+ struct rb_node *node;
+ struct kgsl_process_private *private = s->private;
+ int next = 0;
+
+ seq_printf(s, "%8s %8s %8s %5s %5s %10s %16s %5s\n",
+ "gpuaddr", "useraddr", "size", "id", "flags", "type",
+ "usage", "sglen");
+
+ /* print all entries with a GPU address */
+ spin_lock(&private->mem_lock);
+
+ for (node = rb_first(&private->mem_rb); node; node = rb_next(node)) {
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+ print_mem_entry(s, entry);
+ }
+
+
+ /* now print all the unbound entries */
+ while (1) {
+ entry = idr_get_next(&private->mem_idr, &next);
+ if (entry == NULL)
+ break;
+ if (entry->memdesc.gpuaddr == 0)
+ print_mem_entry(s, entry);
+ next++;
+ }
+ spin_unlock(&private->mem_lock);
+
+ return 0;
+}
+
+static int process_mem_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, process_mem_print, inode->i_private);
+}
+
+static const struct file_operations process_mem_fops = {
+ .open = process_mem_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+
+/**
+ * kgsl_process_init_debugfs() - Initialize debugfs for a process
+ * @private: Pointer to process private structure created for the process
+ *
+ * @returns: 0 on success, error code otherwise
+ *
+ * kgsl_process_init_debugfs() is called at the time of creating the
+ * process struct when a process opens kgsl device for the first time.
+ * The function creates the debugfs files for the process. If debugfs is
+ * disabled in the kernel, we ignore that error and return as successful.
+ */
+int
+kgsl_process_init_debugfs(struct kgsl_process_private *private)
+{
+ unsigned char name[16];
+ int ret = 0;
+ struct dentry *dentry;
+
+ snprintf(name, sizeof(name), "%d", private->pid);
+
+ private->debug_root = debugfs_create_dir(name, proc_d_debugfs);
+
+ if (!private->debug_root)
+ return -EINVAL;
+
+ private->debug_root->d_inode->i_uid = proc_d_debugfs->d_inode->i_uid;
+ private->debug_root->d_inode->i_gid = proc_d_debugfs->d_inode->i_gid;
+
+ /*
+ * debugfs_create_dir() and debugfs_create_file() both
+ * return -ENODEV if debugfs is disabled in the kernel.
+ * We make a distinction between these two functions
+ * failing and debugfs being disabled in the kernel.
+ * In the first case, we abort process private struct
+ * creation, in the second we continue without any changes.
+ * So if debugfs is disabled in kernel, return as
+ * success.
+ */
+ dentry = debugfs_create_file("mem", 0400, private->debug_root, private,
+ &process_mem_fops);
+
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+
+ if (ret == -ENODEV)
+ ret = 0;
+ } else if (dentry) {
+ dentry->d_inode->i_uid = proc_d_debugfs->d_inode->i_uid;
+ dentry->d_inode->i_gid = proc_d_debugfs->d_inode->i_gid;
+ }
+
+ return ret;
+}
+
+void kgsl_core_debugfs_init(void)
+{
+ kgsl_debugfs_dir = debugfs_create_dir("kgsl", 0);
+ proc_d_debugfs = debugfs_create_dir("proc", kgsl_debugfs_dir);
+}
+
+void kgsl_core_debugfs_close(void)
+{
+ debugfs_remove_recursive(kgsl_debugfs_dir);
+}
diff --git a/drivers/gpu/msm2/kgsl_debugfs.h b/drivers/gpu/msm2/kgsl_debugfs.h
new file mode 100644
index 0000000..b2f137c
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_debugfs.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _KGSL_DEBUGFS_H
+#define _KGSL_DEBUGFS_H
+
+struct kgsl_device;
+struct kgsl_process_private;
+
+#ifdef CONFIG_DEBUG_FS
+void kgsl_core_debugfs_init(void);
+void kgsl_core_debugfs_close(void);
+
+int kgsl_device_debugfs_init(struct kgsl_device *device);
+
+extern struct dentry *kgsl_debugfs_dir;
+static inline struct dentry *kgsl_get_debugfs_dir(void)
+{
+ return kgsl_debugfs_dir;
+}
+
+int kgsl_process_init_debugfs(struct kgsl_process_private *);
+#else
+static inline void kgsl_core_debugfs_init(void) { }
+static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { }
+static inline void kgsl_core_debugfs_close(void) { }
+static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; }
+static inline int kgsl_process_init_debugfs(struct kgsl_process_private *)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/drivers/gpu/msm2/kgsl_device.h b/drivers/gpu/msm2/kgsl_device.h
new file mode 100644
index 0000000..c9be418
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_device.h
@@ -0,0 +1,733 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_DEVICE_H
+#define __KGSL_DEVICE_H
+
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/pm_qos.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_pwrctrl.h"
+#include "kgsl_log.h"
+#include "kgsl_pwrscale.h"
+#include <linux/sync.h>
+
+#define KGSL_TIMEOUT_NONE 0
+#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
+#define KGSL_TIMEOUT_PART 50 /* 50 msec */
+#define KGSL_TIMEOUT_LONG_IB_DETECTION 2000 /* 2 sec*/
+
+#define FIRST_TIMEOUT (HZ / 2)
+
+
+/* KGSL device state is initialized to INIT when platform_probe *
+ * sucessfully initialized the device. Once a device has been opened *
+ * (started) it becomes active. NAP implies that only low latency *
+ * resources (for now clocks on some platforms) are off. SLEEP implies *
+ * that the KGSL module believes a device is idle (has been inactive *
+ * past its timer) and all system resources are released. SUSPEND is *
+ * requested by the kernel and will be enforced upon all open devices. */
+
+#define KGSL_STATE_NONE 0x00000000
+#define KGSL_STATE_INIT 0x00000001
+#define KGSL_STATE_ACTIVE 0x00000002
+#define KGSL_STATE_NAP 0x00000004
+#define KGSL_STATE_SLEEP 0x00000008
+#define KGSL_STATE_SUSPEND 0x00000010
+#define KGSL_STATE_HUNG 0x00000020
+#define KGSL_STATE_SLUMBER 0x00000080
+
+#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000
+
+#define KGSL_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+
+/*
+ * KGSL event types - these are passed to the event callback when the event
+ * expires or is cancelled
+ */
+
+#define KGSL_EVENT_TIMESTAMP_RETIRED 0
+#define KGSL_EVENT_CANCELLED 1
+
+/*
+ * "list" of event types for ftrace symbolic magic
+ */
+
+#define KGSL_EVENT_TYPES \
+ { KGSL_EVENT_TIMESTAMP_RETIRED, "retired" }, \
+ { KGSL_EVENT_CANCELLED, "cancelled" }
+
+struct kgsl_device;
+struct platform_device;
+struct kgsl_device_private;
+struct kgsl_context;
+struct kgsl_power_stats;
+struct kgsl_event;
+struct kgsl_cmdbatch;
+
+struct kgsl_functable {
+ /* Mandatory functions - these functions must be implemented
+ by the client device. The driver will not check for a NULL
+ pointer before calling the hook.
+ */
+ void (*regread) (struct kgsl_device *device,
+ unsigned int offsetwords, unsigned int *value);
+ void (*regwrite) (struct kgsl_device *device,
+ unsigned int offsetwords, unsigned int value);
+ int (*idle) (struct kgsl_device *device);
+ bool (*isidle) (struct kgsl_device *device);
+ int (*suspend_context) (struct kgsl_device *device);
+ int (*init) (struct kgsl_device *device);
+ int (*start) (struct kgsl_device *device);
+ int (*stop) (struct kgsl_device *device);
+ int (*getproperty) (struct kgsl_device *device,
+ enum kgsl_property_type type, void *value,
+ unsigned int sizebytes);
+ int (*waittimestamp) (struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp,
+ unsigned int msecs);
+ unsigned int (*readtimestamp) (struct kgsl_device *device,
+ struct kgsl_context *context, enum kgsl_timestamp_type type);
+ int (*issueibcmds) (struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamps);
+ int (*setup_pt)(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable);
+ void (*cleanup_pt)(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable);
+ void (*power_stats)(struct kgsl_device *device,
+ struct kgsl_power_stats *stats);
+ void (*irqctrl)(struct kgsl_device *device, int state);
+ unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
+ void * (*snapshot)(struct kgsl_device *device, void *snapshot,
+ int *remain, int hang);
+ irqreturn_t (*irq_handler)(struct kgsl_device *device);
+ int (*drain)(struct kgsl_device *device);
+ /* Optional functions - these functions are not mandatory. The
+ driver will check that the function pointer is not NULL before
+ calling the hook */
+ int (*setstate) (struct kgsl_device *device, unsigned int context_id,
+ uint32_t flags);
+ struct kgsl_context *(*drawctxt_create) (struct kgsl_device_private *,
+ uint32_t *flags);
+ int (*drawctxt_detach) (struct kgsl_context *context);
+ void (*drawctxt_destroy) (struct kgsl_context *context);
+ long (*ioctl) (struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data);
+ int (*setproperty) (struct kgsl_device *device,
+ enum kgsl_property_type type, void *value,
+ unsigned int sizebytes);
+ int (*postmortem_dump) (struct kgsl_device *device, int manual);
+ void (*drawctxt_sched)(struct kgsl_device *device,
+ struct kgsl_context *context);
+ void (*resume)(struct kgsl_device *device);
+};
+
+/* MH register values */
+struct kgsl_mh {
+ unsigned int mharb;
+ unsigned int mh_intf_cfg1;
+ unsigned int mh_intf_cfg2;
+ uint32_t mpu_base;
+ int mpu_range;
+};
+
+typedef void (*kgsl_event_func)(struct kgsl_device *, void *, u32, u32, u32);
+
+struct kgsl_event {
+ struct kgsl_context *context;
+ uint32_t timestamp;
+ kgsl_event_func func;
+ void *priv;
+ struct list_head list;
+ void *owner;
+ unsigned int created;
+};
+
+/**
+ * struct kgsl_cmdbatch - KGSl command descriptor
+ * @device: KGSL GPU device that the command was created for
+ * @context: KGSL context that created the command
+ * @timestamp: Timestamp assigned to the command
+ * @flags: flags
+ * @priv: Internal flags
+ * @fault_policy: Internal policy describing how to handle this command in case
+ * of a fault
+ * @fault_recovery: recovery actions actually tried for this batch
+ * @ibcount: Number of IBs in the command list
+ * @ibdesc: Pointer to the list of IBs
+ * @expires: Point in time when the cmdbatch is considered to be hung
+ * @invalid: non-zero if the dispatcher determines the command and the owning
+ * context should be invalidated
+ * @refcount: kref structure to maintain the reference count
+ * @synclist: List of context/timestamp tuples to wait for before issuing
+ * @priority: Priority of the cmdbatch (inherited from the context)
+ *
+ * This struture defines an atomic batch of command buffers issued from
+ * userspace.
+ */
+struct kgsl_cmdbatch {
+ struct kgsl_device *device;
+ struct kgsl_context *context;
+ spinlock_t lock;
+ uint32_t timestamp;
+ uint32_t flags;
+ unsigned long priv;
+ unsigned long fault_policy;
+ unsigned long fault_recovery;
+ uint32_t ibcount;
+ struct kgsl_ibdesc *ibdesc;
+ unsigned long expires;
+ int invalid;
+ struct kref refcount;
+ struct list_head synclist;
+ int priority;
+};
+
+/**
+ * enum kgsl_cmdbatch_priv - Internal cmdbatch flags
+ * @CMDBATCH_FLAG_SKIP - skip the entire command batch
+ * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch
+ * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission
+ */
+
+enum kgsl_cmdbatch_priv {
+ CMDBATCH_FLAG_SKIP = 0,
+ CMDBATCH_FLAG_FORCE_PREAMBLE,
+ CMDBATCH_FLAG_WFI,
+};
+
+struct kgsl_device {
+ struct device *dev;
+ const char *name;
+ unsigned int ver_major;
+ unsigned int ver_minor;
+ uint32_t flags;
+ enum kgsl_deviceid id;
+
+ /* Starting physical address for GPU registers */
+ unsigned long reg_phys;
+
+ /* Starting Kernel virtual address for GPU registers */
+ void *reg_virt;
+
+ /* Total memory size for all GPU registers */
+ unsigned int reg_len;
+
+ /* Kernel virtual address for GPU shader memory */
+ void *shader_mem_virt;
+
+ /* Starting physical address for GPU shader memory */
+ unsigned long shader_mem_phys;
+
+ /* GPU shader memory size */
+ unsigned int shader_mem_len;
+ struct kgsl_memdesc memstore;
+ const char *iomemname;
+ const char *shadermemname;
+
+ struct kgsl_mh mh;
+ struct kgsl_mmu mmu;
+ struct completion hwaccess_gate;
+ const struct kgsl_functable *ftbl;
+ struct work_struct idle_check_ws;
+ struct timer_list idle_timer;
+ struct kgsl_pwrctrl pwrctrl;
+ int open_count;
+
+ struct mutex mutex;
+ uint32_t state;
+ uint32_t requested_state;
+
+ atomic_t active_cnt;
+
+ wait_queue_head_t wait_queue;
+ wait_queue_head_t active_cnt_wq;
+ struct workqueue_struct *work_queue;
+ struct device *parentdev;
+ struct dentry *d_debugfs;
+ struct idr context_idr;
+ rwlock_t context_lock;
+
+ void *snapshot; /* Pointer to the snapshot memory region */
+ int snapshot_maxsize; /* Max size of the snapshot region */
+ int snapshot_size; /* Current size of the snapshot region */
+ u32 snapshot_timestamp; /* Timestamp of the last valid snapshot */
+ u32 snapshot_faultcount; /* Total number of faults since boot */
+ int snapshot_frozen; /* 1 if the snapshot output is frozen until
+ it gets read by the user. This avoids
+ losing the output on multiple hangs */
+ struct kobject snapshot_kobj;
+
+ /*
+ * List of GPU buffers that have been frozen in memory until they can be
+ * dumped
+ */
+ struct list_head snapshot_obj_list;
+ /* List of IB's to be dumped */
+ struct list_head snapshot_cp_list;
+ /* Work item that saves snapshot's frozen object data */
+ struct work_struct snapshot_obj_ws;
+ /* snapshot memory holding the hanging IB's objects in snapshot */
+ void *snapshot_cur_ib_objs;
+ /* Size of snapshot_cur_ib_objs */
+ int snapshot_cur_ib_objs_size;
+
+ /* Logging levels */
+ int cmd_log;
+ int ctxt_log;
+ int drv_log;
+ int mem_log;
+ int pwr_log;
+ int pm_dump_enable;
+ struct kgsl_pwrscale pwrscale;
+ struct kobject pwrscale_kobj;
+ struct work_struct ts_expired_ws;
+ struct list_head events;
+ struct list_head events_pending_list;
+ unsigned int events_last_timestamp;
+ s64 on_time;
+
+ /* Postmortem Control switches */
+ int pm_regs_enabled;
+ int pm_ib_enabled;
+
+ int reset_counter; /* Track how many GPU core resets have occured */
+ int cff_dump_enable;
+};
+
+void kgsl_process_events(struct work_struct *work);
+void kgsl_check_fences(struct work_struct *work);
+
+#define KGSL_DEVICE_COMMON_INIT(_dev) \
+ .hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
+ .idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
+ kgsl_idle_check),\
+ .ts_expired_ws = __WORK_INITIALIZER((_dev).ts_expired_ws,\
+ kgsl_process_events),\
+ .snapshot_obj_ws = \
+ __WORK_INITIALIZER((_dev).snapshot_obj_ws,\
+ kgsl_snapshot_save_frozen_objs),\
+ .context_idr = IDR_INIT((_dev).context_idr),\
+ .events = LIST_HEAD_INIT((_dev).events),\
+ .events_pending_list = LIST_HEAD_INIT((_dev).events_pending_list), \
+ .wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
+ .active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
+ .mutex = __MUTEX_INITIALIZER((_dev).mutex),\
+ .state = KGSL_STATE_INIT,\
+ .ver_major = DRIVER_VERSION_MAJOR,\
+ .ver_minor = DRIVER_VERSION_MINOR
+
+
+/* bits for struct kgsl_context.priv */
+/* the context has been destroyed by userspace and is no longer using the gpu */
+#define KGSL_CONTEXT_DETACHED 0
+/* the context has caused a pagefault */
+#define KGSL_CONTEXT_PAGEFAULT 1
+
+struct kgsl_process_private;
+/**
+ * struct kgsl_context - Master structure for a KGSL context object
+ * @refcount: kref object for reference counting the context
+ * @id: integer identifier for the context
+ * @priv: in-kernel context flags, use KGSL_CONTEXT_* values
+ * @dev_priv: pointer to the owning device instance
+ * @reset_status: status indication whether a gpu reset occured and whether
+ * this context was responsible for causing it
+ * @wait_on_invalid_ts: flag indicating if this context has tried to wait on a
+ * bad timestamp
+ * @timeline: sync timeline used to create fences that can be signaled when a
+ * sync_pt timestamp expires
+ * @events: list head of pending events for this context
+ * @events_list: list node for the list of all contexts that have pending events
+ * @pid: process that owns this context.
+ * @pagefault: flag set if this context caused a pagefault.
+ * @pagefault_ts: global timestamp of the pagefault, if KGSL_CONTEXT_PAGEFAULT
+ * is set.
+ */
+struct kgsl_context {
+ struct kref refcount;
+ uint32_t id;
+ pid_t pid;
+ struct kgsl_device_private *dev_priv;
+ struct kgsl_process_private *proc_priv;
+ unsigned long priv;
+ struct kgsl_device *device;
+ unsigned int reset_status;
+ bool wait_on_invalid_ts;
+ struct sync_timeline *timeline;
+ struct list_head events;
+ struct list_head events_list;
+ unsigned int pagefault_ts;
+};
+
+struct kgsl_process_private {
+ unsigned int refcnt;
+ pid_t pid;
+ spinlock_t mem_lock;
+
+ /* General refcount for process private struct obj */
+ struct kref refcount;
+ /* Mutex to synchronize access to each process_private struct obj */
+ struct mutex process_private_mutex;
+
+ struct rb_root mem_rb;
+ struct idr mem_idr;
+ struct kgsl_pagetable *pagetable;
+ struct list_head list;
+ struct kobject kobj;
+ struct dentry *debug_root;
+
+ struct {
+ unsigned int cur;
+ unsigned int max;
+ } stats[KGSL_MEM_ENTRY_MAX];
+};
+
+struct kgsl_device_private {
+ struct kgsl_device *device;
+ struct kgsl_process_private *process_priv;
+};
+
+struct kgsl_power_stats {
+ s64 total_time;
+ s64 busy_time;
+};
+
+struct kgsl_device *kgsl_get_device(int dev_idx);
+
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+ kgsl_event_func func, void *priv, void *owner);
+
+void kgsl_cancel_event(struct kgsl_device *device, struct kgsl_context *context,
+ unsigned int timestamp, kgsl_event_func func, void *priv);
+
+static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
+ unsigned int type, size_t size)
+{
+ priv->stats[type].cur += size;
+ if (priv->stats[type].max < priv->stats[type].cur)
+ priv->stats[type].max = priv->stats[type].cur;
+}
+
+static inline void kgsl_regread(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value)
+{
+ device->ftbl->regread(device, offsetwords, value);
+}
+
+static inline void kgsl_regwrite(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value)
+{
+ device->ftbl->regwrite(device, offsetwords, value);
+}
+
+static inline int kgsl_idle(struct kgsl_device *device)
+{
+ return device->ftbl->idle(device);
+}
+
+static inline unsigned int kgsl_gpuid(struct kgsl_device *device,
+ unsigned int *chipid)
+{
+ return device->ftbl->gpuid(device, chipid);
+}
+
+static inline unsigned int kgsl_readtimestamp(struct kgsl_device *device,
+ struct kgsl_context *context,
+ enum kgsl_timestamp_type type)
+{
+ return device->ftbl->readtimestamp(device, context, type);
+}
+
+static inline int kgsl_create_device_sysfs_files(struct device *root,
+ const struct device_attribute **list)
+{
+ int ret = 0, i;
+ for (i = 0; list[i] != NULL; i++)
+ ret |= device_create_file(root, list[i]);
+ return ret;
+}
+
+static inline void kgsl_remove_device_sysfs_files(struct device *root,
+ const struct device_attribute **list)
+{
+ int i;
+ for (i = 0; list[i] != NULL; i++)
+ device_remove_file(root, list[i]);
+}
+
+static inline struct kgsl_mmu *
+kgsl_get_mmu(struct kgsl_device *device)
+{
+ return (struct kgsl_mmu *) (device ? &device->mmu : NULL);
+}
+
+static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
+ return kgsl_driver.devp[i];
+ }
+
+ return NULL;
+}
+
+static inline int kgsl_create_device_workqueue(struct kgsl_device *device)
+{
+ device->work_queue = create_singlethread_workqueue(device->name);
+ if (!device->work_queue) {
+ KGSL_DRV_ERR(device,
+ "create_singlethread_workqueue(%s) failed\n",
+ device->name);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int kgsl_check_timestamp(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp);
+
+int kgsl_device_platform_probe(struct kgsl_device *device);
+
+void kgsl_device_platform_remove(struct kgsl_device *device);
+
+const char *kgsl_pwrstate_to_str(unsigned int state);
+
+int kgsl_device_snapshot_init(struct kgsl_device *device);
+int kgsl_device_snapshot(struct kgsl_device *device, int hang);
+void kgsl_device_snapshot_close(struct kgsl_device *device);
+void kgsl_snapshot_save_frozen_objs(struct work_struct *work);
+
+static inline struct kgsl_device_platform_data *
+kgsl_device_get_drvdata(struct kgsl_device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev->parentdev, struct platform_device, dev);
+
+ return pdev->dev.platform_data;
+}
+
+void kgsl_context_destroy(struct kref *kref);
+
+int kgsl_context_init(struct kgsl_device_private *, struct kgsl_context
+ *context);
+int kgsl_context_detach(struct kgsl_context *context);
+
+/**
+ * kgsl_context_put() - Release context reference count
+ * @context: Pointer to the KGSL context to be released
+ *
+ * Reduce the reference count on a KGSL context and destroy it if it is no
+ * longer needed
+ */
+static inline void
+kgsl_context_put(struct kgsl_context *context)
+{
+ if (context)
+ kref_put(&context->refcount, kgsl_context_destroy);
+}
+
+/**
+ * kgsl_context_detached() - check if a context is detached
+ * @context: the context
+ *
+ * Check if a context has been destroyed by userspace and is only waiting
+ * for reference counts to go away. This check is used to weed out
+ * contexts that shouldn't use the gpu so NULL is considered detached.
+ */
+static inline bool kgsl_context_detached(struct kgsl_context *context)
+{
+ return (context == NULL || test_bit(KGSL_CONTEXT_DETACHED,
+ &context->priv));
+}
+
+
+/**
+ * kgsl_context_get() - get a pointer to a KGSL context
+ * @device: Pointer to the KGSL device that owns the context
+ * @id: Context ID
+ *
+ * Find the context associated with the given ID number, increase the reference
+ * count on it and return it. The caller must make sure that this call is
+ * paired with a kgsl_context_put. This function is for internal use because it
+ * doesn't validate the ownership of the context with the calling process - use
+ * kgsl_context_get_owner for that
+ */
+static inline struct kgsl_context *kgsl_context_get(struct kgsl_device *device,
+ uint32_t id)
+{
+ int result = 0;
+ struct kgsl_context *context = NULL;
+
+ read_lock(&device->context_lock);
+
+ context = idr_find(&device->context_idr, id);
+
+ /* Don't return a context that has been detached */
+ if (kgsl_context_detached(context))
+ context = NULL;
+ else
+ result = kref_get_unless_zero(&context->refcount);
+
+ read_unlock(&device->context_lock);
+
+ if (!result)
+ return NULL;
+ return context;
+}
+
+/**
+* _kgsl_context_get() - lightweight function to just increment the ref count
+* @context: Pointer to the KGSL context
+*
+* Get a reference to the specified KGSL context structure. This is a
+* lightweight way to just increase the refcount on a known context rather than
+* walking through kgsl_context_get and searching the iterator
+*/
+static inline int _kgsl_context_get(struct kgsl_context *context)
+{
+ int ret = 0;
+
+ if (context) {
+ ret = kref_get_unless_zero(&context->refcount);
+ /*
+ * We shouldn't realistically fail kref_get_unless_zero unless
+ * we did something really dumb so make the failure both public
+ * and painful
+ */
+
+ WARN_ON(!ret);
+ }
+
+ return ret;
+}
+
+/**
+ * kgsl_context_get_owner() - get a pointer to a KGSL context in a specific
+ * process
+ * @dev_priv: Pointer to the process struct
+ * @id: Context ID to return
+ *
+ * Find the context associated with the given ID number, increase the reference
+ * count on it and return it. The caller must make sure that this call is
+ * paired with a kgsl_context_put. This function validates that the context id
+ * given is owned by the dev_priv instancet that is passed in. see
+ * kgsl_context_get for the internal version that doesn't do the check
+ */
+static inline struct kgsl_context *kgsl_context_get_owner(
+ struct kgsl_device_private *dev_priv, uint32_t id)
+{
+ struct kgsl_context *context;
+
+ context = kgsl_context_get(dev_priv->device, id);
+
+ /* Verify that the context belongs to current calling process. */
+ if (context != NULL && context->pid != dev_priv->process_priv->pid) {
+ kgsl_context_put(context);
+ return NULL;
+ }
+
+ return context;
+}
+
+/**
+ * kgsl_context_cancel_events() - Cancel all events for a context
+ * @device: Pointer to the KGSL device structure for the GPU
+ * @context: Pointer to the KGSL context
+ *
+ * Signal all pending events on the context with KGSL_EVENT_CANCELLED
+ */
+static inline void kgsl_context_cancel_events(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ kgsl_signal_events(device, context, KGSL_EVENT_CANCELLED);
+}
+
+/**
+ * kgsl_context_cancel_events_timestamp() - cancel events for a given timestamp
+ * @device: Pointer to the KGSL device that owns the context
+ * @context: Pointer to the context that owns the event or NULL for global
+ * @timestamp: Timestamp to cancel events for
+ *
+ * Cancel events pending for a specific timestamp
+ */
+static inline void kgsl_cancel_events_timestamp(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp)
+{
+ kgsl_signal_event(device, context, timestamp, KGSL_EVENT_CANCELLED);
+}
+
+void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
+
+void kgsl_cmdbatch_destroy_object(struct kref *kref);
+
+/**
+ * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
+ * @cmdbatch: Pointer to the command batch object
+ */
+static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
+{
+ kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
+}
+
+/**
+ * kgsl_cmdbatch_sync_pending() - return true if the cmdbatch is waiting
+ * @cmdbatch: Pointer to the command batch object to check
+ *
+ * Return non-zero if the specified command batch is still waiting for sync
+ * point dependencies to be satisfied
+ */
+static inline int kgsl_cmdbatch_sync_pending(struct kgsl_cmdbatch *cmdbatch)
+{
+ return list_empty(&cmdbatch->synclist) ? 0 : 1;
+}
+
+#if defined(CONFIG_GPU_TRACEPOINTS)
+
+#include <trace/events/gpu.h>
+
+static inline void kgsl_trace_gpu_job_enqueue(unsigned int ctxt_id,
+ unsigned int timestamp, const char *type)
+{
+ trace_gpu_job_enqueue(ctxt_id, timestamp, type);
+}
+
+static inline void kgsl_trace_gpu_sched_switch(const char *name,
+ u64 time, u32 ctxt_id, s32 prio, u32 timestamp)
+{
+ trace_gpu_sched_switch(name, time, ctxt_id, prio, timestamp);
+}
+
+#else
+
+static inline void kgsl_trace_gpu_job_enqueue(unsigned int ctxt_id,
+ unsigned int timestamp, const char *type)
+{
+}
+
+static inline void kgsl_trace_gpu_sched_switch(const char *name,
+ u64 time, u32 ctxt_id, s32 prio, u32 timestamp)
+{
+}
+
+#endif
+
+#endif /* __KGSL_DEVICE_H */
diff --git a/drivers/gpu/msm2/kgsl_drm.c b/drivers/gpu/msm2/kgsl_drm.c
new file mode 100644
index 0000000..6402bf4
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_drm.c
@@ -0,0 +1,1576 @@
+/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Implements an interface between KGSL and the DRM subsystem. For now this
+ * is pretty simple, but it will take on more of the workload as time goes
+ * on
+ */
+#include "drmP.h"
+#include "drm.h"
+
+#include <linux/msm_ion.h>
+#include <linux/genlock.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+#include "kgsl_drm.h"
+#include "kgsl_mmu.h"
+#include "kgsl_sharedmem.h"
+
+#define DRIVER_AUTHOR "Qualcomm"
+#define DRIVER_NAME "kgsl"
+#define DRIVER_DESC "KGSL DRM"
+#define DRIVER_DATE "20121107"
+
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 1
+
+#define DRM_KGSL_GEM_FLAG_MAPPED (1 << 0)
+
+#define ENTRY_EMPTY -1
+#define ENTRY_NEEDS_CLEANUP -2
+
+#define DRM_KGSL_NOT_INITED -1
+#define DRM_KGSL_INITED 1
+
+#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
+#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
+
+/* Returns true if the memory type is in PMEM */
+
+#ifdef CONFIG_KERNEL_PMEM_SMI_REGION
+#define TYPE_IS_PMEM(_t) \
+ (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
+ ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_SMI) || \
+ ((_t) & DRM_KGSL_GEM_TYPE_PMEM))
+#else
+#define TYPE_IS_PMEM(_t) \
+ (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_EBI) || \
+ ((_t) & (DRM_KGSL_GEM_TYPE_PMEM | DRM_KGSL_GEM_PMEM_EBI)))
+#endif
+
+/* Returns true if the memory type is regular */
+
+#define TYPE_IS_MEM(_t) \
+ (((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM) || \
+ ((_t & DRM_KGSL_GEM_TYPE_MEM_MASK) == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
+ ((_t) & DRM_KGSL_GEM_TYPE_MEM))
+
+#define TYPE_IS_FD(_t) ((_t) & DRM_KGSL_GEM_TYPE_FD_MASK)
+
+/* Returns true if KMEM region is uncached */
+
+#define IS_MEM_UNCACHED(_t) \
+ ((_t == DRM_KGSL_GEM_TYPE_KMEM_NOCACHE) || \
+ (_t == DRM_KGSL_GEM_TYPE_KMEM) || \
+ (TYPE_IS_MEM(_t) && (_t & DRM_KGSL_GEM_CACHE_WCOMBINE)))
+
+struct drm_kgsl_gem_object_wait_list_entry {
+ struct list_head list;
+ int pid;
+ int in_use;
+ wait_queue_head_t process_wait_q;
+};
+
+struct drm_kgsl_gem_object_fence {
+ int32_t fence_id;
+ unsigned int num_buffers;
+ int ts_valid;
+ unsigned int timestamp;
+ int ts_device;
+ int lockpid;
+ struct list_head buffers_in_fence;
+};
+
+struct drm_kgsl_gem_object_fence_list_entry {
+ struct list_head list;
+ int in_use;
+ struct drm_gem_object *gem_obj;
+};
+
+static int32_t fence_id = 0x1;
+
+static struct drm_kgsl_gem_object_fence
+ gem_buf_fence[DRM_KGSL_NUM_FENCE_ENTRIES];
+
+struct drm_kgsl_gem_object {
+ struct drm_gem_object *obj;
+ uint32_t type;
+ struct kgsl_memdesc memdesc;
+ struct kgsl_pagetable *pagetable;
+ struct ion_handle *ion_handle;
+ uint64_t mmap_offset;
+ int bufcount;
+ int flags;
+ struct list_head list;
+ int active;
+
+ struct {
+ uint32_t offset;
+ uint32_t gpuaddr;
+ } bufs[DRM_KGSL_GEM_MAX_BUFFERS];
+
+ struct genlock_handle *glock_handle[DRM_KGSL_GEM_MAX_BUFFERS];
+
+ int bound;
+ int lockpid;
+ /* Put these here to avoid allocing all the time */
+ struct drm_kgsl_gem_object_wait_list_entry
+ wait_entries[DRM_KGSL_HANDLE_WAIT_ENTRIES];
+ /* Each object can only appear in a single fence */
+ struct drm_kgsl_gem_object_fence_list_entry
+ fence_entries[DRM_KGSL_NUM_FENCE_ENTRIES];
+
+ struct list_head wait_list;
+};
+
+static struct ion_client *kgsl_drm_ion_client;
+
+static int kgsl_drm_inited = DRM_KGSL_NOT_INITED;
+
+/* This is a global list of all the memory currently mapped in the MMU */
+static struct list_head kgsl_mem_list;
+
+struct kgsl_drm_device_priv {
+ struct kgsl_device *device[KGSL_DEVICE_MAX];
+ struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
+};
+
+static int
+kgsl_gem_memory_allocated(struct drm_gem_object *obj)
+{
+ struct drm_kgsl_gem_object *priv = obj->driver_private;
+ return priv->memdesc.size ? 1 : 0;
+}
+
+static int
+kgsl_gem_alloc_memory(struct drm_gem_object *obj)
+{
+ struct drm_kgsl_gem_object *priv = obj->driver_private;
+ struct kgsl_mmu *mmu;
+ struct sg_table *sg_table;
+ struct scatterlist *s;
+ int index;
+ int result = 0;
+
+ /* Return if the memory is already allocated */
+
+ if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
+ return 0;
+
+ if (priv->pagetable == NULL) {
+ /* Hard coded to use A2X device for MSM7X27 and MSM8625
+ * Others to use A3X device
+ */
+#if defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM8625)
+ mmu = &kgsl_get_device(KGSL_DEVICE_2D0)->mmu;
+#else
+ mmu = &kgsl_get_device(KGSL_DEVICE_3D0)->mmu;
+#endif
+
+ priv->pagetable = kgsl_mmu_getpagetable(mmu,
+ KGSL_MMU_GLOBAL_PT);
+
+ if (priv->pagetable == NULL) {
+ DRM_ERROR("Unable to get the GPU MMU pagetable\n");
+ return -EINVAL;
+ }
+ }
+
+ if (TYPE_IS_PMEM(priv->type)) {
+ if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
+ priv->type & DRM_KGSL_GEM_PMEM_EBI) {
+ priv->ion_handle = ion_alloc(kgsl_drm_ion_client,
+ obj->size * priv->bufcount, PAGE_SIZE,
+ ION_HEAP(ION_SF_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL(priv->ion_handle)) {
+ DRM_ERROR(
+ "Unable to allocate ION Phys memory handle\n");
+ return -ENOMEM;
+ }
+
+ priv->memdesc.pagetable = priv->pagetable;
+
+ result = ion_phys(kgsl_drm_ion_client,
+ priv->ion_handle, (ion_phys_addr_t *)
+ &priv->memdesc.physaddr, &priv->memdesc.size);
+ if (result) {
+ DRM_ERROR(
+ "Unable to get ION Physical memory address\n");
+ ion_free(kgsl_drm_ion_client,
+ priv->ion_handle);
+ priv->ion_handle = NULL;
+ return result;
+ }
+
+ result = memdesc_sg_phys(&priv->memdesc,
+ priv->memdesc.physaddr, priv->memdesc.size);
+ if (result) {
+ DRM_ERROR(
+ "Unable to get sg list\n");
+ ion_free(kgsl_drm_ion_client,
+ priv->ion_handle);
+ priv->ion_handle = NULL;
+ return result;
+ }
+
+ result = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ if (result) {
+ DRM_ERROR(
+ "kgsl_mmu_map failed. result = %d\n", result);
+ ion_free(kgsl_drm_ion_client,
+ priv->ion_handle);
+ priv->ion_handle = NULL;
+ return result;
+ }
+ }
+ else
+ return -EINVAL;
+
+ } else if (TYPE_IS_MEM(priv->type)) {
+
+ if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
+ priv->type & DRM_KGSL_GEM_CACHE_MASK)
+ list_add(&priv->list, &kgsl_mem_list);
+
+ priv->memdesc.pagetable = priv->pagetable;
+
+ priv->ion_handle = ion_alloc(kgsl_drm_ion_client,
+ obj->size * priv->bufcount, PAGE_SIZE,
+ ION_HEAP(ION_IOMMU_HEAP_ID), 0);
+ if (IS_ERR_OR_NULL(priv->ion_handle)) {
+ DRM_ERROR(
+ "Unable to allocate ION IOMMU memory handle\n");
+ return -ENOMEM;
+ }
+
+ sg_table = ion_sg_table(kgsl_drm_ion_client,
+ priv->ion_handle);
+ if (IS_ERR_OR_NULL(priv->ion_handle)) {
+ DRM_ERROR(
+ "Unable to get ION sg table\n");
+ goto memerr;
+ }
+
+ priv->memdesc.sg = sg_table->sgl;
+
+ /* Calculate the size of the memdesc from the sglist */
+
+ priv->memdesc.sglen = 0;
+
+ for (s = priv->memdesc.sg; s != NULL; s = sg_next(s)) {
+ priv->memdesc.size += s->length;
+ priv->memdesc.sglen++;
+ }
+
+ result = kgsl_mmu_map(priv->pagetable, &priv->memdesc);
+ if (result) {
+ DRM_ERROR(
+ "kgsl_mmu_map failed. result = %d\n", result);
+ goto memerr;
+ }
+
+ } else
+ return -EINVAL;
+
+ for (index = 0; index < priv->bufcount; index++) {
+ priv->bufs[index].offset = index * obj->size;
+ priv->bufs[index].gpuaddr =
+ priv->memdesc.gpuaddr +
+ priv->bufs[index].offset;
+ }
+ priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
+
+
+ return 0;
+
+memerr:
+ ion_free(kgsl_drm_ion_client,
+ priv->ion_handle);
+ priv->ion_handle = NULL;
+ return -ENOMEM;
+
+}
+
+static void
+kgsl_gem_free_memory(struct drm_gem_object *obj)
+{
+ struct drm_kgsl_gem_object *priv = obj->driver_private;
+ int index;
+
+ if (!kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
+ return;
+
+ if (priv->memdesc.gpuaddr)
+ kgsl_mmu_unmap(priv->memdesc.pagetable, &priv->memdesc);
+
+ /* ION will take care of freeing the sg table. */
+ priv->memdesc.sg = NULL;
+ priv->memdesc.sglen = 0;
+
+ if (priv->ion_handle)
+ ion_free(kgsl_drm_ion_client, priv->ion_handle);
+
+ priv->ion_handle = NULL;
+
+ memset(&priv->memdesc, 0, sizeof(priv->memdesc));
+
+ for (index = 0; index < priv->bufcount; index++) {
+ if (priv->glock_handle[index])
+ genlock_put_handle(priv->glock_handle[index]);
+ }
+
+ kgsl_mmu_putpagetable(priv->pagetable);
+ priv->pagetable = NULL;
+
+ if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
+ (priv->type & DRM_KGSL_GEM_CACHE_MASK))
+ list_del(&priv->list);
+
+ priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
+
+}
+
+int
+kgsl_gem_init_object(struct drm_gem_object *obj)
+{
+ struct drm_kgsl_gem_object *priv;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv == NULL) {
+ DRM_ERROR("Unable to create GEM object\n");
+ return -ENOMEM;
+ }
+
+ obj->driver_private = priv;
+ priv->obj = obj;
+
+ return 0;
+}
+
+void
+kgsl_gem_free_object(struct drm_gem_object *obj)
+{
+ kgsl_gem_free_memory(obj);
+ drm_gem_object_release(obj);
+ kfree(obj->driver_private);
+}
+
+int
+kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
+ unsigned long *len)
+{
+ struct file *filp;
+ struct drm_device *dev;
+ struct drm_file *file_priv;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int ret = 0;
+
+ filp = fget(drm_fd);
+ if (unlikely(filp == NULL)) {
+ DRM_ERROR("Unable to get the DRM file descriptor\n");
+ return -EINVAL;
+ }
+ file_priv = filp->private_data;
+ if (unlikely(file_priv == NULL)) {
+ DRM_ERROR("Unable to get the file private data\n");
+ fput(filp);
+ return -EINVAL;
+ }
+ dev = file_priv->minor->dev;
+ if (unlikely(dev == NULL)) {
+ DRM_ERROR("Unable to get the minor device\n");
+ fput(filp);
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (unlikely(obj == NULL)) {
+ DRM_ERROR("Invalid GEM handle %x\n", handle);
+ fput(filp);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ /* We can only use the MDP for PMEM regions */
+
+ if (TYPE_IS_PMEM(priv->type)) {
+ *start = priv->memdesc.physaddr +
+ priv->bufs[priv->active].offset;
+
+ *len = priv->memdesc.size;
+ } else {
+ *start = 0;
+ *len = 0;
+ ret = -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ fput(filp);
+ return ret;
+}
+
+static int
+kgsl_gem_init_obj(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ int *handle)
+{
+ struct drm_kgsl_gem_object *priv;
+ int ret, i;
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ memset(&priv->memdesc, 0, sizeof(priv->memdesc));
+ priv->bufcount = 1;
+ priv->active = 0;
+ priv->bound = 0;
+
+ priv->type = DRM_KGSL_GEM_TYPE_KMEM;
+
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+
+ drm_gem_object_unreference(obj);
+ INIT_LIST_HEAD(&priv->wait_list);
+
+ for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
+ INIT_LIST_HEAD((struct list_head *) &priv->wait_entries[i]);
+ priv->wait_entries[i].pid = 0;
+ init_waitqueue_head(&priv->wait_entries[i].process_wait_q);
+ }
+
+ for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+ INIT_LIST_HEAD((struct list_head *) &priv->fence_entries[i]);
+ priv->fence_entries[i].in_use = 0;
+ priv->fence_entries[i].gem_obj = obj;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int
+kgsl_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_create *create = data;
+ struct drm_gem_object *obj;
+ int ret, handle;
+
+ /* Page align the size so we can allocate multiple buffers */
+ create->size = ALIGN(create->size, 4096);
+
+ obj = drm_gem_object_alloc(dev, create->size);
+
+ if (obj == NULL) {
+ DRM_ERROR("Unable to allocate the GEM object\n");
+ return -ENOMEM;
+ }
+
+ ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
+ if (ret) {
+ drm_gem_object_release(obj);
+ DRM_ERROR("Unable to initialize GEM object ret = %d\n", ret);
+ return ret;
+ }
+
+ create->handle = handle;
+ return 0;
+}
+
+int
+kgsl_gem_create_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_create_fd *args = data;
+ struct file *file;
+ dev_t rdev;
+ struct fb_info *info;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int ret, put_needed, handle;
+
+ file = fget_light(args->fd, &put_needed);
+
+ if (file == NULL) {
+ DRM_ERROR("Unable to get the file object\n");
+ return -EBADF;
+ }
+
+ rdev = file->f_dentry->d_inode->i_rdev;
+
+ /* Only framebuffer objects are supported ATM */
+
+ if (MAJOR(rdev) != FB_MAJOR) {
+ DRM_ERROR("File descriptor is not a framebuffer\n");
+ ret = -EBADF;
+ goto error_fput;
+ }
+
+ info = registered_fb[MINOR(rdev)];
+
+ if (info == NULL) {
+ DRM_ERROR("Framebuffer minor %d is not registered\n",
+ MINOR(rdev));
+ ret = -EBADF;
+ goto error_fput;
+ }
+
+ obj = drm_gem_object_alloc(dev, info->fix.smem_len);
+
+ if (obj == NULL) {
+ DRM_ERROR("Unable to allocate GEM object\n");
+ ret = -ENOMEM;
+ goto error_fput;
+ }
+
+ ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
+
+ if (ret)
+ goto error_fput;
+
+ mutex_lock(&dev->struct_mutex);
+
+ priv = obj->driver_private;
+ priv->memdesc.physaddr = info->fix.smem_start;
+ priv->type = DRM_KGSL_GEM_TYPE_FD_FBMEM;
+
+ mutex_unlock(&dev->struct_mutex);
+ args->handle = handle;
+
+error_fput:
+ fput_light(file, put_needed);
+
+ return ret;
+}
+
+int
+kgsl_gem_create_from_ion_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_create_from_ion *args = data;
+ struct drm_gem_object *obj;
+ struct ion_handle *ion_handle;
+ struct drm_kgsl_gem_object *priv;
+ struct sg_table *sg_table;
+ struct scatterlist *s;
+ int ret, handle;
+ unsigned long size;
+ struct kgsl_mmu *mmu;
+
+ ion_handle = ion_import_dma_buf(kgsl_drm_ion_client, args->ion_fd);
+ if (IS_ERR_OR_NULL(ion_handle)) {
+ DRM_ERROR("Unable to import dmabuf. Error number = %d\n",
+ (int)PTR_ERR(ion_handle));
+ return -EINVAL;
+ }
+
+ ion_handle_get_size(kgsl_drm_ion_client, ion_handle, &size);
+
+ if (size == 0) {
+ ion_free(kgsl_drm_ion_client, ion_handle);
+ DRM_ERROR(
+ "cannot create GEM object from zero size ION buffer\n");
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_alloc(dev, size);
+
+ if (obj == NULL) {
+ ion_free(kgsl_drm_ion_client, ion_handle);
+ DRM_ERROR("Unable to allocate the GEM object\n");
+ return -ENOMEM;
+ }
+
+ ret = kgsl_gem_init_obj(dev, file_priv, obj, &handle);
+ if (ret) {
+ ion_free(kgsl_drm_ion_client, ion_handle);
+ drm_gem_object_release(obj);
+ DRM_ERROR("Unable to initialize GEM object ret = %d\n", ret);
+ return ret;
+ }
+
+ priv = obj->driver_private;
+ priv->ion_handle = ion_handle;
+
+ priv->type = DRM_KGSL_GEM_TYPE_KMEM;
+ list_add(&priv->list, &kgsl_mem_list);
+
+#if defined(CONFIG_ARCH_MSM7X27) || defined(CONFIG_ARCH_MSM8625)
+ mmu = &kgsl_get_device(KGSL_DEVICE_2D0)->mmu;
+#else
+ mmu = &kgsl_get_device(KGSL_DEVICE_3D0)->mmu;
+#endif
+
+ priv->pagetable = kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);
+
+ priv->memdesc.pagetable = priv->pagetable;
+
+ sg_table = ion_sg_table(kgsl_drm_ion_client,
+ priv->ion_handle);
+ if (IS_ERR_OR_NULL(priv->ion_handle)) {
+ DRM_ERROR("Unable to get ION sg table\n");
+ ion_free(kgsl_drm_ion_client,
+ priv->ion_handle);
+ priv->ion_handle = NULL;
+ kgsl_mmu_putpagetable(priv->pagetable);
+ drm_gem_object_release(obj);
+ kfree(priv);
+ return -ENOMEM;
+ }
+
+ priv->memdesc.sg = sg_table->sgl;
+
+ /* Calculate the size of the memdesc from the sglist */
+
+ priv->memdesc.sglen = 0;
+
+ for (s = priv->memdesc.sg; s != NULL; s = sg_next(s)) {
+ priv->memdesc.size += s->length;
+ priv->memdesc.sglen++;
+ }
+
+ ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc);
+ if (ret) {
+ DRM_ERROR("kgsl_mmu_map failed. ret = %d\n", ret);
+ ion_free(kgsl_drm_ion_client,
+ priv->ion_handle);
+ priv->ion_handle = NULL;
+ kgsl_mmu_putpagetable(priv->pagetable);
+ drm_gem_object_release(obj);
+ kfree(priv);
+ return -ENOMEM;
+ }
+
+ priv->bufs[0].offset = 0;
+ priv->bufs[0].gpuaddr = priv->memdesc.gpuaddr;
+ priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
+
+ args->handle = handle;
+ return 0;
+}
+
+int
+kgsl_gem_get_ion_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_get_ion_fd *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int ret = 0;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ if (TYPE_IS_FD(priv->type))
+ ret = -EINVAL;
+ else if (TYPE_IS_PMEM(priv->type) || TYPE_IS_MEM(priv->type)) {
+ if (priv->ion_handle) {
+ args->ion_fd = ion_share_dma_buf_fd(
+ kgsl_drm_ion_client, priv->ion_handle);
+ if (args->ion_fd < 0) {
+ DRM_ERROR(
+ "Could not share ion buffer. Error = %d\n",
+ args->ion_fd);
+ ret = -EINVAL;
+ }
+ } else {
+ DRM_ERROR("GEM object has no ion memory allocated.\n");
+ ret = -EINVAL;
+ }
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+kgsl_gem_setmemtype_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_memtype *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int ret = 0;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ if (TYPE_IS_FD(priv->type))
+ ret = -EINVAL;
+ else {
+ if (TYPE_IS_PMEM(args->type) || TYPE_IS_MEM(args->type))
+ priv->type = args->type;
+ else
+ ret = -EINVAL;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+kgsl_gem_getmemtype_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_memtype *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ args->type = priv->type;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int
+kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return 0;
+}
+
+int
+kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return 0;
+}
+
+/* Allocate the memory and prepare it for CPU mapping */
+
+int
+kgsl_gem_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_alloc *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ ret = kgsl_gem_alloc_memory(obj);
+
+ if (ret) {
+ DRM_ERROR("Unable to allocate object memory\n");
+ }
+
+ args->offset = 0;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+kgsl_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* Ion is used for mmap at this time */
+ return 0;
+}
+
+/* This function is deprecated */
+
+int
+kgsl_gem_prep_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_prep *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ ret = kgsl_gem_alloc_memory(obj);
+ if (ret) {
+ DRM_ERROR("Unable to allocate object memory\n");
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int
+kgsl_gem_get_bufinfo_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_bufinfo *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int ret = -EINVAL;
+ int index;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ if (!kgsl_gem_memory_allocated(obj)) {
+ DRM_ERROR("Memory not allocated for this object\n");
+ goto out;
+ }
+
+ for (index = 0; index < priv->bufcount; index++) {
+ args->offset[index] = priv->bufs[index].offset;
+ args->gpuaddr[index] = priv->bufs[index].gpuaddr;
+ }
+
+ args->count = priv->bufcount;
+ args->active = priv->active;
+
+ ret = 0;
+
+out:
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/* Get the genlock handles base off the GEM handle
+ */
+
+int
+kgsl_gem_get_glock_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_glockinfo *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int index;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ for (index = 0; index < priv->bufcount; index++) {
+ args->glockhandle[index] = genlock_get_fd_handle(
+ priv->glock_handle[index]);
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int
+kgsl_gem_set_glock_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_glockinfo *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int index;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ for (index = 0; index < priv->bufcount; index++) {
+ priv->glock_handle[index] = genlock_get_handle_fd(
+ args->glockhandle[index]);
+ }
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int
+kgsl_gem_set_bufcount_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_bufcount *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int ret = -EINVAL;
+
+ if (args->bufcount < 1 || args->bufcount > DRM_KGSL_GEM_MAX_BUFFERS)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ /* It is too much math to worry about what happens if we are already
+ allocated, so just bail if we are */
+
+ if (kgsl_gem_memory_allocated(obj)) {
+ DRM_ERROR("Memory already allocated - cannot change"
+ "number of buffers\n");
+ goto out;
+ }
+
+ priv->bufcount = args->bufcount;
+ ret = 0;
+
+out:
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int
+kgsl_gem_get_bufcount_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_bufcount *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ args->bufcount = priv->bufcount;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+int
+kgsl_gem_set_active_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_active *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ int ret = -EINVAL;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", args->handle);
+ return -EBADF;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ priv = obj->driver_private;
+
+ if (args->active < 0 || args->active >= priv->bufcount) {
+ DRM_ERROR("Invalid active buffer %d\n", args->active);
+ goto out;
+ }
+
+ priv->active = args->active;
+ ret = 0;
+
+out:
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
+ struct drm_kgsl_gem_object *priv;
+ unsigned long offset;
+ struct page *page;
+ int i;
+
+ mutex_lock(&dev->struct_mutex);
+
+ priv = obj->driver_private;
+
+ offset = (unsigned long) vmf->virtual_address - vma->vm_start;
+ i = offset >> PAGE_SHIFT;
+ page = sg_page(&(priv->memdesc.sg[i]));
+
+ if (!page) {
+ mutex_unlock(&dev->struct_mutex);
+ return VM_FAULT_SIGBUS;
+ }
+
+ get_page(page);
+ vmf->page = page;
+
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+int kgsl_gem_phys_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
+ struct drm_kgsl_gem_object *priv;
+ unsigned long offset, pfn;
+ int ret = 0;
+
+ offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
+ PAGE_SHIFT;
+
+ mutex_lock(&dev->struct_mutex);
+
+ priv = obj->driver_private;
+
+ pfn = (priv->memdesc.physaddr >> PAGE_SHIFT) + offset;
+ ret = vm_insert_pfn(vma,
+ (unsigned long) vmf->virtual_address, pfn);
+ mutex_unlock(&dev->struct_mutex);
+
+ switch (ret) {
+ case -ENOMEM:
+ case -EAGAIN:
+ return VM_FAULT_OOM;
+ case -EFAULT:
+ return VM_FAULT_SIGBUS;
+ default:
+ return VM_FAULT_NOPAGE;
+ }
+}
+
+void
+cleanup_fence(struct drm_kgsl_gem_object_fence *fence, int check_waiting)
+{
+ int j;
+ struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
+ struct drm_kgsl_gem_object *unlock_obj;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object_wait_list_entry *lock_next;
+
+ fence->ts_valid = 0;
+ fence->timestamp = -1;
+ fence->ts_device = -1;
+
+ /* Walk the list of buffers in this fence and clean up the */
+ /* references. Note that this can cause memory allocations */
+ /* to be freed */
+ for (j = fence->num_buffers; j > 0; j--) {
+ this_fence_entry =
+ (struct drm_kgsl_gem_object_fence_list_entry *)
+ fence->buffers_in_fence.prev;
+
+ this_fence_entry->in_use = 0;
+ obj = this_fence_entry->gem_obj;
+ unlock_obj = obj->driver_private;
+
+ /* Delete it from the list */
+
+ list_del(&this_fence_entry->list);
+
+ /* we are unlocking - see if there are other pids waiting */
+ if (check_waiting) {
+ if (!list_empty(&unlock_obj->wait_list)) {
+ lock_next =
+ (struct drm_kgsl_gem_object_wait_list_entry *)
+ unlock_obj->wait_list.prev;
+
+ list_del((struct list_head *)&lock_next->list);
+
+ unlock_obj->lockpid = 0;
+ wake_up_interruptible(
+ &lock_next->process_wait_q);
+ lock_next->pid = 0;
+
+ } else {
+ /* List is empty so set pid to 0 */
+ unlock_obj->lockpid = 0;
+ }
+ }
+
+ drm_gem_object_unreference(obj);
+ }
+ /* here all the buffers in the fence are released */
+ /* clear the fence entry */
+ fence->fence_id = ENTRY_EMPTY;
+}
+
+int
+find_empty_fence(void)
+{
+ int i;
+
+ for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+ if (gem_buf_fence[i].fence_id == ENTRY_EMPTY) {
+ gem_buf_fence[i].fence_id = fence_id++;
+ gem_buf_fence[i].ts_valid = 0;
+ INIT_LIST_HEAD(&(gem_buf_fence[i].buffers_in_fence));
+ if (fence_id == 0xFFFFFFF0)
+ fence_id = 1;
+ return i;
+ } else {
+
+ /* Look for entries to be cleaned up */
+ if (gem_buf_fence[i].fence_id == ENTRY_NEEDS_CLEANUP)
+ cleanup_fence(&gem_buf_fence[i], 0);
+ }
+ }
+
+ return ENTRY_EMPTY;
+}
+
+int
+find_fence(int index)
+{
+ int i;
+
+ for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+ if (gem_buf_fence[i].fence_id == index)
+ return i;
+ }
+
+ return ENTRY_EMPTY;
+}
+
+void
+wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
+{
+ struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
+ struct drm_kgsl_gem_object_wait_list_entry *lock_next;
+ struct drm_kgsl_gem_object *unlock_obj;
+ struct drm_gem_object *obj;
+
+ /* TS has expired when we get here */
+ fence->ts_valid = 0;
+ fence->timestamp = -1;
+ fence->ts_device = -1;
+
+ list_for_each_entry(this_fence_entry, &fence->buffers_in_fence, list) {
+ obj = this_fence_entry->gem_obj;
+ unlock_obj = obj->driver_private;
+
+ if (!list_empty(&unlock_obj->wait_list)) {
+ lock_next =
+ (struct drm_kgsl_gem_object_wait_list_entry *)
+ unlock_obj->wait_list.prev;
+
+ /* Unblock the pid */
+ lock_next->pid = 0;
+
+ /* Delete it from the list */
+ list_del((struct list_head *)&lock_next->list);
+
+ unlock_obj->lockpid = 0;
+ wake_up_interruptible(&lock_next->process_wait_q);
+
+ } else {
+ /* List is empty so set pid to 0 */
+ unlock_obj->lockpid = 0;
+ }
+ }
+ fence->fence_id = ENTRY_NEEDS_CLEANUP; /* Mark it as needing cleanup */
+}
+
+int
+kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ /* The purpose of this function is to lock a given set of handles. */
+ /* The driver will maintain a list of locked handles. */
+ /* If a request comes in for a handle that's locked the thread will */
+ /* block until it's no longer in use. */
+
+ struct drm_kgsl_gem_lock_handles *args = data;
+ struct drm_gem_object *obj;
+ struct drm_kgsl_gem_object *priv;
+ struct drm_kgsl_gem_object_fence_list_entry *this_fence_entry = NULL;
+ struct drm_kgsl_gem_object_fence *fence;
+ struct drm_kgsl_gem_object_wait_list_entry *lock_item;
+ int i, j;
+ int result = 0;
+ uint32_t *lock_list;
+ uint32_t *work_list = NULL;
+ int32_t fence_index;
+
+ /* copy in the data from user space */
+ lock_list = kzalloc(sizeof(uint32_t) * args->num_handles, GFP_KERNEL);
+ if (!lock_list) {
+ DRM_ERROR("Unable allocate memory for lock list\n");
+ result = -ENOMEM;
+ goto error;
+ }
+
+ if (copy_from_user(lock_list, args->handle_list,
+ sizeof(uint32_t) * args->num_handles)) {
+ DRM_ERROR("Unable to copy the lock list from the user\n");
+ result = -EFAULT;
+ goto free_handle_list;
+ }
+
+
+ work_list = lock_list;
+ mutex_lock(&dev->struct_mutex);
+
+ /* build the fence for this group of handles */
+ fence_index = find_empty_fence();
+ if (fence_index == ENTRY_EMPTY) {
+ DRM_ERROR("Unable to find a empty fence\n");
+ args->lock_id = 0xDEADBEEF;
+ result = -EFAULT;
+ goto out_unlock;
+ }
+
+ fence = &gem_buf_fence[fence_index];
+ gem_buf_fence[fence_index].num_buffers = args->num_handles;
+ args->lock_id = gem_buf_fence[fence_index].fence_id;
+
+ for (j = args->num_handles; j > 0; j--, lock_list++) {
+ obj = drm_gem_object_lookup(dev, file_priv, *lock_list);
+
+ if (obj == NULL) {
+ DRM_ERROR("Invalid GEM handle %x\n", *lock_list);
+ result = -EBADF;
+ goto out_unlock;
+ }
+
+ priv = obj->driver_private;
+ this_fence_entry = NULL;
+
+ /* get a fence entry to hook into the fence */
+ for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+ if (!priv->fence_entries[i].in_use) {
+ this_fence_entry = &priv->fence_entries[i];
+ this_fence_entry->in_use = 1;
+ break;
+ }
+ }
+
+ if (this_fence_entry == NULL) {
+ fence->num_buffers = 0;
+ fence->fence_id = ENTRY_EMPTY;
+ args->lock_id = 0xDEADBEAD;
+ result = -EFAULT;
+ drm_gem_object_unreference(obj);
+ goto out_unlock;
+ }
+
+ /* We're trying to lock - add to a fence */
+ list_add((struct list_head *)this_fence_entry,
+ &gem_buf_fence[fence_index].buffers_in_fence);
+ if (priv->lockpid) {
+
+ if (priv->lockpid == args->pid) {
+ /* now that things are running async this */
+ /* happens when an op isn't done */
+ /* so it's already locked by the calling pid */
+ continue;
+ }
+
+
+ /* if a pid already had it locked */
+ /* create and add to wait list */
+ for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
+ if (priv->wait_entries[i].in_use == 0) {
+ /* this one is empty */
+ lock_item = &priv->wait_entries[i];
+ lock_item->in_use = 1;
+ lock_item->pid = args->pid;
+ INIT_LIST_HEAD((struct list_head *)
+ &priv->wait_entries[i]);
+ break;
+ }
+ }
+
+ if (i == DRM_KGSL_HANDLE_WAIT_ENTRIES) {
+
+ result = -EFAULT;
+ drm_gem_object_unreference(obj);
+ goto out_unlock;
+ }
+
+ list_add_tail((struct list_head *)&lock_item->list,
+ &priv->wait_list);
+ mutex_unlock(&dev->struct_mutex);
+ /* here we need to block */
+ wait_event_interruptible_timeout(
+ priv->wait_entries[i].process_wait_q,
+ (priv->lockpid == 0),
+ msecs_to_jiffies(64));
+ mutex_lock(&dev->struct_mutex);
+ lock_item->in_use = 0;
+ }
+
+ /* Getting here means no one currently holds the lock */
+ priv->lockpid = args->pid;
+
+ args->lock_id = gem_buf_fence[fence_index].fence_id;
+ }
+ fence->lockpid = args->pid;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+free_handle_list:
+ kfree(work_list);
+
+error:
+ return result;
+}
+
+int
+kgsl_gem_unlock_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_unlock_handles *args = data;
+ int result = 0;
+ int32_t fence_index;
+
+ mutex_lock(&dev->struct_mutex);
+ fence_index = find_fence(args->lock_id);
+ if (fence_index == ENTRY_EMPTY) {
+ DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
+ result = -EFAULT;
+ goto out_unlock;
+ }
+
+ cleanup_fence(&gem_buf_fence[fence_index], 1);
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+ return result;
+}
+
+
+int
+kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_kgsl_gem_unlock_on_ts *args = data;
+ int result = 0;
+ int ts_done = 0;
+ int32_t fence_index, ts_device;
+ struct drm_kgsl_gem_object_fence *fence;
+ struct kgsl_device *device;
+
+ if (args->type == DRM_KGSL_GEM_TS_3D)
+ ts_device = KGSL_DEVICE_3D0;
+ else if (args->type == DRM_KGSL_GEM_TS_2D)
+ ts_device = KGSL_DEVICE_2D0;
+ else {
+ result = -EINVAL;
+ goto error;
+ }
+
+ device = kgsl_get_device(ts_device);
+ ts_done = kgsl_check_timestamp(device, NULL, args->timestamp);
+
+ mutex_lock(&dev->struct_mutex);
+
+ fence_index = find_fence(args->lock_id);
+ if (fence_index == ENTRY_EMPTY) {
+ DRM_ERROR("Invalid lock ID: %x\n", args->lock_id);
+ result = -EFAULT;
+ goto out_unlock;
+ }
+
+ fence = &gem_buf_fence[fence_index];
+ fence->ts_device = ts_device;
+
+ if (!ts_done)
+ fence->ts_valid = 1;
+ else
+ cleanup_fence(fence, 1);
+
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+
+error:
+ return result;
+}
+
+struct drm_ioctl_desc kgsl_drm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE, kgsl_gem_create_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_PREP, kgsl_gem_prep_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_SETMEMTYPE, kgsl_gem_setmemtype_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_GETMEMTYPE, kgsl_gem_getmemtype_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_BIND_GPU, kgsl_gem_bind_gpu_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_UNBIND_GPU, kgsl_gem_unbind_gpu_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_ALLOC, kgsl_gem_alloc_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_MMAP, kgsl_gem_mmap_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFINFO, kgsl_gem_get_bufinfo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_ION_FD, kgsl_gem_get_ion_fd_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FROM_ION,
+ kgsl_gem_create_from_ion_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_BUFCOUNT,
+ kgsl_gem_set_bufcount_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_BUFCOUNT,
+ kgsl_gem_get_bufcount_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_GLOCK_HANDLES_INFO,
+ kgsl_gem_set_glock_handles_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_GET_GLOCK_HANDLES_INFO,
+ kgsl_gem_get_glock_handles_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_SET_ACTIVE, kgsl_gem_set_active_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_LOCK_HANDLE,
+ kgsl_gem_lock_handle_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_HANDLE,
+ kgsl_gem_unlock_handle_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_UNLOCK_ON_TS,
+ kgsl_gem_unlock_on_ts_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(KGSL_GEM_CREATE_FD, kgsl_gem_create_fd_ioctl,
+ DRM_MASTER),
+};
+
+static const struct file_operations kgsl_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_GEM,
+ .gem_init_object = kgsl_gem_init_object,
+ .gem_free_object = kgsl_gem_free_object,
+ .ioctls = kgsl_drm_ioctls,
+ .fops = &kgsl_drm_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+int kgsl_drm_init(struct platform_device *dev)
+{
+ int i;
+
+ /* Only initialize once */
+ if (kgsl_drm_inited == DRM_KGSL_INITED)
+ return 0;
+
+ kgsl_drm_inited = DRM_KGSL_INITED;
+
+ driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
+
+ INIT_LIST_HEAD(&kgsl_mem_list);
+
+ for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
+ gem_buf_fence[i].num_buffers = 0;
+ gem_buf_fence[i].ts_valid = 0;
+ gem_buf_fence[i].fence_id = ENTRY_EMPTY;
+ }
+
+ /* Create ION Client */
+ kgsl_drm_ion_client = msm_ion_client_create(
+ 0xffffffff, "kgsl_drm");
+ if (!kgsl_drm_ion_client) {
+ DRM_ERROR("Unable to create ION client\n");
+ return -ENOMEM;
+ }
+
+ return drm_platform_init(&driver, dev);
+}
+
+void kgsl_drm_exit(void)
+{
+ kgsl_drm_inited = DRM_KGSL_NOT_INITED;
+
+ if (kgsl_drm_ion_client)
+ ion_client_destroy(kgsl_drm_ion_client);
+ kgsl_drm_ion_client = NULL;
+
+ drm_platform_exit(&driver, driver.kdriver.platform_device);
+}
diff --git a/drivers/gpu/msm2/kgsl_events.c b/drivers/gpu/msm2/kgsl_events.c
new file mode 100644
index 0000000..277eae0
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_events.c
@@ -0,0 +1,414 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <kgsl_device.h>
+
+#include "kgsl_trace.h"
+#include "adreno.h"
+
+static inline struct list_head *_get_list_head(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ return (context) ? &context->events : &device->events;
+}
+
+static void _add_event_to_list(struct list_head *head, struct kgsl_event *event)
+{
+ struct list_head *n;
+
+ for (n = head->next; n != head; n = n->next) {
+ struct kgsl_event *e =
+ list_entry(n, struct kgsl_event, list);
+
+ if (timestamp_cmp(e->timestamp, event->timestamp) > 0) {
+ list_add(&event->list, n->prev);
+ break;
+ }
+ }
+
+ if (n == head)
+ list_add_tail(&event->list, head);
+}
+
+static inline void _do_signal_event(struct kgsl_device *device,
+ struct kgsl_event *event, unsigned int timestamp,
+ unsigned int type)
+{
+ int id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
+
+ trace_kgsl_fire_event(id, timestamp, type, jiffies - event->created,
+ event->func);
+
+ if (event->func)
+ event->func(device, event->priv, id, timestamp, type);
+
+ list_del(&event->list);
+ kgsl_context_put(event->context);
+ kfree(event);
+
+ kgsl_active_count_put(device);
+}
+
+static void _retire_events(struct kgsl_device *device,
+ struct list_head *head, unsigned int timestamp)
+{
+ struct kgsl_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, head, list) {
+ if (timestamp_cmp(timestamp, event->timestamp) < 0)
+ break;
+
+ _do_signal_event(device, event, event->timestamp,
+ KGSL_EVENT_TIMESTAMP_RETIRED);
+ }
+}
+
+static struct kgsl_event *_find_event(struct kgsl_device *device,
+ struct list_head *head, unsigned int timestamp,
+ kgsl_event_func func, void *priv)
+{
+ struct kgsl_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, head, list) {
+ if (timestamp == event->timestamp && func == event->func &&
+ event->priv == priv)
+ return event;
+ }
+
+ return NULL;
+}
+
+/**
+ * _signal_event() - send a signal to a specific event in the list
+ * @device: Pointer to the KGSL device struct
+ * @head: Pointer to the event list to process
+ * @timestamp: timestamp of the event to signal
+ * @cur: timestamp value to send to the callback
+ * @type: Signal ID to send to the callback
+ *
+ * Send the specified signal to the events in the list with the specified
+ * timestamp. The timestamp 'cur' is sent to the callback so it knows
+ * when the signal was delivered
+ */
+static void _signal_event(struct kgsl_device *device,
+ struct list_head *head, unsigned int timestamp,
+ unsigned int cur, unsigned int type)
+{
+ struct kgsl_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, head, list) {
+ if (timestamp_cmp(timestamp, event->timestamp) == 0)
+ _do_signal_event(device, event, cur, type);
+ }
+}
+
+/**
+ * _signal_events() - send a signal to all the events in a list
+ * @device: Pointer to the KGSL device struct
+ * @head: Pointer to the event list to process
+ * @timestamp: Timestamp to pass to the events (this should be the current
+ * timestamp when the signal is sent)
+ * @type: Signal ID to send to the callback
+ *
+ * Send the specified signal to all the events in the list and destroy them
+ */
+static void _signal_events(struct kgsl_device *device,
+ struct list_head *head, uint32_t timestamp,
+ unsigned int type)
+{
+ struct kgsl_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, head, list)
+ _do_signal_event(device, event, timestamp, type);
+
+}
+
+/**
+ * kgsl_signal_event() - send a signal to a specific event in the context
+ * @device: Pointer to the KGSL device struct
+ * @context: Pointer to the KGSL context
+ * @timestamp: Timestamp of the event to signal
+ * @type: Signal ID to send to the callback
+ *
+ * Send the specified signal to all the events in the context with the given
+ * timestamp
+ */
+void kgsl_signal_event(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp,
+ unsigned int type)
+{
+ struct list_head *head = _get_list_head(device, context);
+ uint32_t cur;
+
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ cur = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
+ _signal_event(device, head, timestamp, cur, type);
+
+ if (context && list_empty(&context->events))
+ list_del_init(&context->events_list);
+}
+EXPORT_SYMBOL(kgsl_signal_event);
+
+/**
+ * kgsl_signal_events() - send a signal to all events in the context
+ * @device: Pointer to the KGSL device struct
+ * @context: Pointer to the KGSL context
+ * @type: Signal ID to send to the callback function
+ *
+ * Send the specified signal to all the events in the context
+ */
+void kgsl_signal_events(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int type)
+{
+ struct list_head *head = _get_list_head(device, context);
+ uint32_t cur;
+
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ /*
+ * Send the current timestamp to the callback so it knows when the
+ * signal occured
+ */
+
+ cur = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
+
+ _signal_events(device, head, cur, type);
+
+ /*
+ * Remove the context from the master list since we know everything on
+ * it has been removed
+ */
+
+ if (context)
+ list_del_init(&context->events_list);
+}
+EXPORT_SYMBOL(kgsl_signal_events);
+
+/**
+ * kgsl_add_event - Add a new timstamp event for the KGSL device
+ * @device - KGSL device for the new event
+ * @id - the context ID that the event should be added to
+ * @ts - the timestamp to trigger the event on
+ * @func - callback function to call when the timestamp expires
+ * @priv - private data for the specific event type
+ * @owner - driver instance that owns this event
+ *
+ * @returns - 0 on success or error code on failure
+ */
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+ kgsl_event_func func, void *priv, void *owner)
+{
+ int ret;
+ struct kgsl_event *event;
+ unsigned int cur_ts;
+ struct kgsl_context *context = NULL;
+ struct adreno_context *drawctxt;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (func == NULL)
+ return -EINVAL;
+
+ if (id != KGSL_MEMSTORE_GLOBAL) {
+ context = kgsl_context_get(device, id);
+ if (context == NULL)
+ return -EINVAL;
+ /* Do not allow registering of event with invalid timestamp */
+ drawctxt = ADRENO_CONTEXT(context);
+ if (timestamp_cmp(ts, drawctxt->timestamp) > 0) {
+ kgsl_context_put(context);
+ return -EINVAL;
+ }
+ } else {
+ if (timestamp_cmp(ts, adreno_dev->ringbuffer.global_ts) > 0)
+ return -EINVAL;
+ }
+ cur_ts = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
+
+ /*
+ * Check to see if the requested timestamp has already fired. If it
+ * did do the callback right away. Make sure to send the timestamp that
+ * the event expected instead of the current timestamp because sometimes
+ * the event handlers can get confused.
+ */
+
+ if (timestamp_cmp(cur_ts, ts) >= 0) {
+ trace_kgsl_fire_event(id, cur_ts, ts, 0, func);
+
+ func(device, priv, id, ts, KGSL_EVENT_TIMESTAMP_RETIRED);
+ kgsl_context_put(context);
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ return 0;
+ }
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event == NULL) {
+ kgsl_context_put(context);
+ return -ENOMEM;
+ }
+
+ /*
+ * Increase the active count on the device to avoid going into power
+ * saving modes while events are pending
+ */
+ ret = kgsl_active_count_get(device);
+ if (ret < 0) {
+ kgsl_context_put(context);
+ kfree(event);
+ return ret;
+ }
+
+ event->context = context;
+ event->timestamp = ts;
+ event->priv = priv;
+ event->func = func;
+ event->owner = owner;
+ event->created = jiffies;
+
+ trace_kgsl_register_event(id, ts, func);
+
+ /* Add the event to either the owning context or the global list */
+
+ if (context) {
+ _add_event_to_list(&context->events, event);
+
+ /*
+ * Add it to the master list of contexts with pending events if
+ * it isn't already there
+ */
+
+ if (list_empty(&context->events_list))
+ list_add_tail(&context->events_list,
+ &device->events_pending_list);
+
+ } else
+ _add_event_to_list(&device->events, event);
+
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_add_event);
+
+/**
+ * kgsl_cancel_events() - Cancel all global events owned by a process
+ * @device: Pointer to the KGSL device struct
+ * @owner: driver instance that owns the events to cancel
+ *
+ * Cancel all global events that match the owner pointer
+ */
+void kgsl_cancel_events(struct kgsl_device *device, void *owner)
+{
+ struct kgsl_event *event, *event_tmp;
+ unsigned int cur;
+
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ cur = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+
+ list_for_each_entry_safe(event, event_tmp, &device->events, list) {
+ if (event->owner != owner)
+ continue;
+
+ _do_signal_event(device, event, cur, KGSL_EVENT_CANCELLED);
+ }
+}
+EXPORT_SYMBOL(kgsl_cancel_events);
+
+/**
+ * kgsl_cancel_event() - send a cancel signal to a specific event
+ * @device: Pointer to the KGSL device struct
+ * @context: Pointer to the KGSL context
+ * @timestamp: Timestamp of the event to cancel
+ * @func: Callback function of the event - this is used to match the actual
+ * event
+ * @priv: Private data for the callback function - this is used to match to the
+ * actual event
+ *
+ * Send the a cancel signal to a specific event that matches all the parameters
+ */
+
+void kgsl_cancel_event(struct kgsl_device *device, struct kgsl_context *context,
+ unsigned int timestamp, kgsl_event_func func,
+ void *priv)
+{
+ struct kgsl_event *event;
+ struct list_head *head = _get_list_head(device, context);
+
+ event = _find_event(device, head, timestamp, func, priv);
+
+ if (event) {
+ unsigned int cur = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ _do_signal_event(device, event, cur, KGSL_EVENT_CANCELLED);
+ }
+}
+EXPORT_SYMBOL(kgsl_cancel_event);
+
+static int kgsl_process_context_events(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ unsigned int timestamp = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ _retire_events(device, &context->events, timestamp);
+
+ /*
+ * Return 0 if the list is empty so the calling function can remove the
+ * context from the pending list
+ */
+
+ return list_empty(&context->events) ? 0 : 1;
+}
+
+void kgsl_process_events(struct work_struct *work)
+{
+ struct kgsl_device *device = container_of(work, struct kgsl_device,
+ ts_expired_ws);
+ struct kgsl_context *context, *tmp;
+ uint32_t timestamp;
+
+ mutex_lock(&device->mutex);
+
+ timestamp = kgsl_readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED);
+ _retire_events(device, &device->events, timestamp);
+
+ /* Now process all of the pending contexts */
+ list_for_each_entry_safe(context, tmp, &device->events_pending_list,
+ events_list) {
+
+ /*
+ * Increment the refcount to make sure that the list_del_init
+ * is called with a valid context's list
+ */
+ if (_kgsl_context_get(context)) {
+ /*
+ * If kgsl_timestamp_expired_context returns 0 then it
+ * no longer has any pending events and can be removed
+ * from the list
+ */
+
+ if (kgsl_process_context_events(device, context) == 0)
+ list_del_init(&context->events_list);
+ kgsl_context_put(context);
+ }
+ }
+
+ mutex_unlock(&device->mutex);
+}
+EXPORT_SYMBOL(kgsl_process_events);
diff --git a/drivers/gpu/msm2/kgsl_gpummu.c b/drivers/gpu/msm2/kgsl_gpummu.c
new file mode 100644
index 0000000..1a1e2e3
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_gpummu.c
@@ -0,0 +1,774 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_gpummu.h"
+#include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_trace.h"
+
+#define KGSL_PAGETABLE_SIZE \
+ ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
+ KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
+
+static ssize_t
+sysfs_show_ptpool_entries(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
+ kgsl_driver.ptpool;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
+}
+
+static ssize_t
+sysfs_show_ptpool_min(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
+ kgsl_driver.ptpool;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pool->static_entries);
+}
+
+static ssize_t
+sysfs_show_ptpool_chunks(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
+ kgsl_driver.ptpool;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
+}
+
+static ssize_t
+sysfs_show_ptpool_ptsize(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
+ kgsl_driver.ptpool;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
+}
+
+static struct kobj_attribute attr_ptpool_entries = {
+ .attr = { .name = "ptpool_entries", .mode = 0444 },
+ .show = sysfs_show_ptpool_entries,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_min = {
+ .attr = { .name = "ptpool_min", .mode = 0444 },
+ .show = sysfs_show_ptpool_min,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_chunks = {
+ .attr = { .name = "ptpool_chunks", .mode = 0444 },
+ .show = sysfs_show_ptpool_chunks,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_ptsize = {
+ .attr = { .name = "ptpool_ptsize", .mode = 0444 },
+ .show = sysfs_show_ptpool_ptsize,
+ .store = NULL,
+};
+
+static struct attribute *ptpool_attrs[] = {
+ &attr_ptpool_entries.attr,
+ &attr_ptpool_min.attr,
+ &attr_ptpool_chunks.attr,
+ &attr_ptpool_ptsize.attr,
+ NULL,
+};
+
+static struct attribute_group ptpool_attr_group = {
+ .attrs = ptpool_attrs,
+};
+
+static int
+_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
+{
+ struct kgsl_ptpool_chunk *chunk;
+ size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);
+
+ BUG_ON(count == 0);
+
+ if (get_order(size) >= MAX_ORDER) {
+ KGSL_CORE_ERR("ptpool allocation is too big: %d\n", size);
+ return -EINVAL;
+ }
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (chunk == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*chunk));
+ return -ENOMEM;
+ }
+
+ chunk->size = size;
+ chunk->count = count;
+ chunk->dynamic = dynamic;
+
+ chunk->data = dma_alloc_coherent(NULL, size,
+ &chunk->phys, GFP_KERNEL);
+
+ if (chunk->data == NULL) {
+ KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
+ goto err;
+ }
+
+ chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);
+
+ if (chunk->bitmap == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ BITS_TO_LONGS(count) * 4);
+ goto err_dma;
+ }
+
+ list_add_tail(&chunk->list, &pool->list);
+
+ pool->chunks++;
+ pool->entries += count;
+
+ if (!dynamic)
+ pool->static_entries += count;
+
+ return 0;
+
+err_dma:
+ dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
+err:
+ kfree(chunk);
+ return -ENOMEM;
+}
+
+static void *
+_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, phys_addr_t *physaddr)
+{
+ struct kgsl_ptpool_chunk *chunk;
+
+ list_for_each_entry(chunk, &pool->list, list) {
+ int bit = find_first_zero_bit(chunk->bitmap, chunk->count);
+
+ if (bit >= chunk->count)
+ continue;
+
+ set_bit(bit, chunk->bitmap);
+ *physaddr = chunk->phys + (bit * pool->ptsize);
+
+ return chunk->data + (bit * pool->ptsize);
+ }
+
+ return NULL;
+}
+
+/**
+ * kgsl_ptpool_add
+ * @pool: A pointer to a ptpool structure
+ * @entries: Number of entries to add
+ *
+ * Add static entries to the pagetable pool.
+ */
+
+static int
+kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
+{
+ int ret = 0;
+ BUG_ON(count == 0);
+
+ mutex_lock(&pool->lock);
+
+ /* Only 4MB can be allocated in one chunk, so larger allocations
+ need to be split into multiple sections */
+
+ while (count) {
+ int entries = ((count * pool->ptsize) > SZ_4M) ?
+ SZ_4M / pool->ptsize : count;
+
+ /* Add the entries as static, i.e. they don't ever stand
+ a chance of being removed */
+
+ ret = _kgsl_ptpool_add_entries(pool, entries, 0);
+ if (ret)
+ break;
+
+ count -= entries;
+ }
+
+ mutex_unlock(&pool->lock);
+ return ret;
+}
+
+/**
+ * kgsl_ptpool_alloc
+ * @pool: A pointer to a ptpool structure
+ * @addr: A pointer to store the physical address of the chunk
+ *
+ * Allocate a pagetable from the pool. Returns the virtual address
+ * of the pagetable, the physical address is returned in physaddr
+ */
+
+static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
+ phys_addr_t *physaddr)
+{
+ void *addr = NULL;
+ int ret;
+
+ mutex_lock(&pool->lock);
+ addr = _kgsl_ptpool_get_entry(pool, physaddr);
+ if (addr)
+ goto done;
+
+ /* Add a chunk for 1 more pagetable and mark it as dynamic */
+ ret = _kgsl_ptpool_add_entries(pool, 1, 1);
+
+ if (ret)
+ goto done;
+
+ addr = _kgsl_ptpool_get_entry(pool, physaddr);
+done:
+ mutex_unlock(&pool->lock);
+ return addr;
+}
+
+static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
+{
+ list_del(&chunk->list);
+
+ if (chunk->data)
+ dma_free_coherent(NULL, chunk->size, chunk->data,
+ chunk->phys);
+ kfree(chunk->bitmap);
+ kfree(chunk);
+}
+
+/**
+ * kgsl_ptpool_free
+ * @pool: A pointer to a ptpool structure
+ * @addr: A pointer to the virtual address to free
+ *
+ * Free a pagetable allocated from the pool
+ */
+
+static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
+{
+ struct kgsl_ptpool_chunk *chunk, *tmp;
+
+ if (pool == NULL || addr == NULL)
+ return;
+
+ mutex_lock(&pool->lock);
+ list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
+ if (addr >= chunk->data &&
+ addr < chunk->data + chunk->size) {
+ int bit = ((unsigned long) (addr - chunk->data)) /
+ pool->ptsize;
+
+ clear_bit(bit, chunk->bitmap);
+ memset(addr, 0, pool->ptsize);
+
+ if (chunk->dynamic &&
+ bitmap_empty(chunk->bitmap, chunk->count))
+ _kgsl_ptpool_rm_chunk(chunk);
+
+ break;
+ }
+ }
+
+ mutex_unlock(&pool->lock);
+}
+
+void kgsl_gpummu_ptpool_destroy(void *ptpool)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
+ struct kgsl_ptpool_chunk *chunk, *tmp;
+
+ if (pool == NULL)
+ return;
+
+ mutex_lock(&pool->lock);
+ list_for_each_entry_safe(chunk, tmp, &pool->list, list)
+ _kgsl_ptpool_rm_chunk(chunk);
+ mutex_unlock(&pool->lock);
+
+ kfree(pool);
+}
+
+/**
+ * kgsl_ptpool_init
+ * @pool: A pointer to a ptpool structure to initialize
+ * @entries: The number of inital entries to add to the pool
+ *
+ * Initalize a pool and allocate an initial chunk of entries.
+ */
+void *kgsl_gpummu_ptpool_init(int entries)
+{
+ int ptsize = KGSL_PAGETABLE_SIZE;
+ struct kgsl_ptpool *pool;
+ int ret = 0;
+
+ pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
+ if (!pool) {
+ KGSL_CORE_ERR("Failed to allocate memory "
+ "for ptpool\n");
+ return NULL;
+ }
+
+ pool->ptsize = ptsize;
+ mutex_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->list);
+
+ if (entries) {
+ ret = kgsl_ptpool_add(pool, entries);
+ if (ret)
+ goto err_ptpool_remove;
+ }
+
+ ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
+ if (ret) {
+ KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
+ "statistics: %d\n", ret);
+ goto err_ptpool_remove;
+ }
+ return (void *)pool;
+
+err_ptpool_remove:
+ kgsl_gpummu_ptpool_destroy(pool);
+ return NULL;
+}
+
+int kgsl_gpummu_pt_equal(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt,
+ phys_addr_t pt_base)
+{
+ struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
+ return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
+}
+
+void kgsl_gpummu_destroy_pagetable(struct kgsl_pagetable *pt)
+{
+ struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
+ kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
+ gpummu_pt->base.hostptr);
+
+ kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;
+
+ kfree(gpummu_pt->tlbflushfilter.base);
+
+ kfree(gpummu_pt);
+}
+
+static inline uint32_t
+kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
+{
+ return (va - va_base) >> PAGE_SHIFT;
+}
+
+static inline void
+kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
+{
+ uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
+ BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
+ baseptr[pte] = val;
+}
+
+static inline uint32_t
+kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
+{
+ uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
+ BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
+ return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
+}
+
+static void kgsl_gpummu_pagefault(struct kgsl_mmu *mmu)
+{
+ unsigned int reg;
+ unsigned int ptbase;
+
+ kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, ®);
+ kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
+
+ KGSL_MEM_CRIT(mmu->device,
+ "mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
+ reg & ~(PAGE_SIZE - 1),
+ kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
+ reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
+ trace_kgsl_mmu_pagefault(mmu->device, reg & ~(PAGE_SIZE - 1),
+ kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
+ reg & 0x02 ? "WRITE" : "READ");
+}
+
+static void *kgsl_gpummu_create_pagetable(void)
+{
+ struct kgsl_gpummu_pt *gpummu_pt;
+
+ gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
+ GFP_KERNEL);
+ if (!gpummu_pt)
+ return NULL;
+
+ gpummu_pt->last_superpte = 0;
+
+ gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
+ (PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
+ gpummu_pt->tlbflushfilter.base = (unsigned int *)
+ kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
+ if (!gpummu_pt->tlbflushfilter.base) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ gpummu_pt->tlbflushfilter.size);
+ goto err_free_gpummu;
+ }
+ GSL_TLBFLUSH_FILTER_RESET();
+
+ gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
+ kgsl_driver.ptpool,
+ &gpummu_pt->base.physaddr);
+
+ if (gpummu_pt->base.hostptr == NULL)
+ goto err_flushfilter;
+
+ /* Do a check before truncating phys_addr_t to unsigned 32 */
+ if (sizeof(phys_addr_t) > sizeof(unsigned int)) {
+ WARN_ONCE(1, "Cannot use LPAE with gpummu\n");
+ goto err_flushfilter;
+ }
+ gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
+ gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;
+
+ /* ptpool allocations are from coherent memory, so update the
+ device statistics acordingly */
+
+ KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
+ kgsl_driver.stats.coherent_max);
+
+ return (void *)gpummu_pt;
+
+err_flushfilter:
+ kfree(gpummu_pt->tlbflushfilter.base);
+err_free_gpummu:
+ kfree(gpummu_pt);
+
+ return NULL;
+}
+
+static int kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
+ uint32_t flags)
+{
+ struct kgsl_gpummu_pt *gpummu_pt;
+ if (!kgsl_mmu_enabled())
+ return 0;
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ int ret = kgsl_idle(mmu->device);
+ if (ret)
+ return ret;
+ gpummu_pt = mmu->hwpagetable->priv;
+ kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
+ gpummu_pt->base.gpuaddr);
+ }
+
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ /* Invalidate all and tc */
+ kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
+ }
+
+ return 0;
+}
+
+static int kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id)
+{
+ int ret = 0;
+
+ if (mmu->flags & KGSL_FLAGS_STARTED) {
+ /* page table not current, then setup mmu to use new
+ * specified page table
+ */
+ if (mmu->hwpagetable != pagetable) {
+ mmu->hwpagetable = pagetable;
+ /* Since we do a TLB flush the tlb_flags should
+ * be cleared by calling kgsl_mmu_pt_get_flags
+ */
+ kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
+
+ /* call device specific set page table */
+ ret = kgsl_setstate(mmu, context_id,
+ KGSL_MMUFLAGS_TLBFLUSH |
+ KGSL_MMUFLAGS_PTUPDATE);
+ }
+ }
+
+ return ret;
+}
+
+static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
+{
+ /*
+ * intialize device mmu
+ *
+ * call this with the global lock held
+ */
+ int status = 0;
+
+ mmu->pt_base = KGSL_PAGETABLE_BASE;
+ mmu->pt_size = CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
+ mmu->pt_per_process = KGSL_MMU_USE_PER_PROCESS_PT;
+ mmu->use_cpu_map = false;
+
+ /* sub-client MMU lookups require address translation */
+ if ((mmu->config & ~0x1) > 0) {
+ /*make sure virtual address range is a multiple of 64Kb */
+ if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
+ KGSL_CORE_ERR("Invalid pagetable size requested "
+ "for GPUMMU: %x\n", CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
+ return -EINVAL;
+ }
+ }
+
+ dev_info(mmu->device->dev, "|%s| MMU type set for device is GPUMMU\n",
+ __func__);
+ return status;
+}
+
+static int kgsl_gpummu_start(struct kgsl_mmu *mmu)
+{
+ /*
+ * intialize device mmu
+ *
+ * call this with the global lock held
+ */
+
+ struct kgsl_device *device = mmu->device;
+ struct kgsl_gpummu_pt *gpummu_pt;
+ int ret;
+
+ if (mmu->flags & KGSL_FLAGS_STARTED)
+ return 0;
+
+ /* MMU not enabled */
+ if ((mmu->config & 0x1) == 0)
+ return 0;
+
+ /* setup MMU and sub-client behavior */
+ kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
+
+ /* enable axi interrupts */
+ kgsl_regwrite(device, MH_INTERRUPT_MASK,
+ GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
+
+ kgsl_sharedmem_set(device, &mmu->setstate_memory, 0, 0,
+ mmu->setstate_memory.size);
+
+ /* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
+ * to complete transactions in case of an MMU fault. Note that
+ * we'll leave the bottom 32 bytes of the setstate_memory for other
+ * purposes (e.g. use it when dummy read cycles are needed
+ * for other blocks) */
+ kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
+ mmu->setstate_memory.physaddr + 32);
+
+ if (mmu->defaultpagetable == NULL)
+ mmu->defaultpagetable =
+ kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);
+
+ /* Return error if the default pagetable doesn't exist */
+ if (mmu->defaultpagetable == NULL)
+ return -ENOMEM;
+
+ mmu->hwpagetable = mmu->defaultpagetable;
+ gpummu_pt = mmu->hwpagetable->priv;
+ kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
+ gpummu_pt->base.gpuaddr);
+ kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
+ (KGSL_PAGETABLE_BASE |
+ (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
+
+ ret = kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
+ if (!ret)
+ mmu->flags |= KGSL_FLAGS_STARTED;
+
+ return ret;
+}
+
+static int
+kgsl_gpummu_unmap(struct kgsl_pagetable *pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int *tlb_flags)
+{
+ unsigned int numpages;
+ unsigned int pte, ptefirst, ptelast, superpte;
+ unsigned int range = memdesc->size;
+ struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
+
+ /* All GPU addresses as assigned are page aligned, but some
+ functions purturb the gpuaddr with an offset, so apply the
+ mask here to make sure we have the right address */
+
+ unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
+
+ numpages = (range >> PAGE_SHIFT);
+ if (range & (PAGE_SIZE - 1))
+ numpages++;
+
+ ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
+ ptelast = ptefirst + numpages;
+
+ superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
+ GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
+ for (pte = ptefirst; pte < ptelast; pte++) {
+#ifdef VERBOSE_DEBUG
+ /* check if PTE exists */
+ if (!kgsl_pt_map_get(gpummu_pt, pte))
+ KGSL_CORE_ERR("pt entry %x is already "
+ "unmapped for pagetable %p\n", pte, gpummu_pt);
+#endif
+ kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
+ superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
+ if (pte == superpte)
+ GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
+ GSL_PT_SUPER_PTE);
+ }
+
+ /* Post all writes to the pagetable */
+ wmb();
+
+ return 0;
+}
+
+#define SUPERPTE_IS_DIRTY(_p) \
+(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
+GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
+
+static int
+kgsl_gpummu_map(struct kgsl_pagetable *pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int protflags,
+ unsigned int *tlb_flags)
+{
+ unsigned int pte;
+ struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
+ struct scatterlist *s;
+ int flushtlb = 0;
+ int i;
+
+ pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
+
+ /* Flush the TLB if the first PTE isn't at the superpte boundary */
+ if (pte & (GSL_PT_SUPER_PTE - 1))
+ flushtlb = 1;
+
+ for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
+ unsigned int paddr = kgsl_get_sg_pa(s);
+ unsigned int j;
+
+ /* Each sg entry might be multiple pages long */
+ for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
+ if (SUPERPTE_IS_DIRTY(pte))
+ flushtlb = 1;
+ kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
+ }
+ }
+
+ /* Flush the TLB if the last PTE isn't at the superpte boundary */
+ if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
+ flushtlb = 1;
+
+ wmb();
+
+ if (flushtlb) {
+ /*set all devices as needing flushing*/
+ *tlb_flags = UINT_MAX;
+ GSL_TLBFLUSH_FILTER_RESET();
+ }
+
+ return 0;
+}
+
+static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
+{
+ mmu->flags &= ~KGSL_FLAGS_STARTED;
+}
+
+static int kgsl_gpummu_close(struct kgsl_mmu *mmu)
+{
+ /*
+ * close device mmu
+ *
+ * call this with the global lock held
+ */
+ if (mmu->setstate_memory.gpuaddr)
+ kgsl_sharedmem_free(&mmu->setstate_memory);
+
+ if (mmu->defaultpagetable)
+ kgsl_mmu_putpagetable(mmu->defaultpagetable);
+
+ return 0;
+}
+
+static phys_addr_t
+kgsl_gpummu_get_current_ptbase(struct kgsl_mmu *mmu)
+{
+ unsigned int ptbase;
+ kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
+ return ptbase;
+}
+
+static phys_addr_t
+kgsl_gpummu_get_pt_base_addr(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt)
+{
+ struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
+ return gpummu_pt->base.gpuaddr;
+}
+
+static int kgsl_gpummu_get_num_iommu_units(struct kgsl_mmu *mmu)
+{
+ return 1;
+}
+
+struct kgsl_mmu_ops gpummu_ops = {
+ .mmu_init = kgsl_gpummu_init,
+ .mmu_close = kgsl_gpummu_close,
+ .mmu_start = kgsl_gpummu_start,
+ .mmu_stop = kgsl_gpummu_stop,
+ .mmu_setstate = kgsl_gpummu_setstate,
+ .mmu_device_setstate = kgsl_gpummu_default_setstate,
+ .mmu_pagefault = kgsl_gpummu_pagefault,
+ .mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
+ .mmu_pt_equal = kgsl_gpummu_pt_equal,
+ .mmu_get_pt_base_addr = kgsl_gpummu_get_pt_base_addr,
+ .mmu_enable_clk = NULL,
+ .mmu_disable_clk_on_ts = NULL,
+ .mmu_get_default_ttbr0 = NULL,
+ .mmu_get_reg_gpuaddr = NULL,
+ .mmu_get_reg_ahbaddr = NULL,
+ .mmu_get_num_iommu_units = kgsl_gpummu_get_num_iommu_units,
+ .mmu_hw_halt_supported = NULL,
+};
+
+struct kgsl_mmu_pt_ops gpummu_pt_ops = {
+ .mmu_map = kgsl_gpummu_map,
+ .mmu_unmap = kgsl_gpummu_unmap,
+ .mmu_create_pagetable = kgsl_gpummu_create_pagetable,
+ .mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
+};
diff --git a/drivers/gpu/msm2/kgsl_gpummu.h b/drivers/gpu/msm2/kgsl_gpummu.h
new file mode 100644
index 0000000..1753aff
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_gpummu.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_GPUMMU_H
+#define __KGSL_GPUMMU_H
+
+#define GSL_PT_PAGE_BITS_MASK 0x00000007
+#define GSL_PT_PAGE_ADDR_MASK PAGE_MASK
+
+#define GSL_MMU_INT_MASK \
+ (MH_INTERRUPT_MASK__AXI_READ_ERROR | \
+ MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
+
+/* Macros to manage TLB flushing */
+#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
+#define GSL_TLBFLUSH_FILTER_GET(superpte) \
+ (*((unsigned char *) \
+ (((unsigned int)gpummu_pt->tlbflushfilter.base) \
+ + (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
+#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
+ (GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
+ (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
+#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
+ (GSL_TLBFLUSH_FILTER_GET((superpte)) & \
+ (1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
+#define GSL_TLBFLUSH_FILTER_RESET() memset(gpummu_pt->tlbflushfilter.base,\
+ 0, gpummu_pt->tlbflushfilter.size)
+
+extern struct kgsl_mmu_ops gpummu_ops;
+extern struct kgsl_mmu_pt_ops gpummu_pt_ops;
+
+struct kgsl_tlbflushfilter {
+ unsigned int *base;
+ unsigned int size;
+};
+
+struct kgsl_gpummu_pt {
+ struct kgsl_memdesc base;
+ unsigned int last_superpte;
+ /* Maintain filter to manage tlb flushing */
+ struct kgsl_tlbflushfilter tlbflushfilter;
+};
+
+struct kgsl_ptpool_chunk {
+ size_t size;
+ unsigned int count;
+ int dynamic;
+
+ void *data;
+ phys_addr_t phys;
+
+ unsigned long *bitmap;
+ struct list_head list;
+};
+
+struct kgsl_ptpool {
+ size_t ptsize;
+ struct mutex lock;
+ struct list_head list;
+ int entries;
+ int static_entries;
+ int chunks;
+};
+
+void *kgsl_gpummu_ptpool_init(int entries);
+void kgsl_gpummu_ptpool_destroy(void *ptpool);
+
+#endif /* __KGSL_GPUMMU_H */
diff --git a/drivers/gpu/msm2/kgsl_iommu.c b/drivers/gpu/msm2/kgsl_iommu.c
new file mode 100644
index 0000000..976d4a8
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_iommu.c
@@ -0,0 +1,2085 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/msm_kgsl.h>
+#include <mach/socinfo.h>
+#include <mach/msm_iomap.h>
+#include <mach/board.h>
+#include <mach/iommu_domains.h>
+#include <stddef.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+#include "kgsl_mmu.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_iommu.h"
+#include "adreno_pm4types.h"
+#include "adreno.h"
+#include "kgsl_trace.h"
+#include "z180.h"
+#include "kgsl_cffdump.h"
+
+
+static struct kgsl_iommu_register_list kgsl_iommuv0_reg[KGSL_IOMMU_REG_MAX] = {
+ { 0, 0 }, /* GLOBAL_BASE */
+ { 0x0, 1 }, /* SCTLR */
+ { 0x10, 1 }, /* TTBR0 */
+ { 0x14, 1 }, /* TTBR1 */
+ { 0x20, 1 }, /* FSR */
+ { 0x800, 1 }, /* TLBIALL */
+ { 0x820, 1 }, /* RESUME */
+ { 0x03C, 1 }, /* TLBLKCR */
+ { 0x818, 1 }, /* V2PUR */
+ { 0x2C, 1 }, /* FSYNR0 */
+ { 0x30, 1 }, /* FSYNR0 */
+ { 0, 0 }, /* TLBSYNC, not in v0 */
+ { 0, 0 }, /* TLBSTATUS, not in v0 */
+ { 0, 0 } /* IMPLDEF_MICRO_MMU_CRTL, not in v0 */
+};
+
+static struct kgsl_iommu_register_list kgsl_iommuv1_reg[KGSL_IOMMU_REG_MAX] = {
+ { 0, 0 }, /* GLOBAL_BASE */
+ { 0x0, 1 }, /* SCTLR */
+ { 0x20, 1 }, /* TTBR0 */
+ { 0x28, 1 }, /* TTBR1 */
+ { 0x58, 1 }, /* FSR */
+ { 0x618, 1 }, /* TLBIALL */
+ { 0x008, 1 }, /* RESUME */
+ { 0, 0 }, /* TLBLKCR not in V1 */
+ { 0, 0 }, /* V2PUR not in V1 */
+ { 0x68, 1 }, /* FSYNR0 */
+ { 0x6C, 1 }, /* FSYNR1 */
+ { 0x7F0, 1 }, /* TLBSYNC */
+ { 0x7F4, 1 }, /* TLBSTATUS */
+ { 0x2000, 0 } /* IMPLDEF_MICRO_MMU_CRTL */
+};
+
+/* naming mismatch with iommu things */
+static void _iommu_lock(void)
+{
+ return;
+}
+
+/* naming mismatch with iommu things */
+static void _iommu_unlock(void)
+{
+ return;
+}
+
+
+struct remote_iommu_petersons_spinlock kgsl_iommu_sync_lock_vars;
+
+/*
+ * One page allocation for a guard region to protect against over-zealous
+ * GPU pre-fetch
+ */
+
+static struct page *kgsl_guard_page;
+
+static int get_iommu_unit(struct device *dev, struct kgsl_mmu **mmu_out,
+ struct kgsl_iommu_unit **iommu_unit_out)
+{
+ int i, j, k;
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ struct kgsl_mmu *mmu;
+ struct kgsl_iommu *iommu;
+
+ if (kgsl_driver.devp[i] == NULL)
+ continue;
+
+ mmu = kgsl_get_mmu(kgsl_driver.devp[i]);
+ if (mmu == NULL || mmu->priv == NULL)
+ continue;
+
+ iommu = mmu->priv;
+
+ for (j = 0; j < iommu->unit_count; j++) {
+ struct kgsl_iommu_unit *iommu_unit =
+ &iommu->iommu_units[j];
+ for (k = 0; k < iommu_unit->dev_count; k++) {
+ if (iommu_unit->dev[k].dev == dev) {
+ *mmu_out = mmu;
+ *iommu_unit_out = iommu_unit;
+ return 0;
+ }
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static struct kgsl_iommu_device *get_iommu_device(struct kgsl_iommu_unit *unit,
+ struct device *dev)
+{
+ int k;
+
+ for (k = 0; unit && k < unit->dev_count; k++) {
+ if (unit->dev[k].dev == dev)
+ return &(unit->dev[k]);
+ }
+
+ return NULL;
+}
+
+/* These functions help find the nearest allocated memory entries on either side
+ * of a faulting address. If we know the nearby allocations memory we can
+ * get a better determination of what we think should have been located in the
+ * faulting region
+ */
+
+/*
+ * A local structure to make it easy to store the interesting bits for the
+ * memory entries on either side of the faulting address
+ */
+
+struct _mem_entry {
+ unsigned int gpuaddr;
+ unsigned int size;
+ unsigned int flags;
+ unsigned int priv;
+ pid_t pid;
+};
+
+/*
+ * Find the closest alloated memory block with an smaller GPU address then the
+ * given address
+ */
+
+static void _prev_entry(struct kgsl_process_private *priv,
+ unsigned int faultaddr, struct _mem_entry *ret)
+{
+ struct rb_node *node;
+ struct kgsl_mem_entry *entry;
+
+ for (node = rb_first(&priv->mem_rb); node; ) {
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+
+ if (entry->memdesc.gpuaddr > faultaddr)
+ break;
+
+ /*
+ * If this is closer to the faulting address, then copy
+ * the entry
+ */
+
+ if (entry->memdesc.gpuaddr > ret->gpuaddr) {
+ ret->gpuaddr = entry->memdesc.gpuaddr;
+ ret->size = entry->memdesc.size;
+ ret->flags = entry->memdesc.flags;
+ ret->priv = entry->memdesc.priv;
+ ret->pid = priv->pid;
+ }
+
+ node = rb_next(&entry->node);
+ }
+}
+
+/*
+ * Find the closest alloated memory block with a greater starting GPU address
+ * then the given address
+ */
+
+static void _next_entry(struct kgsl_process_private *priv,
+ unsigned int faultaddr, struct _mem_entry *ret)
+{
+ struct rb_node *node;
+ struct kgsl_mem_entry *entry;
+
+ for (node = rb_last(&priv->mem_rb); node; ) {
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+
+ if (entry->memdesc.gpuaddr < faultaddr)
+ break;
+
+ /*
+ * If this is closer to the faulting address, then copy
+ * the entry
+ */
+
+ if (entry->memdesc.gpuaddr < ret->gpuaddr) {
+ ret->gpuaddr = entry->memdesc.gpuaddr;
+ ret->size = entry->memdesc.size;
+ ret->flags = entry->memdesc.flags;
+ ret->priv = entry->memdesc.priv;
+ ret->pid = priv->pid;
+ }
+
+ node = rb_prev(&entry->node);
+ }
+}
+
+static void _find_mem_entries(struct kgsl_mmu *mmu, unsigned int faultaddr,
+ unsigned int ptbase, struct _mem_entry *preventry,
+ struct _mem_entry *nextentry)
+{
+ struct kgsl_process_private *private;
+ int id = kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase);
+
+ memset(preventry, 0, sizeof(*preventry));
+ memset(nextentry, 0, sizeof(*nextentry));
+
+ /* Set the maximum possible size as an initial value */
+ nextentry->gpuaddr = 0xFFFFFFFF;
+
+ mutex_lock(&kgsl_driver.process_mutex);
+
+ list_for_each_entry(private, &kgsl_driver.process_list, list) {
+
+ if (private->pagetable && (private->pagetable->name != id))
+ continue;
+
+ spin_lock(&private->mem_lock);
+ _prev_entry(private, faultaddr, preventry);
+ _next_entry(private, faultaddr, nextentry);
+ spin_unlock(&private->mem_lock);
+ }
+
+ mutex_unlock(&kgsl_driver.process_mutex);
+}
+
+static void _print_entry(struct kgsl_device *device, struct _mem_entry *entry)
+{
+ char name[32];
+ memset(name, 0, sizeof(name));
+
+ kgsl_get_memory_usage(name, sizeof(name) - 1, entry->flags);
+
+ KGSL_LOG_DUMP(device,
+ "[%8.8X - %8.8X] %s (pid = %d) (%s)\n",
+ entry->gpuaddr,
+ entry->gpuaddr + entry->size,
+ entry->priv & KGSL_MEMDESC_GUARD_PAGE ? "(+guard)" : "",
+ entry->pid, name);
+}
+
+static void _check_if_freed(struct kgsl_iommu_device *iommu_dev,
+ unsigned long addr, unsigned int pid)
+{
+ void *base = kgsl_driver.memfree_hist.base_hist_rb;
+ struct kgsl_memfree_hist_elem *wptr;
+ struct kgsl_memfree_hist_elem *p;
+ char name[32];
+ memset(name, 0, sizeof(name));
+
+ mutex_lock(&kgsl_driver.memfree_hist_mutex);
+ wptr = kgsl_driver.memfree_hist.wptr;
+ p = wptr;
+ for (;;) {
+ if (p->size && p->pid == pid)
+ if (addr >= p->gpuaddr &&
+ addr < (p->gpuaddr + p->size)) {
+
+ kgsl_get_memory_usage(name, sizeof(name) - 1,
+ p->flags);
+ KGSL_LOG_DUMP(iommu_dev->kgsldev,
+ "---- premature free ----\n");
+ KGSL_LOG_DUMP(iommu_dev->kgsldev,
+ "[%8.8X-%8.8X] (%s) was already freed by pid %d\n",
+ p->gpuaddr,
+ p->gpuaddr + p->size,
+ name,
+ p->pid);
+ }
+ p++;
+ if ((void *)p >= base + kgsl_driver.memfree_hist.size)
+ p = (struct kgsl_memfree_hist_elem *) base;
+
+ if (p == kgsl_driver.memfree_hist.wptr)
+ break;
+ }
+ mutex_unlock(&kgsl_driver.memfree_hist_mutex);
+}
+
+static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long addr, int flags, void *token)
+{
+ int ret = 0;
+ struct kgsl_mmu *mmu;
+ struct kgsl_iommu *iommu;
+ struct kgsl_iommu_unit *iommu_unit;
+ struct kgsl_iommu_device *iommu_dev;
+ unsigned int ptbase, fsr;
+ struct kgsl_device *device;
+ struct adreno_device *adreno_dev;
+ unsigned int no_page_fault_log = 0;
+ unsigned int pid;
+ unsigned int fsynr0, fsynr1;
+ int write;
+ struct _mem_entry prev, next;
+ unsigned int curr_context_id = 0;
+ unsigned int curr_global_ts = 0;
+ struct kgsl_context *context;
+
+ ret = get_iommu_unit(dev, &mmu, &iommu_unit);
+ if (ret)
+ goto done;
+
+ device = mmu->device;
+ adreno_dev = ADRENO_DEVICE(device);
+ if (atomic_read(&mmu->fault)) {
+ if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+ ret = -EBUSY;
+ goto done;
+ }
+
+ iommu_dev = get_iommu_device(iommu_unit, dev);
+ if (!iommu_dev) {
+ KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
+ ret = -ENOSYS;
+ goto done;
+ }
+ iommu = mmu->priv;
+
+ /*
+ * set the fault bits and stuff before any printks so that if fault
+ * handler runs then it will know it's dealing with a pagefault
+ */
+ kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
+
+ context = kgsl_context_get(device, curr_context_id);
+
+ if (context != NULL) {
+ kgsl_sharedmem_readl(&device->memstore, &curr_global_ts,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
+
+ /* save pagefault timestamp for GFT */
+ set_bit(KGSL_CONTEXT_PAGEFAULT, &context->priv);
+ context->pagefault_ts = curr_global_ts;
+
+ kgsl_context_put(context);
+ context = NULL;
+ }
+
+ atomic_set(&mmu->fault, 1);
+ iommu_dev->fault = 1;
+
+ if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE) {
+ adreno_set_gpu_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT);
+ /* turn off GPU IRQ so we don't get faults from it too */
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+ adreno_dispatcher_schedule(device);
+ }
+
+ ptbase = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+ iommu_dev->ctx_id, TTBR0);
+
+ fsr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+ iommu_dev->ctx_id, FSR);
+ fsynr0 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+ iommu_dev->ctx_id, FSYNR0);
+ fsynr1 = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+ iommu_dev->ctx_id, FSYNR1);
+
+ if (!msm_soc_version_supports_iommu_v1())
+ write = ((fsynr1 & (KGSL_IOMMU_FSYNR1_AWRITE_MASK <<
+ KGSL_IOMMU_FSYNR1_AWRITE_SHIFT)) ? 1 : 0);
+ else
+ write = ((fsynr0 & (KGSL_IOMMU_V1_FSYNR0_WNR_MASK <<
+ KGSL_IOMMU_V1_FSYNR0_WNR_SHIFT)) ? 1 : 0);
+
+ pid = kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase);
+
+ if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE)
+ no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
+
+ if (!no_page_fault_log) {
+ KGSL_MEM_CRIT(iommu_dev->kgsldev,
+ "GPU PAGE FAULT: addr = %lX pid = %d\n", addr, pid);
+ KGSL_MEM_CRIT(iommu_dev->kgsldev,
+ "context = %d FSR = %X FSYNR0 = %X FSYNR1 = %X(%s fault)\n",
+ iommu_dev->ctx_id, fsr, fsynr0, fsynr1,
+ write ? "write" : "read");
+
+ _check_if_freed(iommu_dev, addr, pid);
+
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, "---- nearby memory ----\n");
+
+ _find_mem_entries(mmu, addr, ptbase, &prev, &next);
+
+ if (prev.gpuaddr)
+ _print_entry(iommu_dev->kgsldev, &prev);
+ else
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, " <- fault @ %8.8lX\n", addr);
+
+ if (next.gpuaddr != 0xFFFFFFFF)
+ _print_entry(iommu_dev->kgsldev, &next);
+ else
+ KGSL_LOG_DUMP(iommu_dev->kgsldev, "*EMPTY*\n");
+ }
+
+ trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
+ kgsl_mmu_get_ptname_from_ptbase(mmu, ptbase),
+ write ? "write" : "read");
+
+ /*
+ * We do not want the h/w to resume fetching data from an iommu unit
+ * that has faulted, this is better for debugging as it will stall
+ * the GPU and trigger a snapshot. To stall the transaction return
+ * EBUSY error.
+ */
+ if (adreno_dev->ft_pf_policy & KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)
+ ret = -EBUSY;
+done:
+ return ret;
+}
+
+/*
+ * kgsl_iommu_disable_clk - Disable iommu clocks
+ * @mmu - Pointer to mmu structure
+ *
+ * Disables iommu clocks
+ * Return - void
+ */
+static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ int i, j;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ if (!iommu_unit->dev[j].clk_enabled)
+ continue;
+ iommu_drvdata = dev_get_drvdata(
+ iommu_unit->dev[j].dev->parent);
+ if (iommu_drvdata->aclk)
+ clk_disable_unprepare(iommu_drvdata->aclk);
+ if (iommu_drvdata->clk)
+ clk_disable_unprepare(iommu_drvdata->clk);
+ clk_disable_unprepare(iommu_drvdata->pclk);
+ iommu_unit->dev[j].clk_enabled = false;
+ }
+ }
+}
+
+/*
+ * kgsl_iommu_disable_clk_event - An event function that is executed when
+ * the required timestamp is reached. It disables the IOMMU clocks if
+ * the timestamp on which the clocks can be disabled has expired.
+ * @device - The kgsl device pointer
+ * @data - The data passed during event creation, it is the MMU pointer
+ * @id - Context ID, should always be KGSL_MEMSTORE_GLOBAL
+ * @ts - The current timestamp that has expired for the device
+ *
+ * Disables IOMMU clocks if timestamp has expired
+ * Return - void
+ */
+static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
+ unsigned int id, unsigned int ts,
+ u32 type)
+{
+ struct kgsl_mmu *mmu = data;
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (!iommu->clk_event_queued) {
+ if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
+ KGSL_DRV_ERR(device,
+ "IOMMU disable clock event being cancelled, "
+ "iommu_last_cmd_ts: %x, retired ts: %x\n",
+ iommu->iommu_last_cmd_ts, ts);
+ return;
+ }
+
+ if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
+ kgsl_iommu_disable_clk(mmu);
+ iommu->clk_event_queued = false;
+ } else {
+ /* add new event to fire when ts is reached, this can happen
+ * if we queued an event and someone requested the clocks to
+ * be disbaled on a later timestamp */
+ if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
+ kgsl_iommu_clk_disable_event, mmu, mmu)) {
+ KGSL_DRV_ERR(device,
+ "Failed to add IOMMU disable clk event\n");
+ iommu->clk_event_queued = false;
+ }
+ }
+}
+
+/*
+ * kgsl_iommu_disable_clk_on_ts - Sets up event to disable IOMMU clocks
+ * @mmu - The kgsl MMU pointer
+ * @ts - Timestamp on which the clocks should be disabled
+ * @ts_valid - Indicates whether ts parameter is valid, if this parameter
+ * is false then it means that the calling function wants to disable the
+ * IOMMU clocks immediately without waiting for any timestamp
+ *
+ * Creates an event to disable the IOMMU clocks on timestamp and if event
+ * already exists then updates the timestamp of disabling the IOMMU clocks
+ * with the passed in ts if it is greater than the current value at which
+ * the clocks will be disabled
+ * Return - void
+ */
+static void
+kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu, unsigned int ts,
+ bool ts_valid)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (iommu->clk_event_queued) {
+ if (ts_valid && (0 <
+ timestamp_cmp(ts, iommu->iommu_last_cmd_ts)))
+ iommu->iommu_last_cmd_ts = ts;
+ } else {
+ if (ts_valid) {
+ iommu->iommu_last_cmd_ts = ts;
+ iommu->clk_event_queued = true;
+ if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
+ ts, kgsl_iommu_clk_disable_event, mmu, mmu)) {
+ KGSL_DRV_ERR(mmu->device,
+ "Failed to add IOMMU disable clk event\n");
+ iommu->clk_event_queued = false;
+ }
+ } else {
+ kgsl_iommu_disable_clk(mmu);
+ }
+ }
+}
+
+/*
+ * kgsl_iommu_enable_clk - Enable iommu clocks
+ * @mmu - Pointer to mmu structure
+ * @ctx_id - The context bank whose clocks are to be turned on
+ *
+ * Enables iommu clocks of a given context
+ * Return: 0 on success else error code
+ */
+static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
+ int ctx_id)
+{
+ int ret = 0;
+ int i, j;
+ struct kgsl_iommu *iommu = mmu->priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ if (iommu_unit->dev[j].clk_enabled ||
+ ctx_id != iommu_unit->dev[j].ctx_id)
+ continue;
+ iommu_drvdata =
+ dev_get_drvdata(iommu_unit->dev[j].dev->parent);
+ ret = clk_prepare_enable(iommu_drvdata->pclk);
+ if (ret)
+ goto done;
+ if (iommu_drvdata->clk) {
+ ret = clk_prepare_enable(iommu_drvdata->clk);
+ if (ret) {
+ clk_disable_unprepare(
+ iommu_drvdata->pclk);
+ goto done;
+ }
+ }
+ if (iommu_drvdata->aclk) {
+ ret = clk_prepare_enable(iommu_drvdata->aclk);
+ if (ret) {
+ if (iommu_drvdata->clk)
+ clk_disable_unprepare(
+ iommu_drvdata->clk);
+ clk_disable_unprepare(
+ iommu_drvdata->pclk);
+ goto done;
+ }
+ }
+ iommu_unit->dev[j].clk_enabled = true;
+ }
+ }
+done:
+ if (ret)
+ kgsl_iommu_disable_clk(mmu);
+ return ret;
+}
+
+/*
+ * kgsl_iommu_pt_equal - Check if pagetables are equal
+ * @mmu - Pointer to mmu structure
+ * @pt - Pointer to pagetable
+ * @pt_base - Address of a pagetable that the IOMMU register is
+ * programmed with
+ *
+ * Checks whether the pt_base is equal to the base address of
+ * the pagetable which is contained in the pt structure
+ * Return - Non-zero if the pagetable addresses are equal else 0
+ */
+static int kgsl_iommu_pt_equal(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt,
+ phys_addr_t pt_base)
+{
+ struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
+ phys_addr_t domain_ptbase = iommu_pt ?
+ iommu_get_pt_base_addr(iommu_pt->domain) : 0;
+
+ /* Only compare the valid address bits of the pt_base */
+ domain_ptbase &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+
+ pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+
+ return domain_ptbase && pt_base &&
+ (domain_ptbase == pt_base);
+}
+
+/*
+ * kgsl_iommu_destroy_pagetable - Free up reaources help by a pagetable
+ * @mmu_specific_pt - Pointer to pagetable which is to be freed
+ *
+ * Return - void
+ */
+static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
+{
+ struct kgsl_iommu_pt *iommu_pt = pt->priv;
+ if (iommu_pt->domain)
+ msm_unregister_domain(iommu_pt->domain);
+
+ kfree(iommu_pt);
+ iommu_pt = NULL;
+}
+
+/*
+ * kgsl_iommu_create_pagetable - Create a IOMMU pagetable
+ *
+ * Allocate memory to hold a pagetable and allocate the IOMMU
+ * domain which is the actual IOMMU pagetable
+ * Return - void
+ */
+void *kgsl_iommu_create_pagetable(void)
+{
+ int domain_num;
+ struct kgsl_iommu_pt *iommu_pt;
+
+ struct msm_iova_partition kgsl_partition = {
+ .start = 0,
+ .size = 0xFFFFFFFF,
+ };
+ struct msm_iova_layout kgsl_layout = {
+ .partitions = &kgsl_partition,
+ .npartitions = 1,
+ .client_name = "kgsl",
+ .domain_flags = 0,
+ };
+
+ iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
+ if (!iommu_pt) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(struct kgsl_iommu_pt));
+ return NULL;
+ }
+ /* L2 redirect is not stable on IOMMU v2 */
+ if (msm_soc_version_supports_iommu_v1())
+ kgsl_layout.domain_flags = MSM_IOMMU_DOMAIN_PT_CACHEABLE;
+
+ domain_num = msm_register_domain(&kgsl_layout);
+ if (domain_num >= 0) {
+ iommu_pt->domain = msm_get_iommu_domain(domain_num);
+
+ if (iommu_pt->domain) {
+ iommu_set_fault_handler(iommu_pt->domain,
+ kgsl_iommu_fault_handler, NULL);
+
+ return iommu_pt;
+ }
+ }
+
+ KGSL_CORE_ERR("Failed to create iommu domain\n");
+ kfree(iommu_pt);
+ return NULL;
+}
+
+/*
+ * kgsl_detach_pagetable_iommu_domain - Detach the IOMMU unit from a
+ * pagetable
+ * @mmu - Pointer to the device mmu structure
+ * @priv - Flag indicating whether the private or user context is to be
+ * detached
+ *
+ * Detach the IOMMU unit with the domain that is contained in the
+ * hwpagetable of the given mmu. After detaching the IOMMU unit is not
+ * in use because the PTBR will not be set after a detach
+ * Return - void
+ */
+static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu_pt *iommu_pt;
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ iommu_pt = mmu->defaultpagetable->priv;
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ /*
+ * If there is a 2nd default pagetable then priv domain
+ * is attached with this pagetable
+ */
+ if (mmu->priv_bank_table &&
+ (KGSL_IOMMU_CONTEXT_PRIV == j))
+ iommu_pt = mmu->priv_bank_table->priv;
+ if (iommu_unit->dev[j].attached) {
+ iommu_detach_device(iommu_pt->domain,
+ iommu_unit->dev[j].dev);
+ iommu_unit->dev[j].attached = false;
+ KGSL_MEM_INFO(mmu->device, "iommu %p detached "
+ "from user dev of MMU: %p\n",
+ iommu_pt->domain, mmu);
+ }
+ }
+ }
+}
+
+/*
+ * kgsl_attach_pagetable_iommu_domain - Attach the IOMMU unit to a
+ * pagetable, i.e set the IOMMU's PTBR to the pagetable address and
+ * setup other IOMMU registers for the device so that it becomes
+ * active
+ * @mmu - Pointer to the device mmu structure
+ * @priv - Flag indicating whether the private or user context is to be
+ * attached
+ *
+ * Attach the IOMMU unit with the domain that is contained in the
+ * hwpagetable of the given mmu.
+ * Return - 0 on success else error code
+ */
+static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu_pt *iommu_pt;
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j, ret = 0;
+
+ /*
+ * Loop through all the iommu devcies under all iommu units and
+ * attach the domain
+ */
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ iommu_pt = mmu->defaultpagetable->priv;
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ /*
+ * If there is a 2nd default pagetable then priv domain
+ * is attached to this pagetable
+ */
+ if (mmu->priv_bank_table &&
+ (KGSL_IOMMU_CONTEXT_PRIV == j))
+ iommu_pt = mmu->priv_bank_table->priv;
+ if (!iommu_unit->dev[j].attached) {
+ ret = iommu_attach_device(iommu_pt->domain,
+ iommu_unit->dev[j].dev);
+ if (ret) {
+ KGSL_MEM_ERR(mmu->device,
+ "Failed to attach device, err %d\n",
+ ret);
+ goto done;
+ }
+ iommu_unit->dev[j].attached = true;
+ KGSL_MEM_INFO(mmu->device,
+ "iommu pt %p attached to dev %p, ctx_id %d\n",
+ iommu_pt->domain, iommu_unit->dev[j].dev,
+ iommu_unit->dev[j].ctx_id);
+ }
+ }
+ }
+done:
+ return ret;
+}
+
+/*
+ * _get_iommu_ctxs - Get device pointer to IOMMU contexts
+ * @mmu - Pointer to mmu device
+ * data - Pointer to the platform data containing information about
+ * iommu devices for one iommu unit
+ * unit_id - The IOMMU unit number. This is not a specific ID but just
+ * a serial number. The serial numbers are treated as ID's of the
+ * IOMMU units
+ *
+ * Return - 0 on success else error code
+ */
+static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
+ struct kgsl_device_iommu_data *data, unsigned int unit_id)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
+ int i, j;
+ int found_ctx;
+
+ for (j = 0; j < KGSL_IOMMU_MAX_DEVS_PER_UNIT; j++) {
+ found_ctx = 0;
+ for (i = 0; i < data->iommu_ctx_count; i++) {
+ if (j == data->iommu_ctxs[i].ctx_id) {
+ found_ctx = 1;
+ break;
+ }
+ }
+ if (!found_ctx)
+ break;
+ if (!data->iommu_ctxs[i].iommu_ctx_name) {
+ KGSL_CORE_ERR("Context name invalid\n");
+ return -EINVAL;
+ }
+
+ iommu_unit->dev[iommu_unit->dev_count].dev =
+ msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
+ if (iommu_unit->dev[iommu_unit->dev_count].dev == NULL) {
+ KGSL_CORE_ERR("Failed to get iommu dev handle for "
+ "device %s\n", data->iommu_ctxs[i].iommu_ctx_name);
+ return -EINVAL;
+ }
+ iommu_unit->dev[iommu_unit->dev_count].ctx_id =
+ data->iommu_ctxs[i].ctx_id;
+ iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;
+
+ KGSL_DRV_INFO(mmu->device,
+ "Obtained dev handle %p for iommu context %s\n",
+ iommu_unit->dev[iommu_unit->dev_count].dev,
+ data->iommu_ctxs[i].iommu_ctx_name);
+
+ iommu_unit->dev_count++;
+ }
+ if (!j) {
+ KGSL_CORE_ERR("No ctxts initialized, user ctxt absent\n ");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * kgsl_iommu_start_sync_lock - Initialize some variables during MMU start up
+ * for GPU CPU synchronization
+ * @mmu - Pointer to mmu device
+ *
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_start_sync_lock(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ uint32_t lock_gpu_addr = 0;
+
+ /* iommu v1 or v0 here, cp has v1 */
+ if (KGSL_DEVICE_3D0 != mmu->device->id ||
+ !msm_soc_version_supports_iommu_v1() ||
+ !kgsl_mmu_is_perprocess(mmu) ||
+ iommu->sync_lock_vars)
+ return 0;
+
+ if (!(mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC)) {
+ KGSL_DRV_ERR(mmu->device,
+ "The GPU microcode does not support IOMMUv1 sync opcodes\n");
+ return -ENXIO;
+ }
+ /* Store Lock variables GPU address */
+ lock_gpu_addr = (iommu->sync_lock_desc.gpuaddr +
+ iommu->sync_lock_offset);
+
+ kgsl_iommu_sync_lock_vars.flag[PROC_APPS] = (lock_gpu_addr +
+ (offsetof(struct remote_iommu_petersons_spinlock,
+ flag[PROC_APPS])));
+ kgsl_iommu_sync_lock_vars.flag[PROC_GPU] = (lock_gpu_addr +
+ (offsetof(struct remote_iommu_petersons_spinlock,
+ flag[PROC_GPU])));
+ kgsl_iommu_sync_lock_vars.turn = (lock_gpu_addr +
+ (offsetof(struct remote_iommu_petersons_spinlock, turn)));
+
+ iommu->sync_lock_vars = &kgsl_iommu_sync_lock_vars;
+
+ return 0;
+}
+
+/*
+ * kgsl_get_sync_lock - Init Sync Lock between GPU and CPU
+ * @mmu - Pointer to mmu device
+ *
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_init_sync_lock(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ int status = 0;
+ uint32_t lock_phy_addr = 0;
+ uint32_t page_offset = 0;
+
+ if (KGSL_DEVICE_3D0 != mmu->device->id ||
+ !msm_soc_version_supports_iommu_v1() ||
+ !kgsl_mmu_is_perprocess(mmu))
+ return status;
+
+ /* Return if already initialized */
+ if (iommu->sync_lock_initialized)
+ return status;
+
+ /* Get the physical address of the Lock variables */
+ lock_phy_addr = (msm_iommu_lock_initialize()
+ - MSM_SHARED_RAM_BASE + msm_shared_ram_phys);
+
+ if (!lock_phy_addr) {
+ KGSL_DRV_ERR(mmu->device,
+ "GPU CPU sync lock is not supported by kernel\n");
+ return -ENXIO;
+ }
+
+ /* Align the physical address to PAGE boundary and store the offset */
+ page_offset = (lock_phy_addr & (PAGE_SIZE - 1));
+ lock_phy_addr = (lock_phy_addr & ~(PAGE_SIZE - 1));
+ iommu->sync_lock_desc.physaddr = (unsigned int)lock_phy_addr;
+ iommu->sync_lock_offset = page_offset;
+
+ iommu->sync_lock_desc.size =
+ PAGE_ALIGN(sizeof(kgsl_iommu_sync_lock_vars));
+ status = memdesc_sg_phys(&iommu->sync_lock_desc,
+ iommu->sync_lock_desc.physaddr,
+ iommu->sync_lock_desc.size);
+
+ if (status)
+ return status;
+
+ /* Flag Sync Lock is Initialized */
+ iommu->sync_lock_initialized = 1;
+
+ return status;
+}
+
+/*
+ * kgsl_iommu_sync_lock - Acquire Sync Lock between GPU and CPU
+ * @mmu - Pointer to mmu device
+ * @cmds - Pointer to array of commands
+ *
+ * Return - int - number of commands.
+ */
+inline unsigned int kgsl_iommu_sync_lock(struct kgsl_mmu *mmu,
+ unsigned int *cmds)
+{
+ struct kgsl_device *device = mmu->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ struct remote_iommu_petersons_spinlock *lock_vars =
+ iommu->sync_lock_vars;
+ unsigned int *start = cmds;
+
+ if (!iommu->sync_lock_initialized)
+ return 0;
+
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 1;
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
+ /* MEM SPACE = memory, FUNCTION = equals */
+ *cmds++ = 0x13;
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = lock_vars->turn;
+ *cmds++ = 0;
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
+ /* MEM SPACE = memory, FUNCTION = equals */
+ *cmds++ = 0x13;
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+
+ *cmds++ = cp_type3_packet(CP_TEST_TWO_MEMS, 3);
+ *cmds++ = lock_vars->flag[PROC_APPS];
+ *cmds++ = lock_vars->turn;
+ *cmds++ = 0;
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ return cmds - start;
+}
+
+/*
+ * kgsl_iommu_sync_lock - Release Sync Lock between GPU and CPU
+ * @mmu - Pointer to mmu device
+ * @cmds - Pointer to array of commands
+ *
+ * Return - int - number of commands.
+ */
+inline unsigned int kgsl_iommu_sync_unlock(struct kgsl_mmu *mmu,
+ unsigned int *cmds)
+{
+ struct kgsl_device *device = mmu->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ struct remote_iommu_petersons_spinlock *lock_vars =
+ iommu->sync_lock_vars;
+ unsigned int *start = cmds;
+
+ if (!iommu->sync_lock_initialized)
+ return 0;
+
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 0;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
+ /* MEM SPACE = memory, FUNCTION = equals */
+ *cmds++ = 0x13;
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 0x0;
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ return cmds - start;
+}
+
+/*
+ * kgsl_get_iommu_ctxt - Get device pointer to IOMMU contexts
+ * @mmu - Pointer to mmu device
+ *
+ * Get the device pointers for the IOMMU user and priv contexts of the
+ * kgsl device
+ * Return - 0 on success else error code
+ */
+static int kgsl_get_iommu_ctxt(struct kgsl_mmu *mmu)
+{
+ struct platform_device *pdev =
+ container_of(mmu->device->parentdev, struct platform_device,
+ dev);
+ struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ int i, ret = 0;
+
+ /* Go through the IOMMU data and get all the context devices */
+ if (KGSL_IOMMU_MAX_UNITS < pdata_dev->iommu_count) {
+ KGSL_CORE_ERR("Too many IOMMU units defined\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ for (i = 0; i < pdata_dev->iommu_count; i++) {
+ ret = _get_iommu_ctxs(mmu, &pdata_dev->iommu_data[i], i);
+ if (ret)
+ break;
+ }
+ iommu->unit_count = pdata_dev->iommu_count;
+done:
+ return ret;
+}
+
+/*
+ * kgsl_set_register_map - Map the IOMMU regsiters in the memory descriptors
+ * of the respective iommu units
+ * @mmu - Pointer to mmu structure
+ *
+ * Return - 0 on success else error code
+ */
+static int kgsl_set_register_map(struct kgsl_mmu *mmu)
+{
+ struct platform_device *pdev =
+ container_of(mmu->device->parentdev, struct platform_device,
+ dev);
+ struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ struct kgsl_iommu_unit *iommu_unit;
+ int i = 0, ret = 0;
+
+ for (; i < pdata_dev->iommu_count; i++) {
+ struct kgsl_device_iommu_data data = pdata_dev->iommu_data[i];
+ iommu_unit = &iommu->iommu_units[i];
+ /* set up the IOMMU register map for the given IOMMU unit */
+ if (!data.physstart || !data.physend) {
+ KGSL_CORE_ERR("The register range for IOMMU unit not"
+ " specified\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ iommu_unit->reg_map.hostptr = ioremap(data.physstart,
+ data.physend - data.physstart + 1);
+ if (!iommu_unit->reg_map.hostptr) {
+ KGSL_CORE_ERR("Failed to map SMMU register address "
+ "space from %x to %x\n", data.physstart,
+ data.physend - data.physstart + 1);
+ ret = -ENOMEM;
+ i--;
+ goto err;
+ }
+ iommu_unit->reg_map.size = data.physend - data.physstart + 1;
+ iommu_unit->reg_map.physaddr = data.physstart;
+ ret = memdesc_sg_phys(&iommu_unit->reg_map, data.physstart,
+ iommu_unit->reg_map.size);
+ if (ret)
+ goto err;
+
+ iommu_unit->iommu_halt_enable = data.iommu_halt_enable;
+ iommu_unit->ahb_base = data.physstart - mmu->device->reg_phys;
+ }
+ iommu->unit_count = pdata_dev->iommu_count;
+ return ret;
+err:
+ /* Unmap any mapped IOMMU regions */
+ for (; i >= 0; i--) {
+ iommu_unit = &iommu->iommu_units[i];
+ iounmap(iommu_unit->reg_map.hostptr);
+ iommu_unit->reg_map.size = 0;
+ iommu_unit->reg_map.physaddr = 0;
+ }
+ return ret;
+}
+
+/*
+ * kgsl_iommu_get_pt_base_addr - Get the address of the pagetable that the
+ * IOMMU ttbr0 register is programmed with
+ * @mmu - Pointer to mmu
+ * @pt - kgsl pagetable pointer that contains the IOMMU domain pointer
+ *
+ * Return - actual pagetable address that the ttbr0 register is programmed
+ * with
+ */
+static phys_addr_t kgsl_iommu_get_pt_base_addr(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt)
+{
+ struct kgsl_iommu_pt *iommu_pt = pt->priv;
+ return iommu_get_pt_base_addr(iommu_pt->domain) &
+ KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+}
+
+/*
+ * kgsl_iommu_get_default_ttbr0 - Return the ttbr0 value programmed by
+ * iommu driver
+ * @mmu - Pointer to mmu structure
+ * @hostptr - Pointer to the IOMMU register map. This is used to match
+ * the iommu device whose lsb value is to be returned
+ * @ctx_id - The context bank whose lsb valus is to be returned
+ * Return - returns the ttbr0 value programmed by iommu driver
+ */
+static phys_addr_t kgsl_iommu_get_default_ttbr0(struct kgsl_mmu *mmu,
+ unsigned int unit_id,
+ enum kgsl_iommu_context_id ctx_id)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j;
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++)
+ if (unit_id == i &&
+ ctx_id == iommu_unit->dev[j].ctx_id)
+ return iommu_unit->dev[j].default_ttbr0;
+ }
+ return 0;
+}
+
+static int kgsl_iommu_setstate(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id)
+{
+ int ret = 0;
+
+ if (mmu->flags & KGSL_FLAGS_STARTED) {
+ /* page table not current, then setup mmu to use new
+ * specified page table
+ */
+ if (mmu->hwpagetable != pagetable) {
+ unsigned int flags = 0;
+ mmu->hwpagetable = pagetable;
+ flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
+ mmu->device->id) |
+ KGSL_MMUFLAGS_TLBFLUSH;
+ ret = kgsl_setstate(mmu, context_id,
+ KGSL_MMUFLAGS_PTUPDATE | flags);
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * kgsl_iommu_setup_regs - map iommu registers into a pagetable
+ * @mmu: Pointer to mmu structure
+ * @pt: the pagetable
+ *
+ * To do pagetable switches from the GPU command stream, the IOMMU
+ * registers need to be mapped into the GPU's pagetable. This function
+ * is used differently on different targets. On 8960, the registers
+ * are mapped into every pagetable during kgsl_setup_pt(). On
+ * all other targets, the registers are mapped only into the second
+ * context bank.
+ *
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_setup_regs(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt)
+{
+ int status;
+ int i = 0;
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (!msm_soc_version_supports_iommu_v1())
+ return 0;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ status = kgsl_mmu_map_global(pt,
+ &(iommu->iommu_units[i].reg_map));
+ if (status)
+ goto err;
+ }
+
+ /* Map Lock variables to GPU pagetable */
+ if (iommu->sync_lock_initialized) {
+ status = kgsl_mmu_map_global(pt, &iommu->sync_lock_desc);
+ if (status)
+ goto err;
+ }
+
+ return 0;
+err:
+ for (i--; i >= 0; i--)
+ kgsl_mmu_unmap(pt,
+ &(iommu->iommu_units[i].reg_map));
+
+ return status;
+}
+
+/*
+ * kgsl_iommu_cleanup_regs - unmap iommu registers from a pagetable
+ * @mmu: Pointer to mmu structure
+ * @pt: the pagetable
+ *
+ * Removes mappings created by kgsl_iommu_setup_regs().
+ *
+ * Return - 0 on success else error code
+ */
+static void kgsl_iommu_cleanup_regs(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i;
+ for (i = 0; i < iommu->unit_count; i++)
+ kgsl_mmu_unmap(pt, &(iommu->iommu_units[i].reg_map));
+
+ if (iommu->sync_lock_desc.gpuaddr)
+ kgsl_mmu_unmap(pt, &iommu->sync_lock_desc);
+}
+
+
+/*
+ * kgsl_iommu_get_reg_ahbaddr - Returns the ahb address of the register
+ * @mmu - Pointer to mmu structure
+ * @iommu_unit - The iommu unit for which base address is requested
+ * @ctx_id - The context ID of the IOMMU ctx
+ * @reg - The register for which address is required
+ *
+ * Return - The address of register which can be used in type0 packet
+ */
+static unsigned int kgsl_iommu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
+ int iommu_unit, int ctx_id,
+ enum kgsl_iommu_reg_map reg)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (iommu->iommu_reg_list[reg].ctx_reg)
+ return iommu->iommu_units[iommu_unit].ahb_base +
+ iommu->iommu_reg_list[reg].reg_offset +
+ (ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset;
+ else
+ return iommu->iommu_units[iommu_unit].ahb_base +
+ iommu->iommu_reg_list[reg].reg_offset;
+}
+
+static int kgsl_iommu_init(struct kgsl_mmu *mmu)
+{
+ /*
+ * intialize device mmu
+ *
+ * call this with the global lock held
+ */
+ int status = 0;
+ struct kgsl_iommu *iommu;
+
+ atomic_set(&mmu->fault, 0);
+ iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
+ if (!iommu) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(struct kgsl_iommu));
+ return -ENOMEM;
+ }
+
+ mmu->priv = iommu;
+ status = kgsl_get_iommu_ctxt(mmu);
+ if (status)
+ goto done;
+ status = kgsl_set_register_map(mmu);
+ if (status)
+ goto done;
+
+ /*
+ * IOMMU-v1 requires hardware halt support to do in stream
+ * pagetable switching. This check assumes that if there are
+ * multiple units, they will be matching hardware.
+ */
+ /* name mismatch fixup between v0 and v1 here */
+ mmu->pt_per_process = KGSL_MMU_USE_PER_PROCESS_PT &&
+ (msm_soc_version_supports_iommu_v1() ||
+ iommu->iommu_units[0].iommu_halt_enable);
+
+ /*
+ * For IOMMU per-process pagetables, the allocatable range
+ * and the kernel global range must both be outside
+ * the userspace address range. There is a 1Mb gap
+ * between these address ranges to make overrun
+ * detection easier.
+ * For the shared pagetable case use 2GB and because
+ * mirroring the CPU address space is not possible and
+ * we're better off with extra room.
+ */
+ if (mmu->pt_per_process) {
+#ifndef CONFIG_MSM_KGSL_CFF_DUMP
+ mmu->pt_base = PAGE_OFFSET;
+ mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE
+ - kgsl_mmu_get_base_addr(mmu) - SZ_1M;
+ mmu->use_cpu_map = true;
+#else
+ mmu->pt_base = KGSL_PAGETABLE_BASE;
+ mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE +
+ KGSL_IOMMU_GLOBAL_MEM_SIZE -
+ KGSL_PAGETABLE_BASE;
+ mmu->use_cpu_map = false;
+#endif
+ } else {
+ mmu->pt_base = KGSL_PAGETABLE_BASE;
+#ifndef CONFIG_MSM_KGSL_CFF_DUMP
+ mmu->pt_size = SZ_2G;
+#else
+ mmu->pt_size = KGSL_IOMMU_GLOBAL_MEM_BASE +
+ KGSL_IOMMU_GLOBAL_MEM_SIZE -
+ KGSL_PAGETABLE_BASE;
+#endif
+ mmu->use_cpu_map = false;
+ }
+
+ status = kgsl_iommu_init_sync_lock(mmu);
+ if (status)
+ goto done;
+
+ iommu->iommu_reg_list = kgsl_iommuv0_reg;
+ iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V0;
+
+ /*
+ * Due to not bringing in the iommu rename, iommu_v1 is
+ * actually iommu_v0. Keep our internal representation
+ * constant, but our interface with iommu drive needs the
+ * correct vixes
+ */
+ if (msm_soc_version_supports_iommu_v1()) {
+ iommu->iommu_reg_list = kgsl_iommuv0_reg;
+ iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V0;
+ } else {
+ iommu->iommu_reg_list = kgsl_iommuv1_reg;
+ iommu->ctx_offset = KGSL_IOMMU_CTX_OFFSET_V1;
+ }
+
+ /* A nop is required in an indirect buffer when switching
+ * pagetables in-stream */
+ kgsl_sharedmem_writel(mmu->device, &mmu->setstate_memory,
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET,
+ cp_nop_packet(1));
+
+ if (cpu_is_msm8960()) {
+ /*
+ * 8960 doesn't have a second context bank, so the IOMMU
+ * registers must be mapped into every pagetable.
+ */
+ iommu_ops.mmu_setup_pt = kgsl_iommu_setup_regs;
+ iommu_ops.mmu_cleanup_pt = kgsl_iommu_cleanup_regs;
+ }
+
+ if (kgsl_guard_page == NULL) {
+ kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
+ __GFP_HIGHMEM);
+ if (kgsl_guard_page == NULL) {
+ status = -ENOMEM;
+ goto done;
+ }
+ }
+
+ dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
+ __func__);
+done:
+ if (status) {
+ kfree(iommu);
+ mmu->priv = NULL;
+ }
+ return status;
+}
+
+/*
+ * kgsl_iommu_setup_defaultpagetable - Setup the initial defualtpagetable
+ * for iommu. This function is only called once during first start, successive
+ * start do not call this funciton.
+ * @mmu - Pointer to mmu structure
+ *
+ * Create the initial defaultpagetable and setup the iommu mappings to it
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
+{
+ int status = 0;
+
+ /* If chip is not 8960 then we use the 2nd context bank for pagetable
+ * switching on the 3D side for which a separate table is allocated */
+ if (!cpu_is_msm8960() && msm_soc_version_supports_iommu_v1()) {
+ mmu->priv_bank_table =
+ kgsl_mmu_getpagetable(mmu,
+ KGSL_MMU_PRIV_BANK_TABLE_NAME);
+ if (mmu->priv_bank_table == NULL) {
+ status = -ENOMEM;
+ goto err;
+ }
+ status = kgsl_iommu_setup_regs(mmu, mmu->priv_bank_table);
+ if (status)
+ goto err;
+ }
+ mmu->defaultpagetable = kgsl_mmu_getpagetable(mmu, KGSL_MMU_GLOBAL_PT);
+ /* Return error if the default pagetable doesn't exist */
+ if (mmu->defaultpagetable == NULL) {
+ status = -ENOMEM;
+ goto err;
+ }
+ return status;
+err:
+ if (mmu->priv_bank_table) {
+ kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
+ kgsl_mmu_putpagetable(mmu->priv_bank_table);
+ mmu->priv_bank_table = NULL;
+ }
+ if (mmu->defaultpagetable) {
+ kgsl_mmu_putpagetable(mmu->defaultpagetable);
+ mmu->defaultpagetable = NULL;
+ }
+ return status;
+}
+
+/*
+ * kgsl_iommu_lock_rb_in_tlb - Allocates tlb entries and locks the
+ * virtual to physical address translation of ringbuffer for 3D
+ * device into tlb.
+ * @mmu - Pointer to mmu structure
+ *
+ * Return - void
+ */
+static void kgsl_iommu_lock_rb_in_tlb(struct kgsl_mmu *mmu)
+{
+ struct kgsl_device *device = mmu->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb;
+ struct kgsl_iommu *iommu = mmu->priv;
+ unsigned int num_tlb_entries;
+ unsigned int tlblkcr = 0;
+ unsigned int v2pxx = 0;
+ unsigned int vaddr = 0;
+ int i, j, k, l;
+
+ if (!iommu->sync_lock_initialized)
+ return;
+
+ rb = &adreno_dev->ringbuffer;
+ num_tlb_entries = rb->buffer_desc.size / PAGE_SIZE;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ tlblkcr = 0;
+ if (cpu_is_msm8960())
+ tlblkcr |= ((num_tlb_entries &
+ KGSL_IOMMU_TLBLKCR_FLOOR_MASK) <<
+ KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT);
+ else
+ tlblkcr |= (((num_tlb_entries *
+ iommu_unit->dev_count) &
+ KGSL_IOMMU_TLBLKCR_FLOOR_MASK) <<
+ KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT);
+ /* Do not invalidate locked entries on tlbiall flush */
+ tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_TLBIALLCFG_MASK)
+ << KGSL_IOMMU_TLBLKCR_TLBIALLCFG_SHIFT);
+ tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_MASK)
+ << KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_SHIFT);
+ tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_TLBIVAACFG_MASK)
+ << KGSL_IOMMU_TLBLKCR_TLBIVAACFG_SHIFT);
+ /* Enable tlb locking */
+ tlblkcr |= ((1 & KGSL_IOMMU_TLBLKCR_LKE_MASK)
+ << KGSL_IOMMU_TLBLKCR_LKE_SHIFT);
+ KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit,
+ iommu_unit->dev[j].ctx_id,
+ TLBLKCR, tlblkcr);
+ }
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ /* skip locking entries for private bank on 8960 */
+ if (cpu_is_msm8960() && KGSL_IOMMU_CONTEXT_PRIV == j)
+ continue;
+ /* Lock the ringbuffer virtual address into tlb */
+ vaddr = rb->buffer_desc.gpuaddr;
+ for (k = 0; k < num_tlb_entries; k++) {
+ v2pxx = 0;
+ v2pxx |= (((k + j * num_tlb_entries) &
+ KGSL_IOMMU_V2PXX_INDEX_MASK)
+ << KGSL_IOMMU_V2PXX_INDEX_SHIFT);
+ v2pxx |= vaddr & (KGSL_IOMMU_V2PXX_VA_MASK <<
+ KGSL_IOMMU_V2PXX_VA_SHIFT);
+
+ KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit,
+ iommu_unit->dev[j].ctx_id,
+ V2PUR, v2pxx);
+ mb();
+ vaddr += PAGE_SIZE;
+ for (l = 0; l < iommu_unit->dev_count; l++) {
+ tlblkcr = KGSL_IOMMU_GET_CTX_REG(iommu,
+ iommu_unit,
+ iommu_unit->dev[l].ctx_id,
+ TLBLKCR);
+ mb();
+ tlblkcr &=
+ ~(KGSL_IOMMU_TLBLKCR_VICTIM_MASK
+ << KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT);
+ tlblkcr |= (((k + 1 +
+ (j * num_tlb_entries)) &
+ KGSL_IOMMU_TLBLKCR_VICTIM_MASK) <<
+ KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT);
+ KGSL_IOMMU_SET_CTX_REG(iommu,
+ iommu_unit,
+ iommu_unit->dev[l].ctx_id,
+ TLBLKCR, tlblkcr);
+ }
+ }
+ }
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ tlblkcr = KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit,
+ iommu_unit->dev[j].ctx_id,
+ TLBLKCR);
+ mb();
+ /* Disable tlb locking */
+ tlblkcr &= ~(KGSL_IOMMU_TLBLKCR_LKE_MASK
+ << KGSL_IOMMU_TLBLKCR_LKE_SHIFT);
+ KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit,
+ iommu_unit->dev[j].ctx_id, TLBLKCR, tlblkcr);
+ }
+ }
+}
+
+static int kgsl_iommu_start(struct kgsl_mmu *mmu)
+{
+ struct kgsl_device *device = mmu->device;
+ int status;
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j;
+ int sctlr_val = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(mmu->device);
+
+ if (mmu->flags & KGSL_FLAGS_STARTED)
+ return 0;
+
+ if (mmu->defaultpagetable == NULL) {
+ status = kgsl_iommu_setup_defaultpagetable(mmu);
+ if (status)
+ return -ENOMEM;
+
+ /* Initialize the sync lock between GPU and CPU */
+ if (msm_soc_version_supports_iommu_v1() &&
+ (device->id == KGSL_DEVICE_3D0))
+ kgsl_iommu_init_sync_lock(mmu);
+ }
+ status = kgsl_iommu_start_sync_lock(mmu);
+ if (status)
+ return status;
+
+ /* We use the GPU MMU to control access to IOMMU registers on 8960 with
+ * a225, hence we still keep the MMU active on 8960 */
+ if (cpu_is_msm8960() && KGSL_DEVICE_3D0 == mmu->device->id) {
+ struct kgsl_mh *mh = &(mmu->device->mh);
+ BUG_ON(iommu->iommu_units[0].reg_map.gpuaddr != 0 &&
+ mh->mpu_base > iommu->iommu_units[0].reg_map.gpuaddr);
+ kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
+
+ kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
+ mh->mpu_base + mh->mpu_range);
+ }
+
+ mmu->hwpagetable = mmu->defaultpagetable;
+
+ status = kgsl_attach_pagetable_iommu_domain(mmu);
+ if (status) {
+ mmu->hwpagetable = NULL;
+ goto done;
+ }
+ status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ if (status) {
+ KGSL_CORE_ERR("clk enable failed\n");
+ goto done;
+ }
+ status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
+ if (status) {
+ KGSL_CORE_ERR("clk enable failed\n");
+ goto done;
+ }
+ /* Get the lsb value of pagetables set in the IOMMU ttbr0 register as
+ * that value should not change when we change pagetables, so while
+ * changing pagetables we can use this lsb value of the pagetable w/o
+ * having to read it again
+ */
+ msm_iommu_lock();
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+
+ /*
+ * For IOMMU V1 do not halt IOMMU on pagefault if
+ * FT pagefault policy is set accordingly
+ */
+ if ((!msm_soc_version_supports_iommu_v1()) &&
+ (!(adreno_dev->ft_pf_policy &
+ KGSL_FT_PAGEFAULT_GPUHALT_ENABLE))) {
+ sctlr_val = KGSL_IOMMU_GET_CTX_REG(iommu,
+ iommu_unit,
+ iommu_unit->dev[j].ctx_id,
+ SCTLR);
+ sctlr_val |= (0x1 <<
+ KGSL_IOMMU_SCTLR_HUPCF_SHIFT);
+ KGSL_IOMMU_SET_CTX_REG(iommu,
+ iommu_unit,
+ iommu_unit->dev[j].ctx_id,
+ SCTLR, sctlr_val);
+ }
+ if (sizeof(phys_addr_t) > sizeof(unsigned long)) {
+ iommu_unit->dev[j].default_ttbr0 =
+ KGSL_IOMMU_GET_CTX_REG_LL(iommu,
+ iommu_unit,
+ iommu_unit->dev[j].ctx_id,
+ TTBR0);
+ } else {
+ iommu_unit->dev[j].default_ttbr0 =
+ KGSL_IOMMU_GET_CTX_REG(iommu,
+ iommu_unit,
+ iommu_unit->dev[j].ctx_id,
+ TTBR0);
+ }
+ }
+ }
+ kgsl_iommu_lock_rb_in_tlb(mmu);
+ msm_iommu_unlock();
+
+ /* For complete CFF */
+ kgsl_cffdump_setmem(mmu->device, mmu->setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET,
+ cp_nop_packet(1), sizeof(unsigned int));
+
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ mmu->flags |= KGSL_FLAGS_STARTED;
+
+done:
+ if (status) {
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_detach_pagetable_iommu_domain(mmu);
+ }
+ return status;
+}
+
+static int
+kgsl_iommu_unmap(struct kgsl_pagetable *pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int *tlb_flags)
+{
+ int ret;
+ unsigned int range = memdesc->size;
+ struct kgsl_iommu_pt *iommu_pt = pt->priv;
+
+ /* All GPU addresses as assigned are page aligned, but some
+ functions purturb the gpuaddr with an offset, so apply the
+ mask here to make sure we have the right address */
+
+ unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
+
+ if (range == 0 || gpuaddr == 0)
+ return 0;
+
+ if (kgsl_memdesc_has_guard_page(memdesc))
+ range += PAGE_SIZE;
+
+ ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
+ if (ret)
+ KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
+ "with err: %d\n", iommu_pt->domain, gpuaddr,
+ range, ret);
+
+ /*
+ * Flushing only required if per process pagetables are used. With
+ * global case, flushing will happen inside iommu_map function
+ */
+ if (!ret && kgsl_mmu_is_perprocess(pt->mmu))
+ *tlb_flags = UINT_MAX;
+ return 0;
+}
+
+static int
+kgsl_iommu_map(struct kgsl_pagetable *pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int protflags,
+ unsigned int *tlb_flags)
+{
+ int ret;
+ unsigned int iommu_virt_addr;
+ struct kgsl_iommu_pt *iommu_pt = pt->priv;
+ int size = memdesc->size;
+
+ BUG_ON(NULL == iommu_pt);
+
+ iommu_virt_addr = memdesc->gpuaddr;
+
+ ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg,
+ size, protflags);
+ if (ret) {
+ KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %x) err: %d\n",
+ iommu_pt->domain, iommu_virt_addr, memdesc->sg, size,
+ protflags, ret);
+ return ret;
+ }
+ if (kgsl_memdesc_has_guard_page(memdesc)) {
+ ret = iommu_map(iommu_pt->domain, iommu_virt_addr + size,
+ page_to_phys(kgsl_guard_page), PAGE_SIZE,
+ protflags & ~IOMMU_WRITE);
+ if (ret) {
+ KGSL_CORE_ERR("iommu_map(%p, %x, %x, %x) err: %d\n",
+ iommu_pt->domain, iommu_virt_addr + size,
+ page_to_phys(kgsl_guard_page),
+ protflags & ~IOMMU_WRITE,
+ ret);
+ /* cleanup the partial mapping */
+ iommu_unmap_range(iommu_pt->domain, iommu_virt_addr,
+ size);
+ }
+ }
+ return ret;
+}
+
+void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j;
+
+ if (atomic_read(&mmu->fault)) {
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit =
+ &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ if (iommu_unit->dev[j].fault) {
+ kgsl_iommu_enable_clk(mmu, j);
+ _iommu_lock();
+ KGSL_IOMMU_SET_CTX_REG(iommu,
+ iommu_unit,
+ iommu_unit->dev[j].ctx_id,
+ RESUME, 1);
+ KGSL_IOMMU_SET_CTX_REG(iommu,
+ iommu_unit,
+ iommu_unit->dev[j].ctx_id,
+ FSR, 0);
+ _iommu_unlock();
+ iommu_unit->dev[j].fault = 0;
+ }
+ }
+ }
+ atomic_set(&mmu->fault, 0);
+ }
+}
+
+
+static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ /*
+ * stop device mmu
+ *
+ * call this with the global lock held
+ */
+ if (mmu->flags & KGSL_FLAGS_STARTED) {
+ /* detach iommu attachment */
+ kgsl_detach_pagetable_iommu_domain(mmu);
+ mmu->hwpagetable = NULL;
+
+ mmu->flags &= ~KGSL_FLAGS_STARTED;
+
+ kgsl_iommu_pagefault_resume(mmu);
+ }
+ /* switch off MMU clocks and cancel any events it has queued */
+ iommu->clk_event_queued = false;
+ kgsl_cancel_events(mmu->device, mmu);
+ kgsl_iommu_disable_clk(mmu);
+}
+
+static int kgsl_iommu_close(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i;
+
+ if (mmu->priv_bank_table != NULL) {
+ kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
+ kgsl_mmu_putpagetable(mmu->priv_bank_table);
+ }
+
+ if (mmu->defaultpagetable != NULL)
+ kgsl_mmu_putpagetable(mmu->defaultpagetable);
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_memdesc *reg_map = &iommu->iommu_units[i].reg_map;
+
+ if (reg_map->hostptr)
+ iounmap(reg_map->hostptr);
+ kgsl_sg_free(reg_map->sg, reg_map->sglen);
+ reg_map->priv &= ~KGSL_MEMDESC_GLOBAL;
+ }
+ /* clear IOMMU GPU CPU sync structures */
+ kgsl_sg_free(iommu->sync_lock_desc.sg, iommu->sync_lock_desc.sglen);
+ memset(&iommu->sync_lock_desc, 0, sizeof(iommu->sync_lock_desc));
+ iommu->sync_lock_vars = NULL;
+
+ kfree(iommu);
+
+ if (kgsl_guard_page != NULL) {
+ __free_page(kgsl_guard_page);
+ kgsl_guard_page = NULL;
+ }
+
+ return 0;
+}
+
+static phys_addr_t
+kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
+{
+ phys_addr_t pt_base;
+ struct kgsl_iommu *iommu = mmu->priv;
+ /* We cannot enable or disable the clocks in interrupt context, this
+ function is called from interrupt context if there is an axi error */
+ if (in_interrupt())
+ return 0;
+ /* Return the current pt base by reading IOMMU pt_base register */
+ kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ pt_base = KGSL_IOMMU_GET_CTX_REG(iommu, (&iommu->iommu_units[0]),
+ KGSL_IOMMU_CONTEXT_USER,
+ TTBR0);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+}
+
+/*
+ * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
+ * of the primary context bank
+ * @mmu - Pointer to mmu structure
+ * @flags - Flags indicating whether pagetable has to chnage or tlb is to be
+ * flushed or both
+ *
+ * Based on flags set the new pagetable fo the IOMMU unit or flush it's tlb or
+ * do both by doing direct register writes to the IOMMu registers through the
+ * cpu
+ * Return - void
+ */
+static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
+ uint32_t flags)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int temp;
+ int i;
+ int ret = 0;
+ unsigned int pt_base = kgsl_iommu_get_pt_base_addr(mmu,
+ mmu->hwpagetable);
+ phys_addr_t pt_val;
+
+ ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+
+ if (ret) {
+ KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
+ return ret;
+ }
+
+ /* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
+ /* naming mismatch for iommu */
+ if (msm_soc_version_supports_iommu_v1()) {
+ ret = kgsl_idle(mmu->device);
+ if (ret)
+ return ret;
+ }
+
+ /* Acquire GPU-CPU sync Lock here */
+ msm_iommu_lock();
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ /* naming mismatch for iommu */
+ if (!msm_soc_version_supports_iommu_v1()) {
+ ret = kgsl_idle(mmu->device);
+ if (ret)
+ goto unlock;
+ }
+ for (i = 0; i < iommu->unit_count; i++) {
+ /* get the lsb value which should not change when
+ * changing ttbr0 */
+ pt_val = kgsl_iommu_get_default_ttbr0(mmu, i,
+ KGSL_IOMMU_CONTEXT_USER);
+
+ pt_base &= KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+ pt_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
+ pt_val |= pt_base;
+ if (sizeof(phys_addr_t) > sizeof(unsigned long)) {
+ KGSL_IOMMU_SET_CTX_REG_LL(iommu,
+ (&iommu->iommu_units[i]),
+ KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);
+ } else {
+ KGSL_IOMMU_SET_CTX_REG(iommu,
+ (&iommu->iommu_units[i]),
+ KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);
+ }
+
+ mb();
+ temp = KGSL_IOMMU_GET_CTX_REG(iommu,
+ (&iommu->iommu_units[i]),
+ KGSL_IOMMU_CONTEXT_USER, TTBR0);
+ }
+ }
+ /* Flush tlb */
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ unsigned long wait_for_flush;
+ for (i = 0; i < iommu->unit_count; i++) {
+ KGSL_IOMMU_SET_CTX_REG(iommu, (&iommu->iommu_units[i]),
+ KGSL_IOMMU_CONTEXT_USER, TLBIALL, 1);
+ mb();
+ /*
+ * Wait for flush to complete by polling the flush
+ * status bit of TLBSTATUS register for not more than
+ * 2 s. After 2s just exit, at that point the SMMU h/w
+ * may be stuck and will eventually cause GPU to hang
+ * or bring the system down.
+ */
+ /* naming mismatch */
+ if (!msm_soc_version_supports_iommu_v1()) {
+ wait_for_flush = jiffies +
+ msecs_to_jiffies(2000);
+ KGSL_IOMMU_SET_CTX_REG(iommu,
+ (&iommu->iommu_units[i]),
+ KGSL_IOMMU_CONTEXT_USER, TLBSYNC, 0);
+ while (KGSL_IOMMU_GET_CTX_REG(iommu,
+ (&iommu->iommu_units[i]),
+ KGSL_IOMMU_CONTEXT_USER, TLBSTATUS) &
+ (KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE)) {
+ if (time_after(jiffies,
+ wait_for_flush)) {
+ KGSL_DRV_ERR(mmu->device,
+ "Wait limit reached for IOMMU tlb flush\n");
+ break;
+ }
+ cpu_relax();
+ }
+ }
+ }
+ }
+unlock:
+
+ /* Release GPU-CPU sync Lock here */
+ msm_iommu_unlock();
+
+ /* Disable smmu clock */
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ return ret;
+}
+
+/*
+ * kgsl_iommu_get_reg_gpuaddr - Returns the gpu address of IOMMU regsiter
+ * @mmu - Pointer to mmu structure
+ * @iommu_unit - The iommu unit for which base address is requested
+ * @ctx_id - The context ID of the IOMMU ctx
+ * @reg - The register for which address is required
+ *
+ * Return - The gpu address of register which can be used in type3 packet
+ */
+static unsigned int kgsl_iommu_get_reg_gpuaddr(struct kgsl_mmu *mmu,
+ int iommu_unit, int ctx_id, int reg)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (KGSL_IOMMU_GLOBAL_BASE == reg)
+ return iommu->iommu_units[iommu_unit].reg_map.gpuaddr;
+
+ if (iommu->iommu_reg_list[reg].ctx_reg)
+ return iommu->iommu_units[iommu_unit].reg_map.gpuaddr +
+ iommu->iommu_reg_list[reg].reg_offset +
+ (ctx_id << KGSL_IOMMU_CTX_SHIFT) + iommu->ctx_offset;
+ else
+ return iommu->iommu_units[iommu_unit].reg_map.gpuaddr +
+ iommu->iommu_reg_list[reg].reg_offset;
+}
+/*
+ * kgsl_iommu_hw_halt_supported - Returns whether IOMMU halt command is
+ * supported
+ * @mmu - Pointer to mmu structure
+ * @iommu_unit - The iommu unit for which the property is requested
+ */
+static int kgsl_iommu_hw_halt_supported(struct kgsl_mmu *mmu, int iommu_unit)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ return iommu->iommu_units[iommu_unit].iommu_halt_enable;
+}
+
+static int kgsl_iommu_get_num_iommu_units(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ return iommu->unit_count;
+}
+
+struct kgsl_mmu_ops iommu_ops = {
+ .mmu_init = kgsl_iommu_init,
+ .mmu_close = kgsl_iommu_close,
+ .mmu_start = kgsl_iommu_start,
+ .mmu_stop = kgsl_iommu_stop,
+ .mmu_setstate = kgsl_iommu_setstate,
+ .mmu_device_setstate = kgsl_iommu_default_setstate,
+ .mmu_pagefault = NULL,
+ .mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
+ .mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
+ .mmu_enable_clk = kgsl_iommu_enable_clk,
+ .mmu_disable_clk = kgsl_iommu_disable_clk,
+ .mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
+ .mmu_get_default_ttbr0 = kgsl_iommu_get_default_ttbr0,
+ .mmu_get_reg_gpuaddr = kgsl_iommu_get_reg_gpuaddr,
+ .mmu_get_reg_ahbaddr = kgsl_iommu_get_reg_ahbaddr,
+ .mmu_get_num_iommu_units = kgsl_iommu_get_num_iommu_units,
+ .mmu_pt_equal = kgsl_iommu_pt_equal,
+ .mmu_get_pt_base_addr = kgsl_iommu_get_pt_base_addr,
+ .mmu_hw_halt_supported = kgsl_iommu_hw_halt_supported,
+ /* These callbacks will be set on some chipsets */
+ .mmu_setup_pt = NULL,
+ .mmu_cleanup_pt = NULL,
+ .mmu_sync_lock = kgsl_iommu_sync_lock,
+ .mmu_sync_unlock = kgsl_iommu_sync_unlock,
+};
+
+struct kgsl_mmu_pt_ops iommu_pt_ops = {
+ .mmu_map = kgsl_iommu_map,
+ .mmu_unmap = kgsl_iommu_unmap,
+ .mmu_create_pagetable = kgsl_iommu_create_pagetable,
+ .mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
+};
diff --git a/drivers/gpu/msm2/kgsl_iommu.h b/drivers/gpu/msm2/kgsl_iommu.h
new file mode 100644
index 0000000..7dca40e
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_iommu.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_IOMMU_H
+#define __KGSL_IOMMU_H
+
+#include <mach/iommu.h>
+
+#define KGSL_IOMMU_CTX_OFFSET_V0 0
+#define KGSL_IOMMU_CTX_OFFSET_V1 0x8000
+#define KGSL_IOMMU_CTX_SHIFT 12
+
+/* TLBLKCR fields */
+#define KGSL_IOMMU_TLBLKCR_LKE_MASK 0x00000001
+#define KGSL_IOMMU_TLBLKCR_LKE_SHIFT 0
+#define KGSL_IOMMU_TLBLKCR_TLBIALLCFG_MASK 0x00000001
+#define KGSL_IOMMU_TLBLKCR_TLBIALLCFG_SHIFT 1
+#define KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_MASK 0x00000001
+#define KGSL_IOMMU_TLBLKCR_TLBIASIDCFG_SHIFT 2
+#define KGSL_IOMMU_TLBLKCR_TLBIVAACFG_MASK 0x00000001
+#define KGSL_IOMMU_TLBLKCR_TLBIVAACFG_SHIFT 3
+#define KGSL_IOMMU_TLBLKCR_FLOOR_MASK 0x000000FF
+#define KGSL_IOMMU_TLBLKCR_FLOOR_SHIFT 8
+#define KGSL_IOMMU_TLBLKCR_VICTIM_MASK 0x000000FF
+#define KGSL_IOMMU_TLBLKCR_VICTIM_SHIFT 16
+
+/* V2PXX fields */
+#define KGSL_IOMMU_V2PXX_INDEX_MASK 0x000000FF
+#define KGSL_IOMMU_V2PXX_INDEX_SHIFT 0
+#define KGSL_IOMMU_V2PXX_VA_MASK 0x000FFFFF
+#define KGSL_IOMMU_V2PXX_VA_SHIFT 12
+
+/* FSYNR1 V0 fields */
+#define KGSL_IOMMU_FSYNR1_AWRITE_MASK 0x00000001
+#define KGSL_IOMMU_FSYNR1_AWRITE_SHIFT 8
+/* FSYNR0 V1 fields */
+#define KGSL_IOMMU_V1_FSYNR0_WNR_MASK 0x00000001
+#define KGSL_IOMMU_V1_FSYNR0_WNR_SHIFT 4
+
+/* TTBR0 register fields */
+#ifdef CONFIG_ARM_LPAE
+#define KGSL_IOMMU_CTX_TTBR0_ADDR_MASK_LPAE 0x000000FFFFFFFFE0ULL
+#define KGSL_IOMMU_CTX_TTBR0_ADDR_MASK KGSL_IOMMU_CTX_TTBR0_ADDR_MASK_LPAE
+#else
+#define KGSL_IOMMU_CTX_TTBR0_ADDR_MASK 0xFFFFC000
+#endif
+
+/* TLBSTATUS register fields */
+#define KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE BIT(0)
+
+/* IMPLDEF_MICRO_MMU_CTRL register fields */
+#define KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT BIT(2)
+#define KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE BIT(3)
+
+/* SCTLR fields */
+#define KGSL_IOMMU_SCTLR_HUPCF_SHIFT 8
+
+enum kgsl_iommu_reg_map {
+ KGSL_IOMMU_GLOBAL_BASE = 0,
+ KGSL_IOMMU_CTX_SCTLR,
+ KGSL_IOMMU_CTX_TTBR0,
+ KGSL_IOMMU_CTX_TTBR1,
+ KGSL_IOMMU_CTX_FSR,
+ KGSL_IOMMU_CTX_TLBIALL,
+ KGSL_IOMMU_CTX_RESUME,
+ KGSL_IOMMU_CTX_TLBLKCR,
+ KGSL_IOMMU_CTX_V2PUR,
+ KGSL_IOMMU_CTX_FSYNR0,
+ KGSL_IOMMU_CTX_FSYNR1,
+ KGSL_IOMMU_CTX_TLBSYNC,
+ KGSL_IOMMU_CTX_TLBSTATUS,
+ KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL,
+ KGSL_IOMMU_REG_MAX
+};
+
+struct kgsl_iommu_register_list {
+ unsigned int reg_offset;
+ int ctx_reg;
+};
+
+/*
+ * Max number of iommu units that the gpu core can have
+ * On APQ8064, KGSL can control a maximum of 2 IOMMU units.
+ */
+#define KGSL_IOMMU_MAX_UNITS 2
+
+/* Max number of iommu contexts per IOMMU unit */
+#define KGSL_IOMMU_MAX_DEVS_PER_UNIT 2
+
+/* Macros to read/write IOMMU registers */
+#define KGSL_IOMMU_SET_CTX_REG_LL(iommu, iommu_unit, ctx, REG, val) \
+ writell_relaxed(val, \
+ iommu_unit->reg_map.hostptr + \
+ iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
+ (ctx << KGSL_IOMMU_CTX_SHIFT) + \
+ iommu->ctx_offset)
+
+#define KGSL_IOMMU_GET_CTX_REG_LL(iommu, iommu_unit, ctx, REG) \
+ readl_relaxed( \
+ iommu_unit->reg_map.hostptr + \
+ iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
+ (ctx << KGSL_IOMMU_CTX_SHIFT) + \
+ iommu->ctx_offset)
+
+#define KGSL_IOMMU_SET_CTX_REG(iommu, iommu_unit, ctx, REG, val) \
+ writel_relaxed(val, \
+ iommu_unit->reg_map.hostptr + \
+ iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
+ (ctx << KGSL_IOMMU_CTX_SHIFT) + \
+ iommu->ctx_offset)
+
+#define KGSL_IOMMU_GET_CTX_REG(iommu, iommu_unit, ctx, REG) \
+ readl_relaxed( \
+ iommu_unit->reg_map.hostptr + \
+ iommu->iommu_reg_list[KGSL_IOMMU_CTX_##REG].reg_offset +\
+ (ctx << KGSL_IOMMU_CTX_SHIFT) + \
+ iommu->ctx_offset)
+
+/* Gets the lsb value of pagetable */
+#define KGSL_IOMMMU_PT_LSB(iommu, pt_val) \
+ (pt_val & ~(KGSL_IOMMU_CTX_TTBR0_ADDR_MASK))
+
+/* offset at which a nop command is placed in setstate_memory */
+#define KGSL_IOMMU_SETSTATE_NOP_OFFSET 1024
+
+/*
+ * struct kgsl_iommu_device - Structure holding data about iommu contexts
+ * @dev: Device pointer to iommu context
+ * @attached: Indicates whether this iommu context is presently attached to
+ * a pagetable/domain or not
+ * @default_ttbr0: The TTBR0 value set by iommu driver on start up
+ * @ctx_id: This iommu units context id. It can be either 0 or 1
+ * @clk_enabled: If set indicates that iommu clocks of this iommu context
+ * are on, else the clocks are off
+ * fault: Flag when set indicates that this iommu device has caused a page
+ * fault
+ */
+struct kgsl_iommu_device {
+ struct device *dev;
+ bool attached;
+ phys_addr_t default_ttbr0;
+ enum kgsl_iommu_context_id ctx_id;
+ bool clk_enabled;
+ struct kgsl_device *kgsldev;
+ int fault;
+};
+
+/*
+ * struct kgsl_iommu_unit - Structure holding data about iommu units. An IOMMU
+ * units is basically a separte IOMMU h/w block with it's own IOMMU contexts
+ * @dev: Pointer to array of struct kgsl_iommu_device which has information
+ * about the IOMMU contexts under this IOMMU unit
+ * @dev_count: Number of IOMMU contexts that are valid in the previous feild
+ * @reg_map: Memory descriptor which holds the mapped address of this IOMMU
+ * units register range
+ * @ahb_base - The base address from where IOMMU registers can be accesed from
+ * ahb bus
+ * @iommu_halt_enable: Valid only on IOMMU-v1, when set indicates that the iommu
+ * unit supports halting of the IOMMU, which can be enabled while programming
+ * the IOMMU registers for synchronization
+ */
+struct kgsl_iommu_unit {
+ struct kgsl_iommu_device dev[KGSL_IOMMU_MAX_DEVS_PER_UNIT];
+ unsigned int dev_count;
+ struct kgsl_memdesc reg_map;
+ unsigned int ahb_base;
+ int iommu_halt_enable;
+};
+
+/*
+ * struct kgsl_iommu - Structure holding iommu data for kgsl driver
+ * @dev: Array of kgsl_iommu_device which contain information about
+ * iommu contexts owned by graphics cores
+ * @unit_count: Number of IOMMU units that are available for this
+ * instance of the IOMMU driver
+ * @iommu_last_cmd_ts: The timestamp of last command submitted that
+ * aceeses iommu registers
+ * @clk_event_queued: Indicates whether an event to disable clocks
+ * is already queued or not
+ * @device: Pointer to kgsl device
+ * @ctx_offset: The context offset to be added to base address when
+ * accessing IOMMU registers
+ * @iommu_reg_list: List of IOMMU registers { offset, map, shift } array
+ * @sync_lock_vars: Pointer to the IOMMU spinlock for serializing access to the
+ * IOMMU registers
+ * @sync_lock_desc: GPU Memory descriptor for the memory containing the
+ * spinlocks
+ * @sync_lock_offset - The page offset within a page at which the sync
+ * variables are located
+ * @sync_lock_initialized: True if the sync_lock feature is enabled
+ */
+struct kgsl_iommu {
+ struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
+ unsigned int unit_count;
+ unsigned int iommu_last_cmd_ts;
+ bool clk_event_queued;
+ struct kgsl_device *device;
+ unsigned int ctx_offset;
+ struct kgsl_iommu_register_list *iommu_reg_list;
+ struct remote_iommu_petersons_spinlock *sync_lock_vars;
+ struct kgsl_memdesc sync_lock_desc;
+ unsigned int sync_lock_offset;
+ bool sync_lock_initialized;
+};
+
+/*
+ * struct kgsl_iommu_pt - Iommu pagetable structure private to kgsl driver
+ * @domain: Pointer to the iommu domain that contains the iommu pagetable
+ * @iommu: Pointer to iommu structure
+ */
+struct kgsl_iommu_pt {
+ struct iommu_domain *domain;
+ struct kgsl_iommu *iommu;
+};
+
+#endif
diff --git a/drivers/gpu/msm2/kgsl_log.h b/drivers/gpu/msm2/kgsl_log.h
new file mode 100644
index 0000000..81a35e0
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_log.h
@@ -0,0 +1,112 @@
+/* Copyright (c) 2002,2008-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_LOG_H
+#define __KGSL_LOG_H
+
+extern unsigned int kgsl_cff_dump_enable;
+
+#define KGSL_LOG_INFO(dev, lvl, fmt, args...) \
+ do { \
+ if ((lvl) >= 6) \
+ dev_info(dev, "|%s| " fmt, \
+ __func__, ##args);\
+ } while (0)
+
+#define KGSL_LOG_WARN(dev, lvl, fmt, args...) \
+ do { \
+ if ((lvl) >= 4) \
+ dev_warn(dev, "|%s| " fmt, \
+ __func__, ##args);\
+ } while (0)
+
+#define KGSL_LOG_ERR(dev, lvl, fmt, args...) \
+ do { \
+ if ((lvl) >= 3) \
+ dev_err(dev, "|%s| " fmt, \
+ __func__, ##args);\
+ } while (0)
+
+#define KGSL_LOG_CRIT(dev, lvl, fmt, args...) \
+ do { \
+ if ((lvl) >= 2) \
+ dev_crit(dev, "|%s| " fmt, \
+ __func__, ##args);\
+ } while (0)
+
+#define KGSL_LOG_POSTMORTEM_WRITE(_dev, fmt, args...) \
+ do { dev_crit(_dev->dev, fmt, ##args); } while (0)
+
+#define KGSL_LOG_DUMP(_dev, fmt, args...) dev_err(_dev->dev, fmt, ##args)
+
+#define KGSL_DEV_ERR_ONCE(_dev, fmt, args...) \
+({ \
+ static bool kgsl_dev_err_once; \
+ \
+ if (!kgsl_dev_err_once) { \
+ kgsl_dev_err_once = true; \
+ dev_crit(_dev->dev, "|%s| " fmt, __func__, ##args); \
+ } \
+})
+
+#define KGSL_DRV_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->drv_log, fmt, ##args)
+
+#define KGSL_CMD_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->cmd_log, fmt, ##args)
+
+#define KGSL_CTXT_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->ctxt_log, fmt, ##args)
+
+#define KGSL_MEM_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->mem_log, fmt, ##args)
+
+#define KGSL_PWR_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
+
+/* Core error messages - these are for core KGSL functions that have
+ no device associated with them (such as memory) */
+
+#define KGSL_CORE_ERR(fmt, args...) \
+pr_err("kgsl: %s: " fmt, __func__, ##args)
+
+#endif /* __KGSL_LOG_H */
diff --git a/drivers/gpu/msm2/kgsl_mmu.c b/drivers/gpu/msm2/kgsl_mmu.c
new file mode 100644
index 0000000..1910a46
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_mmu.c
@@ -0,0 +1,994 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/iommu.h>
+#include <mach/iommu.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_gpummu.h"
+#include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
+#include "adreno.h"
+
+static enum kgsl_mmutype kgsl_mmu_type;
+
+static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
+
+static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
+{
+ int i;
+ struct kgsl_device *device;
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ device = kgsl_driver.devp[i];
+ if (device)
+ device->ftbl->cleanup_pt(device, pt);
+ }
+ /* Only the 3d device needs mmu specific pt entries */
+ device = kgsl_driver.devp[KGSL_DEVICE_3D0];
+ if (device->mmu.mmu_ops->mmu_cleanup_pt != NULL)
+ device->mmu.mmu_ops->mmu_cleanup_pt(&device->mmu, pt);
+
+ return 0;
+}
+
+
+static int kgsl_setup_pt(struct kgsl_pagetable *pt)
+{
+ int i = 0;
+ int status = 0;
+ struct kgsl_device *device;
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ device = kgsl_driver.devp[i];
+ if (device) {
+ status = device->ftbl->setup_pt(device, pt);
+ if (status)
+ goto error_pt;
+ }
+ }
+ /* Only the 3d device needs mmu specific pt entries */
+ device = kgsl_driver.devp[KGSL_DEVICE_3D0];
+ if (device->mmu.mmu_ops->mmu_setup_pt != NULL) {
+ status = device->mmu.mmu_ops->mmu_setup_pt(&device->mmu, pt);
+ if (status) {
+ i = KGSL_DEVICE_MAX - 1;
+ goto error_pt;
+ }
+ }
+ return status;
+error_pt:
+ while (i >= 0) {
+ struct kgsl_device *device = kgsl_driver.devp[i];
+ if (device)
+ device->ftbl->cleanup_pt(device, pt);
+ i--;
+ }
+ return status;
+}
+
+static void kgsl_destroy_pagetable(struct kref *kref)
+{
+ struct kgsl_pagetable *pagetable = container_of(kref,
+ struct kgsl_pagetable, refcount);
+ unsigned long flags;
+
+ spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+ list_del(&pagetable->list);
+ spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
+ pagetable_remove_sysfs_objects(pagetable);
+
+ kgsl_cleanup_pt(pagetable);
+
+ if (pagetable->kgsl_pool)
+ gen_pool_destroy(pagetable->kgsl_pool);
+ if (pagetable->pool)
+ gen_pool_destroy(pagetable->pool);
+
+ pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
+
+ kfree(pagetable);
+}
+
+static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
+{
+ if (pagetable)
+ kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
+}
+
+static struct kgsl_pagetable *
+kgsl_get_pagetable(unsigned long name)
+{
+ struct kgsl_pagetable *pt, *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+ list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+ if (kref_get_unless_zero(&pt->refcount)) {
+ if (pt->name == name) {
+ ret = pt;
+ break;
+ }
+ kref_put(&pt->refcount, kgsl_destroy_pagetable);
+ }
+ }
+
+ spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+ return ret;
+}
+
+static struct kgsl_pagetable *
+_get_pt_from_kobj(struct kobject *kobj)
+{
+ unsigned long ptname;
+
+ if (!kobj)
+ return NULL;
+
+ if (sscanf(kobj->name, "%ld", &ptname) != 1)
+ return NULL;
+
+ return kgsl_get_pagetable(ptname);
+}
+
+static ssize_t
+sysfs_show_entries(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt)
+ ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static ssize_t
+sysfs_show_mapped(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt)
+ ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static ssize_t
+sysfs_show_va_range(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt) {
+ ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
+ kgsl_mmu_get_ptsize(pt->mmu));
+ }
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static ssize_t
+sysfs_show_max_mapped(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt)
+ ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static ssize_t
+sysfs_show_max_entries(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt)
+ ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static struct kobj_attribute attr_entries = {
+ .attr = { .name = "entries", .mode = 0444 },
+ .show = sysfs_show_entries,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_mapped = {
+ .attr = { .name = "mapped", .mode = 0444 },
+ .show = sysfs_show_mapped,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_va_range = {
+ .attr = { .name = "va_range", .mode = 0444 },
+ .show = sysfs_show_va_range,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_max_mapped = {
+ .attr = { .name = "max_mapped", .mode = 0444 },
+ .show = sysfs_show_max_mapped,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_max_entries = {
+ .attr = { .name = "max_entries", .mode = 0444 },
+ .show = sysfs_show_max_entries,
+ .store = NULL,
+};
+
+static struct attribute *pagetable_attrs[] = {
+ &attr_entries.attr,
+ &attr_mapped.attr,
+ &attr_va_range.attr,
+ &attr_max_mapped.attr,
+ &attr_max_entries.attr,
+ NULL,
+};
+
+static struct attribute_group pagetable_attr_group = {
+ .attrs = pagetable_attrs,
+};
+
+static void
+pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
+{
+ if (pagetable->kobj)
+ sysfs_remove_group(pagetable->kobj,
+ &pagetable_attr_group);
+
+ kobject_put(pagetable->kobj);
+}
+
+static int
+pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
+{
+ char ptname[16];
+ int ret = -ENOMEM;
+
+ snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
+ pagetable->kobj = kobject_create_and_add(ptname,
+ kgsl_driver.ptkobj);
+ if (pagetable->kobj == NULL)
+ goto err;
+
+ ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
+
+err:
+ if (ret) {
+ if (pagetable->kobj)
+ kobject_put(pagetable->kobj);
+
+ pagetable->kobj = NULL;
+ }
+
+ return ret;
+}
+
+int
+kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, phys_addr_t pt_base)
+{
+ struct kgsl_pagetable *pt;
+ int ptid = -1;
+
+ if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
+ return KGSL_MMU_GLOBAL_PT;
+ spin_lock(&kgsl_driver.ptlock);
+ list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+ if (kref_get_unless_zero(&pt->refcount)) {
+ if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+ ptid = (int) pt->name;
+ kref_put(&pt->refcount, kgsl_destroy_pagetable);
+ break;
+ }
+ kref_put(&pt->refcount, kgsl_destroy_pagetable);
+ }
+ }
+ spin_unlock(&kgsl_driver.ptlock);
+
+ return ptid;
+}
+EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
+
+unsigned int
+kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, phys_addr_t pt_base,
+ unsigned int addr)
+{
+ struct kgsl_pagetable *pt;
+ unsigned int ret = 0;
+
+ if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
+ return KGSL_MMU_GLOBAL_PT;
+ spin_lock(&kgsl_driver.ptlock);
+ list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+ if (kref_get_unless_zero(&pt->refcount)) {
+ if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
+ if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
+ ret = 1;
+ kref_put(&pt->refcount,
+ kgsl_destroy_pagetable);
+ break;
+ } else {
+ pt->fault_addr =
+ (addr & ~(PAGE_SIZE-1));
+ ret = 0;
+ kref_put(&pt->refcount,
+ kgsl_destroy_pagetable);
+ break;
+ }
+ }
+ kref_put(&pt->refcount, kgsl_destroy_pagetable);
+ }
+ }
+ spin_unlock(&kgsl_driver.ptlock);
+
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);
+
+int kgsl_mmu_init(struct kgsl_device *device)
+{
+ int status = 0;
+ struct kgsl_mmu *mmu = &device->mmu;
+
+ mmu->device = device;
+ status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
+ if (status)
+ return status;
+ kgsl_sharedmem_set(device, &mmu->setstate_memory, 0, 0,
+ mmu->setstate_memory.size);
+
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
+ dev_info(device->dev, "|%s| MMU type set for device is "
+ "NOMMU\n", __func__);
+ goto done;
+ } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
+ mmu->mmu_ops = &gpummu_ops;
+ else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
+ mmu->mmu_ops = &iommu_ops;
+
+ status = mmu->mmu_ops->mmu_init(mmu);
+done:
+ if (status)
+ kgsl_sharedmem_free(&mmu->setstate_memory);
+ return status;
+}
+EXPORT_SYMBOL(kgsl_mmu_init);
+
+int kgsl_mmu_start(struct kgsl_device *device)
+{
+ struct kgsl_mmu *mmu = &device->mmu;
+
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
+ kgsl_regwrite(device, MH_MMU_CONFIG, 0);
+ /* Setup gpuaddr of global mappings */
+ if (!mmu->setstate_memory.gpuaddr)
+ kgsl_setup_pt(NULL);
+ return 0;
+ } else {
+ return mmu->mmu_ops->mmu_start(mmu);
+ }
+}
+EXPORT_SYMBOL(kgsl_mmu_start);
+
+static void mh_axi_error(struct kgsl_device *device, const char* type)
+{
+ unsigned int reg, gpu_err, phys_err;
+ phys_addr_t pt_base;
+
+ kgsl_regread(device, MH_AXI_ERROR, ®);
+ pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
+ /*
+ * Read gpu virtual and physical addresses that
+ * caused the error from the debug data.
+ */
+ kgsl_regwrite(device, MH_DEBUG_CTRL, 44);
+ kgsl_regread(device, MH_DEBUG_DATA, &gpu_err);
+ kgsl_regwrite(device, MH_DEBUG_CTRL, 45);
+ kgsl_regread(device, MH_DEBUG_DATA, &phys_err);
+ KGSL_MEM_CRIT(device,
+ "axi %s error: %08x pt %pa gpu %08x phys %08x\n",
+ type, reg, &pt_base, gpu_err, phys_err);
+}
+
+void kgsl_mh_intrcallback(struct kgsl_device *device)
+{
+ unsigned int status = 0;
+
+ kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
+
+ if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
+ mh_axi_error(device, "read");
+ if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
+ mh_axi_error(device, "write");
+ if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
+ device->mmu.mmu_ops->mmu_pagefault(&device->mmu);
+
+ status &= KGSL_MMU_INT_MASK;
+ kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
+}
+EXPORT_SYMBOL(kgsl_mh_intrcallback);
+
+static struct kgsl_pagetable *
+kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
+ unsigned int name)
+{
+ int status = 0;
+ struct kgsl_pagetable *pagetable = NULL;
+ unsigned long flags;
+ unsigned int ptsize;
+
+ pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
+ if (pagetable == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(struct kgsl_pagetable));
+ return NULL;
+ }
+
+ kref_init(&pagetable->refcount);
+
+ spin_lock_init(&pagetable->lock);
+
+ ptsize = kgsl_mmu_get_ptsize(mmu);
+ pagetable->mmu = mmu;
+ pagetable->name = name;
+ pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
+ pagetable->fault_addr = 0xFFFFFFFF;
+
+ /*
+ * create a separate kgsl pool for IOMMU, global mappings can be mapped
+ * just once from this pool of the defaultpagetable
+ */
+ if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
+ ((KGSL_MMU_GLOBAL_PT == name) ||
+ (KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
+ pagetable->kgsl_pool = gen_pool_create(ilog2(SZ_8K), -1);
+ if (pagetable->kgsl_pool == NULL) {
+ KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
+ ilog2(SZ_8K));
+ goto err_alloc;
+ }
+ if (gen_pool_add(pagetable->kgsl_pool,
+ KGSL_IOMMU_GLOBAL_MEM_BASE,
+ KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
+ KGSL_CORE_ERR("gen_pool_add failed\n");
+ goto err_kgsl_pool;
+ }
+ }
+
+ pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (pagetable->pool == NULL) {
+ KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
+ PAGE_SHIFT);
+ goto err_kgsl_pool;
+ }
+
+ if (gen_pool_add(pagetable->pool, kgsl_mmu_get_base_addr(mmu),
+ ptsize, -1)) {
+ KGSL_CORE_ERR("gen_pool_add failed\n");
+ goto err_pool;
+ }
+
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
+ pagetable->pt_ops = &gpummu_pt_ops;
+ else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
+ pagetable->pt_ops = &iommu_pt_ops;
+
+ pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
+ if (!pagetable->priv)
+ goto err_pool;
+
+ status = kgsl_setup_pt(pagetable);
+ if (status)
+ goto err_mmu_create;
+
+ spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+ list_add(&pagetable->list, &kgsl_driver.pagetable_list);
+ spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
+ /* Create the sysfs entries */
+ pagetable_add_sysfs_objects(pagetable);
+
+ return pagetable;
+
+err_mmu_create:
+ pagetable->pt_ops->mmu_destroy_pagetable(pagetable);
+err_pool:
+ gen_pool_destroy(pagetable->pool);
+err_kgsl_pool:
+ if (pagetable->kgsl_pool)
+ gen_pool_destroy(pagetable->kgsl_pool);
+err_alloc:
+ kfree(pagetable);
+
+ return NULL;
+}
+
+struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *mmu,
+ unsigned long name)
+{
+ struct kgsl_pagetable *pt;
+
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
+ return (void *)(-1);
+
+ if (!kgsl_mmu_is_perprocess(mmu))
+ name = KGSL_MMU_GLOBAL_PT;
+
+ pt = kgsl_get_pagetable(name);
+
+ if (pt == NULL)
+ pt = kgsl_mmu_createpagetableobject(mmu, name);
+
+ return pt;
+}
+
+void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
+{
+ kgsl_put_pagetable(pagetable);
+}
+EXPORT_SYMBOL(kgsl_mmu_putpagetable);
+
+int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+ uint32_t flags)
+{
+ struct kgsl_device *device = mmu->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))
+ && !adreno_is_a2xx(adreno_dev))
+ return 0;
+
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
+ return 0;
+ else if (device->ftbl->setstate)
+ return device->ftbl->setstate(device, context_id, flags);
+ else if (mmu->mmu_ops->mmu_device_setstate)
+ return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_setstate);
+
+void kgsl_mh_start(struct kgsl_device *device)
+{
+ struct kgsl_mh *mh = &device->mh;
+ /* force mmu off to for now*/
+ kgsl_regwrite(device, MH_MMU_CONFIG, 0);
+
+ /* define physical memory range accessible by the core */
+ kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
+ kgsl_regwrite(device, MH_MMU_MPU_END,
+ mh->mpu_base + mh->mpu_range);
+ kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);
+
+ if (mh->mh_intf_cfg1 != 0)
+ kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
+ mh->mh_intf_cfg1);
+
+ if (mh->mh_intf_cfg2 != 0)
+ kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
+ mh->mh_intf_cfg2);
+
+ /*
+ * Interrupts are enabled on a per-device level when
+ * kgsl_pwrctrl_irq() is called
+ */
+}
+EXPORT_SYMBOL(kgsl_mh_start);
+
+/**
+ * kgsl_mmu_get_gpuaddr - Assign a memdesc with a gpuadddr from the gen pool
+ * @pagetable - pagetable whose pool is to be used
+ * @memdesc - memdesc to which gpuaddr is assigned
+ *
+ * returns - 0 on success else error code
+ */
+int
+kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
+{
+ struct gen_pool *pool = NULL;
+ int size;
+ int page_align = ilog2(PAGE_SIZE);
+
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
+ if (memdesc->sglen == 1) {
+ memdesc->gpuaddr = sg_dma_address(memdesc->sg);
+ if (!memdesc->gpuaddr)
+ memdesc->gpuaddr = sg_phys(memdesc->sg);
+ if (!memdesc->gpuaddr) {
+ KGSL_CORE_ERR("Unable to get a valid physical "
+ "address for memdesc\n");
+ return -EINVAL;
+ }
+ return 0;
+ } else {
+ KGSL_CORE_ERR("Memory is not contigious "
+ "(sglen = %d)\n", memdesc->sglen);
+ return -EINVAL;
+ }
+ }
+
+ /* Add space for the guard page when allocating the mmu VA. */
+ size = memdesc->size;
+ if (kgsl_memdesc_has_guard_page(memdesc))
+ size += PAGE_SIZE;
+
+ pool = pagetable->pool;
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ /* Allocate aligned virtual addresses for iommu. This allows
+ * more efficient pagetable entries if the physical memory
+ * is also aligned. Don't do this for GPUMMU, because
+ * the address space is so small.
+ */
+ if (kgsl_memdesc_get_align(memdesc) > 0)
+ page_align = kgsl_memdesc_get_align(memdesc);
+ if (kgsl_memdesc_is_global(memdesc)) {
+ /*
+ * Only the default pagetable has a kgsl_pool, and
+ * it is responsible for creating the mapping for
+ * each global buffer. The mapping will be reused
+ * in all other pagetables and it must already exist
+ * when we're creating other pagetables which do not
+ * have a kgsl_pool.
+ */
+ pool = pagetable->kgsl_pool;
+ if (pool == NULL && memdesc->gpuaddr == 0) {
+ KGSL_CORE_ERR(
+ "No address for global mapping into pt %d\n",
+ pagetable->name);
+ return -EINVAL;
+ }
+ } else if (kgsl_memdesc_use_cpu_map(memdesc)) {
+ if (memdesc->gpuaddr == 0)
+ return -EINVAL;
+ pool = NULL;
+ }
+ }
+ if (pool) {
+ memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size,
+ page_align);
+ if (memdesc->gpuaddr == 0) {
+ KGSL_CORE_ERR("gen_pool_alloc(%d) failed, pool: %s\n",
+ size,
+ (pool == pagetable->kgsl_pool) ?
+ "kgsl_pool" : "general_pool");
+ KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
+ pagetable->name,
+ pagetable->stats.mapped,
+ pagetable->stats.entries);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_get_gpuaddr);
+
+int
+kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
+{
+ int ret = 0;
+ int size;
+ unsigned int protflags = kgsl_memdesc_protflags(memdesc);
+
+ if (!memdesc->gpuaddr)
+ return -EINVAL;
+ /* Only global mappings should be mapped multiple times */
+ if (!kgsl_memdesc_is_global(memdesc) &&
+ (KGSL_MEMDESC_MAPPED & memdesc->priv))
+ return -EINVAL;
+ /* Add space for the guard page when allocating the mmu VA. */
+ size = memdesc->size;
+ if (kgsl_memdesc_has_guard_page(memdesc))
+ size += PAGE_SIZE;
+
+ if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
+ spin_lock(&pagetable->lock);
+ ret = pagetable->pt_ops->mmu_map(pagetable, memdesc, protflags,
+ &pagetable->tlb_flags);
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ spin_lock(&pagetable->lock);
+
+ if (ret)
+ goto done;
+
+ /* Keep track of the statistics for the sysfs files */
+
+ KGSL_STATS_ADD(1, pagetable->stats.entries,
+ pagetable->stats.max_entries);
+
+ KGSL_STATS_ADD(size, pagetable->stats.mapped,
+ pagetable->stats.max_mapped);
+
+ spin_unlock(&pagetable->lock);
+ memdesc->priv |= KGSL_MEMDESC_MAPPED;
+
+ return 0;
+
+done:
+ spin_unlock(&pagetable->lock);
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_mmu_map);
+
+/**
+ * kgsl_mmu_put_gpuaddr - Free a gpuaddress from memory pool
+ * @pagetable - pagetable whose pool memory is freed from
+ * @memdesc - memdesc whose gpuaddress is freed
+ *
+ * returns - 0 on success else error code
+ */
+int
+kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
+{
+ struct gen_pool *pool;
+ int size;
+
+ if (memdesc->size == 0 || memdesc->gpuaddr == 0)
+ return 0;
+
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
+ goto done;
+
+ /* Add space for the guard page when freeing the mmu VA. */
+ size = memdesc->size;
+ if (kgsl_memdesc_has_guard_page(memdesc))
+ size += PAGE_SIZE;
+
+ pool = pagetable->pool;
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ if (kgsl_memdesc_is_global(memdesc))
+ pool = pagetable->kgsl_pool;
+ else if (kgsl_memdesc_use_cpu_map(memdesc))
+ pool = NULL;
+ }
+ if (pool)
+ gen_pool_free(pool, memdesc->gpuaddr, size);
+ /*
+ * Don't clear the gpuaddr on global mappings because they
+ * may be in use by other pagetables
+ */
+done:
+ if (!kgsl_memdesc_is_global(memdesc))
+ memdesc->gpuaddr = 0;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);
+
+int
+kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
+{
+ int size;
+ unsigned int start_addr = 0;
+ unsigned int end_addr = 0;
+
+ if (memdesc->size == 0 || memdesc->gpuaddr == 0 ||
+ !(KGSL_MEMDESC_MAPPED & memdesc->priv))
+ return -EINVAL;
+
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
+ return 0;
+
+ /* Add space for the guard page when freeing the mmu VA. */
+ size = memdesc->size;
+ if (kgsl_memdesc_has_guard_page(memdesc))
+ size += PAGE_SIZE;
+
+ start_addr = memdesc->gpuaddr;
+ end_addr = (memdesc->gpuaddr + size);
+
+ if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
+ spin_lock(&pagetable->lock);
+ pagetable->pt_ops->mmu_unmap(pagetable, memdesc,
+ &pagetable->tlb_flags);
+
+ /* If buffer is unmapped 0 fault addr */
+ if ((pagetable->fault_addr >= start_addr) &&
+ (pagetable->fault_addr < end_addr))
+ pagetable->fault_addr = 0;
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ spin_lock(&pagetable->lock);
+ /* Remove the statistics */
+ pagetable->stats.entries--;
+ pagetable->stats.mapped -= size;
+
+ spin_unlock(&pagetable->lock);
+ if (!kgsl_memdesc_is_global(memdesc))
+ memdesc->priv &= ~KGSL_MEMDESC_MAPPED;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_unmap);
+
+int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
+{
+ int result = -EINVAL;
+ unsigned int gpuaddr = 0;
+
+ if (memdesc == NULL) {
+ KGSL_CORE_ERR("invalid memdesc\n");
+ goto error;
+ }
+ /* Not all global mappings are needed for all MMU types */
+ if (!memdesc->size)
+ return 0;
+ gpuaddr = memdesc->gpuaddr;
+ memdesc->priv |= KGSL_MEMDESC_GLOBAL;
+
+ result = kgsl_mmu_get_gpuaddr(pagetable, memdesc);
+ if (result)
+ goto error;
+ result = kgsl_mmu_map(pagetable, memdesc);
+ if (result)
+ goto error_put_gpuaddr;
+
+ /*global mappings must have the same gpu address in all pagetables*/
+ if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
+ KGSL_CORE_ERR("pt %p addr mismatch phys %pa gpu 0x%0x 0x%08x",
+ pagetable, &memdesc->physaddr, gpuaddr, memdesc->gpuaddr);
+ goto error_unmap;
+ }
+ return result;
+error_unmap:
+ kgsl_mmu_unmap(pagetable, memdesc);
+error_put_gpuaddr:
+ kgsl_mmu_put_gpuaddr(pagetable, memdesc);
+error:
+ return result;
+}
+EXPORT_SYMBOL(kgsl_mmu_map_global);
+
+int kgsl_mmu_close(struct kgsl_device *device)
+{
+ struct kgsl_mmu *mmu = &device->mmu;
+
+ kgsl_sharedmem_free(&mmu->setstate_memory);
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
+ return 0;
+ else
+ return mmu->mmu_ops->mmu_close(mmu);
+}
+EXPORT_SYMBOL(kgsl_mmu_close);
+
+int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
+ enum kgsl_deviceid id)
+{
+ unsigned int result = 0;
+
+ if (pt == NULL)
+ return 0;
+
+ spin_lock(&pt->lock);
+ if (pt->tlb_flags & (1<<id)) {
+ result = KGSL_MMUFLAGS_TLBFLUSH;
+ pt->tlb_flags &= ~(1<<id);
+ }
+ spin_unlock(&pt->lock);
+ return result;
+}
+EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
+
+void kgsl_mmu_ptpool_destroy(void *ptpool)
+{
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
+ kgsl_gpummu_ptpool_destroy(ptpool);
+ ptpool = 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
+
+void *kgsl_mmu_ptpool_init(int entries)
+{
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
+ return kgsl_gpummu_ptpool_init(entries);
+ else
+ return (void *)(-1);
+}
+EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
+
+int kgsl_mmu_enabled(void)
+{
+ if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
+ return 1;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_enabled);
+
+enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
+{
+ return kgsl_mmu_type;
+}
+EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
+
+void kgsl_mmu_set_mmutype(char *mmutype)
+{
+ /* Set the default MMU - GPU on <=8960 and nothing on >= 8064 */
+ kgsl_mmu_type =
+ cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;
+
+ /* Use the IOMMU if it is found */
+ if (iommu_present(&platform_bus_type))
+ kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
+
+ if (mmutype && !strncmp(mmutype, "gpummu", 6))
+ kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
+ if (iommu_present(&platform_bus_type) && mmutype &&
+ !strncmp(mmutype, "iommu", 5))
+ kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
+ if (mmutype && !strncmp(mmutype, "nommu", 5))
+ kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
+}
+EXPORT_SYMBOL(kgsl_mmu_set_mmutype);
+
+int kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, unsigned int gpuaddr)
+{
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
+ return 1;
+ if (gpuaddr >= kgsl_mmu_get_base_addr(pt->mmu) &&
+ gpuaddr < kgsl_mmu_get_base_addr(pt->mmu) +
+ kgsl_mmu_get_ptsize(pt->mmu))
+ return 1;
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU
+ && kgsl_mmu_is_perprocess(pt->mmu))
+ return (gpuaddr > 0 && gpuaddr < TASK_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);
+
diff --git a/drivers/gpu/msm2/kgsl_mmu.h b/drivers/gpu/msm2/kgsl_mmu.h
new file mode 100644
index 0000000..de6bafc
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_mmu.h
@@ -0,0 +1,484 @@
+/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_MMU_H
+#define __KGSL_MMU_H
+
+#include <mach/iommu.h>
+#include "kgsl_iommu.h"
+/*
+ * These defines control the address range for allocations that
+ * are mapped into all pagetables.
+ */
+#ifndef CONFIG_MSM_KGSL_CFF_DUMP
+#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000
+#else
+#define KGSL_IOMMU_GLOBAL_MEM_BASE (0x09F00000 - SZ_4M)
+#endif
+#define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_4M
+
+#define KGSL_MMU_ALIGN_MASK (~((1 << PAGE_SHIFT) - 1))
+
+/* defconfig option for disabling per process pagetables */
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+#define KGSL_MMU_USE_PER_PROCESS_PT true
+#else
+#define KGSL_MMU_USE_PER_PROCESS_PT false
+#endif
+
+/* Identifier for the global page table */
+/* Per process page tables will probably pass in the thread group
+ as an identifier */
+
+#define KGSL_MMU_GLOBAL_PT 0
+#define KGSL_MMU_PRIV_BANK_TABLE_NAME 0xFFFFFFFF
+
+struct kgsl_device;
+
+#define GSL_PT_SUPER_PTE 8
+#define GSL_PT_PAGE_WV 0x00000001
+#define GSL_PT_PAGE_RV 0x00000002
+#define GSL_PT_PAGE_DIRTY 0x00000004
+
+/* MMU registers - the register locations for all cores are the
+ same. The method for getting to those locations differs between
+ 2D and 3D, but the 2D and 3D register functions do that magic
+ for us */
+
+#define MH_MMU_CONFIG 0x0040
+#define MH_MMU_VA_RANGE 0x0041
+#define MH_MMU_PT_BASE 0x0042
+#define MH_MMU_PAGE_FAULT 0x0043
+#define MH_MMU_TRAN_ERROR 0x0044
+#define MH_MMU_INVALIDATE 0x0045
+#define MH_MMU_MPU_BASE 0x0046
+#define MH_MMU_MPU_END 0x0047
+
+#define MH_INTERRUPT_MASK 0x0A42
+#define MH_INTERRUPT_STATUS 0x0A43
+#define MH_INTERRUPT_CLEAR 0x0A44
+#define MH_AXI_ERROR 0x0A45
+#define MH_ARBITER_CONFIG 0x0A40
+#define MH_DEBUG_CTRL 0x0A4E
+#define MH_DEBUG_DATA 0x0A4F
+#define MH_AXI_HALT_CONTROL 0x0A50
+#define MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
+#define MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
+
+/* MH_MMU_CONFIG bit definitions */
+
+#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
+#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
+#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
+#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
+#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
+#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
+#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
+#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
+#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
+#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
+#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
+
+/* MMU Flags */
+#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
+#define KGSL_MMUFLAGS_PTUPDATE 0x20000000
+
+#define MH_INTERRUPT_MASK__AXI_READ_ERROR 0x00000001L
+#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L
+#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L
+
+#define KGSL_MMU_INT_MASK \
+ (MH_INTERRUPT_MASK__AXI_READ_ERROR | \
+ MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \
+ MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
+
+enum kgsl_mmutype {
+ KGSL_MMU_TYPE_GPU = 0,
+ KGSL_MMU_TYPE_IOMMU,
+ KGSL_MMU_TYPE_NONE
+};
+
+struct kgsl_pagetable {
+ spinlock_t lock;
+ struct kref refcount;
+ unsigned int max_entries;
+ struct gen_pool *pool;
+ struct gen_pool *kgsl_pool;
+ struct list_head list;
+ unsigned int name;
+ struct kobject *kobj;
+
+ struct {
+ unsigned int entries;
+ unsigned int mapped;
+ unsigned int max_mapped;
+ unsigned int max_entries;
+ } stats;
+ const struct kgsl_mmu_pt_ops *pt_ops;
+ unsigned int tlb_flags;
+ unsigned int fault_addr;
+ void *priv;
+ struct kgsl_mmu *mmu;
+};
+
+struct kgsl_mmu;
+
+struct kgsl_mmu_ops {
+ int (*mmu_init) (struct kgsl_mmu *mmu);
+ int (*mmu_close) (struct kgsl_mmu *mmu);
+ int (*mmu_start) (struct kgsl_mmu *mmu);
+ void (*mmu_stop) (struct kgsl_mmu *mmu);
+ int (*mmu_setstate) (struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id);
+ int (*mmu_device_setstate) (struct kgsl_mmu *mmu,
+ uint32_t flags);
+ void (*mmu_pagefault) (struct kgsl_mmu *mmu);
+ phys_addr_t (*mmu_get_current_ptbase)
+ (struct kgsl_mmu *mmu);
+ void (*mmu_pagefault_resume)
+ (struct kgsl_mmu *mmu);
+ void (*mmu_disable_clk_on_ts)
+ (struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
+ int (*mmu_enable_clk)
+ (struct kgsl_mmu *mmu, int ctx_id);
+ void (*mmu_disable_clk)
+ (struct kgsl_mmu *mmu);
+ phys_addr_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
+ unsigned int unit_id,
+ enum kgsl_iommu_context_id ctx_id);
+ unsigned int (*mmu_get_reg_gpuaddr)(struct kgsl_mmu *mmu,
+ int iommu_unit_num, int ctx_id, int reg);
+ unsigned int (*mmu_get_reg_ahbaddr)(struct kgsl_mmu *mmu,
+ int iommu_unit_num, int ctx_id,
+ enum kgsl_iommu_reg_map reg);
+ int (*mmu_get_num_iommu_units)(struct kgsl_mmu *mmu);
+ int (*mmu_pt_equal) (struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt,
+ phys_addr_t pt_base);
+ phys_addr_t (*mmu_get_pt_base_addr)
+ (struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt);
+ int (*mmu_setup_pt) (struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt);
+ void (*mmu_cleanup_pt) (struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt);
+ unsigned int (*mmu_sync_lock)
+ (struct kgsl_mmu *mmu, unsigned int *cmds);
+ unsigned int (*mmu_sync_unlock)
+ (struct kgsl_mmu *mmu, unsigned int *cmds);
+ int (*mmu_hw_halt_supported)(struct kgsl_mmu *mmu, int iommu_unit_num);
+};
+
+struct kgsl_mmu_pt_ops {
+ int (*mmu_map) (struct kgsl_pagetable *pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int protflags,
+ unsigned int *tlb_flags);
+ int (*mmu_unmap) (struct kgsl_pagetable *pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int *tlb_flags);
+ void *(*mmu_create_pagetable) (void);
+ void (*mmu_destroy_pagetable) (struct kgsl_pagetable *);
+};
+
+#define KGSL_MMU_FLAGS_IOMMU_SYNC BIT(31)
+
+struct kgsl_mmu {
+ unsigned int refcnt;
+ uint32_t flags;
+ struct kgsl_device *device;
+ unsigned int config;
+ struct kgsl_memdesc setstate_memory;
+ /* current page table object being used by device mmu */
+ struct kgsl_pagetable *defaultpagetable;
+ /* pagetable object used for priv bank of IOMMU */
+ struct kgsl_pagetable *priv_bank_table;
+ struct kgsl_pagetable *hwpagetable;
+ const struct kgsl_mmu_ops *mmu_ops;
+ void *priv;
+ atomic_t fault;
+ unsigned long pt_base;
+ unsigned long pt_size;
+ bool pt_per_process;
+ bool use_cpu_map;
+};
+
+extern struct kgsl_mmu_ops iommu_ops;
+extern struct kgsl_mmu_pt_ops iommu_pt_ops;
+extern struct kgsl_mmu_ops gpummu_ops;
+extern struct kgsl_mmu_pt_ops gpummu_pt_ops;
+
+struct kgsl_pagetable *kgsl_mmu_getpagetable(struct kgsl_mmu *,
+ unsigned long name);
+void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
+void kgsl_mh_start(struct kgsl_device *device);
+void kgsl_mh_intrcallback(struct kgsl_device *device);
+int kgsl_mmu_init(struct kgsl_device *device);
+int kgsl_mmu_start(struct kgsl_device *device);
+int kgsl_mmu_close(struct kgsl_device *device);
+int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc);
+int kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc);
+int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc);
+int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc);
+int kgsl_mmu_put_gpuaddr(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc);
+unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
+int kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+ uint32_t flags);
+int kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu,
+ phys_addr_t pt_base);
+unsigned int kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu,
+ phys_addr_t pt_base, unsigned int addr);
+int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
+ enum kgsl_deviceid id);
+void kgsl_mmu_ptpool_destroy(void *ptpool);
+void *kgsl_mmu_ptpool_init(int entries);
+int kgsl_mmu_enabled(void);
+void kgsl_mmu_set_mmutype(char *mmutype);
+enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
+int kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, unsigned int gpuaddr);
+
+/*
+ * Static inline functions of MMU that simply call the SMMU specific
+ * function using a function pointer. These functions can be thought
+ * of as wrappers around the actual function
+ */
+
+static inline phys_addr_t kgsl_mmu_get_current_ptbase(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_current_ptbase)
+ return mmu->mmu_ops->mmu_get_current_ptbase(mmu);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_setstate(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
+ return mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
+
+ return 0;
+}
+
+static inline int kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
+ uint32_t flags)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_device_setstate)
+ return mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+
+ return 0;
+}
+
+static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_stop)
+ mmu->mmu_ops->mmu_stop(mmu);
+}
+
+static inline int kgsl_mmu_pt_equal(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt,
+ phys_addr_t pt_base)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_pt_equal)
+ return mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base);
+ else
+ return 1;
+}
+
+static inline phys_addr_t kgsl_mmu_get_pt_base_addr(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_pt_base_addr)
+ return mmu->mmu_ops->mmu_get_pt_base_addr(mmu, pt);
+ else
+ return 0;
+}
+
+static inline phys_addr_t kgsl_mmu_get_default_ttbr0(struct kgsl_mmu *mmu,
+ unsigned int unit_id,
+ enum kgsl_iommu_context_id ctx_id)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_default_ttbr0)
+ return mmu->mmu_ops->mmu_get_default_ttbr0(mmu, unit_id,
+ ctx_id);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
+ int ctx_id)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_enable_clk)
+ return mmu->mmu_ops->mmu_enable_clk(mmu, ctx_id);
+ else
+ return 0;
+}
+
+static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
+ mmu->mmu_ops->mmu_disable_clk(mmu);
+}
+
+static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
+ unsigned int ts, bool ts_valid)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
+ mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
+}
+
+static inline unsigned int kgsl_mmu_get_int_mask(void)
+{
+ /* Dont enable gpummu interrupts, if iommu is enabled */
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
+ return KGSL_MMU_INT_MASK;
+ else
+ return (MH_INTERRUPT_MASK__AXI_READ_ERROR |
+ MH_INTERRUPT_MASK__AXI_WRITE_ERROR);
+}
+
+static inline unsigned int kgsl_mmu_get_reg_gpuaddr(struct kgsl_mmu *mmu,
+ int iommu_unit_num,
+ int ctx_id, int reg)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_reg_gpuaddr)
+ return mmu->mmu_ops->mmu_get_reg_gpuaddr(mmu, iommu_unit_num,
+ ctx_id, reg);
+ else
+ return 0;
+}
+
+/*
+ * kgsl_mmu_get_reg_ahbaddr() - Calls the mmu specific function pointer to
+ * return the address that GPU can use to access register
+ * @mmu: Pointer to the device mmu
+ * @iommu_unit_num: There can be multiple iommu units used for graphics.
+ * This parameter is an index to the iommu unit being used
+ * @ctx_id: The context id within the iommu unit
+ * @reg: Register whose address is to be returned
+ *
+ * Returns the ahb address of reg else 0
+ */
+static inline unsigned int kgsl_mmu_get_reg_ahbaddr(struct kgsl_mmu *mmu,
+ int iommu_unit_num,
+ int ctx_id,
+ enum kgsl_iommu_reg_map reg)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_reg_ahbaddr)
+ return mmu->mmu_ops->mmu_get_reg_ahbaddr(mmu, iommu_unit_num,
+ ctx_id, reg);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_get_num_iommu_units(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_num_iommu_units)
+ return mmu->mmu_ops->mmu_get_num_iommu_units(mmu);
+ else
+ return 0;
+}
+
+/*
+ * kgsl_mmu_hw_halt_supported() - Runtime check for iommu hw halt
+ * @mmu: the mmu
+ *
+ * Returns non-zero if the iommu supports hw halt,
+ * 0 if not.
+ */
+static inline int kgsl_mmu_hw_halt_supported(struct kgsl_mmu *mmu,
+ int iommu_unit_num)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_hw_halt_supported)
+ return mmu->mmu_ops->mmu_hw_halt_supported(mmu, iommu_unit_num);
+ else
+ return 0;
+}
+
+/*
+ * kgsl_mmu_is_perprocess() - Runtime check for per-process
+ * pagetables.
+ * @mmu: the mmu
+ *
+ * Returns true if per-process pagetables are enabled,
+ * false if not.
+ */
+static inline int kgsl_mmu_is_perprocess(struct kgsl_mmu *mmu)
+{
+ return mmu->pt_per_process;
+}
+
+/*
+ * kgsl_mmu_use_cpu_map() - Runtime check for matching the CPU
+ * address space on the GPU.
+ * @mmu: the mmu
+ *
+ * Returns true if supported false if not.
+ */
+static inline int kgsl_mmu_use_cpu_map(struct kgsl_mmu *mmu)
+{
+ return mmu->use_cpu_map;
+}
+
+/*
+ * kgsl_mmu_base_addr() - Get gpu virtual address base.
+ * @mmu: the mmu
+ *
+ * Returns the start address of the allocatable gpu
+ * virtual address space. Other mappings that mirror
+ * the CPU address space are possible outside this range.
+ */
+static inline unsigned int kgsl_mmu_get_base_addr(struct kgsl_mmu *mmu)
+{
+ return mmu->pt_base;
+}
+
+/*
+ * kgsl_mmu_get_ptsize() - Get gpu pagetable size
+ * @mmu: the mmu
+ *
+ * Returns the usable size of the gpu allocatable
+ * address space.
+ */
+static inline unsigned int kgsl_mmu_get_ptsize(struct kgsl_mmu *mmu)
+{
+ return mmu->pt_size;
+}
+
+static inline int kgsl_mmu_sync_lock(struct kgsl_mmu *mmu,
+ unsigned int *cmds)
+{
+ if ((mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC) &&
+ mmu->mmu_ops && mmu->mmu_ops->mmu_sync_lock)
+ return mmu->mmu_ops->mmu_sync_lock(mmu, cmds);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_sync_unlock(struct kgsl_mmu *mmu,
+ unsigned int *cmds)
+{
+ if ((mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC) &&
+ mmu->mmu_ops && mmu->mmu_ops->mmu_sync_unlock)
+ return mmu->mmu_ops->mmu_sync_unlock(mmu, cmds);
+ else
+ return 0;
+}
+
+#endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm2/kgsl_pwrctrl.c b/drivers/gpu/msm2/kgsl_pwrctrl.c
new file mode 100644
index 0000000..f2398a5
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_pwrctrl.c
@@ -0,0 +1,1649 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <linux/pm_runtime.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_bus.h>
+#include <linux/ktime.h>
+#include <linux/delay.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+#include "kgsl_trace.h"
+#include "kgsl_sharedmem.h"
+
+#define KGSL_PWRFLAGS_POWER_ON 0
+#define KGSL_PWRFLAGS_CLK_ON 1
+#define KGSL_PWRFLAGS_AXI_ON 2
+#define KGSL_PWRFLAGS_IRQ_ON 3
+
+#define UPDATE_BUSY_VAL 1000000
+#define UPDATE_BUSY 50
+
+/*
+ * Expected delay for post-interrupt processing on A3xx.
+ * The delay may be longer, gradually increase the delay
+ * to compensate. If the GPU isn't done by max delay,
+ * it's working on something other than just the final
+ * command sequence so stop waiting for it to be idle.
+ */
+#define INIT_UDELAY 200
+#define MAX_UDELAY 2000
+
+struct clk_pair {
+ const char *name;
+ uint map;
+};
+
+struct clk_pair clks[KGSL_MAX_CLKS] = {
+ {
+ .name = "src_clk",
+ .map = KGSL_CLK_SRC,
+ },
+ {
+ .name = "core_clk",
+ .map = KGSL_CLK_CORE,
+ },
+ {
+ .name = "iface_clk",
+ .map = KGSL_CLK_IFACE,
+ },
+ {
+ .name = "mem_clk",
+ .map = KGSL_CLK_MEM,
+ },
+ {
+ .name = "mem_iface_clk",
+ .map = KGSL_CLK_MEM_IFACE,
+ },
+ {
+ .name = "alt_mem_iface_clk",
+ .map = KGSL_CLK_ALT_MEM_IFACE,
+ },
+};
+
+static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
+ int requested_state);
+static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
+static void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);
+
+/* Update the elapsed time at a particular clock level
+ * if the device is active(on_time = true).Otherwise
+ * store it as sleep time.
+ */
+static void update_clk_statistics(struct kgsl_device *device,
+ bool on_time)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct kgsl_clk_stats *clkstats = &pwr->clk_stats;
+ ktime_t elapsed;
+ int elapsed_us;
+ if (clkstats->start.tv64 == 0)
+ clkstats->start = ktime_get();
+ clkstats->stop = ktime_get();
+ elapsed = ktime_sub(clkstats->stop, clkstats->start);
+ elapsed_us = ktime_to_us(elapsed);
+ clkstats->elapsed += elapsed_us;
+ if (on_time)
+ clkstats->clock_time[pwr->active_pwrlevel] += elapsed_us;
+ else
+ clkstats->clock_time[pwr->num_pwrlevels - 1] += elapsed_us;
+ clkstats->start = ktime_get();
+}
+
+/*
+ * Given a requested power level do bounds checking on the constraints and
+ * return the nearest possible level
+ */
+
+static inline int _adjust_pwrlevel(struct kgsl_pwrctrl *pwr, int level)
+{
+ int max_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
+ int min_pwrlevel = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
+
+ if (level < max_pwrlevel)
+ return max_pwrlevel;
+ if (level > min_pwrlevel)
+ return min_pwrlevel;
+
+ return level;
+}
+
+void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
+ unsigned int new_level)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct kgsl_pwrlevel *pwrlevel;
+ int delta;
+ int level;
+
+ /* Adjust the power level to the current constraints */
+ new_level = _adjust_pwrlevel(pwr, new_level);
+
+ if (new_level == pwr->active_pwrlevel)
+ return;
+
+ delta = new_level < pwr->active_pwrlevel ? -1 : 1;
+
+ update_clk_statistics(device, true);
+
+ level = pwr->active_pwrlevel;
+
+ /*
+ * Set the active powerlevel first in case the clocks are off - if we
+ * don't do this then the pwrlevel change won't take effect when the
+ * clocks come back
+ */
+
+ pwr->active_pwrlevel = new_level;
+ pwrlevel = &pwr->pwrlevels[pwr->active_pwrlevel];
+
+ if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
+
+ if (pwr->pcl)
+ msm_bus_scale_client_update_request(pwr->pcl,
+ pwrlevel->bus_freq);
+ else if (pwr->ebi1_clk)
+ clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
+ }
+
+ if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) ||
+ (device->state == KGSL_STATE_NAP)) {
+
+ /*
+ * On some platforms, instability is caused on
+ * changing clock freq when the core is busy.
+ * Idle the gpu core before changing the clock freq.
+ */
+
+ if (pwr->idle_needed == true)
+ device->ftbl->idle(device);
+
+ /*
+ * Don't shift by more than one level at a time to
+ * avoid glitches.
+ */
+
+ while (level != new_level) {
+ level += delta;
+
+ clk_set_rate(pwr->grp_clks[0],
+ pwr->pwrlevels[level].gpu_freq);
+ }
+ }
+
+
+ trace_kgsl_pwrlevel(device, pwr->active_pwrlevel, pwrlevel->gpu_freq);
+}
+
+EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
+
+static int kgsl_pwrctrl_thermal_pwrlevel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int ret, level;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%d", &level);
+ if (ret != 1)
+ return count;
+
+ if (level < 0)
+ return count;
+
+ mutex_lock(&device->mutex);
+
+ if (level > pwr->num_pwrlevels - 2)
+ level = pwr->num_pwrlevels - 2;
+
+ pwr->thermal_pwrlevel = level;
+
+ /*
+ * If there is no power policy set the clock to the requested thermal
+ * level - if thermal now happens to be higher than max, then that will
+ * be limited by the pwrlevel change function. Otherwise if there is
+ * a policy only change the active clock if it is higher then the new
+ * thermal level
+ */
+
+ if (device->pwrscale.policy == NULL ||
+ pwr->thermal_pwrlevel > pwr->active_pwrlevel)
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_thermal_pwrlevel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pwr->thermal_pwrlevel);
+}
+
+static int kgsl_pwrctrl_max_pwrlevel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int ret, level, max_level;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%d", &level);
+ if (ret != 1)
+ return count;
+
+ /* If the use specifies a negative number, then don't change anything */
+ if (level < 0)
+ return count;
+
+ mutex_lock(&device->mutex);
+
+ /* You can't set a maximum power level lower than the minimum */
+ if (level > pwr->min_pwrlevel)
+ level = pwr->min_pwrlevel;
+
+ pwr->max_pwrlevel = level;
+
+
+ max_level = max_t(int, pwr->thermal_pwrlevel, pwr->max_pwrlevel);
+
+ /*
+ * If there is no policy then move to max by default. Otherwise only
+ * move max if the current level happens to be higher then the new max
+ */
+
+ if (device->pwrscale.policy == NULL ||
+ (max_level > pwr->active_pwrlevel))
+ kgsl_pwrctrl_pwrlevel_change(device, max_level);
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_max_pwrlevel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pwr->max_pwrlevel);
+}
+
+static int kgsl_pwrctrl_min_pwrlevel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int ret, level, min_level;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%d", &level);
+ if (ret != 1)
+ return count;
+
+ /* Don't do anything on obviously incorrect values */
+ if (level < 0)
+ return count;
+
+ mutex_lock(&device->mutex);
+ if (level > pwr->num_pwrlevels - 2)
+ level = pwr->num_pwrlevels - 2;
+
+ /* You can't set a minimum power level lower than the maximum */
+ if (level < pwr->max_pwrlevel)
+ level = pwr->max_pwrlevel;
+
+ pwr->min_pwrlevel = level;
+
+ min_level = max_t(int, pwr->thermal_pwrlevel, pwr->min_pwrlevel);
+
+ /* Only move the power level higher if minimum is higher then the
+ * current level
+ */
+
+ if (min_level < pwr->active_pwrlevel)
+ kgsl_pwrctrl_pwrlevel_change(device, min_level);
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_min_pwrlevel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pwr->min_pwrlevel);
+}
+
+static int kgsl_pwrctrl_num_pwrlevels_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pwr->num_pwrlevels - 1);
+}
+
+/* Given a GPU clock value, return the lowest matching powerlevel */
+
+static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock)
+{
+ int i;
+
+ for (i = pwr->num_pwrlevels - 1; i >= 0; i--) {
+ if (abs(pwr->pwrlevels[i].gpu_freq - clock) < 5000000)
+ return i;
+ }
+
+ return -ERANGE;
+}
+
+static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ unsigned long val;
+ int ret, level;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%ld", &val);
+ if (ret != 1)
+ return count;
+
+ mutex_lock(&device->mutex);
+ level = _get_nearest_pwrlevel(pwr, val);
+ if (level < 0)
+ goto done;
+
+ pwr->thermal_pwrlevel = level;
+
+ /*
+ * if the thermal limit is lower than the current setting,
+ * move the speed down immediately
+ */
+
+ if (pwr->thermal_pwrlevel > pwr->active_pwrlevel)
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+
+done:
+ mutex_unlock(&device->mutex);
+ return count;
+}
+
+static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
+}
+
+static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ unsigned long val;
+ int ret, level;
+
+ if (device == NULL)
+ return 0;
+
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%ld", &val);
+ if (ret != 1)
+ return count;
+
+ mutex_lock(&device->mutex);
+ level = _get_nearest_pwrlevel(pwr, val);
+ if (level >= 0)
+ kgsl_pwrctrl_pwrlevel_change(device, level);
+
+ mutex_unlock(&device->mutex);
+ return count;
+}
+
+static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+}
+
+static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char temp[20];
+ unsigned long val;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ const long div = 1000/HZ;
+ int rc;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+
+ snprintf(temp, sizeof(temp), "%.*s",
+ (int)min(count, sizeof(temp) - 1), buf);
+ rc = strict_strtoul(temp, 0, &val);
+ if (rc)
+ return rc;
+
+ mutex_lock(&device->mutex);
+
+ /* Let the timeout be requested in ms, but convert to jiffies. */
+ val /= div;
+ pwr->interval_timeout = val;
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ int mul = 1000/HZ;
+ if (device == NULL)
+ return 0;
+ /* Show the idle_timeout converted to msec */
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ device->pwrctrl.interval_timeout * mul);
+}
+
+static int kgsl_pwrctrl_pmqos_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char temp[20];
+ unsigned long val;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ int rc;
+
+ if (device == NULL)
+ return 0;
+
+ snprintf(temp, sizeof(temp), "%.*s",
+ (int)min(count, sizeof(temp) - 1), buf);
+ rc = kstrtoul(temp, 0, &val);
+ if (rc)
+ return rc;
+
+ mutex_lock(&device->mutex);
+ device->pwrctrl.pm_qos_latency = val;
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_pmqos_latency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ if (device == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ device->pwrctrl.pm_qos_latency);
+}
+
+static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_clk_stats *clkstats;
+
+ if (device == NULL)
+ return 0;
+ clkstats = &device->pwrctrl.clk_stats;
+ ret = snprintf(buf, PAGE_SIZE, "%7d %7d\n",
+ clkstats->on_time_old, clkstats->elapsed_old);
+ if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
+ clkstats->on_time_old = 0;
+ clkstats->elapsed_old = 0;
+ }
+ return ret;
+}
+
+static int kgsl_pwrctrl_gputop_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_clk_stats *clkstats;
+ int i = 0;
+ char *ptr = buf;
+
+ if (device == NULL)
+ return 0;
+ clkstats = &device->pwrctrl.clk_stats;
+ ret = snprintf(buf, PAGE_SIZE, "%7d %7d ", clkstats->on_time_old,
+ clkstats->elapsed_old);
+ for (i = 0, ptr += ret; i < device->pwrctrl.num_pwrlevels;
+ i++, ptr += ret)
+ ret = snprintf(ptr, PAGE_SIZE, "%7d ",
+ clkstats->old_clock_time[i]);
+
+ if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
+ clkstats->on_time_old = 0;
+ clkstats->elapsed_old = 0;
+ for (i = 0; i < KGSL_MAX_PWRLEVELS ; i++)
+ clkstats->old_clock_time[i] = 0;
+ }
+ return (unsigned int) (ptr - buf);
+}
+
+static int kgsl_pwrctrl_gpu_available_frequencies_show(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int index, num_chars = 0;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ for (index = 0; index < pwr->num_pwrlevels - 1; index++)
+ num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
+ pwr->pwrlevels[index].gpu_freq);
+ buf[num_chars++] = '\n';
+ return num_chars;
+}
+
+static int kgsl_pwrctrl_reset_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ if (device == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n", device->reset_counter);
+}
+
+static void __force_on(struct kgsl_device *device, int flag, int on)
+{
+ if (on) {
+ switch (flag) {
+ case KGSL_PWRFLAGS_CLK_ON:
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON,
+ KGSL_STATE_ACTIVE);
+ break;
+ case KGSL_PWRFLAGS_AXI_ON:
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+ break;
+ case KGSL_PWRFLAGS_POWER_ON:
+ kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
+ break;
+ }
+ set_bit(flag, &device->pwrctrl.ctrl_flags);
+ } else {
+ clear_bit(flag, &device->pwrctrl.ctrl_flags);
+ }
+}
+
+static int __force_on_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, int flag)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ int i = test_bit(flag, &device->pwrctrl.ctrl_flags);
+ if (device == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n", i);
+}
+
+static int __force_on_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ int flag)
+{
+ char temp[20];
+ unsigned long val;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ int rc;
+
+ if (device == NULL)
+ return 0;
+
+ snprintf(temp, sizeof(temp), "%.*s",
+ (int)min(count, sizeof(temp) - 1), buf);
+ rc = kstrtoul(temp, 0, &val);
+ if (rc)
+ return rc;
+
+ mutex_lock(&device->mutex);
+ __force_on(device, flag, val);
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_force_clk_on_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_CLK_ON);
+}
+
+static int kgsl_pwrctrl_force_clk_on_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_CLK_ON);
+}
+
+static int kgsl_pwrctrl_force_bus_on_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_AXI_ON);
+}
+
+static int kgsl_pwrctrl_force_bus_on_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_AXI_ON);
+}
+
+static int kgsl_pwrctrl_force_rail_on_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return __force_on_show(dev, attr, buf, KGSL_PWRFLAGS_POWER_ON);
+}
+
+static int kgsl_pwrctrl_force_rail_on_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return __force_on_store(dev, attr, buf, count, KGSL_PWRFLAGS_POWER_ON);
+}
+
+DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
+DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
+ kgsl_pwrctrl_max_gpuclk_store);
+DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
+ kgsl_pwrctrl_idle_timer_store);
+DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
+ NULL);
+DEVICE_ATTR(gputop, 0444, kgsl_pwrctrl_gputop_show,
+ NULL);
+DEVICE_ATTR(gpu_available_frequencies, 0444,
+ kgsl_pwrctrl_gpu_available_frequencies_show,
+ NULL);
+DEVICE_ATTR(max_pwrlevel, 0644,
+ kgsl_pwrctrl_max_pwrlevel_show,
+ kgsl_pwrctrl_max_pwrlevel_store);
+DEVICE_ATTR(min_pwrlevel, 0644,
+ kgsl_pwrctrl_min_pwrlevel_show,
+ kgsl_pwrctrl_min_pwrlevel_store);
+DEVICE_ATTR(thermal_pwrlevel, 0644,
+ kgsl_pwrctrl_thermal_pwrlevel_show,
+ kgsl_pwrctrl_thermal_pwrlevel_store);
+DEVICE_ATTR(num_pwrlevels, 0444,
+ kgsl_pwrctrl_num_pwrlevels_show,
+ NULL);
+DEVICE_ATTR(reset_count, 0444,
+ kgsl_pwrctrl_reset_count_show,
+ NULL);
+DEVICE_ATTR(pmqos_latency, 0644,
+ kgsl_pwrctrl_pmqos_latency_show,
+ kgsl_pwrctrl_pmqos_latency_store);
+DEVICE_ATTR(force_clk_on, 0644,
+ kgsl_pwrctrl_force_clk_on_show,
+ kgsl_pwrctrl_force_clk_on_store);
+DEVICE_ATTR(force_bus_on, 0644,
+ kgsl_pwrctrl_force_bus_on_show,
+ kgsl_pwrctrl_force_bus_on_store);
+DEVICE_ATTR(force_rail_on, 0644,
+ kgsl_pwrctrl_force_rail_on_show,
+ kgsl_pwrctrl_force_rail_on_store);
+
+static const struct device_attribute *pwrctrl_attr_list[] = {
+ &dev_attr_gpuclk,
+ &dev_attr_max_gpuclk,
+ &dev_attr_idle_timer,
+ &dev_attr_gpubusy,
+ &dev_attr_gputop,
+ &dev_attr_gpu_available_frequencies,
+ &dev_attr_max_pwrlevel,
+ &dev_attr_min_pwrlevel,
+ &dev_attr_thermal_pwrlevel,
+ &dev_attr_num_pwrlevels,
+ &dev_attr_reset_count,
+ &dev_attr_pmqos_latency,
+ &dev_attr_force_clk_on,
+ &dev_attr_force_bus_on,
+ &dev_attr_force_rail_on,
+ NULL
+};
+
+int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
+{
+ return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
+}
+
+void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
+{
+ kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
+}
+
+static void update_statistics(struct kgsl_device *device)
+{
+ struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
+ unsigned int on_time = 0;
+ int i;
+ int num_pwrlevels = device->pwrctrl.num_pwrlevels - 1;
+ /*PER CLK TIME*/
+ for (i = 0; i < num_pwrlevels; i++) {
+ clkstats->old_clock_time[i] = clkstats->clock_time[i];
+ on_time += clkstats->clock_time[i];
+ clkstats->clock_time[i] = 0;
+ }
+ clkstats->old_clock_time[num_pwrlevels] =
+ clkstats->clock_time[num_pwrlevels];
+ clkstats->clock_time[num_pwrlevels] = 0;
+ clkstats->on_time_old = on_time;
+ clkstats->elapsed_old = clkstats->elapsed;
+ clkstats->elapsed = 0;
+
+ trace_kgsl_gpubusy(device, clkstats->on_time_old,
+ clkstats->elapsed_old);
+}
+
+/* Track the amount of time the gpu is on vs the total system time. *
+ * Regularly update the percentage of busy time displayed by sysfs. */
+static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
+{
+ struct kgsl_clk_stats *clkstats = &device->pwrctrl.clk_stats;
+ update_clk_statistics(device, on_time);
+ /* Update the output regularly and reset the counters. */
+ if ((clkstats->elapsed > UPDATE_BUSY_VAL) ||
+ !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
+ update_statistics(device);
+ }
+}
+
+static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
+ int requested_state)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int i = 0;
+
+ if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags))
+ return;
+
+ if (state == KGSL_PWRFLAGS_OFF) {
+ if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_clk(device, state);
+ for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+ if (pwr->grp_clks[i])
+ clk_disable(pwr->grp_clks[i]);
+ /* High latency clock maintenance. */
+ if ((pwr->pwrlevels[0].gpu_freq > 0) &&
+ (requested_state != KGSL_STATE_NAP)) {
+ for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+ if (pwr->grp_clks[i])
+ clk_unprepare(pwr->grp_clks[i]);
+ clk_set_rate(pwr->grp_clks[0],
+ pwr->pwrlevels[pwr->num_pwrlevels - 1].
+ gpu_freq);
+ }
+ kgsl_pwrctrl_busy_time(device, true);
+ } else if (requested_state == KGSL_STATE_SLEEP) {
+ /* High latency clock maintenance. */
+ for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+ if (pwr->grp_clks[i])
+ clk_unprepare(pwr->grp_clks[i]);
+ if ((pwr->pwrlevels[0].gpu_freq > 0))
+ clk_set_rate(pwr->grp_clks[0],
+ pwr->pwrlevels[pwr->num_pwrlevels - 1].
+ gpu_freq);
+ }
+ } else if (state == KGSL_PWRFLAGS_ON) {
+ if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_clk(device, state);
+ /* High latency clock maintenance. */
+ if (device->state != KGSL_STATE_NAP) {
+ if (pwr->pwrlevels[0].gpu_freq > 0)
+ clk_set_rate(pwr->grp_clks[0],
+ pwr->pwrlevels
+ [pwr->active_pwrlevel].
+ gpu_freq);
+ for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+ if (pwr->grp_clks[i])
+ clk_prepare(pwr->grp_clks[i]);
+ }
+ /* as last step, enable grp_clk
+ this is to let GPU interrupt to come */
+ for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+ if (pwr->grp_clks[i])
+ clk_enable(pwr->grp_clks[i]);
+ kgsl_pwrctrl_busy_time(device, false);
+ }
+ }
+}
+
+static void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+ if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->ctrl_flags))
+ return;
+
+ if (state == KGSL_PWRFLAGS_OFF) {
+ if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_bus(device, state);
+ if (pwr->ebi1_clk) {
+ clk_set_rate(pwr->ebi1_clk, 0);
+ clk_disable_unprepare(pwr->ebi1_clk);
+ }
+ if (pwr->pcl)
+ msm_bus_scale_client_update_request(pwr->pcl,
+ 0);
+ }
+ } else if (state == KGSL_PWRFLAGS_ON) {
+ if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_bus(device, state);
+ if (pwr->ebi1_clk) {
+ clk_prepare_enable(pwr->ebi1_clk);
+ clk_set_rate(pwr->ebi1_clk,
+ pwr->pwrlevels[pwr->active_pwrlevel].
+ bus_freq);
+ }
+ if (pwr->pcl)
+ msm_bus_scale_client_update_request(pwr->pcl,
+ pwr->pwrlevels[pwr->active_pwrlevel].
+ bus_freq);
+ }
+ }
+}
+
+static void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+ if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags))
+ return;
+
+ if (state == KGSL_PWRFLAGS_OFF) {
+ if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_rail(device, state);
+ if (pwr->gpu_cx)
+ regulator_disable(pwr->gpu_cx);
+ if (pwr->gpu_reg)
+ regulator_disable(pwr->gpu_reg);
+ }
+ } else if (state == KGSL_PWRFLAGS_ON) {
+ if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_rail(device, state);
+ if (pwr->gpu_reg) {
+ int status = regulator_enable(pwr->gpu_reg);
+ if (status)
+ KGSL_DRV_ERR(device,
+ "core regulator_enable "
+ "failed: %d\n",
+ status);
+ }
+ if (pwr->gpu_cx) {
+ int status = regulator_enable(pwr->gpu_cx);
+ if (status)
+ KGSL_DRV_ERR(device,
+ "cx regulator_enable "
+ "failed: %d\n",
+ status);
+ }
+ }
+ }
+}
+
+void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+ if (state == KGSL_PWRFLAGS_ON) {
+ if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_irq(device, state);
+ enable_irq(pwr->interrupt_num);
+ }
+ } else if (state == KGSL_PWRFLAGS_OFF) {
+ if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_irq(device, state);
+ if (in_interrupt())
+ disable_irq_nosync(pwr->interrupt_num);
+ else
+ disable_irq(pwr->interrupt_num);
+ }
+ }
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_irq);
+
+int kgsl_pwrctrl_init(struct kgsl_device *device)
+{
+ int i, result = 0;
+ struct clk *clk;
+ struct platform_device *pdev =
+ container_of(device->parentdev, struct platform_device, dev);
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+
+ /*acquire clocks */
+ for (i = 0; i < KGSL_MAX_CLKS; i++) {
+ if (pdata->clk_map & clks[i].map) {
+ clk = clk_get(&pdev->dev, clks[i].name);
+ if (IS_ERR(clk))
+ goto clk_err;
+ pwr->grp_clks[i] = clk;
+ }
+ }
+ /* Make sure we have a source clk for freq setting */
+ if (pwr->grp_clks[0] == NULL)
+ pwr->grp_clks[0] = pwr->grp_clks[1];
+
+ /* put the AXI bus into asynchronous mode with the graphics cores */
+ if (pdata->set_grp_async != NULL)
+ pdata->set_grp_async();
+
+ if (pdata->num_levels > KGSL_MAX_PWRLEVELS ||
+ pdata->num_levels < 1) {
+ KGSL_PWR_ERR(device, "invalid power level count: %d\n",
+ pdata->num_levels);
+ result = -EINVAL;
+ goto done;
+ }
+ pwr->num_pwrlevels = pdata->num_levels;
+
+ /* Initialize the user and thermal clock constraints */
+
+ pwr->max_pwrlevel = 0;
+ pwr->min_pwrlevel = pdata->num_levels - 2;
+ pwr->thermal_pwrlevel = 0;
+
+ pwr->active_pwrlevel = pdata->init_level;
+ pwr->default_pwrlevel = pdata->init_level;
+ pwr->init_pwrlevel = pdata->init_level;
+ for (i = 0; i < pdata->num_levels; i++) {
+ pwr->pwrlevels[i].gpu_freq =
+ (pdata->pwrlevel[i].gpu_freq > 0) ?
+ clk_round_rate(pwr->grp_clks[0],
+ pdata->pwrlevel[i].
+ gpu_freq) : 0;
+ pwr->pwrlevels[i].bus_freq =
+ pdata->pwrlevel[i].bus_freq;
+ pwr->pwrlevels[i].io_fraction =
+ pdata->pwrlevel[i].io_fraction;
+ }
+ /* Do not set_rate for targets in sync with AXI */
+ if (pwr->pwrlevels[0].gpu_freq > 0)
+ clk_set_rate(pwr->grp_clks[0], pwr->
+ pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
+
+ pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(pwr->gpu_reg))
+ pwr->gpu_reg = NULL;
+
+ if (pwr->gpu_reg) {
+ pwr->gpu_cx = regulator_get(&pdev->dev, "vddcx");
+ if (IS_ERR(pwr->gpu_cx))
+ pwr->gpu_cx = NULL;
+ } else
+ pwr->gpu_cx = NULL;
+
+ pwr->power_flags = 0;
+
+ pwr->idle_needed = pdata->idle_needed;
+ pwr->interval_timeout = pdata->idle_timeout;
+ pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
+ pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(pwr->ebi1_clk))
+ pwr->ebi1_clk = NULL;
+ else
+ clk_set_rate(pwr->ebi1_clk,
+ pwr->pwrlevels[pwr->active_pwrlevel].
+ bus_freq);
+ if (pdata->bus_scale_table != NULL) {
+ pwr->pcl = msm_bus_scale_register_client(pdata->
+ bus_scale_table);
+ if (!pwr->pcl) {
+ KGSL_PWR_ERR(device,
+ "msm_bus_scale_register_client failed: "
+ "id %d table %p", device->id,
+ pdata->bus_scale_table);
+ result = -EINVAL;
+ goto done;
+ }
+ }
+
+ /* Set the power level step multiplier with 1 as the default */
+ pwr->step_mul = pdata->step_mul ? pdata->step_mul : 1;
+
+ /* Set the CPU latency to 501usec to allow low latency PC modes */
+ pwr->pm_qos_latency = 501;
+
+ pm_runtime_enable(device->parentdev);
+ return result;
+
+clk_err:
+ result = PTR_ERR(clk);
+ KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
+ clks[i].name, result);
+
+done:
+ return result;
+}
+
+void kgsl_pwrctrl_close(struct kgsl_device *device)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int i;
+
+ KGSL_PWR_INFO(device, "close device %d\n", device->id);
+
+ pm_runtime_disable(device->parentdev);
+
+ clk_put(pwr->ebi1_clk);
+
+ if (pwr->pcl)
+ msm_bus_scale_unregister_client(pwr->pcl);
+
+ pwr->pcl = 0;
+
+ if (pwr->gpu_reg) {
+ regulator_put(pwr->gpu_reg);
+ pwr->gpu_reg = NULL;
+ }
+
+ if (pwr->gpu_cx) {
+ regulator_put(pwr->gpu_cx);
+ pwr->gpu_cx = NULL;
+ }
+
+ for (i = 1; i < KGSL_MAX_CLKS; i++)
+ if (pwr->grp_clks[i]) {
+ clk_put(pwr->grp_clks[i]);
+ pwr->grp_clks[i] = NULL;
+ }
+
+ pwr->grp_clks[0] = NULL;
+ pwr->power_flags = 0;
+}
+
+/**
+ * kgsl_idle_check() - Work function for GPU interrupts and idle timeouts.
+ * @device: The device
+ *
+ * This function is called for work that is queued by the interrupt
+ * handler or the idle timer. It attempts to transition to a clocks
+ * off state if the active_cnt is 0 and the hardware is idle.
+ */
+void kgsl_idle_check(struct work_struct *work)
+{
+ int delay = INIT_UDELAY;
+ int requested_state;
+ struct kgsl_device *device = container_of(work, struct kgsl_device,
+ idle_check_ws);
+ WARN_ON(device == NULL);
+ if (device == NULL)
+ return;
+
+ mutex_lock(&device->mutex);
+
+ kgsl_pwrscale_idle(device);
+
+ if (device->state == KGSL_STATE_ACTIVE
+ || device->state == KGSL_STATE_NAP) {
+ /*
+ * If no user is explicitly trying to use the GPU
+ * (active_cnt is zero), then loop with increasing delay,
+ * waiting for the GPU to become idle.
+ */
+ while (!atomic_read(&device->active_cnt) &&
+ (delay < MAX_UDELAY)) {
+ requested_state = device->requested_state;
+ if (!kgsl_pwrctrl_sleep(device))
+ break;
+ /*
+ * If no new commands have been issued since the
+ * last interrupt, stay in this loop waiting for
+ * the GPU to become idle.
+ */
+ if (!device->pwrctrl.irq_last)
+ break;
+ kgsl_pwrctrl_request_state(device, requested_state);
+ mutex_unlock(&device->mutex);
+ udelay(delay);
+ delay *= 2;
+ mutex_lock(&device->mutex);
+ }
+
+
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ if (device->state == KGSL_STATE_ACTIVE) {
+ mod_timer(&device->idle_timer,
+ jiffies +
+ device->pwrctrl.interval_timeout);
+ /*
+ * If the GPU has been too busy to sleep, make sure
+ * that is acurately reflected in the % busy numbers.
+ */
+ device->pwrctrl.clk_stats.no_nap_cnt++;
+ if (device->pwrctrl.clk_stats.no_nap_cnt >
+ UPDATE_BUSY) {
+ kgsl_pwrctrl_busy_time(device, true);
+ device->pwrctrl.clk_stats.no_nap_cnt = 0;
+ }
+ } else {
+ device->pwrctrl.irq_last = 0;
+ }
+ }
+
+ mutex_unlock(&device->mutex);
+}
+EXPORT_SYMBOL(kgsl_idle_check);
+
+void kgsl_timer(unsigned long data)
+{
+ struct kgsl_device *device = (struct kgsl_device *) data;
+
+ KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
+ if (device->requested_state != KGSL_STATE_SUSPEND) {
+ if (device->pwrctrl.strtstp_sleepwake)
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
+ else
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
+ /* Have work run in a non-interrupt context. */
+ queue_work(device->work_queue, &device->idle_check_ws);
+ }
+}
+
+bool kgsl_pwrctrl_isenabled(struct kgsl_device *device)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ return (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags) != 0);
+}
+
+/**
+ * kgsl_pre_hwaccess - Enforce preconditions for touching registers
+ * @device: The device
+ *
+ * This function ensures that the correct lock is held and that the GPU
+ * clock is on immediately before a register is read or written. Note
+ * that this function does not check active_cnt because the registers
+ * must be accessed during device start and stop, when the active_cnt
+ * may legitimately be 0.
+ */
+void kgsl_pre_hwaccess(struct kgsl_device *device)
+{
+ /* In order to touch a register you must hold the device mutex...*/
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ /* and have the clock on! */
+ BUG_ON(!kgsl_pwrctrl_isenabled(device));
+}
+EXPORT_SYMBOL(kgsl_pre_hwaccess);
+
+static int
+_nap(struct kgsl_device *device)
+{
+ switch (device->state) {
+ case KGSL_STATE_ACTIVE:
+ if (!device->ftbl->isidle(device)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ return -EBUSY;
+ }
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
+ case KGSL_STATE_NAP:
+ case KGSL_STATE_SLEEP:
+ case KGSL_STATE_SLUMBER:
+ break;
+ default:
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ break;
+ }
+ return 0;
+}
+
+static void
+_sleep_accounting(struct kgsl_device *device)
+{
+ kgsl_pwrctrl_busy_time(device, false);
+ device->pwrctrl.clk_stats.start = ktime_set(0, 0);
+ device->pwrctrl.time = 0;
+ kgsl_pwrscale_sleep(device);
+}
+
+static int
+_sleep(struct kgsl_device *device)
+{
+ switch (device->state) {
+ case KGSL_STATE_ACTIVE:
+ if (!device->ftbl->isidle(device)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ return -EBUSY;
+ }
+ /* fall through */
+ case KGSL_STATE_NAP:
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
+ _sleep_accounting(device);
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
+ pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
+ PM_QOS_DEFAULT_VALUE);
+ break;
+ case KGSL_STATE_SLEEP:
+ case KGSL_STATE_SLUMBER:
+ break;
+ default:
+ KGSL_PWR_WARN(device, "unhandled state %s\n",
+ kgsl_pwrstate_to_str(device->state));
+ break;
+ }
+
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+
+ return 0;
+}
+
+static int
+_slumber(struct kgsl_device *device)
+{
+ switch (device->state) {
+ case KGSL_STATE_ACTIVE:
+ if (!device->ftbl->isidle(device)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ return -EBUSY;
+ }
+ /* fall through */
+ case KGSL_STATE_NAP:
+ case KGSL_STATE_SLEEP:
+ del_timer_sync(&device->idle_timer);
+ /* make sure power is on to stop the device*/
+ kgsl_pwrctrl_enable(device);
+ device->ftbl->suspend_context(device);
+ device->ftbl->stop(device);
+ _sleep_accounting(device);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
+ pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
+ PM_QOS_DEFAULT_VALUE);
+ break;
+ case KGSL_STATE_SLUMBER:
+ break;
+ default:
+ KGSL_PWR_WARN(device, "unhandled state %s\n",
+ kgsl_pwrstate_to_str(device->state));
+ break;
+ }
+ return 0;
+}
+
+/******************************************************************/
+/* Caller must hold the device mutex. */
+int kgsl_pwrctrl_sleep(struct kgsl_device *device)
+{
+ int status = 0;
+ KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
+
+ /* Work through the legal state transitions */
+ switch (device->requested_state) {
+ case KGSL_STATE_NAP:
+ status = _nap(device);
+ break;
+ case KGSL_STATE_SLEEP:
+ status = _sleep(device);
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+ break;
+ case KGSL_STATE_SLUMBER:
+ status = _slumber(device);
+ break;
+ default:
+ KGSL_PWR_INFO(device, "bad state request 0x%x\n",
+ device->requested_state);
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ status = -EINVAL;
+ break;
+ }
+ return status;
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
+
+/******************************************************************/
+/* Caller must hold the device mutex. */
+int kgsl_pwrctrl_wake(struct kgsl_device *device)
+{
+ int status = 0;
+ unsigned int context_id;
+ unsigned int state = device->state;
+ unsigned int ts_processed = 0xdeaddead;
+ struct kgsl_context *context;
+
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
+ switch (device->state) {
+ case KGSL_STATE_SLUMBER:
+ status = device->ftbl->start(device);
+ if (status) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ KGSL_DRV_ERR(device, "start failed %d\n", status);
+ break;
+ }
+ /* fall through */
+ case KGSL_STATE_SLEEP:
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+ kgsl_pwrscale_wake(device);
+ kgsl_sharedmem_readl(&device->memstore,
+ (unsigned int *) &context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+ context = kgsl_context_get(device, context_id);
+ if (context)
+ ts_processed = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
+ KGSL_PWR_INFO(device, "Wake from %s state. CTXT: %d RTRD TS: %08X\n",
+ kgsl_pwrstate_to_str(state),
+ context ? context->id : -1, ts_processed);
+ kgsl_context_put(context);
+ /* fall through */
+ case KGSL_STATE_NAP:
+ /* Turn on the core clocks */
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
+ /* Enable state before turning on irq */
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma,
+ device->pwrctrl.pm_qos_latency);
+ case KGSL_STATE_ACTIVE:
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ break;
+ default:
+ KGSL_PWR_WARN(device, "unhandled state %s\n",
+ kgsl_pwrstate_to_str(device->state));
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ status = -EINVAL;
+ break;
+ }
+ return status;
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_wake);
+
+void kgsl_pwrctrl_enable(struct kgsl_device *device)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ /* Order pwrrail/clk sequence based upon platform */
+ kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->default_pwrlevel);
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_enable);
+
+void kgsl_pwrctrl_disable(struct kgsl_device *device)
+{
+ /* Order pwrrail/clk sequence based upon platform */
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
+ kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_disable);
+
+void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
+{
+ trace_kgsl_pwr_set_state(device, state);
+ device->state = state;
+ device->requested_state = KGSL_STATE_NONE;
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
+
+void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
+{
+ if (state != KGSL_STATE_NONE && state != device->requested_state)
+ trace_kgsl_pwr_request_state(device, state);
+ device->requested_state = state;
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
+
+const char *kgsl_pwrstate_to_str(unsigned int state)
+{
+ switch (state) {
+ case KGSL_STATE_NONE:
+ return "NONE";
+ case KGSL_STATE_INIT:
+ return "INIT";
+ case KGSL_STATE_ACTIVE:
+ return "ACTIVE";
+ case KGSL_STATE_NAP:
+ return "NAP";
+ case KGSL_STATE_SLEEP:
+ return "SLEEP";
+ case KGSL_STATE_SUSPEND:
+ return "SUSPEND";
+ case KGSL_STATE_SLUMBER:
+ return "SLUMBER";
+ default:
+ break;
+ }
+ return "UNKNOWN";
+}
+EXPORT_SYMBOL(kgsl_pwrstate_to_str);
+
+
+/**
+ * kgsl_active_count_get() - Increase the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Increase the active count for the KGSL device and turn on
+ * clocks if this is the first reference. Code paths that need
+ * to touch the hardware or wait for the hardware to complete
+ * an operation must hold an active count reference until they
+ * are finished. An error code will be returned if waking the
+ * device fails. The device mutex must be held while *calling
+ * this function.
+ */
+int kgsl_active_count_get(struct kgsl_device *device)
+{
+ int ret = 0;
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (atomic_read(&device->active_cnt) == 0) {
+ if (device->requested_state == KGSL_STATE_SUSPEND ||
+ device->state == KGSL_STATE_SUSPEND) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->hwaccess_gate);
+ mutex_lock(&device->mutex);
+ }
+
+ /* Stop the idle timer */
+ del_timer_sync(&device->idle_timer);
+
+ ret = kgsl_pwrctrl_wake(device);
+ }
+ if (ret == 0)
+ atomic_inc(&device->active_cnt);
+ trace_kgsl_active_count(device,
+ (unsigned long) __builtin_return_address(0));
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_active_count_get);
+
+/**
+ * kgsl_active_count_get_light() - Increase the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Increase the active count for the KGSL device WITHOUT
+ * turning on the clocks based on the assumption that the clocks are already
+ * on from a previous active_count_get(). Currently this is only used for
+ * creating kgsl_events.
+ */
+int kgsl_active_count_get_light(struct kgsl_device *device)
+{
+ if (atomic_inc_not_zero(&device->active_cnt) == 0) {
+ dev_WARN_ONCE(device->dev, 1, "active count is 0!\n");
+ return -EINVAL;
+ }
+
+ trace_kgsl_active_count(device,
+ (unsigned long) __builtin_return_address(0));
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_active_count_get_light);
+
+/**
+ * kgsl_active_count_put() - Decrease the device active count
+ * @device: Pointer to a KGSL device
+ *
+ * Decrease the active count for the KGSL device and turn off
+ * clocks if there are no remaining references. This function will
+ * transition the device to NAP if there are no other pending state
+ * changes. It also completes the suspend gate. The device mutex must
+ * be held while calling this function.
+ */
+void kgsl_active_count_put(struct kgsl_device *device)
+{
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ BUG_ON(atomic_read(&device->active_cnt) == 0);
+
+ kgsl_pwrscale_idle(device);
+
+ if (atomic_dec_and_test(&device->active_cnt)) {
+ if (device->state == KGSL_STATE_ACTIVE &&
+ device->requested_state == KGSL_STATE_NONE) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ if (kgsl_pwrctrl_sleep(device)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ queue_work(device->work_queue, &device->idle_check_ws);
+ }
+ }
+
+ mod_timer(&device->idle_timer,
+ jiffies + device->pwrctrl.interval_timeout);
+ }
+
+ trace_kgsl_active_count(device,
+ (unsigned long) __builtin_return_address(0));
+
+ wake_up(&device->active_cnt_wq);
+}
+EXPORT_SYMBOL(kgsl_active_count_put);
+
+static int _check_active_count(struct kgsl_device *device, int count)
+{
+ /* Return 0 if the active count is greater than the desired value */
+ return atomic_read(&device->active_cnt) > count ? 0 : 1;
+}
+
+/**
+ * kgsl_active_count_wait() - Wait for activity to finish.
+ * @device: Pointer to a KGSL device
+ * @count: Active count value to wait for
+ *
+ * Block until the active_cnt value hits the desired value
+ */
+int kgsl_active_count_wait(struct kgsl_device *device, int count)
+{
+ int result = 0;
+
+ BUG_ON(!mutex_is_locked(&device->mutex));
+
+ if (atomic_read(&device->active_cnt) > count) {
+ int ret;
+ mutex_unlock(&device->mutex);
+ ret = wait_event_timeout(device->active_cnt_wq,
+ _check_active_count(device, count), HZ);
+ mutex_lock(&device->mutex);
+ result = ret == 0 ? -ETIMEDOUT : 0;
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(kgsl_active_count_wait);
diff --git a/drivers/gpu/msm2/kgsl_pwrctrl.h b/drivers/gpu/msm2/kgsl_pwrctrl.h
new file mode 100644
index 0000000..71a0fdd
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_pwrctrl.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_PWRCTRL_H
+#define __KGSL_PWRCTRL_H
+
+/*****************************************************************************
+** power flags
+*****************************************************************************/
+#define KGSL_PWRFLAGS_ON 1
+#define KGSL_PWRFLAGS_OFF 0
+
+#define KGSL_PWRLEVEL_TURBO 0
+#define KGSL_PWRLEVEL_NOMINAL 1
+#define KGSL_PWRLEVEL_LAST_OFFSET 2
+
+#define KGSL_PWR_ON 0xFFFF
+
+#define KGSL_MAX_CLKS 6
+
+struct platform_device;
+
+struct kgsl_clk_stats {
+ unsigned int old_clock_time[KGSL_MAX_PWRLEVELS];
+ unsigned int clock_time[KGSL_MAX_PWRLEVELS];
+ unsigned int on_time_old;
+ ktime_t start;
+ ktime_t stop;
+ unsigned int no_nap_cnt;
+ unsigned int elapsed;
+ unsigned int elapsed_old;
+};
+
+/**
+ * struct kgsl_pwrctrl - Power control settings for a KGSL device
+ * @interrupt_num - The interrupt number for the device
+ * @ebi1_clk - Pointer to the EBI clock structure
+ * @grp_clks - Array of clocks structures that we control
+ * @power_flags - Control flags for power
+ * @pwrlevels - List of supported power levels
+ * @active_pwrlevel - The currently active power level
+ * @thermal_pwrlevel - maximum powerlevel constraint from thermal
+ * @default_pwrlevel - device wake up power level
+ * @init_pwrlevel - device inital power level
+ * @max_pwrlevel - maximum allowable powerlevel per the user
+ * @min_pwrlevel - minimum allowable powerlevel per the user
+ * @num_pwrlevels - number of available power levels
+ * @interval_timeout - timeout in jiffies to be idle before a power event
+ * @strtstp_sleepwake - true if the device supports low latency GPU start/stop
+ * @gpu_reg - pointer to the regulator structure for gpu_reg
+ * @gpu_cx - pointer to the regulator structure for gpu_cx
+ * @pcl - bus scale identifier
+ * @idle_needed - true if the device needs a idle before clock change
+ * @irq_name - resource name for the IRQ
+ * @clk_stats - structure of clock statistics
+ * @pm_qos_req_dma - the power management quality of service structure
+ * @pm_qos_latency - allowed CPU latency in microseconds
+ * @step_mul - multiplier for moving between power levels
+ */
+
+struct kgsl_pwrctrl {
+ int interrupt_num;
+ struct clk *ebi1_clk;
+ struct clk *grp_clks[KGSL_MAX_CLKS];
+ unsigned long power_flags;
+ unsigned long ctrl_flags;
+ struct kgsl_pwrlevel pwrlevels[KGSL_MAX_PWRLEVELS];
+ unsigned int active_pwrlevel;
+ int thermal_pwrlevel;
+ unsigned int default_pwrlevel;
+ unsigned int init_pwrlevel;
+ unsigned int max_pwrlevel;
+ unsigned int min_pwrlevel;
+ unsigned int num_pwrlevels;
+ unsigned int interval_timeout;
+ bool strtstp_sleepwake;
+ struct regulator *gpu_reg;
+ struct regulator *gpu_cx;
+ uint32_t pcl;
+ unsigned int idle_needed;
+ const char *irq_name;
+ s64 time;
+ struct kgsl_clk_stats clk_stats;
+ struct pm_qos_request pm_qos_req_dma;
+ unsigned int pm_qos_latency;
+ unsigned int step_mul;
+ unsigned int irq_last;
+};
+
+void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
+int kgsl_pwrctrl_init(struct kgsl_device *device);
+void kgsl_pwrctrl_close(struct kgsl_device *device);
+void kgsl_timer(unsigned long data);
+void kgsl_idle_check(struct work_struct *work);
+void kgsl_pre_hwaccess(struct kgsl_device *device);
+int kgsl_pwrctrl_sleep(struct kgsl_device *device);
+int kgsl_pwrctrl_wake(struct kgsl_device *device);
+void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
+ unsigned int level);
+int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device);
+void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device);
+void kgsl_pwrctrl_enable(struct kgsl_device *device);
+void kgsl_pwrctrl_disable(struct kgsl_device *device);
+bool kgsl_pwrctrl_isenabled(struct kgsl_device *device);
+
+static inline unsigned long kgsl_get_clkrate(struct clk *clk)
+{
+ return (clk != NULL) ? clk_get_rate(clk) : 0;
+}
+
+void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state);
+void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state);
+
+int kgsl_active_count_get(struct kgsl_device *device);
+int kgsl_active_count_get_light(struct kgsl_device *device);
+void kgsl_active_count_put(struct kgsl_device *device);
+int kgsl_active_count_wait(struct kgsl_device *device, int count);
+
+#endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/gpu/msm2/kgsl_pwrscale.c b/drivers/gpu/msm2/kgsl_pwrscale.c
new file mode 100644
index 0000000..47554c4
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_pwrscale.c
@@ -0,0 +1,377 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include <asm/page.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+struct kgsl_pwrscale_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kgsl_device *device, char *buf);
+ ssize_t (*store)(struct kgsl_device *device, const char *buf,
+ size_t count);
+};
+
+#define to_pwrscale(k) container_of(k, struct kgsl_pwrscale, kobj)
+#define pwrscale_to_device(p) container_of(p, struct kgsl_device, pwrscale)
+#define to_device(k) container_of(k, struct kgsl_device, pwrscale_kobj)
+#define to_pwrscale_attr(a) \
+container_of(a, struct kgsl_pwrscale_attribute, attr)
+#define to_policy_attr(a) \
+container_of(a, struct kgsl_pwrscale_policy_attribute, attr)
+
+#define PWRSCALE_ATTR(_name, _mode, _show, _store) \
+struct kgsl_pwrscale_attribute pwrscale_attr_##_name = \
+__ATTR(_name, _mode, _show, _store)
+
+/* Master list of available policies */
+
+static struct kgsl_pwrscale_policy *kgsl_pwrscale_policies[] = {
+#ifdef CONFIG_MSM_SCM
+ &kgsl_pwrscale_policy_tz,
+#endif
+#ifdef CONFIG_MSM_SLEEP_STATS_DEVICE
+ &kgsl_pwrscale_policy_idlestats,
+#endif
+ NULL
+};
+
+static ssize_t pwrscale_policy_store(struct kgsl_device *device,
+ const char *buf, size_t count)
+{
+ int i;
+ struct kgsl_pwrscale_policy *policy = NULL;
+
+ /* The special keyword none allows the user to detach all
+ policies */
+ if (!strncmp("none", buf, 4)) {
+ kgsl_pwrscale_detach_policy(device);
+ return count;
+ }
+
+ for (i = 0; kgsl_pwrscale_policies[i]; i++) {
+ if (!strncmp(kgsl_pwrscale_policies[i]->name, buf,
+ strnlen(kgsl_pwrscale_policies[i]->name,
+ PAGE_SIZE))) {
+ policy = kgsl_pwrscale_policies[i];
+ break;
+ }
+ }
+
+ if (policy)
+ if (kgsl_pwrscale_attach_policy(device, policy))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t pwrscale_policy_show(struct kgsl_device *device, char *buf)
+{
+ int ret;
+
+ if (device->pwrscale.policy) {
+ ret = snprintf(buf, PAGE_SIZE, "%s",
+ device->pwrscale.policy->name);
+ if (device->pwrscale.enabled == 0)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ " (disabled)");
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else
+ ret = snprintf(buf, PAGE_SIZE, "none\n");
+
+ return ret;
+}
+
+PWRSCALE_ATTR(policy, 0664, pwrscale_policy_show, pwrscale_policy_store);
+
+static ssize_t pwrscale_avail_policies_show(struct kgsl_device *device,
+ char *buf)
+{
+ int i, ret = 0;
+
+ for (i = 0; kgsl_pwrscale_policies[i]; i++) {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s ",
+ kgsl_pwrscale_policies[i]->name);
+ }
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "none\n");
+ return ret;
+}
+PWRSCALE_ATTR(avail_policies, 0444, pwrscale_avail_policies_show, NULL);
+
+static struct attribute *pwrscale_attrs[] = {
+ &pwrscale_attr_policy.attr,
+ &pwrscale_attr_avail_policies.attr,
+ NULL
+};
+
+static ssize_t policy_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
+ struct kgsl_device *device = pwrscale_to_device(pwrscale);
+ struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
+ ssize_t ret;
+
+ if (pattr->show)
+ ret = pattr->show(device, pwrscale, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static ssize_t policy_sysfs_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
+ struct kgsl_device *device = pwrscale_to_device(pwrscale);
+ struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
+ ssize_t ret;
+
+ if (pattr->store)
+ ret = pattr->store(device, pwrscale, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static void policy_sysfs_release(struct kobject *kobj)
+{
+}
+
+static ssize_t pwrscale_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kgsl_device *device = to_device(kobj);
+ struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
+ ssize_t ret;
+
+ if (pattr->show)
+ ret = pattr->show(device, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static ssize_t pwrscale_sysfs_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = to_device(kobj);
+ struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
+ ssize_t ret;
+
+ if (pattr->store)
+ ret = pattr->store(device, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static void pwrscale_sysfs_release(struct kobject *kobj)
+{
+}
+
+static const struct sysfs_ops policy_sysfs_ops = {
+ .show = policy_sysfs_show,
+ .store = policy_sysfs_store
+};
+
+static const struct sysfs_ops pwrscale_sysfs_ops = {
+ .show = pwrscale_sysfs_show,
+ .store = pwrscale_sysfs_store
+};
+
+static struct kobj_type ktype_pwrscale_policy = {
+ .sysfs_ops = &policy_sysfs_ops,
+ .default_attrs = NULL,
+ .release = policy_sysfs_release
+};
+
+static struct kobj_type ktype_pwrscale = {
+ .sysfs_ops = &pwrscale_sysfs_ops,
+ .default_attrs = pwrscale_attrs,
+ .release = pwrscale_sysfs_release
+};
+
+#define PWRSCALE_ACTIVE(_d) \
+ ((_d)->pwrscale.policy && (_d)->pwrscale.enabled)
+
+void kgsl_pwrscale_sleep(struct kgsl_device *device)
+{
+ if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->sleep)
+ device->pwrscale.policy->sleep(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_sleep);
+
+void kgsl_pwrscale_wake(struct kgsl_device *device)
+{
+ if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->wake)
+ device->pwrscale.policy->wake(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_wake);
+
+void kgsl_pwrscale_busy(struct kgsl_device *device)
+{
+ if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->busy)
+ device->pwrscale.policy->busy(device,
+ &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_busy);
+
+void kgsl_pwrscale_idle(struct kgsl_device *device)
+{
+ if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->idle)
+ if (device->state == KGSL_STATE_ACTIVE)
+ device->pwrscale.policy->idle(device,
+ &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_idle);
+
+void kgsl_pwrscale_disable(struct kgsl_device *device)
+{
+ device->pwrscale.enabled = 0;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_disable);
+
+void kgsl_pwrscale_enable(struct kgsl_device *device)
+{
+ device->pwrscale.enabled = 1;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_enable);
+
+int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ struct attribute_group *attr_group)
+{
+ int ret;
+
+ ret = kobject_add(&pwrscale->kobj, &device->pwrscale_kobj,
+ "%s", pwrscale->policy->name);
+
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&pwrscale->kobj, attr_group);
+
+ if (ret) {
+ kobject_del(&pwrscale->kobj);
+ kobject_put(&pwrscale->kobj);
+ }
+
+ return ret;
+}
+
+void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ struct attribute_group *attr_group)
+{
+ sysfs_remove_group(&pwrscale->kobj, attr_group);
+ kobject_del(&pwrscale->kobj);
+ kobject_put(&pwrscale->kobj);
+}
+
+static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device)
+{
+ if (device->pwrscale.policy != NULL) {
+ device->pwrscale.policy->close(device, &device->pwrscale);
+
+ /*
+ * Try to set max pwrlevel which will be limited to thermal by
+ * kgsl_pwrctrl_pwrlevel_change if thermal is indeed lower
+ */
+
+ kgsl_pwrctrl_pwrlevel_change(device,
+ device->pwrctrl.max_pwrlevel);
+ device->pwrctrl.default_pwrlevel =
+ device->pwrctrl.max_pwrlevel;
+ }
+ device->pwrscale.policy = NULL;
+}
+
+void kgsl_pwrscale_detach_policy(struct kgsl_device *device)
+{
+ mutex_lock(&device->mutex);
+ _kgsl_pwrscale_detach_policy(device);
+ mutex_unlock(&device->mutex);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_detach_policy);
+
+int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
+ struct kgsl_pwrscale_policy *policy)
+{
+ int ret = 0;
+
+ mutex_lock(&device->mutex);
+
+ if (device->pwrscale.policy == policy)
+ goto done;
+
+ if (device->pwrctrl.num_pwrlevels < 3) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (device->pwrscale.policy != NULL)
+ _kgsl_pwrscale_detach_policy(device);
+
+ device->pwrscale.policy = policy;
+
+ device->pwrctrl.default_pwrlevel =
+ device->pwrctrl.init_pwrlevel;
+ /* Pwrscale is enabled by default at attach time */
+ kgsl_pwrscale_enable(device);
+
+ if (policy) {
+ ret = device->pwrscale.policy->init(device, &device->pwrscale);
+ if (ret)
+ device->pwrscale.policy = NULL;
+ }
+
+done:
+ mutex_unlock(&device->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_attach_policy);
+
+int kgsl_pwrscale_init(struct kgsl_device *device)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale,
+ &device->dev->kobj, "pwrscale");
+
+ if (ret)
+ return ret;
+
+ kobject_init(&device->pwrscale.kobj, &ktype_pwrscale_policy);
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_init);
+
+void kgsl_pwrscale_close(struct kgsl_device *device)
+{
+ kobject_put(&device->pwrscale_kobj);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_close);
diff --git a/drivers/gpu/msm2/kgsl_pwrscale.h b/drivers/gpu/msm2/kgsl_pwrscale.h
new file mode 100644
index 0000000..f17b394
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_pwrscale.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_PWRSCALE_H
+#define __KGSL_PWRSCALE_H
+
+struct kgsl_pwrscale;
+
+struct kgsl_pwrscale_policy {
+ const char *name;
+ int (*init)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*close)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*idle)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*busy)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*sleep)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*wake)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+};
+
+struct kgsl_pwrscale {
+ struct kgsl_pwrscale_policy *policy;
+ struct kobject kobj;
+ void *priv;
+ int enabled;
+};
+
+struct kgsl_pwrscale_policy_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale, char *buf);
+ ssize_t (*store)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale, const char *buf,
+ size_t count);
+};
+
+#define PWRSCALE_POLICY_ATTR(_name, _mode, _show, _store) \
+ struct kgsl_pwrscale_policy_attribute policy_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz;
+extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats;
+extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_msm;
+
+int kgsl_pwrscale_init(struct kgsl_device *device);
+void kgsl_pwrscale_close(struct kgsl_device *device);
+
+int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
+ struct kgsl_pwrscale_policy *policy);
+void kgsl_pwrscale_detach_policy(struct kgsl_device *device);
+
+void kgsl_pwrscale_idle(struct kgsl_device *device);
+void kgsl_pwrscale_busy(struct kgsl_device *device);
+void kgsl_pwrscale_sleep(struct kgsl_device *device);
+void kgsl_pwrscale_wake(struct kgsl_device *device);
+
+void kgsl_pwrscale_enable(struct kgsl_device *device);
+void kgsl_pwrscale_disable(struct kgsl_device *device);
+
+int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ struct attribute_group *attr_group);
+
+void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ struct attribute_group *attr_group);
+#endif
diff --git a/drivers/gpu/msm2/kgsl_pwrscale_idlestats.c b/drivers/gpu/msm2/kgsl_pwrscale_idlestats.c
new file mode 100644
index 0000000..c3188a5
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_pwrscale_idlestats.c
@@ -0,0 +1,232 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/idle_stats_device.h>
+#include <linux/cpufreq.h>
+#include <linux/notifier.h>
+#include <linux/cpumask.h>
+#include <linux/tick.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+#define MAX_CORES 4
+struct _cpu_info {
+ spinlock_t lock;
+ struct notifier_block cpu_nb;
+ u64 start[MAX_CORES];
+ u64 end[MAX_CORES];
+ int curr_freq[MAX_CORES];
+ int max_freq[MAX_CORES];
+};
+
+struct idlestats_priv {
+ char name[32];
+ struct msm_idle_stats_device idledev;
+ struct kgsl_device *device;
+ struct msm_idle_pulse pulse;
+ struct _cpu_info cpu_info;
+};
+
+static int idlestats_cpufreq_notifier(
+ struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct _cpu_info *cpu = container_of(nb,
+ struct _cpu_info, cpu_nb);
+ struct cpufreq_freqs *freq = data;
+
+ if (val != CPUFREQ_POSTCHANGE)
+ return 0;
+
+ spin_lock(&cpu->lock);
+ if (freq->cpu < num_possible_cpus())
+ cpu->curr_freq[freq->cpu] = freq->new / 1000;
+ spin_unlock(&cpu->lock);
+
+ return 0;
+}
+
+static void idlestats_get_sample(struct msm_idle_stats_device *idledev,
+ struct msm_idle_pulse *pulse)
+{
+ struct kgsl_power_stats stats;
+ struct idlestats_priv *priv = container_of(idledev,
+ struct idlestats_priv, idledev);
+ struct kgsl_device *device = priv->device;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+ mutex_lock(&device->mutex);
+ /* If the GPU is asleep, don't wake it up - assume that we
+ are idle */
+
+ if (device->state == KGSL_STATE_ACTIVE) {
+ device->ftbl->power_stats(device, &stats);
+ pulse->busy_start_time = pwr->time - stats.busy_time;
+ pulse->busy_interval = stats.busy_time;
+ } else {
+ pulse->busy_start_time = pwr->time;
+ pulse->busy_interval = 0;
+ }
+ pulse->wait_interval = 0;
+ mutex_unlock(&device->mutex);
+}
+
+static void idlestats_busy(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct idlestats_priv *priv = pwrscale->priv;
+ struct kgsl_power_stats stats;
+ int i, busy, nr_cpu = 1;
+
+ if (priv->pulse.busy_start_time != 0) {
+ priv->pulse.wait_interval = 0;
+ /* Calculate the total CPU busy time for this GPU pulse */
+ for (i = 0; i < num_possible_cpus(); i++) {
+ spin_lock(&priv->cpu_info.lock);
+ if (cpu_online(i)) {
+ priv->cpu_info.end[i] =
+ (u64)ktime_to_us(ktime_get()) -
+ get_cpu_idle_time_us(i, NULL);
+ busy = priv->cpu_info.end[i] -
+ priv->cpu_info.start[i];
+ /* Normalize the busy time by frequency */
+ busy = priv->cpu_info.curr_freq[i] *
+ (busy / priv->cpu_info.max_freq[i]);
+ priv->pulse.wait_interval += busy;
+ nr_cpu++;
+ }
+ spin_unlock(&priv->cpu_info.lock);
+ }
+ priv->pulse.wait_interval /= nr_cpu;
+
+ /* This is called from within a mutex protected function, so
+ no additional locking required */
+ device->ftbl->power_stats(device, &stats);
+
+ /* If total_time is zero, then we don't have
+ any interesting statistics to store */
+ if (stats.total_time == 0) {
+ priv->pulse.busy_start_time = 0;
+ return;
+ }
+
+ priv->pulse.busy_interval = stats.busy_time;
+ msm_idle_stats_idle_end(&priv->idledev, &priv->pulse);
+ }
+ priv->pulse.busy_start_time = ktime_to_us(ktime_get());
+}
+
+static void idlestats_idle(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ int i, nr_cpu;
+ struct idlestats_priv *priv = pwrscale->priv;
+
+ nr_cpu = num_possible_cpus();
+ for (i = 0; i < nr_cpu; i++)
+ if (cpu_online(i))
+ priv->cpu_info.start[i] =
+ (u64)ktime_to_us(ktime_get()) -
+ get_cpu_idle_time_us(i, NULL);
+
+ msm_idle_stats_idle_start(&priv->idledev);
+}
+
+static void idlestats_sleep(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct idlestats_priv *priv = pwrscale->priv;
+ msm_idle_stats_update_event(&priv->idledev,
+ MSM_IDLE_STATS_EVENT_IDLE_TIMER_EXPIRED);
+}
+
+static void idlestats_wake(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ /* Use highest perf level on wake-up from
+ sleep for better performance */
+ kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
+}
+
+static int idlestats_init(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct idlestats_priv *priv;
+ struct cpufreq_policy cpu_policy;
+ int ret, i;
+
+ priv = pwrscale->priv = kzalloc(sizeof(struct idlestats_priv),
+ GFP_KERNEL);
+ if (pwrscale->priv == NULL)
+ return -ENOMEM;
+
+ snprintf(priv->name, sizeof(priv->name), "idle_stats_%s",
+ device->name);
+
+ priv->device = device;
+
+ priv->idledev.name = (const char *) priv->name;
+ priv->idledev.get_sample = idlestats_get_sample;
+
+ spin_lock_init(&priv->cpu_info.lock);
+ priv->cpu_info.cpu_nb.notifier_call =
+ idlestats_cpufreq_notifier;
+ ret = cpufreq_register_notifier(&priv->cpu_info.cpu_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret)
+ goto err;
+ for (i = 0; i < num_possible_cpus(); i++) {
+ cpufreq_frequency_table_cpuinfo(&cpu_policy,
+ cpufreq_frequency_get_table(i));
+ priv->cpu_info.max_freq[i] = cpu_policy.max / 1000;
+ priv->cpu_info.curr_freq[i] = cpu_policy.max / 1000;
+ }
+ ret = msm_idle_stats_register_device(&priv->idledev);
+err:
+ if (ret) {
+ kfree(pwrscale->priv);
+ pwrscale->priv = NULL;
+ }
+
+ return ret;
+}
+
+static void idlestats_close(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct idlestats_priv *priv = pwrscale->priv;
+
+ if (pwrscale->priv == NULL)
+ return;
+
+ cpufreq_unregister_notifier(&priv->cpu_info.cpu_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ msm_idle_stats_deregister_device(&priv->idledev);
+
+ kfree(pwrscale->priv);
+ pwrscale->priv = NULL;
+}
+
+struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats = {
+ .name = "idlestats",
+ .init = idlestats_init,
+ .idle = idlestats_idle,
+ .busy = idlestats_busy,
+ .sleep = idlestats_sleep,
+ .wake = idlestats_wake,
+ .close = idlestats_close
+};
diff --git a/drivers/gpu/msm2/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm2/kgsl_pwrscale_trustzone.c
new file mode 100644
index 0000000..40649d2
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_pwrscale_trustzone.c
@@ -0,0 +1,265 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <mach/socinfo.h>
+#include <mach/scm.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+#define TZ_GOVERNOR_PERFORMANCE 0
+#define TZ_GOVERNOR_ONDEMAND 1
+
+struct tz_priv {
+ int governor;
+ struct kgsl_power_stats bin;
+ unsigned int idle_dcvs;
+};
+spinlock_t tz_lock;
+
+/* FLOOR is 5msec to capture up to 3 re-draws
+ * per frame for 60fps content.
+ */
+#define FLOOR 5000
+/* CEILING is 50msec, larger than any standard
+ * frame length, but less than the idle timer.
+ */
+#define CEILING 50000
+#define TZ_RESET_ID 0x3
+#define TZ_UPDATE_ID 0x4
+#define TZ_INIT_ID 0x6
+
+/* Trap into the TrustZone, and call funcs there. */
+static int __secure_tz_entry2(u32 cmd, u32 val1, u32 val2)
+{
+ int ret;
+ spin_lock(&tz_lock);
+ /* sync memory before sending the commands to tz*/
+ __iowmb();
+ ret = scm_call_atomic2(SCM_SVC_IO, cmd, val1, val2);
+ spin_unlock(&tz_lock);
+ return ret;
+}
+
+static int __secure_tz_entry3(u32 cmd, u32 val1, u32 val2,
+ u32 val3)
+{
+ int ret;
+ spin_lock(&tz_lock);
+ /* sync memory before sending the commands to tz*/
+ __iowmb();
+ ret = scm_call_atomic3(SCM_SVC_IO, cmd, val1, val2,
+ val3);
+ spin_unlock(&tz_lock);
+ return ret;
+}
+
+static ssize_t tz_governor_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ struct tz_priv *priv = pwrscale->priv;
+ int ret;
+
+ if (priv->governor == TZ_GOVERNOR_ONDEMAND)
+ ret = snprintf(buf, 10, "ondemand\n");
+ else
+ ret = snprintf(buf, 13, "performance\n");
+
+ return ret;
+}
+
+static ssize_t tz_governor_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ char str[20];
+ struct tz_priv *priv = pwrscale->priv;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int ret;
+
+ ret = sscanf(buf, "%20s", str);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&device->mutex);
+
+ if (!strncmp(str, "ondemand", 8))
+ priv->governor = TZ_GOVERNOR_ONDEMAND;
+ else if (!strncmp(str, "performance", 11))
+ priv->governor = TZ_GOVERNOR_PERFORMANCE;
+
+ if (priv->governor == TZ_GOVERNOR_PERFORMANCE) {
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->max_pwrlevel);
+ pwr->default_pwrlevel = pwr->max_pwrlevel;
+ } else {
+ pwr->default_pwrlevel = pwr->init_pwrlevel;
+ }
+
+ mutex_unlock(&device->mutex);
+ return count;
+}
+
+PWRSCALE_POLICY_ATTR(governor, 0644, tz_governor_show, tz_governor_store);
+
+static struct attribute *tz_attrs[] = {
+ &policy_attr_governor.attr,
+ NULL
+};
+
+static struct attribute_group tz_attr_group = {
+ .attrs = tz_attrs,
+};
+
+static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+ struct tz_priv *priv = pwrscale->priv;
+ if (device->state != KGSL_STATE_NAP &&
+ priv->governor == TZ_GOVERNOR_ONDEMAND)
+ kgsl_pwrctrl_pwrlevel_change(device,
+ device->pwrctrl.default_pwrlevel);
+}
+
+static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct tz_priv *priv = pwrscale->priv;
+ struct kgsl_power_stats stats;
+ int val, idle;
+
+ /* In "performance" mode the clock speed always stays
+ the same */
+ if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
+ return;
+
+ device->ftbl->power_stats(device, &stats);
+ priv->bin.total_time += stats.total_time;
+ priv->bin.busy_time += stats.busy_time;
+ /* Do not waste CPU cycles running this algorithm if
+ * the GPU just started, or if less than FLOOR time
+ * has passed since the last run.
+ */
+ if ((stats.total_time == 0) ||
+ (priv->bin.total_time < FLOOR))
+ return;
+
+ /* If there is an extended block of busy processing,
+ * increase frequency. Otherwise run the normal algorithm.
+ */
+ if (priv->bin.busy_time > CEILING) {
+ val = -1;
+ } else if (priv->idle_dcvs) {
+ idle = priv->bin.total_time - priv->bin.busy_time;
+ idle = (idle > 0) ? idle : 0;
+ val = __secure_tz_entry2(TZ_UPDATE_ID, idle, device->id);
+ } else {
+ if (pwr->step_mul > 1)
+ val = __secure_tz_entry3(TZ_UPDATE_ID,
+ (pwr->active_pwrlevel + 1)/2,
+ priv->bin.total_time, priv->bin.busy_time);
+ else
+ val = __secure_tz_entry3(TZ_UPDATE_ID,
+ pwr->active_pwrlevel,
+ priv->bin.total_time, priv->bin.busy_time);
+ }
+
+ priv->bin.total_time = 0;
+ priv->bin.busy_time = 0;
+
+ /* If the decision is to move to a lower level, make sure the GPU
+ * frequency drops.
+ */
+ if (val > 0)
+ val *= pwr->step_mul;
+ if (val)
+ kgsl_pwrctrl_pwrlevel_change(device,
+ pwr->active_pwrlevel + val);
+}
+
+static void tz_busy(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ device->on_time = ktime_to_us(ktime_get());
+}
+
+static void tz_sleep(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct tz_priv *priv = pwrscale->priv;
+
+ __secure_tz_entry2(TZ_RESET_ID, 0, 0);
+ priv->bin.total_time = 0;
+ priv->bin.busy_time = 0;
+}
+
+#ifdef CONFIG_MSM_SCM
+static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+ int i = 0, j = 1, ret = 0;
+ struct tz_priv *priv;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ unsigned int tz_pwrlevels[KGSL_MAX_PWRLEVELS + 1];
+
+ priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
+ if (pwrscale->priv == NULL)
+ return -ENOMEM;
+ priv->idle_dcvs = 0;
+ priv->governor = TZ_GOVERNOR_ONDEMAND;
+ spin_lock_init(&tz_lock);
+ kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);
+ for (i = 0; i < pwr->num_pwrlevels - 1; i++) {
+ if (i == 0)
+ tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq;
+ else if (pwr->pwrlevels[i].gpu_freq !=
+ pwr->pwrlevels[i - 1].gpu_freq) {
+ j++;
+ tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq;
+ }
+ }
+ tz_pwrlevels[0] = j;
+ ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
+ sizeof(tz_pwrlevels), NULL, 0);
+ if (ret)
+ priv->idle_dcvs = 1;
+ return 0;
+}
+#else
+static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_MSM_SCM */
+
+static void tz_close(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+ kgsl_pwrscale_policy_remove_files(device, pwrscale, &tz_attr_group);
+ kfree(pwrscale->priv);
+ pwrscale->priv = NULL;
+}
+
+struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz = {
+ .name = "trustzone",
+ .init = tz_init,
+ .busy = tz_busy,
+ .idle = tz_idle,
+ .sleep = tz_sleep,
+ .wake = tz_wake,
+ .close = tz_close
+};
+EXPORT_SYMBOL(kgsl_pwrscale_policy_tz);
diff --git a/drivers/gpu/msm2/kgsl_sharedmem.c b/drivers/gpu/msm2/kgsl_sharedmem.c
new file mode 100755
index 0000000..0d6f202
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_sharedmem.c
@@ -0,0 +1,1086 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+#include <linux/memory_alloc.h>
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
+#include <linux/highmem.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_device.h"
+
+DEFINE_MUTEX(kernel_map_global_lock);
+
+/* An attribute for showing per-process memory statistics */
+struct kgsl_mem_entry_attribute {
+ struct attribute attr;
+ int memtype;
+ ssize_t (*show)(struct kgsl_process_private *priv,
+ int type, char *buf);
+};
+
+#define to_mem_entry_attr(a) \
+container_of(a, struct kgsl_mem_entry_attribute, attr)
+
+#define __MEM_ENTRY_ATTR(_type, _name, _show) \
+{ \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .memtype = _type, \
+ .show = _show, \
+}
+
+/*
+ * A structure to hold the attributes for a particular memory type.
+ * For each memory type in each process we store the current and maximum
+ * memory usage and display the counts in sysfs. This structure and
+ * the following macro allow us to simplify the definition for those
+ * adding new memory types
+ */
+
+struct mem_entry_stats {
+ int memtype;
+ struct kgsl_mem_entry_attribute attr;
+ struct kgsl_mem_entry_attribute max_attr;
+};
+
+
+#define MEM_ENTRY_STAT(_type, _name) \
+{ \
+ .memtype = _type, \
+ .attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
+ .max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
+ mem_entry_max_show), \
+}
+
+/**
+ * Given a kobj, find the process structure attached to it
+ */
+
+static struct kgsl_process_private *
+_get_priv_from_kobj(struct kobject *kobj)
+{
+ struct kgsl_process_private *private;
+ unsigned long name;
+
+ if (!kobj)
+ return NULL;
+
+ if (sscanf(kobj->name, "%ld", &name) != 1)
+ return NULL;
+
+ list_for_each_entry(private, &kgsl_driver.process_list, list) {
+ if (private->pid == name)
+ return private;
+ }
+
+ return NULL;
+}
+
+/**
+ * Show the current amount of memory allocated for the given memtype
+ */
+
+static ssize_t
+mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
+}
+
+/**
+ * Show the maximum memory allocated for the given memtype through the life of
+ * the process
+ */
+
+static ssize_t
+mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
+}
+
+
+static void mem_entry_sysfs_release(struct kobject *kobj)
+{
+}
+
+static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
+ struct kgsl_process_private *priv;
+ ssize_t ret;
+
+ mutex_lock(&kgsl_driver.process_mutex);
+ priv = _get_priv_from_kobj(kobj);
+
+ if (priv && pattr->show)
+ ret = pattr->show(priv, pattr->memtype, buf);
+ else
+ ret = -EIO;
+
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return ret;
+}
+
+static const struct sysfs_ops mem_entry_sysfs_ops = {
+ .show = mem_entry_sysfs_show,
+};
+
+static struct kobj_type ktype_mem_entry = {
+ .sysfs_ops = &mem_entry_sysfs_ops,
+ .default_attrs = NULL,
+ .release = mem_entry_sysfs_release
+};
+
+static struct mem_entry_stats mem_stats[] = {
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
+#ifdef CONFIG_ASHMEM
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
+#endif
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
+#ifdef CONFIG_ION
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
+#endif
+};
+
+void
+kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
+ sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
+ sysfs_remove_file(&private->kobj,
+ &mem_stats[i].max_attr.attr);
+ }
+
+ kobject_put(&private->kobj);
+}
+
+/**
+ * kgsl_process_init_sysfs() - Initialize and create sysfs files for a process
+ *
+ * @device: Pointer to kgsl device struct
+ * @private: Pointer to the structure for the process
+ *
+ * @returns: 0 on success, error code otherwise
+ *
+ * kgsl_process_init_sysfs() is called at the time of creating the
+ * process struct when a process opens the kgsl device for the first time.
+ * This function creates the sysfs files for the process.
+ */
+int
+kgsl_process_init_sysfs(struct kgsl_device *device,
+ struct kgsl_process_private *private)
+{
+ unsigned char name[16];
+ int i, ret = 0;
+
+ snprintf(name, sizeof(name), "%d", private->pid);
+
+ ret = kobject_init_and_add(&private->kobj, &ktype_mem_entry,
+ kgsl_driver.prockobj, name);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
+ /* We need to check the value of sysfs_create_file, but we
+ * don't really care if it passed or not */
+
+ ret = sysfs_create_file(&private->kobj,
+ &mem_stats[i].attr.attr);
+ ret = sysfs_create_file(&private->kobj,
+ &mem_stats[i].max_attr.attr);
+ }
+ return ret;
+}
+
+static int kgsl_drv_memstat_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned int val = 0;
+
+ if (!strncmp(attr->attr.name, "vmalloc", 7))
+ val = kgsl_driver.stats.vmalloc;
+ else if (!strncmp(attr->attr.name, "vmalloc_max", 11))
+ val = kgsl_driver.stats.vmalloc_max;
+ else if (!strncmp(attr->attr.name, "page_alloc", 10))
+ val = kgsl_driver.stats.page_alloc;
+ else if (!strncmp(attr->attr.name, "page_alloc_max", 14))
+ val = kgsl_driver.stats.page_alloc_max;
+ else if (!strncmp(attr->attr.name, "coherent", 8))
+ val = kgsl_driver.stats.coherent;
+ else if (!strncmp(attr->attr.name, "coherent_max", 12))
+ val = kgsl_driver.stats.coherent_max;
+ else if (!strncmp(attr->attr.name, "mapped", 6))
+ val = kgsl_driver.stats.mapped;
+ else if (!strncmp(attr->attr.name, "mapped_max", 10))
+ val = kgsl_driver.stats.mapped_max;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static int kgsl_drv_histogram_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
+ kgsl_driver.stats.histogram[i]);
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ return len;
+}
+
+static int kgsl_drv_full_cache_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned int thresh;
+ ret = sscanf(buf, "%d", &thresh);
+ if (ret != 1)
+ return count;
+
+ kgsl_driver.full_cache_threshold = thresh;
+
+ return count;
+}
+
+static int kgsl_drv_full_cache_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ kgsl_driver.full_cache_threshold);
+}
+
+DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);
+DEVICE_ATTR(full_cache_threshold, 0644,
+ kgsl_drv_full_cache_threshold_show,
+ kgsl_drv_full_cache_threshold_store);
+
+static const struct device_attribute *drv_attr_list[] = {
+ &dev_attr_vmalloc,
+ &dev_attr_vmalloc_max,
+ &dev_attr_page_alloc,
+ &dev_attr_page_alloc_max,
+ &dev_attr_coherent,
+ &dev_attr_coherent_max,
+ &dev_attr_mapped,
+ &dev_attr_mapped_max,
+ &dev_attr_histogram,
+ &dev_attr_full_cache_threshold,
+ NULL
+};
+
+void
+kgsl_sharedmem_uninit_sysfs(void)
+{
+ kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
+}
+
+int
+kgsl_sharedmem_init_sysfs(void)
+{
+ return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
+ drv_attr_list);
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
+{
+ switch (op) {
+ case KGSL_CACHE_OP_FLUSH:
+ outer_flush_range(addr, addr + size);
+ break;
+ case KGSL_CACHE_OP_CLEAN:
+ outer_clean_range(addr, addr + size);
+ break;
+ case KGSL_CACHE_OP_INV:
+ outer_inv_range(addr, addr + size);
+ break;
+ }
+}
+
+static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, sglen, i) {
+ unsigned int paddr = kgsl_get_sg_pa(s);
+ _outer_cache_range_op(op, paddr, s->length);
+ }
+}
+
+#else
+static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
+{
+}
+#endif
+
+static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ int i, pgoff;
+ struct scatterlist *s = memdesc->sg;
+ unsigned int offset;
+
+ offset = ((unsigned long) vmf->virtual_address - vma->vm_start);
+
+ if (offset >= memdesc->size)
+ return VM_FAULT_SIGBUS;
+
+ pgoff = offset >> PAGE_SHIFT;
+
+ /*
+ * The sglist might be comprised of mixed blocks of memory depending
+ * on how many 64K pages were allocated. This means we have to do math
+ * to find the actual 4K page to map in user space
+ */
+
+ for (i = 0; i < memdesc->sglen; i++) {
+ int npages = s->length >> PAGE_SHIFT;
+
+ if (pgoff < npages) {
+ struct page *page = sg_page(s);
+
+ page = nth_page(page, pgoff);
+
+ get_page(page);
+ vmf->page = page;
+
+ return 0;
+ }
+
+ pgoff -= npages;
+ s = sg_next(s);
+ }
+
+ return VM_FAULT_SIGBUS;
+}
+
+static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
+{
+ return VM_RESERVED | VM_DONTEXPAND;
+}
+
+static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
+{
+ return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+}
+
+/*
+ * kgsl_page_alloc_unmap_kernel() - Unmap the memory in memdesc
+ *
+ * @memdesc: The memory descriptor which contains information about the memory
+ *
+ * Unmaps the memory mapped into kernel address space
+ */
+static void kgsl_page_alloc_unmap_kernel(struct kgsl_memdesc *memdesc)
+{
+ mutex_lock(&kernel_map_global_lock);
+ if (!memdesc->hostptr) {
+ BUG_ON(memdesc->hostptr_count);
+ goto done;
+ }
+ memdesc->hostptr_count--;
+ if (memdesc->hostptr_count)
+ goto done;
+ vunmap(memdesc->hostptr);
+ kgsl_driver.stats.vmalloc -= memdesc->size;
+ memdesc->hostptr = NULL;
+done:
+ mutex_unlock(&kernel_map_global_lock);
+}
+
+static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
+{
+ int i = 0;
+ struct scatterlist *sg;
+ int sglen = memdesc->sglen;
+
+ kgsl_driver.stats.page_alloc -= memdesc->size;
+
+ kgsl_page_alloc_unmap_kernel(memdesc);
+ /* we certainly do not expect the hostptr to still be mapped */
+ BUG_ON(memdesc->hostptr);
+
+ if (memdesc->sg)
+ for_each_sg(memdesc->sg, sg, sglen, i)
+ __free_pages(sg_page(sg), get_order(sg->length));
+}
+
+/*
+ * kgsl_page_alloc_map_kernel - Map the memory in memdesc to kernel address
+ * space
+ *
+ * @memdesc - The memory descriptor which contains information about the memory
+ *
+ * Return: 0 on success else error code
+ */
+static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
+{
+ int ret = 0;
+
+ mutex_lock(&kernel_map_global_lock);
+ if (!memdesc->hostptr) {
+ pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
+ struct page **pages = NULL;
+ struct scatterlist *sg;
+ int npages = PAGE_ALIGN(memdesc->size) >> PAGE_SHIFT;
+ int sglen = memdesc->sglen;
+ int i, count = 0;
+
+ /* create a list of pages to call vmap */
+ pages = vmalloc(npages * sizeof(struct page *));
+ if (!pages) {
+ KGSL_CORE_ERR("vmalloc(%d) failed\n",
+ npages * sizeof(struct page *));
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for_each_sg(memdesc->sg, sg, sglen, i) {
+ struct page *page = sg_page(sg);
+ int j;
+
+ for (j = 0; j < sg->length >> PAGE_SHIFT; j++)
+ pages[count++] = page++;
+ }
+
+
+ memdesc->hostptr = vmap(pages, count,
+ VM_IOREMAP, page_prot);
+ if (memdesc->hostptr)
+ KGSL_STATS_ADD(memdesc->size, kgsl_driver.stats.vmalloc,
+ kgsl_driver.stats.vmalloc_max);
+ else
+ ret = -ENOMEM;
+ vfree(pages);
+ }
+ if (memdesc->hostptr)
+ memdesc->hostptr_count++;
+done:
+ mutex_unlock(&kernel_map_global_lock);
+
+ return ret;
+}
+
+static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ unsigned long offset, pfn;
+ int ret;
+
+ offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
+ PAGE_SHIFT;
+
+ pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
+ ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);
+
+ if (ret == -ENOMEM || ret == -EAGAIN)
+ return VM_FAULT_OOM;
+ else if (ret == -EFAULT)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static void kgsl_ebimem_unmap_kernel(struct kgsl_memdesc *memdesc)
+{
+ mutex_lock(&kernel_map_global_lock);
+ if (!memdesc->hostptr) {
+ BUG_ON(memdesc->hostptr_count);
+ goto done;
+ }
+ memdesc->hostptr_count--;
+ if (memdesc->hostptr_count)
+ goto done;
+
+ iounmap(memdesc->hostptr);
+ memdesc->hostptr = NULL;
+done:
+ mutex_unlock(&kernel_map_global_lock);
+}
+
+static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
+
+{
+ kgsl_driver.stats.coherent -= memdesc->size;
+ kgsl_ebimem_unmap_kernel(memdesc);
+ /* we certainly do not expect the hostptr to still be mapped */
+ BUG_ON(memdesc->hostptr);
+
+ free_contiguous_memory_by_paddr(memdesc->physaddr);
+}
+
+static int kgsl_ebimem_map_kernel(struct kgsl_memdesc *memdesc)
+{
+ int ret = 0;
+ mutex_lock(&kernel_map_global_lock);
+ if (!memdesc->hostptr) {
+ memdesc->hostptr = ioremap(memdesc->physaddr, memdesc->size);
+ if (!memdesc->hostptr) {
+ KGSL_CORE_ERR("ioremap failed, addr:0x%p, size:0x%x\n",
+ memdesc->hostptr, memdesc->size);
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+ memdesc->hostptr_count++;
+done:
+ mutex_unlock(&kernel_map_global_lock);
+ return ret;
+}
+
+static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
+{
+ kgsl_driver.stats.coherent -= memdesc->size;
+ dma_free_coherent(NULL, memdesc->size,
+ memdesc->hostptr, memdesc->physaddr);
+}
+
+/* Global - also used by kgsl_drm.c */
+struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
+ .free = kgsl_page_alloc_free,
+ .vmflags = kgsl_page_alloc_vmflags,
+ .vmfault = kgsl_page_alloc_vmfault,
+ .map_kernel = kgsl_page_alloc_map_kernel,
+ .unmap_kernel = kgsl_page_alloc_unmap_kernel,
+};
+EXPORT_SYMBOL(kgsl_page_alloc_ops);
+
+static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
+ .free = kgsl_ebimem_free,
+ .vmflags = kgsl_contiguous_vmflags,
+ .vmfault = kgsl_contiguous_vmfault,
+ .map_kernel = kgsl_ebimem_map_kernel,
+ .unmap_kernel = kgsl_ebimem_unmap_kernel,
+};
+
+static struct kgsl_memdesc_ops kgsl_coherent_ops = {
+ .free = kgsl_coherent_free,
+};
+
+void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
+{
+ /*
+ * If the buffer is mapped in the kernel operate on that address
+ * otherwise use the user address
+ */
+
+ void *addr = (memdesc->hostptr) ?
+ memdesc->hostptr : (void *) memdesc->useraddr;
+
+ int size = memdesc->size;
+
+ if (addr != NULL) {
+ switch (op) {
+ case KGSL_CACHE_OP_FLUSH:
+ dmac_flush_range(addr, addr + size);
+ break;
+ case KGSL_CACHE_OP_CLEAN:
+ dmac_clean_range(addr, addr + size);
+ break;
+ case KGSL_CACHE_OP_INV:
+ dmac_inv_range(addr, addr + size);
+ break;
+ }
+ }
+ outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
+}
+EXPORT_SYMBOL(kgsl_cache_range_op);
+
+static int
+_kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size)
+{
+ int pcount = 0, order, ret = 0;
+ int j, len, page_size, sglen_alloc, sglen = 0;
+ struct page **pages = NULL;
+ pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
+ void *ptr;
+ unsigned int align;
+ int step = ((VMALLOC_END - VMALLOC_START)/8) >> PAGE_SHIFT;
+
+ align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
+
+ page_size = (align >= ilog2(SZ_64K) && size >= SZ_64K)
+ ? SZ_64K : PAGE_SIZE;
+ /* update align flags for what we actually use */
+ if (page_size != PAGE_SIZE)
+ kgsl_memdesc_set_align(memdesc, ilog2(page_size));
+
+ /*
+ * There needs to be enough room in the sg structure to be able to
+ * service the allocation entirely with PAGE_SIZE sized chunks
+ */
+
+ sglen_alloc = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ memdesc->size = size;
+ memdesc->pagetable = pagetable;
+ memdesc->ops = &kgsl_page_alloc_ops;
+
+ memdesc->sglen_alloc = sglen_alloc;
+ memdesc->sg = kgsl_sg_alloc(memdesc->sglen_alloc);
+
+ if (memdesc->sg == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * Allocate space to store the list of pages to send to vmap.
+ * This is an array of pointers so we can t rack 1024 pages per page
+ * of allocation. Since allocations can be as large as the user dares,
+ * we have to use the kmalloc/vmalloc trick here to make sure we can
+ * get the memory we need.
+ */
+
+ if ((memdesc->sglen_alloc * sizeof(struct page *)) > PAGE_SIZE)
+ pages = vmalloc(memdesc->sglen_alloc * sizeof(struct page *));
+ else
+ pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (pages == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ kmemleak_not_leak(memdesc->sg);
+
+ sg_init_table(memdesc->sg, memdesc->sglen_alloc);
+
+ len = size;
+
+ while (len > 0) {
+ struct page *page;
+ unsigned int gfp_mask = __GFP_HIGHMEM;
+ int j;
+
+ /* don't waste space at the end of the allocation*/
+ if (len < page_size)
+ page_size = PAGE_SIZE;
+
+ /*
+ * Don't do some of the more aggressive memory recovery
+ * techniques for large order allocations
+ */
+ if (page_size != PAGE_SIZE)
+ gfp_mask |= __GFP_COMP | __GFP_NORETRY |
+ __GFP_NO_KSWAPD | __GFP_NOWARN;
+ else
+ gfp_mask |= GFP_KERNEL;
+
+ page = alloc_pages(gfp_mask, get_order(page_size));
+
+ if (page == NULL) {
+ if (page_size != PAGE_SIZE) {
+ page_size = PAGE_SIZE;
+ continue;
+ }
+
+ KGSL_CORE_ERR(
+ "Out of memory: only allocated %dKB of %dKB requested\n",
+ (size - len) >> 10, size >> 10);
+
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (j = 0; j < page_size >> PAGE_SHIFT; j++)
+ pages[pcount++] = nth_page(page, j);
+
+ sg_set_page(&memdesc->sg[sglen++], page, page_size, 0);
+ len -= page_size;
+ }
+
+ memdesc->sglen = sglen;
+
+ /*
+ * All memory that goes to the user has to be zeroed out before it gets
+ * exposed to userspace. This means that the memory has to be mapped in
+ * the kernel, zeroed (memset) and then unmapped. This also means that
+ * the dcache has to be flushed to ensure coherency between the kernel
+ * and user pages. We used to pass __GFP_ZERO to alloc_page which mapped
+ * zeroed and unmaped each individual page, and then we had to turn
+ * around and call flush_dcache_page() on that page to clear the caches.
+ * This was killing us for performance. Instead, we found it is much
+ * faster to allocate the pages without GFP_ZERO, map a chunk of the
+ * range ('step' pages), memset it, flush it and then unmap
+ * - this results in a factor of 4 improvement for speed for large
+ * buffers. There is a small decrease in speed for small buffers,
+ * but only on the order of a few microseconds at best. The 'step'
+ * size is based on a guess at the amount of free vmalloc space, but
+ * will scale down if there's not enough free space.
+ */
+ for (j = 0; j < pcount; j += step) {
+ step = min(step, pcount - j);
+
+ ptr = vmap(&pages[j], step, VM_IOREMAP, page_prot);
+
+ if (ptr != NULL) {
+ memset(ptr, 0, step * PAGE_SIZE);
+ dmac_flush_range(ptr, ptr + step * PAGE_SIZE);
+ vunmap(ptr);
+ } else {
+ int k;
+ /* Very, very, very slow path */
+
+ for (k = j; k < j + step; k++) {
+ ptr = kmap_atomic(pages[k]);
+ memset(ptr, 0, PAGE_SIZE);
+ dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ kunmap_atomic(ptr);
+ }
+ /* scale down the step size to avoid this path */
+ if (step > 1)
+ step >>= 1;
+ }
+ }
+
+ outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
+ KGSL_CACHE_OP_FLUSH);
+
+ KGSL_STATS_ADD(size, kgsl_driver.stats.page_alloc,
+ kgsl_driver.stats.page_alloc_max);
+
+ order = get_order(size);
+
+ if (order < 16)
+ kgsl_driver.stats.histogram[order]++;
+
+done:
+ if ((memdesc->sglen_alloc * sizeof(struct page *)) > PAGE_SIZE)
+ vfree(pages);
+ else
+ kfree(pages);
+
+ if (ret)
+ kgsl_sharedmem_free(memdesc);
+
+ return ret;
+}
+
+int
+kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size)
+{
+ int ret = 0;
+ BUG_ON(size == 0);
+
+ size = ALIGN(size, PAGE_SIZE * 2);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
+ if (!ret)
+ ret = kgsl_page_alloc_map_kernel(memdesc);
+ if (ret)
+ kgsl_sharedmem_free(memdesc);
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_page_alloc);
+
+int
+kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size)
+{
+ size = PAGE_ALIGN(size);
+ if (size == 0)
+ return -EINVAL;
+
+ return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
+}
+EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);
+
+int
+kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
+{
+ int result = 0;
+
+ size = ALIGN(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ memdesc->size = size;
+ memdesc->ops = &kgsl_coherent_ops;
+
+ memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
+ GFP_KERNEL);
+ if (memdesc->hostptr == NULL) {
+ KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
+ result = -ENOMEM;
+ goto err;
+ }
+
+ result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
+ if (result)
+ goto err;
+
+ /* Record statistics */
+
+ KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
+ kgsl_driver.stats.coherent_max);
+
+err:
+ if (result)
+ kgsl_sharedmem_free(memdesc);
+
+ return result;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);
+
+void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
+{
+ if (memdesc == NULL || memdesc->size == 0)
+ return;
+
+ if (memdesc->gpuaddr) {
+ kgsl_mmu_unmap(memdesc->pagetable, memdesc);
+ kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
+ }
+
+ if (memdesc->ops && memdesc->ops->free)
+ memdesc->ops->free(memdesc);
+
+ kgsl_sg_free(memdesc->sg, memdesc->sglen_alloc);
+
+ memset(memdesc, 0, sizeof(*memdesc));
+}
+EXPORT_SYMBOL(kgsl_sharedmem_free);
+
+static int
+_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size)
+{
+ int result = 0;
+
+ memdesc->size = size;
+ memdesc->pagetable = pagetable;
+ memdesc->ops = &kgsl_ebimem_ops;
+ memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);
+
+ if (memdesc->physaddr == 0) {
+ KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
+ size);
+ return -ENOMEM;
+ }
+
+ result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
+
+ if (result)
+ goto err;
+
+ KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
+ kgsl_driver.stats.coherent_max);
+
+err:
+ if (result)
+ kgsl_sharedmem_free(memdesc);
+
+ return result;
+}
+
+int
+kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size)
+{
+ size = ALIGN(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);
+
+int
+kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size)
+{
+ int result;
+ size = ALIGN(size, 8192);
+ if (size == 0)
+ return -EINVAL;
+
+ result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+
+ if (result)
+ return result;
+
+ result = kgsl_ebimem_map_kernel(memdesc);
+
+ if (result) {
+ KGSL_CORE_ERR("hostptr mapping failed\n");
+ kgsl_sharedmem_free(memdesc);
+ return result;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ebimem);
+
+int
+kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
+ uint32_t *dst,
+ unsigned int offsetbytes)
+{
+ uint32_t *src;
+ BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
+ WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
+ if (offsetbytes % sizeof(uint32_t) != 0)
+ return -EINVAL;
+
+ WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
+ if (offsetbytes + sizeof(uint32_t) > memdesc->size)
+ return -ERANGE;
+ src = (uint32_t *)(memdesc->hostptr + offsetbytes);
+ *dst = *src;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_readl);
+
+int
+kgsl_sharedmem_writel(struct kgsl_device *device,
+ const struct kgsl_memdesc *memdesc,
+ unsigned int offsetbytes,
+ uint32_t src)
+{
+ uint32_t *dst;
+ BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
+ WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
+ if (offsetbytes % sizeof(uint32_t) != 0)
+ return -EINVAL;
+
+ WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
+ if (offsetbytes + sizeof(uint32_t) > memdesc->size)
+ return -ERANGE;
+ kgsl_cffdump_setmem(device,
+ memdesc->gpuaddr + offsetbytes,
+ src, sizeof(uint32_t));
+ dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
+ *dst = src;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_writel);
+
+int
+kgsl_sharedmem_set(struct kgsl_device *device,
+ const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
+ unsigned int value, unsigned int sizebytes)
+{
+ BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
+ BUG_ON(offsetbytes + sizebytes > memdesc->size);
+
+ kgsl_cffdump_setmem(device,
+ memdesc->gpuaddr + offsetbytes, value,
+ sizebytes);
+ memset(memdesc->hostptr + offsetbytes, value, sizebytes);
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_set);
+
+/*
+ * kgsl_sharedmem_map_vma - Map a user vma to physical memory
+ *
+ * @vma - The user vma to map
+ * @memdesc - The memory descriptor which contains information about the
+ * physical memory
+ *
+ * Return: 0 on success else error code
+ */
+int
+kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
+ const struct kgsl_memdesc *memdesc)
+{
+ unsigned long addr = vma->vm_start;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ int ret, i = 0;
+
+ if (!memdesc->sg || (size != memdesc->size) ||
+ (memdesc->sglen != (size / PAGE_SIZE)))
+ return -EINVAL;
+
+ for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) {
+ ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i]));
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_map_vma);
+
+static const char * const memtype_str[] = {
+ [KGSL_MEMTYPE_OBJECTANY] = "any(0)",
+ [KGSL_MEMTYPE_FRAMEBUFFER] = "framebuffer",
+ [KGSL_MEMTYPE_RENDERBUFFER] = "renderbuffer",
+ [KGSL_MEMTYPE_ARRAYBUFFER] = "arraybuffer",
+ [KGSL_MEMTYPE_ELEMENTARRAYBUFFER] = "elementarraybuffer",
+ [KGSL_MEMTYPE_VERTEXARRAYBUFFER] = "vertexarraybuffer",
+ [KGSL_MEMTYPE_TEXTURE] = "texture",
+ [KGSL_MEMTYPE_SURFACE] = "surface",
+ [KGSL_MEMTYPE_EGL_SURFACE] = "egl_surface",
+ [KGSL_MEMTYPE_GL] = "gl",
+ [KGSL_MEMTYPE_CL] = "cl",
+ [KGSL_MEMTYPE_CL_BUFFER_MAP] = "cl_buffer_map",
+ [KGSL_MEMTYPE_CL_BUFFER_NOMAP] = "cl_buffer_nomap",
+ [KGSL_MEMTYPE_CL_IMAGE_MAP] = "cl_image_map",
+ [KGSL_MEMTYPE_CL_IMAGE_NOMAP] = "cl_image_nomap",
+ [KGSL_MEMTYPE_CL_KERNEL_STACK] = "cl_kernel_stack",
+ [KGSL_MEMTYPE_COMMAND] = "command",
+ [KGSL_MEMTYPE_2D] = "2d",
+ [KGSL_MEMTYPE_EGL_IMAGE] = "egl_image",
+ [KGSL_MEMTYPE_EGL_SHADOW] = "egl_shadow",
+ [KGSL_MEMTYPE_MULTISAMPLE] = "egl_multisample",
+ /* KGSL_MEMTYPE_KERNEL handled below, to avoid huge array */
+};
+
+void kgsl_get_memory_usage(char *name, size_t name_size, unsigned int memflags)
+{
+ unsigned char type;
+
+ type = (memflags & KGSL_MEMTYPE_MASK) >> KGSL_MEMTYPE_SHIFT;
+ if (type == KGSL_MEMTYPE_KERNEL)
+ strlcpy(name, "kernel", name_size);
+ else if (type < ARRAY_SIZE(memtype_str) && memtype_str[type] != NULL)
+ strlcpy(name, memtype_str[type], name_size);
+ else
+ snprintf(name, name_size, "unknown(%3d)", type);
+}
+EXPORT_SYMBOL(kgsl_get_memory_usage);
diff --git a/drivers/gpu/msm2/kgsl_sharedmem.h b/drivers/gpu/msm2/kgsl_sharedmem.h
new file mode 100644
index 0000000..339575f
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_sharedmem.h
@@ -0,0 +1,311 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_SHAREDMEM_H
+#define __KGSL_SHAREDMEM_H
+
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include "kgsl_mmu.h"
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
+#include <linux/iommu.h>
+
+#include "kgsl_log.h"
+
+struct kgsl_device;
+struct kgsl_process_private;
+
+#define KGSL_CACHE_OP_INV 0x01
+#define KGSL_CACHE_OP_FLUSH 0x02
+#define KGSL_CACHE_OP_CLEAN 0x03
+
+extern struct kgsl_memdesc_ops kgsl_page_alloc_ops;
+
+int kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size);
+
+int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size);
+
+int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);
+
+int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size);
+
+int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size);
+
+void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
+
+int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
+ uint32_t *dst,
+ unsigned int offsetbytes);
+
+int kgsl_sharedmem_writel(struct kgsl_device *device,
+ const struct kgsl_memdesc *memdesc,
+ unsigned int offsetbytes,
+ uint32_t src);
+
+int kgsl_sharedmem_set(struct kgsl_device *device,
+ const struct kgsl_memdesc *memdesc,
+ unsigned int offsetbytes, unsigned int value,
+ unsigned int sizebytes);
+
+void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);
+
+int kgsl_process_init_sysfs(struct kgsl_device *device,
+ struct kgsl_process_private *private);
+void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
+
+int kgsl_sharedmem_init_sysfs(void);
+void kgsl_sharedmem_uninit_sysfs(void);
+
+/*
+ * kgsl_memdesc_get_align - Get alignment flags from a memdesc
+ * @memdesc - the memdesc
+ *
+ * Returns the alignment requested, as power of 2 exponent.
+ */
+static inline int
+kgsl_memdesc_get_align(const struct kgsl_memdesc *memdesc)
+{
+ return (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
+}
+
+/*
+ * kgsl_memdesc_get_cachemode - Get cache mode of a memdesc
+ * @memdesc: the memdesc
+ *
+ * Returns a KGSL_CACHEMODE* value.
+ */
+static inline int
+kgsl_memdesc_get_cachemode(const struct kgsl_memdesc *memdesc)
+{
+ return (memdesc->flags & KGSL_CACHEMODE_MASK) >> KGSL_CACHEMODE_SHIFT;
+}
+
+/*
+ * kgsl_memdesc_set_align - Set alignment flags of a memdesc
+ * @memdesc - the memdesc
+ * @align - alignment requested, as a power of 2 exponent.
+ */
+static inline int
+kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
+{
+ if (align > 32) {
+ KGSL_CORE_ERR("Alignment too big, restricting to 2^32\n");
+ align = 32;
+ }
+
+ memdesc->flags &= ~KGSL_MEMALIGN_MASK;
+ memdesc->flags |= (align << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
+ return 0;
+}
+
+static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first to support ion carveout
+ * regions which do not work with sg_phys().
+ */
+ unsigned int pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+int
+kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
+ const struct kgsl_memdesc *memdesc);
+
+/*
+ * For relatively small sglists, it is preferable to use kzalloc
+ * rather than going down the vmalloc rat hole. If the size of
+ * the sglist is < PAGE_SIZE use kzalloc otherwise fallback to
+ * vmalloc
+ */
+
+static inline void *kgsl_sg_alloc(unsigned int sglen)
+{
+ if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
+ return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
+ else {
+ void *ptr = vmalloc(sglen * sizeof(struct scatterlist));
+ if (ptr)
+ memset(ptr, 0, sglen * sizeof(struct scatterlist));
+
+ return ptr;
+ }
+}
+
+static inline void kgsl_sg_free(void *ptr, unsigned int sglen)
+{
+ if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
+ kfree(ptr);
+ else
+ vfree(ptr);
+}
+
+static inline int
+memdesc_sg_phys(struct kgsl_memdesc *memdesc,
+ phys_addr_t physaddr, unsigned int size)
+{
+ memdesc->sg = kgsl_sg_alloc(1);
+ if (!memdesc->sg)
+ return -ENOMEM;
+
+ kmemleak_not_leak(memdesc->sg);
+
+ memdesc->sglen = 1;
+ sg_init_table(memdesc->sg, 1);
+ memdesc->sg[0].length = size;
+ memdesc->sg[0].offset = 0;
+ memdesc->sg[0].dma_address = physaddr;
+ return 0;
+}
+
+/*
+ * kgsl_memdesc_is_global - is this a globally mapped buffer?
+ * @memdesc: the memdesc
+ *
+ * Returns nonzero if this is a global mapping, 0 otherwise
+ */
+static inline int kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
+{
+ return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
+}
+
+/*
+ * kgsl_memdesc_has_guard_page - is the last page a guard page?
+ * @memdesc - the memdesc
+ *
+ * Returns nonzero if there is a guard page, 0 otherwise
+ */
+static inline int
+kgsl_memdesc_has_guard_page(const struct kgsl_memdesc *memdesc)
+{
+ return (memdesc->priv & KGSL_MEMDESC_GUARD_PAGE) != 0;
+}
+
+/*
+ * kgsl_memdesc_protflags - get mmu protection flags
+ * @memdesc - the memdesc
+ * Returns a mask of GSL_PT_PAGE* or IOMMU* values based
+ * on the memdesc flags.
+ */
+static inline unsigned int
+kgsl_memdesc_protflags(const struct kgsl_memdesc *memdesc)
+{
+ unsigned int protflags = 0;
+ enum kgsl_mmutype mmutype = kgsl_mmu_get_mmutype();
+
+ if (mmutype == KGSL_MMU_TYPE_GPU) {
+ protflags = GSL_PT_PAGE_RV;
+ if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
+ protflags |= GSL_PT_PAGE_WV;
+ } else if (mmutype == KGSL_MMU_TYPE_IOMMU) {
+ protflags = IOMMU_READ;
+ if (!(memdesc->flags & KGSL_MEMFLAGS_GPUREADONLY))
+ protflags |= IOMMU_WRITE;
+ }
+ return protflags;
+}
+
+/*
+ * kgsl_memdesc_use_cpu_map - use the same virtual mapping on CPU and GPU?
+ * @memdesc - the memdesc
+ */
+static inline int
+kgsl_memdesc_use_cpu_map(const struct kgsl_memdesc *memdesc)
+{
+ return (memdesc->flags & KGSL_MEMFLAGS_USE_CPU_MAP) != 0;
+}
+
+/*
+ * kgsl_memdesc_mmapsize - get the size of the mmap region
+ * @memdesc - the memdesc
+ *
+ * The entire memdesc must be mapped. Additionally if the
+ * CPU mapping is going to be mirrored, there must be room
+ * for the guard page to be mapped so that the address spaces
+ * match up.
+ */
+static inline unsigned int
+kgsl_memdesc_mmapsize(const struct kgsl_memdesc *memdesc)
+{
+ unsigned int size = memdesc->size;
+ if (kgsl_memdesc_use_cpu_map(memdesc) &&
+ kgsl_memdesc_has_guard_page(memdesc))
+ size += SZ_4K;
+ return size;
+}
+
+static inline int
+kgsl_allocate(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size)
+{
+ int ret;
+ memdesc->priv |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
+ return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+
+ ret = kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
+ if (ret)
+ return ret;
+ ret = kgsl_mmu_get_gpuaddr(pagetable, memdesc);
+ if (ret) {
+ kgsl_sharedmem_free(memdesc);
+ return ret;
+ }
+ ret = kgsl_mmu_map(pagetable, memdesc);
+ if (ret)
+ kgsl_sharedmem_free(memdesc);
+ return ret;
+}
+
+static inline int
+kgsl_allocate_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size, unsigned int flags)
+{
+ int ret;
+
+ if (size == 0)
+ return -EINVAL;
+
+ memdesc->flags = flags;
+
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
+ ret = kgsl_sharedmem_ebimem_user(memdesc, pagetable, size);
+ else
+ ret = kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size);
+
+ return ret;
+}
+
+static inline int
+kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
+{
+ int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);
+ if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
+ memdesc->gpuaddr = memdesc->physaddr;
+
+ memdesc->flags |= (KGSL_MEMTYPE_KERNEL << KGSL_MEMTYPE_SHIFT);
+ return ret;
+}
+
+#endif /* __KGSL_SHAREDMEM_H */
diff --git a/drivers/gpu/msm2/kgsl_snapshot.c b/drivers/gpu/msm2/kgsl_snapshot.c
new file mode 100644
index 0000000..a81e19c
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_snapshot.c
@@ -0,0 +1,1096 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/time.h>
+#include <linux/sysfs.h>
+#include <linux/utsname.h>
+#include <linux/sched.h>
+#include <linux/idr.h>
+
+#include "kgsl.h"
+#include "kgsl_log.h"
+#include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_snapshot.h"
+#include "adreno_cp_parser.h"
+
+/* Placeholder for the list of memory objects frozen after a hang */
+
+struct kgsl_snapshot_object {
+ unsigned int gpuaddr;
+ phys_addr_t ptbase;
+ unsigned int size;
+ unsigned int offset;
+ int type;
+ struct kgsl_mem_entry *entry;
+ struct list_head node;
+};
+
+/* Placeholder for list of ib objects that contain all objects in that IB */
+
+struct kgsl_snapshot_cp_obj {
+ struct adreno_ib_object_list *ib_obj_list;
+ unsigned int ptbase;
+ struct list_head node;
+};
+
+struct snapshot_obj_itr {
+ void *buf; /* Buffer pointer to write to */
+ int pos; /* Current position in the sequence */
+ loff_t offset; /* file offset to start writing from */
+ size_t remain; /* Bytes remaining in buffer */
+ size_t write; /* Bytes written so far */
+};
+
+static void obj_itr_init(struct snapshot_obj_itr *itr, void *buf,
+ loff_t offset, size_t remain)
+{
+ itr->buf = buf;
+ itr->offset = offset;
+ itr->remain = remain;
+ itr->pos = 0;
+ itr->write = 0;
+}
+
+static int obj_itr_out(struct snapshot_obj_itr *itr, void *src, int size)
+{
+ if (itr->remain == 0)
+ return 0;
+
+ if ((itr->pos + size) <= itr->offset)
+ goto done;
+
+ /* Handle the case that offset is in the middle of the buffer */
+
+ if (itr->offset > itr->pos) {
+ src += (itr->offset - itr->pos);
+ size -= (itr->offset - itr->pos);
+
+ /* Advance pos to the offset start */
+ itr->pos = itr->offset;
+ }
+
+ if (size > itr->remain)
+ size = itr->remain;
+
+ memcpy(itr->buf, src, size);
+
+ itr->buf += size;
+ itr->write += size;
+ itr->remain -= size;
+
+done:
+ itr->pos += size;
+ return size;
+}
+
+/* idr_for_each function to count the number of contexts */
+
+static int snapshot_context_count(int id, void *ptr, void *data)
+{
+ int *count = data;
+ *count = *count + 1;
+
+ return 0;
+}
+
+/*
+ * To simplify the iterator loop use a global pointer instead of trying
+ * to pass around double star references to the snapshot data
+ */
+
+static void *_ctxtptr;
+
+static int snapshot_context_info(int id, void *ptr, void *data)
+{
+ struct kgsl_snapshot_linux_context *header = _ctxtptr;
+ struct kgsl_context *context = ptr;
+ struct kgsl_device *device;
+
+ if (context)
+ device = context->device;
+ else
+ device = (struct kgsl_device *)data;
+
+ header->id = id;
+
+ /* Future-proof for per-context timestamps - for now, just
+ * return the global timestamp for all contexts
+ */
+
+ header->timestamp_queued = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_QUEUED);
+ header->timestamp_retired = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ _ctxtptr += sizeof(struct kgsl_snapshot_linux_context);
+
+ return 0;
+}
+
+/* Snapshot the Linux specific information */
+static int snapshot_os(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct kgsl_snapshot_linux *header = snapshot;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct task_struct *task;
+ pid_t pid;
+ int hang = (int) priv;
+ int ctxtcount = 0;
+ int size = sizeof(*header);
+ phys_addr_t temp_ptbase;
+
+ /* Figure out how many active contexts there are - these will
+ * be appended on the end of the structure */
+
+ read_lock(&device->context_lock);
+ idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount);
+ read_unlock(&device->context_lock);
+
+ /* Increment ctxcount for the global memstore */
+ ctxtcount++;
+
+ size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context);
+
+ /* Make sure there is enough room for the data */
+ if (remain < size) {
+ SNAPSHOT_ERR_NOMEM(device, "OS");
+ return 0;
+ }
+
+ memset(header, 0, sizeof(*header));
+
+ header->osid = KGSL_SNAPSHOT_OS_LINUX;
+
+ header->state = hang ? SNAPSHOT_STATE_HUNG : SNAPSHOT_STATE_RUNNING;
+
+ /* Get the kernel build information */
+ strlcpy(header->release, utsname()->release, sizeof(header->release));
+ strlcpy(header->version, utsname()->version, sizeof(header->version));
+
+ /* Get the Unix time for the timestamp */
+ header->seconds = get_seconds();
+
+ /* Remember the power information */
+ header->power_flags = pwr->power_flags;
+ header->power_level = pwr->active_pwrlevel;
+ header->power_interval_timeout = pwr->interval_timeout;
+ header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]);
+ header->busclk = kgsl_get_clkrate(pwr->ebi1_clk);
+
+ /* Save the last active context */
+ kgsl_sharedmem_readl(&device->memstore, &header->current_context,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
+
+
+ /* Get the current PT base */
+ temp_ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
+ /* Truncate to 32 bits in case LPAE is used */
+ header->ptbase = (__u32)temp_ptbase;
+ /* And the PID for the task leader */
+ pid = header->pid = kgsl_mmu_get_ptname_from_ptbase(&device->mmu,
+ temp_ptbase);
+
+ task = find_task_by_vpid(pid);
+
+ if (task)
+ get_task_comm(header->comm, task);
+
+ header->ctxtcount = ctxtcount;
+
+ _ctxtptr = snapshot + sizeof(*header);
+
+ /* append information for the global context */
+ snapshot_context_info(KGSL_MEMSTORE_GLOBAL, NULL, device);
+
+ /* append information for each context */
+
+ read_lock(&device->context_lock);
+ idr_for_each(&device->context_idr, snapshot_context_info, NULL);
+ read_unlock(&device->context_lock);
+
+ /* Return the size of the data segment */
+ return size;
+}
+/*
+ * kgsl_snapshot_dump_indexed_regs - helper function to dump indexed registers
+ * @device - the device to dump registers from
+ * @snapshot - pointer to the start of the region of memory for the snapshot
+ * @remain - a pointer to the number of bytes remaining in the snapshot
+ * @priv - A pointer to the kgsl_snapshot_indexed_registers data
+ *
+ * Given a indexed register cmd/data pair and a count, dump each indexed
+ * register
+ */
+
+static int kgsl_snapshot_dump_indexed_regs(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct kgsl_snapshot_indexed_registers *iregs = priv;
+ struct kgsl_snapshot_indexed_regs *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < (iregs->count * 4) + sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "INDEXED REGS");
+ return 0;
+ }
+
+ header->index_reg = iregs->index;
+ header->data_reg = iregs->data;
+ header->count = iregs->count;
+ header->start = iregs->start;
+
+ for (i = 0; i < iregs->count; i++) {
+ kgsl_regwrite(device, iregs->index, iregs->start + i);
+ kgsl_regread(device, iregs->data, &data[i]);
+ }
+
+ return (iregs->count * 4) + sizeof(*header);
+}
+
+#define GPU_OBJ_HEADER_SZ \
+ (sizeof(struct kgsl_snapshot_section_header) + \
+ sizeof(struct kgsl_snapshot_gpu_object))
+
+static int kgsl_snapshot_dump_object(struct kgsl_device *device,
+ struct kgsl_snapshot_object *obj, struct snapshot_obj_itr *itr)
+{
+ struct kgsl_snapshot_section_header sect;
+ struct kgsl_snapshot_gpu_object header;
+ int ret;
+
+ if (kgsl_memdesc_map(&obj->entry->memdesc) == NULL) {
+ KGSL_DRV_ERR(device, "Unable to map GPU buffer %X\n",
+ obj->gpuaddr);
+ return 0;
+ }
+
+ sect.magic = SNAPSHOT_SECTION_MAGIC;
+ sect.id = KGSL_SNAPSHOT_SECTION_GPU_OBJECT;
+
+ /*
+ * Header size is in dwords, object size is in bytes -
+ * round up if the object size isn't dword aligned
+ */
+
+ sect.size = GPU_OBJ_HEADER_SZ + ALIGN(obj->size, 4);
+
+ ret = obj_itr_out(itr, §, sizeof(sect));
+ if (ret == 0)
+ goto done;
+
+ header.size = ALIGN(obj->size, 4) >> 2;
+ header.gpuaddr = obj->gpuaddr;
+ header.ptbase = (__u32)obj->ptbase;
+ header.type = obj->type;
+
+ ret = obj_itr_out(itr, &header, sizeof(header));
+ if (ret == 0)
+ goto done;
+
+ ret = obj_itr_out(itr, obj->entry->memdesc.hostptr + obj->offset,
+ obj->size);
+ if (ret == 0)
+ goto done;
+
+ /* Pad the end to a dword boundary if we need to */
+
+ if (obj->size % 4) {
+ unsigned int dummy = 0;
+ ret = obj_itr_out(itr, &dummy, obj->size % 4);
+ }
+done:
+ kgsl_memdesc_unmap(&obj->entry->memdesc);
+ return ret;
+}
+
+static void kgsl_snapshot_put_object(struct kgsl_device *device,
+ struct kgsl_snapshot_object *obj)
+{
+ list_del(&obj->node);
+
+ obj->entry->memdesc.priv &= ~KGSL_MEMDESC_FROZEN;
+ kgsl_mem_entry_put(obj->entry);
+
+ kfree(obj);
+}
+
+/*
+ * ksgl_snapshot_find_object() - Return the snapshot object pointer
+ * for given address range
+ * @device: the device that is being snapshotted
+ * @ptbase: the pagetable base of the object to search
+ * @gpuaddr: The gpu address of the object to search
+ * @size: the size of the object (may not always be the size of the region)
+ *
+ * Return the object pointer if found else NULL
+ */
+struct kgsl_snapshot_object *kgsl_snapshot_find_object(
+ struct kgsl_device *device,
+ phys_addr_t ptbase, unsigned int gpuaddr,
+ unsigned int size)
+{
+ struct kgsl_snapshot_object *obj = NULL;
+ list_for_each_entry(obj, &device->snapshot_obj_list, node) {
+ if (obj->ptbase != ptbase)
+ continue;
+ if ((gpuaddr >= obj->gpuaddr) &&
+ ((gpuaddr + size) <= (obj->gpuaddr + obj->size)))
+ return obj;
+ }
+ return NULL;
+}
+
+/* ksgl_snapshot_have_object - Return 1 if the object has been processed
+ *@device - the device that is being snapshotted
+ * @ptbase - the pagetable base of the object to freeze
+ * @gpuaddr - The gpu address of the object to freeze
+ * @size - the size of the object (may not always be the size of the region)
+ *
+ * Return 1 if the object is already in the list - this can save us from
+ * having to parse the sme thing over again.
+*/
+int kgsl_snapshot_have_object(struct kgsl_device *device, phys_addr_t ptbase,
+ unsigned int gpuaddr, unsigned int size)
+{
+ struct kgsl_snapshot_object *obj;
+
+ list_for_each_entry(obj, &device->snapshot_obj_list, node) {
+ if (obj->ptbase != ptbase)
+ continue;
+
+ if ((gpuaddr >= obj->gpuaddr) &&
+ ((gpuaddr + size) <= (obj->gpuaddr + obj->size)))
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_snapshot_have_object);
+
+/* kgsl_snapshot_get_object - Mark a GPU buffer to be frozen
+ * @device - the device that is being snapshotted
+ * @ptbase - the pagetable base of the object to freeze
+ * @gpuaddr - The gpu address of the object to freeze
+ * @size - the size of the object (may not always be the size of the region)
+ * @type - the type of object being saved (shader, vbo, etc)
+ *
+ * Mark and freeze a GPU buffer object. This will prevent it from being
+ * freed until it can be copied out as part of the snapshot dump. Returns the
+ * size of the object being frozen
+ */
+
+int kgsl_snapshot_get_object(struct kgsl_device *device, phys_addr_t ptbase,
+ unsigned int gpuaddr, unsigned int size, unsigned int type)
+{
+ struct kgsl_mem_entry *entry;
+ struct kgsl_snapshot_object *obj;
+ int offset;
+ int ret = -EINVAL;
+ unsigned int mem_type;
+
+ if (!gpuaddr)
+ return 0;
+
+ entry = kgsl_get_mem_entry(device, ptbase, gpuaddr, size);
+
+ if (entry == NULL) {
+ KGSL_DRV_ERR(device, "Unable to find GPU buffer %8.8X\n",
+ gpuaddr);
+ return -EINVAL;
+ }
+
+ /* We can't freeze external memory, because we don't own it */
+ if (entry->memtype != KGSL_MEM_ENTRY_KERNEL) {
+ KGSL_DRV_ERR(device,
+ "Only internal GPU buffers can be frozen\n");
+ goto err_put;
+ }
+ /*
+ * Do not save texture and render targets in snapshot,
+ * they can be just too big
+ */
+ mem_type = (entry->memdesc.flags & KGSL_MEMTYPE_MASK) >>
+ KGSL_MEMTYPE_SHIFT;
+ if (KGSL_MEMTYPE_TEXTURE == mem_type ||
+ KGSL_MEMTYPE_EGL_SURFACE == mem_type ||
+ KGSL_MEMTYPE_EGL_IMAGE == mem_type) {
+ ret = 0;
+ goto err_put;
+ }
+
+ /*
+ * size indicates the number of bytes in the region to save. This might
+ * not always be the entire size of the region because some buffers are
+ * sub-allocated from a larger region. However, if size 0 was passed
+ * thats a flag that the caller wants to capture the entire buffer
+ */
+
+ if (size == 0) {
+ size = entry->memdesc.size;
+ offset = 0;
+
+ /* Adjust the gpuaddr to the start of the object */
+ gpuaddr = entry->memdesc.gpuaddr;
+ } else {
+ offset = gpuaddr - entry->memdesc.gpuaddr;
+ }
+
+ if (size + offset > entry->memdesc.size) {
+ KGSL_DRV_ERR(device, "Invalid size for GPU buffer %8.8X\n",
+ gpuaddr);
+ goto err_put;
+ }
+
+ /* If the buffer is already on the list, skip it */
+ list_for_each_entry(obj, &device->snapshot_obj_list, node) {
+ /* combine the range with existing object if they overlap */
+ if (obj->ptbase == ptbase && obj->type == type &&
+ kgsl_addr_range_overlap(obj->gpuaddr, obj->size,
+ gpuaddr, size)) {
+ unsigned int end1 = obj->gpuaddr + obj->size;
+ unsigned int end2 = gpuaddr + size;
+ if (obj->gpuaddr > gpuaddr)
+ obj->gpuaddr = gpuaddr;
+ if (end1 > end2)
+ obj->size = end1 - obj->gpuaddr;
+ else
+ obj->size = end2 - obj->gpuaddr;
+ obj->offset = obj->gpuaddr - entry->memdesc.gpuaddr;
+ ret = 0;
+ goto err_put;
+ }
+ }
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+
+ if (obj == NULL) {
+ KGSL_DRV_ERR(device, "Unable to allocate memory\n");
+ goto err_put;
+ }
+
+ obj->type = type;
+ obj->entry = entry;
+ obj->gpuaddr = gpuaddr;
+ obj->ptbase = ptbase;
+ obj->size = size;
+ obj->offset = offset;
+
+ list_add(&obj->node, &device->snapshot_obj_list);
+
+ /*
+ * Return the size of the entire mem entry that was frozen - this gets
+ * used for tracking how much memory is frozen for a hang. Also, mark
+ * the memory entry as frozen. If the entry was already marked as
+ * frozen, then another buffer already got to it. In that case, return
+ * 0 so it doesn't get counted twice
+ */
+
+ ret = (entry->memdesc.priv & KGSL_MEMDESC_FROZEN) ? 0
+ : entry->memdesc.size;
+
+ entry->memdesc.priv |= KGSL_MEMDESC_FROZEN;
+
+ return ret;
+err_put:
+ kgsl_mem_entry_put(entry);
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_snapshot_get_object);
+
+/*
+ * kgsl_snapshot_dump_regs - helper function to dump device registers
+ * @device - the device to dump registers from
+ * @snapshot - pointer to the start of the region of memory for the snapshot
+ * @remain - a pointer to the number of bytes remaining in the snapshot
+ * @priv - A pointer to the kgsl_snapshot_registers data
+ *
+ * Given an array of register ranges pairs (start,end [inclusive]), dump the
+ * registers into a snapshot register section. The snapshot region stores a
+ * part of dwords for each register - the word address of the register, and
+ * the value.
+ */
+int kgsl_snapshot_dump_regs(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_registers_list *list = priv;
+
+ struct kgsl_snapshot_regs *header = snapshot;
+ struct kgsl_snapshot_registers *regs;
+ unsigned int *data = snapshot + sizeof(*header);
+ int count = 0, i, j, k;
+
+ /* Figure out how many registers we are going to dump */
+
+ for (i = 0; i < list->count; i++) {
+ regs = &(list->registers[i]);
+
+ for (j = 0; j < regs->count; j++) {
+ int start = regs->regs[j * 2];
+ int end = regs->regs[j * 2 + 1];
+
+ count += (end - start + 1);
+ }
+ }
+
+ if (remain < (count * 8) + sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+ return 0;
+ }
+
+
+ for (i = 0; i < list->count; i++) {
+ regs = &(list->registers[i]);
+ for (j = 0; j < regs->count; j++) {
+ unsigned int start = regs->regs[j * 2];
+ unsigned int end = regs->regs[j * 2 + 1];
+
+ for (k = start; k <= end; k++) {
+ unsigned int val;
+
+ kgsl_regread(device, k, &val);
+ *data++ = k;
+ *data++ = val;
+ }
+ }
+ }
+
+ header->count = count;
+
+ /* Return the size of the section */
+ return (count * 8) + sizeof(*header);
+}
+EXPORT_SYMBOL(kgsl_snapshot_dump_regs);
+
+void *kgsl_snapshot_indexed_registers(struct kgsl_device *device,
+ void *snapshot, int *remain,
+ unsigned int index, unsigned int data, unsigned int start,
+ unsigned int count)
+{
+ struct kgsl_snapshot_indexed_registers iregs;
+ iregs.index = index;
+ iregs.data = data;
+ iregs.start = start;
+ iregs.count = count;
+
+ return kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_INDEXED_REGS, snapshot,
+ remain, kgsl_snapshot_dump_indexed_regs, &iregs);
+}
+EXPORT_SYMBOL(kgsl_snapshot_indexed_registers);
+
+/*
+ * kgsl_snapshot - construct a device snapshot
+ * @device - device to snapshot
+ * @hang - set to 1 if the snapshot was triggered following a hnag
+ * Given a device, construct a binary snapshot dump of the current device state
+ * and store it in the device snapshot memory.
+ */
+int kgsl_device_snapshot(struct kgsl_device *device, int hang)
+{
+ struct kgsl_snapshot_header *header = device->snapshot;
+ int remain = device->snapshot_maxsize - sizeof(*header);
+ void *snapshot;
+ struct timespec boot;
+ int ret = 0;
+
+ /*
+ * Bail if failed to get active count for GPU,
+ * try again
+ */
+ if (kgsl_active_count_get(device)) {
+ KGSL_DRV_ERR(device, "Failed to get GPU active count");
+ return -EINVAL;
+ }
+
+ /* increment the hang count (on hang) for good book keeping */
+ if (hang)
+ device->snapshot_faultcount++;
+
+ /*
+ * The first hang is always the one we are interested in. To
+ * avoid a subsequent hang blowing away the first, the snapshot
+ * is frozen until it is dumped via sysfs.
+ *
+ * Note that triggered snapshots are always taken regardless
+ * of the state and never frozen.
+ */
+
+ if (hang && device->snapshot_frozen == 1) {
+ ret = 0;
+ goto done;
+ }
+
+ if (device->snapshot == NULL) {
+ KGSL_DRV_ERR(device,
+ "snapshot: No snapshot memory available\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (remain < sizeof(*header)) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Not enough memory for the header\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ header->magic = SNAPSHOT_MAGIC;
+
+ header->gpuid = kgsl_gpuid(device, &header->chipid);
+
+ /* Get a pointer to the first section (right after the header) */
+ snapshot = ((void *) device->snapshot) + sizeof(*header);
+
+ /* Build the Linux specific header */
+ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
+ snapshot, &remain, snapshot_os, (void *) hang);
+
+ /* Get the device specific sections */
+ if (device->ftbl->snapshot)
+ snapshot = device->ftbl->snapshot(device, snapshot, &remain,
+ hang);
+
+ /*
+ * The timestamp is the seconds since boot so it is easier to match to
+ * the kernel log
+ */
+
+ getboottime(&boot);
+ device->snapshot_timestamp = get_seconds() - boot.tv_sec;
+ device->snapshot_size = (int) (snapshot - device->snapshot);
+
+ /* Freeze the snapshot on a hang until it gets read */
+ device->snapshot_frozen = (hang) ? 1 : 0;
+
+ /* log buffer info to aid in ramdump fault tolerance */
+ KGSL_DRV_ERR(device, "snapshot created at pa %lx size %d\n",
+ __pa(device->snapshot), device->snapshot_size);
+ if (hang)
+ sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");
+
+done:
+ kgsl_active_count_put(device);
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_device_snapshot);
+
+/* An attribute for showing snapshot details */
+struct kgsl_snapshot_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kgsl_device *device, char *buf);
+ ssize_t (*store)(struct kgsl_device *device, const char *buf,
+ size_t count);
+};
+
+/*
+ * kgsl_snapshot_process_ib_obj_list() - Go through the list of IB's which need
+ * to be dumped for snapshot and move them to the global snapshot list so
+ * they will get dumped when the global list is dumped
+ * @device: device being snapshotted
+ */
+static void kgsl_snapshot_process_ib_obj_list(struct kgsl_device *device)
+{
+ struct kgsl_snapshot_cp_obj *obj, *obj_temp;
+ struct adreno_ib_object *ib_obj;
+ int i;
+
+ list_for_each_entry_safe(obj, obj_temp, &device->snapshot_cp_list,
+ node) {
+ for (i = 0; i < obj->ib_obj_list->num_objs; i++) {
+ ib_obj = &(obj->ib_obj_list->obj_list[i]);
+ kgsl_snapshot_get_object(device, obj->ptbase,
+ ib_obj->gpuaddr, ib_obj->size,
+ ib_obj->snapshot_obj_type);
+ }
+ list_del(&obj->node);
+ adreno_ib_destroy_obj_list(obj->ib_obj_list);
+ kfree(obj);
+ }
+}
+
+#define to_snapshot_attr(a) \
+container_of(a, struct kgsl_snapshot_attribute, attr)
+
+#define kobj_to_device(a) \
+container_of(a, struct kgsl_device, snapshot_kobj)
+
+/* Dump the sysfs binary data to the user */
+static ssize_t snapshot_show(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct kgsl_device *device = kobj_to_device(kobj);
+ struct kgsl_snapshot_object *obj, *tmp;
+ struct kgsl_snapshot_section_header head;
+ struct snapshot_obj_itr itr;
+ int ret;
+
+ if (device == NULL)
+ return 0;
+
+ /* Return nothing if we haven't taken a snapshot yet */
+ if (device->snapshot_timestamp == 0)
+ return 0;
+
+ /* Get the mutex to keep things from changing while we are dumping */
+ mutex_lock(&device->mutex);
+
+ obj_itr_init(&itr, buf, off, count);
+
+ ret = obj_itr_out(&itr, device->snapshot, device->snapshot_size);
+
+ if (ret == 0)
+ goto done;
+
+ kgsl_snapshot_process_ib_obj_list(device);
+
+ if (device->snapshot_cur_ib_objs) {
+ obj_itr_out(&itr, device->snapshot_cur_ib_objs,
+ device->snapshot_cur_ib_objs_size);
+ }
+
+ list_for_each_entry(obj, &device->snapshot_obj_list, node)
+ kgsl_snapshot_dump_object(device, obj, &itr);
+
+ {
+ head.magic = SNAPSHOT_SECTION_MAGIC;
+ head.id = KGSL_SNAPSHOT_SECTION_END;
+ head.size = sizeof(head);
+
+ obj_itr_out(&itr, &head, sizeof(head));
+ }
+
+ /*
+ * Make sure everything has been written out before destroying things.
+ * The best way to confirm this is to go all the way through without
+ * writing any bytes - so only release if we get this far and
+ * itr->write is 0
+ */
+
+ if (itr.write == 0) {
+ list_for_each_entry_safe(obj, tmp, &device->snapshot_obj_list,
+ node)
+ kgsl_snapshot_put_object(device, obj);
+
+ if (device->snapshot_cur_ib_objs) {
+ vfree(device->snapshot_cur_ib_objs);
+ device->snapshot_cur_ib_objs = NULL;
+ device->snapshot_cur_ib_objs_size = 0;
+ }
+
+ if (device->snapshot_frozen)
+ KGSL_DRV_ERR(device, "Snapshot objects released\n");
+
+ device->snapshot_frozen = 0;
+ }
+
+done:
+ mutex_unlock(&device->mutex);
+
+ return itr.write;
+}
+
+/* Show the total number of hangs since device boot */
+static ssize_t faultcount_show(struct kgsl_device *device, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_faultcount);
+}
+
+/* Reset the total number of hangs since device boot */
+static ssize_t faultcount_store(struct kgsl_device *device, const char *buf,
+ size_t count)
+{
+ if (device && count > 0)
+ device->snapshot_faultcount = 0;
+
+ return count;
+}
+
+/* Show the timestamp of the last collected snapshot */
+static ssize_t timestamp_show(struct kgsl_device *device, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", device->snapshot_timestamp);
+}
+
+/* manually trigger a new snapshot to be collected */
+static ssize_t trigger_store(struct kgsl_device *device, const char *buf,
+ size_t count)
+{
+ if (device && count > 0) {
+ mutex_lock(&device->mutex);
+ if (!kgsl_active_count_get(device)) {
+ kgsl_device_snapshot(device, 0);
+ kgsl_active_count_put(device);
+ }
+ mutex_unlock(&device->mutex);
+ }
+
+ return count;
+}
+
+static struct bin_attribute snapshot_attr = {
+ .attr.name = "dump",
+ .attr.mode = 0444,
+ .size = 0,
+ .read = snapshot_show
+};
+
+#define SNAPSHOT_ATTR(_name, _mode, _show, _store) \
+struct kgsl_snapshot_attribute attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+}
+
+SNAPSHOT_ATTR(trigger, 0600, NULL, trigger_store);
+SNAPSHOT_ATTR(timestamp, 0444, timestamp_show, NULL);
+SNAPSHOT_ATTR(faultcount, 0644, faultcount_show, faultcount_store);
+
+static void snapshot_sysfs_release(struct kobject *kobj)
+{
+}
+
+static ssize_t snapshot_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
+ struct kgsl_device *device = kobj_to_device(kobj);
+ ssize_t ret;
+
+ if (device && pattr->show)
+ ret = pattr->show(device, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static ssize_t snapshot_sysfs_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
+ struct kgsl_device *device = kobj_to_device(kobj);
+ ssize_t ret;
+
+ if (device && pattr->store)
+ ret = pattr->store(device, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct sysfs_ops snapshot_sysfs_ops = {
+ .show = snapshot_sysfs_show,
+ .store = snapshot_sysfs_store,
+};
+
+static struct kobj_type ktype_snapshot = {
+ .sysfs_ops = &snapshot_sysfs_ops,
+ .default_attrs = NULL,
+ .release = snapshot_sysfs_release,
+};
+
+/* kgsl_device_snapshot_init - Add resources for the device GPU snapshot
+ * @device - The device to initalize
+ *
+ * Allocate memory for a GPU snapshot for the specified device,
+ * and create the sysfs files to manage it
+ */
+
+int kgsl_device_snapshot_init(struct kgsl_device *device)
+{
+ int ret;
+
+ if (device->snapshot == NULL)
+ device->snapshot = kzalloc(KGSL_SNAPSHOT_MEMSIZE, GFP_KERNEL);
+
+ if (device->snapshot == NULL)
+ return -ENOMEM;
+
+ device->snapshot_maxsize = KGSL_SNAPSHOT_MEMSIZE;
+ device->snapshot_timestamp = 0;
+ device->snapshot_faultcount = 0;
+
+ INIT_LIST_HEAD(&device->snapshot_obj_list);
+ INIT_LIST_HEAD(&device->snapshot_cp_list);
+ device->snapshot_cur_ib_objs = NULL;
+ device->snapshot_cur_ib_objs_size = 0;
+
+ ret = kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
+ &device->dev->kobj, "snapshot");
+ if (ret)
+ goto done;
+
+ ret = sysfs_create_bin_file(&device->snapshot_kobj, &snapshot_attr);
+ if (ret)
+ goto done;
+
+ ret = sysfs_create_file(&device->snapshot_kobj, &attr_trigger.attr);
+ if (ret)
+ goto done;
+
+ ret = sysfs_create_file(&device->snapshot_kobj, &attr_timestamp.attr);
+ if (ret)
+ goto done;
+
+ ret = sysfs_create_file(&device->snapshot_kobj, &attr_faultcount.attr);
+
+done:
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_device_snapshot_init);
+
+/* kgsl_device_snapshot_close - Take down snapshot memory for a device
+ * @device - Pointer to the kgsl_device
+ *
+ * Remove the sysfs files and free the memory allocated for the GPU
+ * snapshot
+ */
+
+void kgsl_device_snapshot_close(struct kgsl_device *device)
+{
+ sysfs_remove_bin_file(&device->snapshot_kobj, &snapshot_attr);
+ sysfs_remove_file(&device->snapshot_kobj, &attr_trigger.attr);
+ sysfs_remove_file(&device->snapshot_kobj, &attr_timestamp.attr);
+
+ kobject_put(&device->snapshot_kobj);
+
+ kfree(device->snapshot);
+
+ device->snapshot = NULL;
+ device->snapshot_maxsize = 0;
+ device->snapshot_timestamp = 0;
+ device->snapshot_faultcount = 0;
+}
+EXPORT_SYMBOL(kgsl_device_snapshot_close);
+
+/*
+ * kgsl_snapshot_add_ib_obj_list() - Add a IB object list to the snapshot
+ * object list
+ * @device: the device that is being snapshotted
+ * @ib_obj_list: The IB list that has objects required to execute an IB
+ * @num_objs: Number of IB objects
+ * @ptbase: The pagetable base in which the IB is mapped
+ *
+ * Adds a new IB to the list of IB objects maintained when getting snapshot
+ * Returns 0 on success else -ENOMEM on error
+ */
+int kgsl_snapshot_add_ib_obj_list(struct kgsl_device *device,
+ phys_addr_t ptbase,
+ struct adreno_ib_object_list *ib_obj_list)
+{
+ struct kgsl_snapshot_cp_obj *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+ obj->ib_obj_list = ib_obj_list;
+ obj->ptbase = ptbase;
+ list_add(&obj->node, &device->snapshot_cp_list);
+ return 0;
+}
+
+/*
+ * snapshot_object() - Dump an IB object into memory
+ * @device - Device being snapshotted
+ * @snapshot - Snapshot memory
+ * @remain - Amount of bytes that the snapshot memory can take
+ * @priv - Pointer to the object being snapshotted
+ *
+ * Returns the amount of bytes written
+ */
+static int snapshot_object(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ int ret = 0;
+ struct kgsl_snapshot_object *obj = priv;
+ struct kgsl_snapshot_gpu_object *header = snapshot;
+ void *dest;
+
+ if (remain < sizeof(*header) + obj->size) {
+ KGSL_DRV_ERR(device, "Not enough space in snapshot\n");
+ return ret;
+ }
+ header->size = obj->size >> 2;
+ header->gpuaddr = obj->gpuaddr;
+ header->ptbase = (__u32)obj->ptbase;
+ header->type = obj->type;
+ dest = snapshot + sizeof(*header);
+
+ if (!kgsl_memdesc_map(&obj->entry->memdesc)) {
+ KGSL_DRV_ERR(device, "Failed to map memdesc\n");
+ return 0;
+ }
+ memcpy(dest, obj->entry->memdesc.hostptr + obj->offset, obj->size);
+ ret += sizeof(*header) + obj->size;
+ kgsl_memdesc_unmap(&obj->entry->memdesc);
+ return ret;
+}
+
+/*
+ * kgsl_snapshot_save_frozen_objs - Save the objects frozen in snapshot into
+ * memory so that the data reported in these objects is correct when snapshot
+ * is taken
+ * @work - The work item that scheduled this work
+ */
+void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
+{
+ struct kgsl_device *device = container_of(work, struct kgsl_device,
+ snapshot_obj_ws);
+ struct kgsl_snapshot_object *snapshot_obj, *snapshot_obj_temp;
+ unsigned int remain = 0;
+ void *snapshot_dest;
+
+ mutex_lock(&device->mutex);
+
+ kgsl_snapshot_process_ib_obj_list(device);
+
+ /* If already exists then wait for it to be released */
+ if (device->snapshot_cur_ib_objs)
+ goto done;
+
+ list_for_each_entry_safe(snapshot_obj, snapshot_obj_temp,
+ &device->snapshot_obj_list, node) {
+ snapshot_obj->size = ALIGN(snapshot_obj->size, 4);
+ remain += (snapshot_obj->size +
+ sizeof(struct kgsl_snapshot_gpu_object) +
+ sizeof(struct kgsl_snapshot_section_header));
+ }
+ if (!remain)
+ goto done;
+
+ device->snapshot_cur_ib_objs = vmalloc(remain);
+ if (!device->snapshot_cur_ib_objs)
+ goto done;
+
+ KGSL_DRV_ERR(device,
+ "Allocated memory for snapshot objects at address %p, size %x\n",
+ device->snapshot_cur_ib_objs, remain);
+ snapshot_dest = device->snapshot_cur_ib_objs;
+ device->snapshot_cur_ib_objs_size = remain;
+
+ list_for_each_entry_safe(snapshot_obj, snapshot_obj_temp,
+ &device->snapshot_obj_list, node) {
+ snapshot_dest = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_GPU_OBJECT,
+ snapshot_dest, &remain, snapshot_object,
+ snapshot_obj);
+ kgsl_snapshot_put_object(device, snapshot_obj);
+ }
+done:
+ mutex_unlock(&device->mutex);
+}
diff --git a/drivers/gpu/msm2/kgsl_snapshot.h b/drivers/gpu/msm2/kgsl_snapshot.h
new file mode 100644
index 0000000..00a83ac
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_snapshot.h
@@ -0,0 +1,336 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _KGSL_SNAPSHOT_H_
+#define _KGSL_SNAPSHOT_H_
+
+#include <linux/types.h>
+
+/* Snapshot header */
+
+/* High word is static, low word is snapshot version ID */
+#define SNAPSHOT_MAGIC 0x504D0002
+
+/* GPU ID scheme:
+ * [16:31] - core identifer (0x0002 for 2D or 0x0003 for 3D)
+ * [00:16] - GPU specific identifier
+ */
+
+struct kgsl_snapshot_header {
+ __u32 magic; /* Magic identifier */
+ __u32 gpuid; /* GPU ID - see above */
+ /* Added in snapshot version 2 */
+ __u32 chipid; /* Chip ID from the GPU */
+} __packed;
+
+/* Section header */
+#define SNAPSHOT_SECTION_MAGIC 0xABCD
+
+struct kgsl_snapshot_section_header {
+ __u16 magic; /* Magic identifier */
+ __u16 id; /* Type of section */
+ __u32 size; /* Size of the section including this header */
+} __packed;
+
+/* Section identifiers */
+#define KGSL_SNAPSHOT_SECTION_OS 0x0101
+#define KGSL_SNAPSHOT_SECTION_REGS 0x0201
+#define KGSL_SNAPSHOT_SECTION_RB 0x0301
+#define KGSL_SNAPSHOT_SECTION_IB 0x0401
+#define KGSL_SNAPSHOT_SECTION_INDEXED_REGS 0x0501
+#define KGSL_SNAPSHOT_SECTION_ISTORE 0x0801
+#define KGSL_SNAPSHOT_SECTION_DEBUG 0x0901
+#define KGSL_SNAPSHOT_SECTION_DEBUGBUS 0x0A01
+#define KGSL_SNAPSHOT_SECTION_GPU_OBJECT 0x0B01
+#define KGSL_SNAPSHOT_SECTION_MEMLIST 0x0E01
+
+#define KGSL_SNAPSHOT_SECTION_END 0xFFFF
+
+/* OS sub-section header */
+#define KGSL_SNAPSHOT_OS_LINUX 0x0001
+
+/* Linux OS specific information */
+
+#define SNAPSHOT_STATE_HUNG 0
+#define SNAPSHOT_STATE_RUNNING 1
+
+struct kgsl_snapshot_linux {
+ int osid; /* subsection OS identifier */
+ int state; /* 1 if the thread is running, 0 for hung */
+ __u32 seconds; /* Unix timestamp for the snapshot */
+ __u32 power_flags; /* Current power flags */
+ __u32 power_level; /* Current power level */
+ __u32 power_interval_timeout; /* Power interval timeout */
+ __u32 grpclk; /* Current GP clock value */
+ __u32 busclk; /* Current busclk value */
+ __u32 ptbase; /* Current ptbase */
+ __u32 pid; /* PID of the process that owns the PT */
+ __u32 current_context; /* ID of the current context */
+ __u32 ctxtcount; /* Number of contexts appended to section */
+ unsigned char release[32]; /* kernel release */
+ unsigned char version[32]; /* kernel version */
+ unsigned char comm[16]; /* Name of the process that owns the PT */
+} __packed;
+
+/*
+ * This structure contains a record of an active context.
+ * These are appended one after another in the OS section below
+ * the header above
+ */
+
+struct kgsl_snapshot_linux_context {
+ __u32 id; /* The context ID */
+ __u32 timestamp_queued; /* The last queued timestamp */
+ __u32 timestamp_retired; /* The last timestamp retired by HW */
+};
+
+/* Ringbuffer sub-section header */
+struct kgsl_snapshot_rb {
+ int start; /* dword at the start of the dump */
+ int end; /* dword at the end of the dump */
+ int rbsize; /* Size (in dwords) of the ringbuffer */
+ int wptr; /* Current index of the CPU write pointer */
+ int rptr; /* Current index of the GPU read pointer */
+ int count; /* Number of dwords in the dump */
+} __packed;
+
+/* Replay or Memory list section, both sections have same header */
+struct kgsl_snapshot_replay_mem_list {
+ /*
+ * Number of IBs to replay for replay section or
+ * number of memory list entries for mem list section
+ */
+ int num_entries;
+ /* Pagetable base to which the replay IBs or memory entries belong */
+ __u32 ptbase;
+} __packed;
+
+/* Indirect buffer sub-section header */
+struct kgsl_snapshot_ib {
+ __u32 gpuaddr; /* GPU address of the the IB */
+ __u32 ptbase; /* Base for the pagetable the GPU address is valid in */
+ int size; /* Size of the IB */
+} __packed;
+
+/* Register sub-section header */
+struct kgsl_snapshot_regs {
+ __u32 count; /* Number of register pairs in the section */
+} __packed;
+
+/* Indexed register sub-section header */
+struct kgsl_snapshot_indexed_regs {
+ __u32 index_reg; /* Offset of the index register for this section */
+ __u32 data_reg; /* Offset of the data register for this section */
+ int start; /* Starting index */
+ int count; /* Number of dwords in the data */
+} __packed;
+
+/* Istore sub-section header */
+struct kgsl_snapshot_istore {
+ int count; /* Number of instructions in the istore */
+} __packed;
+
+/* Debug data sub-section header */
+
+/* A2XX debug sections */
+#define SNAPSHOT_DEBUG_SX 1
+#define SNAPSHOT_DEBUG_CP 2
+#define SNAPSHOT_DEBUG_SQ 3
+#define SNAPSHOT_DEBUG_SQTHREAD 4
+#define SNAPSHOT_DEBUG_MIU 5
+
+/* A3XX debug sections */
+#define SNAPSHOT_DEBUG_VPC_MEMORY 6
+#define SNAPSHOT_DEBUG_CP_MEQ 7
+#define SNAPSHOT_DEBUG_CP_PM4_RAM 8
+#define SNAPSHOT_DEBUG_CP_PFP_RAM 9
+#define SNAPSHOT_DEBUG_CP_ROQ 10
+#define SNAPSHOT_DEBUG_SHADER_MEMORY 11
+#define SNAPSHOT_DEBUG_CP_MERCIU 12
+
+struct kgsl_snapshot_debug {
+ int type; /* Type identifier for the attached tata */
+ int size; /* Size of the section in dwords */
+} __packed;
+
+struct kgsl_snapshot_debugbus {
+ int id; /* Debug bus ID */
+ int count; /* Number of dwords in the dump */
+} __packed;
+
+#define SNAPSHOT_GPU_OBJECT_SHADER 1
+#define SNAPSHOT_GPU_OBJECT_IB 2
+#define SNAPSHOT_GPU_OBJECT_GENERIC 3
+
+struct kgsl_snapshot_gpu_object {
+ int type; /* Type of GPU object */
+ __u32 gpuaddr; /* GPU address of the the object */
+ __u32 ptbase; /* Base for the pagetable the GPU address is valid in */
+ int size; /* Size of the object (in dwords) */
+};
+
+#ifdef __KERNEL__
+
+/* Allocate 512K for each device snapshot */
+#define KGSL_SNAPSHOT_MEMSIZE (512 * 1024)
+
+struct kgsl_device;
+/*
+ * A helper macro to print out "not enough memory functions" - this
+ * makes it easy to standardize the messages as well as cut down on
+ * the number of strings in the binary
+ */
+
+#define SNAPSHOT_ERR_NOMEM(_d, _s) \
+ KGSL_DRV_ERR((_d), \
+ "snapshot: not enough snapshot memory for section %s\n", (_s))
+
+/*
+ * kgsl_snapshot_add_section - Add a new section to the GPU snapshot
+ * @device - the KGSL device being snapshotted
+ * @id - the section id
+ * @snapshot - pointer to the memory for the snapshot
+ * @remain - pointer to the number of bytes left in the snapshot region
+ * @func - Function pointer to fill the section
+ * @priv - Priv pointer to pass to the function
+ *
+ * Set up a KGSL snapshot header by filling the memory with the callback
+ * function and adding the standard section header
+ */
+
+static inline void *kgsl_snapshot_add_section(struct kgsl_device *device,
+ u16 id, void *snapshot, int *remain,
+ int (*func)(struct kgsl_device *, void *, int, void *), void *priv)
+{
+ struct kgsl_snapshot_section_header *header = snapshot;
+ void *data = snapshot + sizeof(*header);
+ int ret = 0;
+
+ /*
+ * Sanity check to make sure there is enough for the header. The
+ * callback will check to make sure there is enough for the rest
+ * of the data. If there isn't enough room then don't advance the
+ * pointer.
+ */
+
+ if (*remain < sizeof(*header))
+ return snapshot;
+
+ /* It is legal to have no function (i.e. - make an empty section) */
+
+ if (func) {
+ ret = func(device, data, *remain, priv);
+
+ /*
+ * If there wasn't enough room for the data then don't bother
+ * setting up the header.
+ */
+
+ if (ret == 0)
+ return snapshot;
+ }
+
+ header->magic = SNAPSHOT_SECTION_MAGIC;
+ header->id = id;
+ header->size = ret + sizeof(*header);
+
+ /* Decrement the room left in the snapshot region */
+ *remain -= header->size;
+ /* Advance the pointer to the end of the next function */
+ return snapshot + header->size;
+}
+
+/* A common helper function to dump a range of registers. This will be used in
+ * the GPU specific devices like this:
+ *
+ * struct kgsl_snapshot_registers_list list;
+ * struct kgsl_snapshot_registers priv[2];
+ *
+ * priv[0].regs = registers_array;;
+ * priv[o].count = num_registers;
+ * priv[1].regs = registers_array_new;;
+ * priv[1].count = num_registers_new;
+ *
+ * list.registers = priv;
+ * list.count = 2;
+ *
+ * kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
+ * remain, kgsl_snapshot_dump_regs, &list).
+ *
+ * Pass in a struct pointing to a list of register definitions as described
+ * below:
+ *
+ * Pass in an array of register range pairs in the form of:
+ * start reg, stop reg
+ * All the registers between start and stop inclusive will be dumped
+ */
+
+struct kgsl_snapshot_registers {
+ unsigned int *regs; /* Pointer to the array of register ranges */
+ int count; /* Number of entries in the array */
+};
+
+struct kgsl_snapshot_registers_list {
+ /* Pointer to an array of register lists */
+ struct kgsl_snapshot_registers *registers;
+ /* Number of registers lists in the array */
+ int count;
+};
+
+int kgsl_snapshot_dump_regs(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv);
+
+/*
+ * A common helper function to dump a set of indexed registers. Use it
+ * like this:
+ *
+ * struct kgsl_snapshot_indexed_registers priv;
+ * priv.index = REG_INDEX;
+ * priv.data = REG_DATA;
+ * priv.count = num_registers
+ *
+ * kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_INDEXED_REGS,
+ * snapshot, remain, kgsl_snapshot_dump_indexed_regs, &priv).
+ *
+ * The callback function will write an index from 0 to priv.count to
+ * the index register and read the data from the data register.
+ */
+
+struct kgsl_snapshot_indexed_registers {
+ unsigned int index; /* Offset of the index register */
+ unsigned int data; /* Offset of the data register */
+ unsigned int start; /* Index to start with */
+ unsigned int count; /* Number of values to read from the pair */
+};
+
+/* Helper function to snapshot a section of indexed registers */
+
+void *kgsl_snapshot_indexed_registers(struct kgsl_device *device,
+ void *snapshot, int *remain, unsigned int index,
+ unsigned int data, unsigned int start, unsigned int count);
+
+/* Freeze a GPU buffer so it can be dumped in the snapshot */
+int kgsl_snapshot_get_object(struct kgsl_device *device, phys_addr_t ptbase,
+ unsigned int gpuaddr, unsigned int size, unsigned int type);
+
+int kgsl_snapshot_have_object(struct kgsl_device *device, phys_addr_t ptbase,
+ unsigned int gpuaddr, unsigned int size);
+
+struct adreno_ib_object_list;
+
+int kgsl_snapshot_add_ib_obj_list(struct kgsl_device *device,
+ phys_addr_t ptbase,
+ struct adreno_ib_object_list *ib_obj_list);
+
+#endif
+#endif
diff --git a/drivers/gpu/msm2/kgsl_sync.c b/drivers/gpu/msm2/kgsl_sync.c
new file mode 100644
index 0000000..0e7606e
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_sync.c
@@ -0,0 +1,346 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <asm/current.h>
+
+#include "kgsl_sync.h"
+
+struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline,
+ unsigned int timestamp)
+{
+ struct sync_pt *pt;
+ pt = sync_pt_create(timeline, (int) sizeof(struct kgsl_sync_pt));
+ if (pt) {
+ struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt;
+ kpt->timestamp = timestamp;
+ }
+ return pt;
+}
+
+/*
+ * This should only be called on sync_pts which have been created but
+ * not added to a fence.
+ */
+void kgsl_sync_pt_destroy(struct sync_pt *pt)
+{
+ sync_pt_free(pt);
+}
+
+static struct sync_pt *kgsl_sync_pt_dup(struct sync_pt *pt)
+{
+ struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt;
+ return kgsl_sync_pt_create(pt->parent, kpt->timestamp);
+}
+
+static int kgsl_sync_pt_has_signaled(struct sync_pt *pt)
+{
+ struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) pt;
+ struct kgsl_sync_timeline *ktimeline =
+ (struct kgsl_sync_timeline *) pt->parent;
+ unsigned int ts = kpt->timestamp;
+ unsigned int last_ts = ktimeline->last_timestamp;
+ if (timestamp_cmp(last_ts, ts) >= 0) {
+ /* signaled */
+ return 1;
+ }
+ return 0;
+}
+
+static int kgsl_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
+{
+ struct kgsl_sync_pt *kpt_a = (struct kgsl_sync_pt *) a;
+ struct kgsl_sync_pt *kpt_b = (struct kgsl_sync_pt *) b;
+ unsigned int ts_a = kpt_a->timestamp;
+ unsigned int ts_b = kpt_b->timestamp;
+ return timestamp_cmp(ts_a, ts_b);
+}
+
+struct kgsl_fence_event_priv {
+ struct kgsl_context *context;
+ unsigned int timestamp;
+};
+
+/**
+ * kgsl_fence_event_cb - Event callback for a fence timestamp event
+ * @device - The KGSL device that expired the timestamp
+ * @priv - private data for the event
+ * @context_id - the context id that goes with the timestamp
+ * @timestamp - the timestamp that triggered the event
+ *
+ * Signal a fence following the expiration of a timestamp
+ */
+
+static inline void kgsl_fence_event_cb(struct kgsl_device *device,
+ void *priv, u32 context_id, u32 timestamp, u32 type)
+{
+ struct kgsl_fence_event_priv *ev = priv;
+
+ /* Signal time timeline for every event type */
+ kgsl_sync_timeline_signal(ev->context->timeline, timestamp);
+ kgsl_context_put(ev->context);
+ kfree(ev);
+}
+
+/**
+ * kgsl_add_fence_event - Create a new fence event
+ * @device - KGSL device to create the event on
+ * @timestamp - Timestamp to trigger the event
+ * @data - Return fence fd stored in struct kgsl_timestamp_event_fence
+ * @len - length of the fence event
+ * @owner - driver instance that owns this event
+ * @returns 0 on success or error code on error
+ *
+ * Create a fence and register an event to signal the fence when
+ * the timestamp expires
+ */
+
+int kgsl_add_fence_event(struct kgsl_device *device,
+ u32 context_id, u32 timestamp, void __user *data, int len,
+ struct kgsl_device_private *owner)
+{
+ struct kgsl_fence_event_priv *event;
+ struct kgsl_timestamp_event_fence priv;
+ struct kgsl_context *context;
+ struct sync_pt *pt;
+ struct sync_fence *fence = NULL;
+ int ret = -EINVAL;
+
+ if (len != sizeof(priv))
+ return -EINVAL;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event == NULL)
+ return -ENOMEM;
+
+ context = kgsl_context_get_owner(owner, context_id);
+
+ if (context == NULL) {
+ kfree(event);
+ return -EINVAL;
+ }
+
+ event->context = context;
+ event->timestamp = timestamp;
+
+ pt = kgsl_sync_pt_create(context->timeline, timestamp);
+ if (pt == NULL) {
+ KGSL_DRV_ERR(device, "kgsl_sync_pt_create failed\n");
+ ret = -ENOMEM;
+ goto fail_pt;
+ }
+
+ fence = sync_fence_create("kgsl-fence", pt);
+ if (fence == NULL) {
+ /* only destroy pt when not added to fence */
+ kgsl_sync_pt_destroy(pt);
+ KGSL_DRV_ERR(device, "sync_fence_create failed\n");
+ ret = -ENOMEM;
+ goto fail_fence;
+ }
+
+ priv.fence_fd = get_unused_fd_flags(0);
+ if (priv.fence_fd < 0) {
+ KGSL_DRV_ERR(device, "invalid fence fd\n");
+ ret = -EINVAL;
+ goto fail_fd;
+ }
+ sync_fence_install(fence, priv.fence_fd);
+
+ if (copy_to_user(data, &priv, sizeof(priv))) {
+ ret = -EFAULT;
+ goto fail_copy_fd;
+ }
+
+ /*
+ * Hold the context ref-count for the event - it will get released in
+ * the callback
+ */
+ ret = kgsl_add_event(device, context_id, timestamp,
+ kgsl_fence_event_cb, event, owner);
+ if (ret)
+ goto fail_event;
+
+ return 0;
+
+fail_event:
+fail_copy_fd:
+ /* clean up sync_fence_install */
+ put_unused_fd(priv.fence_fd);
+fail_fd:
+ /* clean up sync_fence_create */
+ sync_fence_put(fence);
+fail_fence:
+fail_pt:
+ kgsl_context_put(context);
+ kfree(event);
+ return ret;
+}
+
+static unsigned int kgsl_sync_get_timestamp(
+ struct kgsl_sync_timeline *ktimeline, enum kgsl_timestamp_type type)
+{
+ struct kgsl_context *context = idr_find(&ktimeline->device->context_idr,
+ ktimeline->context_id);
+ if (context == NULL)
+ return 0;
+
+ return kgsl_readtimestamp(ktimeline->device, context, type);
+}
+
+static void kgsl_sync_timeline_value_str(struct sync_timeline *sync_timeline,
+ char *str, int size)
+{
+ struct kgsl_sync_timeline *ktimeline =
+ (struct kgsl_sync_timeline *) sync_timeline;
+ unsigned int timestamp_retired = kgsl_sync_get_timestamp(ktimeline,
+ KGSL_TIMESTAMP_RETIRED);
+ snprintf(str, size, "%u retired:%u", ktimeline->last_timestamp,
+ timestamp_retired);
+}
+
+static void kgsl_sync_pt_value_str(struct sync_pt *sync_pt,
+ char *str, int size)
+{
+ struct kgsl_sync_pt *kpt = (struct kgsl_sync_pt *) sync_pt;
+ snprintf(str, size, "%u", kpt->timestamp);
+}
+
+static void kgsl_sync_timeline_release_obj(struct sync_timeline *sync_timeline)
+{
+ /*
+ * Make sure to free the timeline only after destroy flag is set.
+ * This is to avoid further accessing to the timeline from KGSL and
+ * also to catch any unbalanced kref of timeline.
+ */
+ BUG_ON(sync_timeline && (sync_timeline->destroyed != true));
+}
+static const struct sync_timeline_ops kgsl_sync_timeline_ops = {
+ .driver_name = "kgsl-timeline",
+ .dup = kgsl_sync_pt_dup,
+ .has_signaled = kgsl_sync_pt_has_signaled,
+ .compare = kgsl_sync_pt_compare,
+ .timeline_value_str = kgsl_sync_timeline_value_str,
+ .pt_value_str = kgsl_sync_pt_value_str,
+ .release_obj = kgsl_sync_timeline_release_obj,
+};
+
+int kgsl_sync_timeline_create(struct kgsl_context *context)
+{
+ struct kgsl_sync_timeline *ktimeline;
+
+ /* Generate a name which includes the thread name, thread id, process
+ * name, process id, and context id. This makes it possible to
+ * identify the context of a timeline in the sync dump. */
+ char ktimeline_name[sizeof(context->timeline->name)] = {};
+ snprintf(ktimeline_name, sizeof(ktimeline_name),
+ "%s_%.15s(%d)-%.15s(%d)-%d",
+ context->device->name,
+ current->group_leader->comm, current->group_leader->pid,
+ current->comm, current->pid, context->id);
+
+ context->timeline = sync_timeline_create(&kgsl_sync_timeline_ops,
+ (int) sizeof(struct kgsl_sync_timeline), ktimeline_name);
+ if (context->timeline == NULL)
+ return -EINVAL;
+
+ ktimeline = (struct kgsl_sync_timeline *) context->timeline;
+ ktimeline->last_timestamp = 0;
+ ktimeline->device = context->dev_priv->device;
+ ktimeline->context_id = context->id;
+
+ return 0;
+}
+
+void kgsl_sync_timeline_signal(struct sync_timeline *timeline,
+ unsigned int timestamp)
+{
+ struct kgsl_sync_timeline *ktimeline =
+ (struct kgsl_sync_timeline *) timeline;
+
+ if (timestamp_cmp(timestamp, ktimeline->last_timestamp) > 0)
+ ktimeline->last_timestamp = timestamp;
+ sync_timeline_signal(timeline);
+}
+
+void kgsl_sync_timeline_destroy(struct kgsl_context *context)
+{
+ sync_timeline_destroy(context->timeline);
+}
+
+static void kgsl_sync_callback(struct sync_fence *fence,
+ struct sync_fence_waiter *waiter)
+{
+ struct kgsl_sync_fence_waiter *kwaiter =
+ (struct kgsl_sync_fence_waiter *) waiter;
+ kwaiter->func(kwaiter->priv);
+ sync_fence_put(kwaiter->fence);
+ kfree(kwaiter);
+}
+
+struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+ void (*func)(void *priv), void *priv)
+{
+ struct kgsl_sync_fence_waiter *kwaiter;
+ struct sync_fence *fence;
+ int status;
+
+ fence = sync_fence_fdget(fd);
+ if (fence == NULL)
+ return ERR_PTR(-EINVAL);
+
+ /* create the waiter */
+ kwaiter = kzalloc(sizeof(*kwaiter), GFP_KERNEL);
+ if (kwaiter == NULL) {
+ sync_fence_put(fence);
+ return ERR_PTR(-ENOMEM);
+ }
+ kwaiter->fence = fence;
+ kwaiter->priv = priv;
+ kwaiter->func = func;
+ sync_fence_waiter_init((struct sync_fence_waiter *) kwaiter,
+ kgsl_sync_callback);
+
+ /* if status then error or signaled */
+ status = sync_fence_wait_async(fence,
+ (struct sync_fence_waiter *) kwaiter);
+ if (status) {
+ kfree(kwaiter);
+ sync_fence_put(fence);
+ if (status < 0)
+ kwaiter = ERR_PTR(status);
+ else
+ kwaiter = NULL;
+ }
+
+ return kwaiter;
+}
+
+int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *kwaiter)
+{
+ if (kwaiter == NULL)
+ return 0;
+
+ if(sync_fence_cancel_async(kwaiter->fence,
+ (struct sync_fence_waiter *) kwaiter) == 0) {
+ sync_fence_put(kwaiter->fence);
+ kfree(kwaiter);
+ return 1;
+ }
+ return 0;
+}
diff --git a/drivers/gpu/msm2/kgsl_sync.h b/drivers/gpu/msm2/kgsl_sync.h
new file mode 100644
index 0000000..275eaf0
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_sync.h
@@ -0,0 +1,101 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_SYNC_H
+#define __KGSL_SYNC_H
+
+#include <linux/sync.h>
+#include "kgsl_device.h"
+
+struct kgsl_sync_timeline {
+ struct sync_timeline timeline;
+ unsigned int last_timestamp;
+ struct kgsl_device *device;
+ u32 context_id;
+};
+
+struct kgsl_sync_pt {
+ struct sync_pt pt;
+ unsigned int timestamp;
+};
+
+struct kgsl_sync_fence_waiter {
+ struct sync_fence_waiter waiter;
+ struct sync_fence *fence;
+ void (*func)(void *priv);
+ void *priv;
+};
+
+#if defined(CONFIG_SYNC)
+struct sync_pt *kgsl_sync_pt_create(struct sync_timeline *timeline,
+ unsigned int timestamp);
+void kgsl_sync_pt_destroy(struct sync_pt *pt);
+int kgsl_add_fence_event(struct kgsl_device *device,
+ u32 context_id, u32 timestamp, void __user *data, int len,
+ struct kgsl_device_private *owner);
+int kgsl_sync_timeline_create(struct kgsl_context *context);
+void kgsl_sync_timeline_signal(struct sync_timeline *timeline,
+ unsigned int timestamp);
+void kgsl_sync_timeline_destroy(struct kgsl_context *context);
+struct kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+ void (*func)(void *priv), void *priv);
+int kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter);
+#else
+static inline struct sync_pt
+*kgsl_sync_pt_create(struct sync_timeline *timeline, unsigned int timestamp)
+{
+ return NULL;
+}
+
+static inline void kgsl_sync_pt_destroy(struct sync_pt *pt)
+{
+}
+
+static inline int kgsl_add_fence_event(struct kgsl_device *device,
+ u32 context_id, u32 timestamp, void __user *data, int len,
+ struct kgsl_device_private *owner)
+{
+ return -EINVAL;
+}
+
+static int kgsl_sync_timeline_create(struct kgsl_context *context)
+{
+ context->timeline = NULL;
+ return 0;
+}
+
+static inline void
+kgsl_sync_timeline_signal(struct sync_timeline *timeline,
+ unsigned int timestamp)
+{
+}
+
+static inline void kgsl_sync_timeline_destroy(struct kgsl_context *context)
+{
+}
+
+static inline struct
+kgsl_sync_fence_waiter *kgsl_sync_fence_async_wait(int fd,
+ void (*func)(void *priv), void *priv)
+{
+ return NULL;
+}
+
+static inline int
+kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_waiter *waiter)
+{
+ return 1;
+}
+
+#endif
+
+#endif /* __KGSL_SYNC_H */
diff --git a/drivers/gpu/msm2/kgsl_trace.c b/drivers/gpu/msm2/kgsl_trace.c
new file mode 100644
index 0000000..e432729
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_trace.c
@@ -0,0 +1,19 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "kgsl_trace.h"
diff --git a/drivers/gpu/msm2/kgsl_trace.h b/drivers/gpu/msm2/kgsl_trace.h
new file mode 100644
index 0000000..5f39b8b
--- /dev/null
+++ b/drivers/gpu/msm2/kgsl_trace.h
@@ -0,0 +1,802 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_KGSL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _KGSL_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE kgsl_trace
+
+#include <linux/tracepoint.h>
+#include "kgsl_device.h"
+
+#include "adreno_drawctxt.h"
+
+struct kgsl_device;
+struct kgsl_ringbuffer_issueibcmds;
+struct kgsl_device_waittimestamp;
+
+/*
+ * Tracepoint for kgsl issue ib commands
+ */
+TRACE_EVENT(kgsl_issueibcmds,
+
+ TP_PROTO(struct kgsl_device *device,
+ int drawctxt_id,
+ struct kgsl_cmdbatch *cmdbatch,
+ int timestamp,
+ int flags,
+ int result,
+ unsigned int type),
+
+ TP_ARGS(device, drawctxt_id, cmdbatch, timestamp, flags,
+ result, type),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, drawctxt_id)
+ __field(unsigned int, ibdesc_addr)
+ __field(unsigned int, numibs)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, flags)
+ __field(int, result)
+ __field(unsigned int, drawctxt_type)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->drawctxt_id = drawctxt_id;
+ __entry->ibdesc_addr = cmdbatch->ibdesc[0].gpuaddr;
+ __entry->numibs = cmdbatch->ibcount;
+ __entry->timestamp = timestamp;
+ __entry->flags = flags;
+ __entry->result = result;
+ __entry->drawctxt_type = type;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u ib=0x%u numibs=%u ts=%u "
+ "flags=0x%x(%s) result=%d type=%s",
+ __get_str(device_name),
+ __entry->drawctxt_id,
+ __entry->ibdesc_addr,
+ __entry->numibs,
+ __entry->timestamp,
+ __entry->flags,
+ __entry->flags ? __print_flags(__entry->flags, "|",
+ { KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" },
+ { KGSL_CONTEXT_SUBMIT_IB_LIST, "IB_LIST" },
+ { KGSL_CONTEXT_CTX_SWITCH, "CTX_SWITCH" })
+ : "None",
+ __entry->result,
+ __print_symbolic(__entry->drawctxt_type,
+ ADRENO_DRAWCTXT_TYPES)
+ )
+);
+
+/*
+ * Tracepoint for kgsl readtimestamp
+ */
+TRACE_EVENT(kgsl_readtimestamp,
+
+ TP_PROTO(struct kgsl_device *device,
+ unsigned int context_id,
+ unsigned int type,
+ unsigned int timestamp),
+
+ TP_ARGS(device, context_id, type, timestamp),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, context_id)
+ __field(unsigned int, type)
+ __field(unsigned int, timestamp)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->context_id = context_id;
+ __entry->type = type;
+ __entry->timestamp = timestamp;
+ ),
+
+ TP_printk(
+ "d_name=%s context_id=%u type=%u ts=%u",
+ __get_str(device_name),
+ __entry->context_id,
+ __entry->type,
+ __entry->timestamp
+ )
+);
+
+/*
+ * Tracepoint for kgsl waittimestamp entry
+ */
+TRACE_EVENT(kgsl_waittimestamp_entry,
+
+ TP_PROTO(struct kgsl_device *device,
+ unsigned int context_id,
+ unsigned int curr_ts,
+ unsigned int wait_ts,
+ unsigned int timeout),
+
+ TP_ARGS(device, context_id, curr_ts, wait_ts, timeout),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, context_id)
+ __field(unsigned int, curr_ts)
+ __field(unsigned int, wait_ts)
+ __field(unsigned int, timeout)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->context_id = context_id;
+ __entry->curr_ts = curr_ts;
+ __entry->wait_ts = wait_ts;
+ __entry->timeout = timeout;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u curr_ts=%u ts=%u timeout=%u",
+ __get_str(device_name),
+ __entry->context_id,
+ __entry->curr_ts,
+ __entry->wait_ts,
+ __entry->timeout
+ )
+);
+
+/*
+ * Tracepoint for kgsl waittimestamp exit
+ */
+TRACE_EVENT(kgsl_waittimestamp_exit,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int curr_ts,
+ int result),
+
+ TP_ARGS(device, curr_ts, result),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, curr_ts)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->curr_ts = curr_ts;
+ __entry->result = result;
+ ),
+
+ TP_printk(
+ "d_name=%s curr_ts=%u result=%d",
+ __get_str(device_name),
+ __entry->curr_ts,
+ __entry->result
+ )
+);
+
+DECLARE_EVENT_CLASS(kgsl_pwr_template,
+ TP_PROTO(struct kgsl_device *device, int on),
+
+ TP_ARGS(device, on),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(int, on)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->on = on;
+ ),
+
+ TP_printk(
+ "d_name=%s flag=%s",
+ __get_str(device_name),
+ __entry->on ? "on" : "off"
+ )
+);
+
+DEFINE_EVENT(kgsl_pwr_template, kgsl_clk,
+ TP_PROTO(struct kgsl_device *device, int on),
+ TP_ARGS(device, on)
+);
+
+DEFINE_EVENT(kgsl_pwr_template, kgsl_irq,
+ TP_PROTO(struct kgsl_device *device, int on),
+ TP_ARGS(device, on)
+);
+
+DEFINE_EVENT(kgsl_pwr_template, kgsl_bus,
+ TP_PROTO(struct kgsl_device *device, int on),
+ TP_ARGS(device, on)
+);
+
+DEFINE_EVENT(kgsl_pwr_template, kgsl_rail,
+ TP_PROTO(struct kgsl_device *device, int on),
+ TP_ARGS(device, on)
+);
+
+TRACE_EVENT(kgsl_pwrlevel,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int pwrlevel,
+ unsigned int freq),
+
+ TP_ARGS(device, pwrlevel, freq),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, pwrlevel)
+ __field(unsigned int, freq)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->pwrlevel = pwrlevel;
+ __entry->freq = freq;
+ ),
+
+ TP_printk(
+ "d_name=%s pwrlevel=%d freq=%d",
+ __get_str(device_name),
+ __entry->pwrlevel,
+ __entry->freq
+ )
+);
+
+TRACE_EVENT(kgsl_gpubusy,
+ TP_PROTO(struct kgsl_device *device, unsigned int busy,
+ unsigned int elapsed),
+
+ TP_ARGS(device, busy, elapsed),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, busy)
+ __field(unsigned int, elapsed)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->busy = busy;
+ __entry->elapsed = elapsed;
+ ),
+
+ TP_printk(
+ "d_name=%s busy=%u elapsed=%d",
+ __get_str(device_name),
+ __entry->busy,
+ __entry->elapsed
+ )
+);
+
+DECLARE_EVENT_CLASS(kgsl_pwrstate_template,
+ TP_PROTO(struct kgsl_device *device, unsigned int state),
+
+ TP_ARGS(device, state),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->state = state;
+ ),
+
+ TP_printk(
+ "d_name=%s state=%s",
+ __get_str(device_name),
+ kgsl_pwrstate_to_str(__entry->state)
+ )
+);
+
+DEFINE_EVENT(kgsl_pwrstate_template, kgsl_pwr_set_state,
+ TP_PROTO(struct kgsl_device *device, unsigned int state),
+ TP_ARGS(device, state)
+);
+
+DEFINE_EVENT(kgsl_pwrstate_template, kgsl_pwr_request_state,
+ TP_PROTO(struct kgsl_device *device, unsigned int state),
+ TP_ARGS(device, state)
+);
+
+TRACE_EVENT(kgsl_mem_alloc,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry),
+
+ TP_ARGS(mem_entry),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ __field(unsigned int, tgid)
+ __array(char, usage, 16)
+ __field(unsigned int, id)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ __entry->tgid = mem_entry->priv->pid;
+ kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+ mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
+ __entry->flags = mem_entry->memdesc.flags;
+ ),
+
+ TP_printk(
+ "gpuaddr=0x%08x size=%u tgid=%u usage=%s id=%u flags=0x%08x",
+ __entry->gpuaddr, __entry->size, __entry->tgid,
+ __entry->usage, __entry->id, __entry->flags
+ )
+);
+
+TRACE_EVENT(kgsl_mem_mmap,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry),
+
+ TP_ARGS(mem_entry),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, useraddr)
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ __array(char, usage, 16)
+ __field(unsigned int, id)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->useraddr = mem_entry->memdesc.useraddr;
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+ mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
+ __entry->flags = mem_entry->memdesc.flags;
+ ),
+
+ TP_printk(
+ "useraddr=0x%lx gpuaddr=0x%08x size=%u usage=%s id=%u"
+ " flags=0x%08x",
+ __entry->useraddr, __entry->gpuaddr, __entry->size,
+ __entry->usage, __entry->id, __entry->flags
+ )
+);
+
+TRACE_EVENT(kgsl_mem_unmapped_area_collision,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry,
+ unsigned long hint,
+ unsigned long len,
+ unsigned long addr),
+
+ TP_ARGS(mem_entry, hint, len, addr),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned long, hint)
+ __field(unsigned long, len)
+ __field(unsigned long, addr)
+ ),
+
+ TP_fast_assign(
+ __entry->id = mem_entry->id;
+ __entry->hint = hint;
+ __entry->len = len;
+ __entry->addr = addr;
+ ),
+
+ TP_printk(
+ "id=%u hint=0x%lx len=%lu addr=0x%lx",
+ __entry->id, __entry->hint, __entry->len, __entry->addr
+ )
+);
+
+TRACE_EVENT(kgsl_mem_map,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry, int fd),
+
+ TP_ARGS(mem_entry, fd),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ __field(int, fd)
+ __field(int, type)
+ __field(unsigned int, tgid)
+ __array(char, usage, 16)
+ __field(unsigned int, id)
+ ),
+
+ TP_fast_assign(
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ __entry->fd = fd;
+ __entry->type = mem_entry->memtype;
+ __entry->tgid = mem_entry->priv->pid;
+ kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+ mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
+ ),
+
+ TP_printk(
+ "gpuaddr=0x%08x size=%u type=%d fd=%d tgid=%u usage=%s id=%u",
+ __entry->gpuaddr, __entry->size,
+ __entry->type, __entry->fd, __entry->tgid,
+ __entry->usage, __entry->id
+ )
+);
+
+TRACE_EVENT(kgsl_mem_free,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry),
+
+ TP_ARGS(mem_entry),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ __field(int, type)
+ __field(int, fd)
+ __field(unsigned int, tgid)
+ __array(char, usage, 16)
+ __field(unsigned int, id)
+ ),
+
+ TP_fast_assign(
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ __entry->type = mem_entry->memtype;
+ __entry->tgid = mem_entry->priv->pid;
+ kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+ mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
+ ),
+
+ TP_printk(
+ "gpuaddr=0x%08x size=%u type=%d tgid=%u usage=%s id=%u",
+ __entry->gpuaddr, __entry->size, __entry->type,
+ __entry->tgid, __entry->usage, __entry->id
+ )
+);
+
+TRACE_EVENT(kgsl_mem_sync_cache,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry, unsigned int op),
+
+ TP_ARGS(mem_entry, op),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ __array(char, usage, 16)
+ __field(unsigned int, tgid)
+ __field(unsigned int, id)
+ __field(unsigned int, op)
+ ),
+
+ TP_fast_assign(
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ __entry->tgid = mem_entry->priv->pid;
+ __entry->id = mem_entry->id;
+ kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+ mem_entry->memdesc.flags);
+ __entry->op = op;
+ ),
+
+ TP_printk(
+ "gpuaddr=0x%08x size=%u tgid=%u usage=%s id=%u op=%c%c",
+ __entry->gpuaddr, __entry->size, __entry->tgid, __entry->usage,
+ __entry->id,
+ (__entry->op & KGSL_GPUMEM_CACHE_CLEAN) ? 'c' : '.',
+ (__entry->op & KGSL_GPUMEM_CACHE_INV) ? 'i' : '.'
+ )
+);
+
+TRACE_EVENT(kgsl_mem_sync_full_cache,
+
+ TP_PROTO(unsigned int num_bufs, unsigned int bulk_size,
+ unsigned int op),
+
+ TP_ARGS(num_bufs, bulk_size, op),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, num_bufs)
+ __field(unsigned int, bulk_size)
+ __field(unsigned int, op)
+ ),
+
+ TP_fast_assign(
+ __entry->num_bufs = num_bufs;
+ __entry->bulk_size = bulk_size;
+ __entry->op = op;
+ ),
+
+ TP_printk(
+ "num_bufs=%d bulk_size=%d op=%c%c",
+ __entry->num_bufs, __entry->bulk_size,
+ (__entry->op & KGSL_GPUMEM_CACHE_CLEAN) ? 'c' : '.',
+ (__entry->op & KGSL_GPUMEM_CACHE_INV) ? 'i' : '.'
+ )
+);
+
+DECLARE_EVENT_CLASS(kgsl_mem_timestamp_template,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ __field(int, type)
+ __array(char, usage, 16)
+ __field(unsigned int, id)
+ __field(unsigned int, drawctxt_id)
+ __field(unsigned int, curr_ts)
+ __field(unsigned int, free_ts)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ kgsl_get_memory_usage(__entry->usage, sizeof(__entry->usage),
+ mem_entry->memdesc.flags);
+ __entry->id = mem_entry->id;
+ __entry->drawctxt_id = id;
+ __entry->type = mem_entry->memtype;
+ __entry->curr_ts = curr_ts;
+ __entry->free_ts = free_ts;
+ ),
+
+ TP_printk(
+ "d_name=%s gpuaddr=0x%08x size=%u type=%d usage=%s id=%u ctx=%u"
+ " curr_ts=%u free_ts=%u",
+ __get_str(device_name),
+ __entry->gpuaddr,
+ __entry->size,
+ __entry->type,
+ __entry->usage,
+ __entry->id,
+ __entry->drawctxt_id,
+ __entry->curr_ts,
+ __entry->free_ts
+ )
+);
+
+DEFINE_EVENT(kgsl_mem_timestamp_template, kgsl_mem_timestamp_queue,
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts)
+);
+
+DEFINE_EVENT(kgsl_mem_timestamp_template, kgsl_mem_timestamp_free,
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts)
+);
+
+TRACE_EVENT(kgsl_context_create,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_context *context,
+ unsigned int flags),
+
+ TP_ARGS(device, context, flags),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, id)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->id = context->id;
+ __entry->flags = flags;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u flags=0x%x %s",
+ __get_str(device_name), __entry->id, __entry->flags,
+ __entry->flags ? __print_flags(__entry->flags, "|",
+ { KGSL_CONTEXT_NO_GMEM_ALLOC , "NO_GMEM_ALLOC" },
+ { KGSL_CONTEXT_PREAMBLE, "PREAMBLE" },
+ { KGSL_CONTEXT_TRASH_STATE, "TRASH_STATE" },
+ { KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" })
+ : "None"
+ )
+);
+
+TRACE_EVENT(kgsl_context_detach,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_context *context),
+
+ TP_ARGS(device, context),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->id = context->id;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u",
+ __get_str(device_name), __entry->id
+ )
+);
+
+TRACE_EVENT(kgsl_context_destroy,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_context *context),
+
+ TP_ARGS(device, context),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->id = context->id;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u",
+ __get_str(device_name), __entry->id
+ )
+);
+
+TRACE_EVENT(kgsl_mmu_pagefault,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int page,
+ unsigned int pt, const char *op),
+
+ TP_ARGS(device, page, pt, op),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, page)
+ __field(unsigned int, pt)
+ __string(op, op)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->page = page;
+ __entry->pt = pt;
+ __assign_str(op, op);
+ ),
+
+ TP_printk(
+ "d_name=%s page=0x%08x pt=%u op=%s",
+ __get_str(device_name), __entry->page, __entry->pt,
+ __get_str(op)
+ )
+);
+
+TRACE_EVENT(kgsl_regwrite,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int offset,
+ unsigned int value),
+
+ TP_ARGS(device, offset, value),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, offset)
+ __field(unsigned int, value)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ "d_name=%s reg=0x%x value=0x%x",
+ __get_str(device_name), __entry->offset, __entry->value
+ )
+);
+
+TRACE_EVENT(kgsl_register_event,
+ TP_PROTO(unsigned int id, unsigned int timestamp, void *func),
+ TP_ARGS(id, timestamp, func),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(void *, func)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->timestamp = timestamp;
+ __entry->func = func;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u cb=%pF",
+ __entry->id, __entry->timestamp, __entry->func)
+);
+
+TRACE_EVENT(kgsl_fire_event,
+ TP_PROTO(unsigned int id, unsigned int ts,
+ unsigned int type, unsigned int age, void *func),
+ TP_ARGS(id, ts, type, age, func),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, ts)
+ __field(unsigned int, type)
+ __field(unsigned int, age)
+ __field(void *, func)
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->ts = ts;
+ __entry->type = type;
+ __entry->age = age;
+ __entry->func = func;
+ ),
+ TP_printk(
+ "ctx=%u ts=%u type=%s age=%u cb=%pF",
+ __entry->id, __entry->ts,
+ __print_symbolic(__entry->type, KGSL_EVENT_TYPES),
+ __entry->age, __entry->func)
+);
+
+TRACE_EVENT(kgsl_active_count,
+
+ TP_PROTO(struct kgsl_device *device, unsigned long ip),
+
+ TP_ARGS(device, ip),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, count)
+ __field(unsigned long, ip)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->count = atomic_read(&device->active_cnt);
+ __entry->ip = ip;
+ ),
+
+ TP_printk(
+ "d_name=%s active_cnt=%u func=%pf",
+ __get_str(device_name), __entry->count, (void *) __entry->ip
+ )
+);
+
+#endif /* _KGSL_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm2/z180.c b/drivers/gpu/msm2/z180.c
new file mode 100644
index 0000000..ac50a91
--- /dev/null
+++ b/drivers/gpu/msm2/z180.c
@@ -0,0 +1,1054 @@
+/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_sharedmem.h"
+
+#include "z180.h"
+#include "z180_reg.h"
+#include "z180_trace.h"
+
+#define DRIVER_VERSION_MAJOR 3
+#define DRIVER_VERSION_MINOR 1
+
+#define GSL_VGC_INT_MASK \
+ (REG_VGC_IRQSTATUS__MH_MASK | \
+ REG_VGC_IRQSTATUS__G2D_MASK | \
+ REG_VGC_IRQSTATUS__FIFO_MASK)
+
+#define VGV3_NEXTCMD_JUMP 0x01
+
+#define VGV3_NEXTCMD_NEXTCMD_FSHIFT 12
+#define VGV3_NEXTCMD_NEXTCMD_FMASK 0x7
+
+#define VGV3_CONTROL_MARKADD_FSHIFT 0
+#define VGV3_CONTROL_MARKADD_FMASK 0xfff
+
+#define Z180_MARKER_SIZE 10
+#define Z180_CALL_CMD 0x1000
+#define Z180_MARKER_CMD 0x8000
+#define Z180_STREAM_END_CMD 0x9000
+#define Z180_STREAM_PACKET 0x7C000176
+#define Z180_STREAM_PACKET_CALL 0x7C000275
+
+#define NUMTEXUNITS 4
+#define TEXUNITREGCOUNT 25
+#define VG_REGCOUNT 0x39
+
+#define PACKETSIZE_BEGIN 3
+#define PACKETSIZE_G2DCOLOR 2
+#define PACKETSIZE_TEXUNIT (TEXUNITREGCOUNT * 2)
+#define PACKETSIZE_REG (VG_REGCOUNT * 2)
+#define PACKETSIZE_STATE (PACKETSIZE_TEXUNIT * NUMTEXUNITS + \
+ PACKETSIZE_REG + PACKETSIZE_BEGIN + \
+ PACKETSIZE_G2DCOLOR)
+#define PACKETSIZE_STATESTREAM (ALIGN((PACKETSIZE_STATE * \
+ sizeof(unsigned int)), 32) / \
+ sizeof(unsigned int))
+
+#define Z180_INVALID_CONTEXT UINT_MAX
+
+/* z180 MH arbiter config*/
+#define Z180_CFG_MHARB \
+ (0x10 \
+ | (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
+ | (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
+ | (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
+ | (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
+
+#define Z180_TIMESTAMP_EPSILON 20000
+#define Z180_IDLE_COUNT_MAX 1000000
+
+enum z180_cmdwindow_type {
+ Z180_CMDWINDOW_2D = 0x00000000,
+ Z180_CMDWINDOW_MMU = 0x00000002,
+};
+
+#define Z180_CMDWINDOW_TARGET_MASK 0x000000FF
+#define Z180_CMDWINDOW_ADDR_MASK 0x00FFFF00
+#define Z180_CMDWINDOW_TARGET_SHIFT 0
+#define Z180_CMDWINDOW_ADDR_SHIFT 8
+
+static int z180_init(struct kgsl_device *device);
+static int z180_start(struct kgsl_device *device);
+static int z180_stop(struct kgsl_device *device);
+static int z180_wait(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs);
+static void z180_regread(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value);
+static void z180_regwrite(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value);
+static void z180_cmdwindow_write(struct kgsl_device *device,
+ unsigned int addr,
+ unsigned int data);
+
+#define Z180_MMU_CONFIG \
+ (0x01 \
+ | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
+
+#define KGSL_LOG_LEVEL_DEFAULT 3
+
+static const struct kgsl_functable z180_functable;
+
+static struct z180_device device_2d0 = {
+ .dev = {
+ KGSL_DEVICE_COMMON_INIT(device_2d0.dev),
+ .name = DEVICE_2D0_NAME,
+ .id = KGSL_DEVICE_2D0,
+ .mh = {
+ .mharb = Z180_CFG_MHARB,
+ .mh_intf_cfg1 = 0x00032f07,
+ .mh_intf_cfg2 = 0x004b274f,
+ /* turn off memory protection unit by setting
+ acceptable physical address range to include
+ all pages. */
+ .mpu_base = 0x00000000,
+ .mpu_range = 0xFFFFF000,
+ },
+ .mmu = {
+ .config = Z180_MMU_CONFIG,
+ },
+ .pwrctrl = {
+ .irq_name = KGSL_2D0_IRQ,
+ },
+ .iomemname = KGSL_2D0_REG_MEMORY,
+ .ftbl = &z180_functable,
+ .cmd_log = KGSL_LOG_LEVEL_DEFAULT,
+ .ctxt_log = KGSL_LOG_LEVEL_DEFAULT,
+ .drv_log = KGSL_LOG_LEVEL_DEFAULT,
+ .mem_log = KGSL_LOG_LEVEL_DEFAULT,
+ .pwr_log = KGSL_LOG_LEVEL_DEFAULT,
+ .pm_dump_enable = 0,
+ },
+ .cmdwin_lock = __SPIN_LOCK_INITIALIZER(device_2d1.cmdwin_lock),
+};
+
+static struct z180_device device_2d1 = {
+ .dev = {
+ KGSL_DEVICE_COMMON_INIT(device_2d1.dev),
+ .name = DEVICE_2D1_NAME,
+ .id = KGSL_DEVICE_2D1,
+ .mh = {
+ .mharb = Z180_CFG_MHARB,
+ .mh_intf_cfg1 = 0x00032f07,
+ .mh_intf_cfg2 = 0x004b274f,
+ /* turn off memory protection unit by setting
+ acceptable physical address range to include
+ all pages. */
+ .mpu_base = 0x00000000,
+ .mpu_range = 0xFFFFF000,
+ },
+ .mmu = {
+ .config = Z180_MMU_CONFIG,
+ },
+ .pwrctrl = {
+ .irq_name = KGSL_2D1_IRQ,
+ },
+ .iomemname = KGSL_2D1_REG_MEMORY,
+ .ftbl = &z180_functable,
+ },
+ .cmdwin_lock = __SPIN_LOCK_INITIALIZER(device_2d1.cmdwin_lock),
+};
+
+static irqreturn_t z180_irq_handler(struct kgsl_device *device)
+{
+ irqreturn_t result = IRQ_NONE;
+ unsigned int status;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ z180_regread(device, ADDR_VGC_IRQSTATUS >> 2, &status);
+
+ trace_kgsl_z180_irq_status(device, status);
+
+ if (status & GSL_VGC_INT_MASK) {
+ z180_regwrite(device,
+ ADDR_VGC_IRQSTATUS >> 2, status & GSL_VGC_INT_MASK);
+
+ result = IRQ_HANDLED;
+
+ if (status & REG_VGC_IRQSTATUS__FIFO_MASK)
+ KGSL_DRV_ERR(device, "z180 fifo interrupt\n");
+ if (status & REG_VGC_IRQSTATUS__MH_MASK)
+ kgsl_mh_intrcallback(device);
+ if (status & REG_VGC_IRQSTATUS__G2D_MASK) {
+ int count;
+
+ z180_regread(device,
+ ADDR_VGC_IRQ_ACTIVE_CNT >> 2,
+ &count);
+
+ count >>= 8;
+ count &= 255;
+ z180_dev->timestamp += count;
+
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ wake_up_interruptible(&device->wait_queue);
+ }
+ }
+
+ if (device->requested_state == KGSL_STATE_NONE) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ queue_work(device->work_queue, &device->idle_check_ws);
+ }
+ mod_timer_pending(&device->idle_timer,
+ jiffies + device->pwrctrl.interval_timeout);
+
+ return result;
+}
+
+static void z180_cleanup_pt(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
+
+ kgsl_mmu_unmap(pagetable, &device->memstore);
+
+ kgsl_mmu_unmap(pagetable, &z180_dev->ringbuffer.cmdbufdesc);
+}
+
+static int z180_setup_pt(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable)
+{
+ int result = 0;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory);
+
+ if (result)
+ goto error;
+
+ result = kgsl_mmu_map_global(pagetable, &device->memstore);
+ if (result)
+ goto error_unmap_dummy;
+
+ result = kgsl_mmu_map_global(pagetable,
+ &z180_dev->ringbuffer.cmdbufdesc);
+ if (result)
+ goto error_unmap_memstore;
+ /*
+ * Set the mpu end to the last "normal" global memory we use.
+ * For the IOMMU, this will be used to restrict access to the
+ * mapped registers.
+ */
+ device->mh.mpu_range = z180_dev->ringbuffer.cmdbufdesc.gpuaddr +
+ z180_dev->ringbuffer.cmdbufdesc.size;
+ return result;
+
+error_unmap_dummy:
+ kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
+
+error_unmap_memstore:
+ kgsl_mmu_unmap(pagetable, &device->memstore);
+
+error:
+ return result;
+}
+
+static inline unsigned int rb_offset(unsigned int timestamp)
+{
+ return (timestamp % Z180_PACKET_COUNT)
+ *sizeof(unsigned int)*(Z180_PACKET_SIZE);
+}
+
+static inline unsigned int rb_gpuaddr(struct z180_device *z180_dev,
+ unsigned int timestamp)
+{
+ return z180_dev->ringbuffer.cmdbufdesc.gpuaddr + rb_offset(timestamp);
+}
+
+static void addmarker(struct z180_ringbuffer *rb, unsigned int timestamp)
+{
+ char *ptr = (char *)(rb->cmdbufdesc.hostptr);
+ unsigned int *p = (unsigned int *)(ptr + rb_offset(timestamp));
+
+ *p++ = Z180_STREAM_PACKET;
+ *p++ = (Z180_MARKER_CMD | 5);
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = Z180_STREAM_PACKET;
+ *p++ = 5;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+}
+
+static void addcmd(struct z180_ringbuffer *rb, unsigned int timestamp,
+ unsigned int cmd, unsigned int nextcnt)
+{
+ char * ptr = (char *)(rb->cmdbufdesc.hostptr);
+ unsigned int *p = (unsigned int *)(ptr + (rb_offset(timestamp)
+ + (Z180_MARKER_SIZE * sizeof(unsigned int))));
+
+ *p++ = Z180_STREAM_PACKET_CALL;
+ *p++ = cmd;
+ *p++ = Z180_CALL_CMD | nextcnt;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+}
+
+static void z180_cmdstream_start(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ unsigned int cmd = VGV3_NEXTCMD_JUMP << VGV3_NEXTCMD_NEXTCMD_FSHIFT;
+
+ addmarker(&z180_dev->ringbuffer, 0);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_MODE, 4);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_NEXTADDR,
+ rb_gpuaddr(z180_dev, z180_dev->current_timestamp));
+
+ z180_cmdwindow_write(device, ADDR_VGV3_NEXTCMD, cmd | 5);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_WRITEADDR,
+ device->memstore.gpuaddr);
+
+ cmd = (int)(((1) & VGV3_CONTROL_MARKADD_FMASK)
+ << VGV3_CONTROL_MARKADD_FSHIFT);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
+}
+
+static int room_in_rb(struct z180_device *device)
+{
+ int ts_diff;
+
+ ts_diff = device->current_timestamp - device->timestamp;
+
+ return ts_diff < Z180_PACKET_COUNT;
+}
+
+/**
+ * z180_idle() - Idle the 2D device
+ * @device: Pointer to the KGSL device struct for the Z180
+ *
+ * wait until the z180 submission queue is idle
+ */
+int z180_idle(struct kgsl_device *device)
+{
+ int status = 0;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ if (timestamp_cmp(z180_dev->current_timestamp,
+ z180_dev->timestamp) > 0)
+ status = z180_wait(device, NULL,
+ z180_dev->current_timestamp,
+ Z180_IDLE_TIMEOUT);
+
+ if (status)
+ KGSL_DRV_ERR(device, "z180_waittimestamp() timed out\n");
+
+ return status;
+}
+
+int
+z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ struct kgsl_cmdbatch *cmdbatch,
+ uint32_t *timestamp)
+{
+ long result = 0;
+ unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
+ unsigned int cnt = 5;
+ unsigned int old_timestamp = 0;
+ unsigned int nextcnt = Z180_STREAM_END_CMD | 5;
+ struct kgsl_mem_entry *entry = NULL;
+ unsigned int cmd;
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ unsigned int sizedwords;
+ unsigned int numibs;
+ struct kgsl_ibdesc *ibdesc;
+
+ mutex_lock(&device->mutex);
+
+ kgsl_active_count_get(device);
+
+ if (cmdbatch == NULL) {
+ result = EINVAL;
+ goto error;
+ }
+
+ ibdesc = cmdbatch->ibdesc;
+ numibs = cmdbatch->ibcount;
+
+ if (device->state & KGSL_STATE_HUNG) {
+ result = -EINVAL;
+ goto error;
+ }
+ if (numibs != 1) {
+ KGSL_DRV_ERR(device, "Invalid number of ibs: %d\n", numibs);
+ result = -EINVAL;
+ goto error;
+ }
+ cmd = ibdesc[0].gpuaddr;
+ sizedwords = ibdesc[0].sizedwords;
+ /*
+ * Get a kernel mapping to the IB for monkey patching.
+ * See the end of this function.
+ */
+ entry = kgsl_sharedmem_find_region(dev_priv->process_priv, cmd,
+ sizedwords);
+ if (entry == NULL) {
+ KGSL_DRV_ERR(device, "Bad ibdesc: gpuaddr 0x%x size %d\n",
+ cmd, sizedwords);
+ result = -EINVAL;
+ goto error;
+ }
+ /*
+ * This will only map memory if it exists, otherwise it will reuse the
+ * mapping. And the 2d userspace reuses IBs so we likely won't create
+ * too many mappings.
+ */
+ if (kgsl_gpuaddr_to_vaddr(&entry->memdesc, cmd) == NULL) {
+ KGSL_DRV_ERR(device,
+ "Cannot make kernel mapping for gpuaddr 0x%x\n",
+ cmd);
+ result = -EINVAL;
+ goto error;
+ }
+
+ KGSL_CMD_INFO(device, "ctxt %d ibaddr 0x%08x sizedwords %d\n",
+ context->id, cmd, sizedwords);
+ /* context switch */
+ if ((context->id != (int)z180_dev->ringbuffer.prevctx) ||
+ (cmdbatch->flags & KGSL_CONTEXT_CTX_SWITCH)) {
+ KGSL_CMD_INFO(device, "context switch %d -> %d\n",
+ context->id, z180_dev->ringbuffer.prevctx);
+ kgsl_mmu_setstate(&device->mmu, pagetable,
+ KGSL_MEMSTORE_GLOBAL);
+ cnt = PACKETSIZE_STATESTREAM;
+ ofs = 0;
+ }
+
+ result = kgsl_setstate(&device->mmu,
+ KGSL_MEMSTORE_GLOBAL,
+ kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
+ device->id));
+ if (result < 0)
+ goto error;
+
+ result = wait_event_interruptible_timeout(device->wait_queue,
+ room_in_rb(z180_dev),
+ msecs_to_jiffies(KGSL_TIMEOUT_DEFAULT));
+ if (result < 0) {
+ KGSL_CMD_ERR(device, "wait_event_interruptible_timeout "
+ "failed: %ld\n", result);
+ goto error;
+ }
+ result = 0;
+
+ old_timestamp = z180_dev->current_timestamp;
+ z180_dev->current_timestamp++;
+ *timestamp = z180_dev->current_timestamp;
+
+ z180_dev->ringbuffer.prevctx = context->id;
+
+ addcmd(&z180_dev->ringbuffer, old_timestamp, cmd + ofs, cnt);
+ kgsl_pwrscale_busy(device);
+
+ /* Make sure the next ringbuffer entry has a marker */
+ addmarker(&z180_dev->ringbuffer, z180_dev->current_timestamp);
+
+ /* monkey patch the IB so that it jumps back to the ringbuffer */
+ kgsl_sharedmem_writel(device, &entry->memdesc,
+ ((sizedwords + 1) * sizeof(unsigned int)),
+ rb_gpuaddr(z180_dev, z180_dev->current_timestamp));
+ kgsl_sharedmem_writel(device, &entry->memdesc,
+ ((sizedwords + 2) * sizeof(unsigned int)),
+ nextcnt);
+
+ /* sync memory before activating the hardware for the new command*/
+ mb();
+
+ cmd = (int)(((2) & VGV3_CONTROL_MARKADD_FMASK)
+ << VGV3_CONTROL_MARKADD_FSHIFT);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
+ z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
+error:
+ kgsl_trace_issueibcmds(device, context->id, cmdbatch,
+ *timestamp, cmdbatch->flags, result, 0);
+
+ kgsl_active_count_put(device);
+
+ mutex_unlock(&device->mutex);
+
+ return (int)result;
+}
+
+static int z180_ringbuffer_init(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
+ z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+ z180_dev->ringbuffer.cmdbufdesc.flags = KGSL_MEMFLAGS_GPUREADONLY;
+ return kgsl_allocate_contiguous(&z180_dev->ringbuffer.cmdbufdesc,
+ Z180_RB_SIZE);
+}
+
+static void z180_ringbuffer_close(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ kgsl_sharedmem_free(&z180_dev->ringbuffer.cmdbufdesc);
+ memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
+}
+
+static int __devinit z180_probe(struct platform_device *pdev)
+{
+ int status = -EINVAL;
+ struct kgsl_device *device = NULL;
+ struct z180_device *z180_dev;
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+ device->parentdev = &pdev->dev;
+
+ z180_dev = Z180_DEVICE(device);
+
+ status = z180_ringbuffer_init(device);
+ if (status != 0)
+ goto error;
+
+ status = kgsl_device_platform_probe(device);
+ if (status)
+ goto error_close_ringbuffer;
+
+ kgsl_pwrscale_init(device);
+ kgsl_pwrscale_attach_policy(device, Z180_DEFAULT_PWRSCALE_POLICY);
+
+ return status;
+
+error_close_ringbuffer:
+ z180_ringbuffer_close(device);
+error:
+ device->parentdev = NULL;
+ return status;
+}
+
+static int __devexit z180_remove(struct platform_device *pdev)
+{
+ struct kgsl_device *device = NULL;
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+
+ kgsl_pwrscale_close(device);
+ kgsl_device_platform_remove(device);
+
+ z180_ringbuffer_close(device);
+
+ return 0;
+}
+
+static int z180_init(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ z180_dev->timestamp = 0;
+ z180_dev->current_timestamp = 0;
+
+ return 0;
+}
+
+static int z180_start(struct kgsl_device *device)
+{
+ int status = 0;
+
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+
+ kgsl_pwrctrl_enable(device);
+
+ /* Set interrupts to 0 to ensure a good state */
+ z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0x0);
+
+ kgsl_mh_start(device);
+
+ status = kgsl_mmu_start(device);
+ if (status)
+ goto error_clk_off;
+
+ z180_cmdstream_start(device);
+
+ mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ device->ftbl->irqctrl(device, 1);
+
+ device->reset_counter++;
+
+ return 0;
+
+error_clk_off:
+ z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
+ kgsl_pwrctrl_disable(device);
+ return status;
+}
+
+static int z180_stop(struct kgsl_device *device)
+{
+ int ret;
+
+ device->ftbl->irqctrl(device, 0);
+ ret = z180_idle(device);
+ if (ret)
+ return ret;
+
+ del_timer_sync(&device->idle_timer);
+
+ kgsl_mmu_stop(&device->mmu);
+
+ /* Disable the clocks before the power rail. */
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+ kgsl_pwrctrl_disable(device);
+
+ return 0;
+}
+
+static int z180_getproperty(struct kgsl_device *device,
+ enum kgsl_property_type type,
+ void *value,
+ unsigned int sizebytes)
+{
+ int status = -EINVAL;
+
+ switch (type) {
+ case KGSL_PROP_DEVICE_INFO:
+ {
+ struct kgsl_devinfo devinfo;
+
+ if (sizebytes != sizeof(devinfo)) {
+ status = -EINVAL;
+ break;
+ }
+
+ memset(&devinfo, 0, sizeof(devinfo));
+ devinfo.device_id = device->id+1;
+ devinfo.chip_id = 0;
+ devinfo.mmu_enabled = kgsl_mmu_enabled();
+
+ if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
+ 0) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ case KGSL_PROP_MMU_ENABLE:
+ {
+ int mmu_prop = kgsl_mmu_enabled();
+ if (sizebytes != sizeof(int)) {
+ status = -EINVAL;
+ break;
+ }
+ if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+
+ default:
+ KGSL_DRV_ERR(device, "invalid property: %d\n", type);
+ status = -EINVAL;
+ }
+ return status;
+}
+
+static bool z180_isidle(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ return (timestamp_cmp(z180_dev->timestamp,
+ z180_dev->current_timestamp) == 0) ? true : false;
+}
+
+static int z180_suspend_context(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+
+ return 0;
+}
+
+/* Not all Z180 registers are directly accessible.
+ * The _z180_(read|write)_simple functions below handle the ones that are.
+ */
+static void _z180_regread_simple(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value)
+{
+ unsigned int *reg;
+
+ BUG_ON(offsetwords * sizeof(uint32_t) >= device->reg_len);
+
+ reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
+
+ /*ensure this read finishes before the next one.
+ * i.e. act like normal readl() */
+ *value = __raw_readl(reg);
+ rmb();
+
+}
+
+static void _z180_regwrite_simple(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value)
+{
+ unsigned int *reg;
+
+ BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
+
+ reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
+ kgsl_cffdump_regwrite(device, offsetwords << 2, value);
+ /*ensure previous writes post before this one,
+ * i.e. act like normal writel() */
+ wmb();
+ __raw_writel(value, reg);
+}
+
+
+/* The MH registers must be accessed through via a 2 step write, (read|write)
+ * process. These registers may be accessed from interrupt context during
+ * the handling of MH or MMU error interrupts. Therefore a spin lock is used
+ * to ensure that the 2 step sequence is not interrupted.
+ */
+static void _z180_regread_mmu(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&z180_dev->cmdwin_lock, flags);
+ _z180_regwrite_simple(device, (ADDR_VGC_MH_READ_ADDR >> 2),
+ offsetwords);
+ _z180_regread_simple(device, (ADDR_VGC_MH_DATA_ADDR >> 2), value);
+ spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags);
+}
+
+
+static void _z180_regwrite_mmu(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ unsigned int cmdwinaddr;
+ unsigned long flags;
+
+ cmdwinaddr = ((Z180_CMDWINDOW_MMU << Z180_CMDWINDOW_TARGET_SHIFT) &
+ Z180_CMDWINDOW_TARGET_MASK);
+ cmdwinaddr |= ((offsetwords << Z180_CMDWINDOW_ADDR_SHIFT) &
+ Z180_CMDWINDOW_ADDR_MASK);
+
+ spin_lock_irqsave(&z180_dev->cmdwin_lock, flags);
+ _z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2,
+ cmdwinaddr);
+ _z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2, value);
+ spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags);
+}
+
+/* the rest of the code doesn't want to think about if it is writing mmu
+ * registers or normal registers so handle it here
+ */
+static void z180_regread(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value)
+{
+ if (!in_interrupt())
+ kgsl_pre_hwaccess(device);
+
+ if ((offsetwords >= MH_ARBITER_CONFIG &&
+ offsetwords <= MH_AXI_HALT_CONTROL) ||
+ (offsetwords >= MH_MMU_CONFIG &&
+ offsetwords <= MH_MMU_MPU_END)) {
+ _z180_regread_mmu(device, offsetwords, value);
+ } else {
+ _z180_regread_simple(device, offsetwords, value);
+ }
+}
+
+static void z180_regwrite(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value)
+{
+ if (!in_interrupt())
+ kgsl_pre_hwaccess(device);
+
+ if ((offsetwords >= MH_ARBITER_CONFIG &&
+ offsetwords <= MH_CLNT_INTF_CTRL_CONFIG2) ||
+ (offsetwords >= MH_MMU_CONFIG &&
+ offsetwords <= MH_MMU_MPU_END)) {
+ _z180_regwrite_mmu(device, offsetwords, value);
+ } else {
+ _z180_regwrite_simple(device, offsetwords, value);
+ }
+}
+
+static void z180_cmdwindow_write(struct kgsl_device *device,
+ unsigned int addr, unsigned int data)
+{
+ unsigned int cmdwinaddr;
+
+ cmdwinaddr = ((Z180_CMDWINDOW_2D << Z180_CMDWINDOW_TARGET_SHIFT) &
+ Z180_CMDWINDOW_TARGET_MASK);
+ cmdwinaddr |= ((addr << Z180_CMDWINDOW_ADDR_SHIFT) &
+ Z180_CMDWINDOW_ADDR_MASK);
+
+ z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, cmdwinaddr);
+ z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, data);
+}
+
+static unsigned int z180_readtimestamp(struct kgsl_device *device,
+ struct kgsl_context *context, enum kgsl_timestamp_type type)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ (void)context;
+ /* get current EOP timestamp */
+ return z180_dev->timestamp;
+}
+
+static int z180_waittimestamp(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
+{
+ int status = -EINVAL;
+
+ /* Don't wait forever, set a max of Z180_IDLE_TIMEOUT */
+ if (msecs == -1)
+ msecs = Z180_IDLE_TIMEOUT;
+
+ mutex_unlock(&device->mutex);
+ status = z180_wait(device, context, timestamp, msecs);
+ mutex_lock(&device->mutex);
+
+ return status;
+}
+
+static int z180_wait(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
+{
+ int status = -EINVAL;
+ long timeout = 0;
+
+ timeout = wait_io_event_interruptible_timeout(
+ device->wait_queue,
+ kgsl_check_timestamp(device, context, timestamp),
+ msecs_to_jiffies(msecs));
+
+ if (timeout > 0)
+ status = 0;
+ else if (timeout == 0) {
+ status = -ETIMEDOUT;
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
+ kgsl_postmortem_dump(device, 0);
+ } else
+ status = timeout;
+
+ return status;
+}
+
+struct kgsl_context *
+z180_drawctxt_create(struct kgsl_device_private *dev_priv,
+ uint32_t *flags)
+{
+ int ret;
+ struct kgsl_context *context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (context == NULL)
+ return ERR_PTR(-ENOMEM);
+ ret = kgsl_context_init(dev_priv, context);
+ if (ret != 0) {
+ kfree(context);
+ return ERR_PTR(ret);
+ }
+ return context;
+}
+
+static int
+z180_drawctxt_detach(struct kgsl_context *context)
+{
+ struct kgsl_device *device;
+ struct z180_device *z180_dev;
+
+ device = context->device;
+ z180_dev = Z180_DEVICE(device);
+
+ z180_idle(device);
+
+ if (z180_dev->ringbuffer.prevctx == context->id) {
+ z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+ device->mmu.hwpagetable = device->mmu.defaultpagetable;
+
+ /* Ignore the result - we are going down anyway */
+ kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
+ KGSL_MMUFLAGS_PTUPDATE);
+ }
+
+ return 0;
+}
+
+static void
+z180_drawctxt_destroy(struct kgsl_context *context)
+{
+ kfree(context);
+}
+
+static void z180_power_stats(struct kgsl_device *device,
+ struct kgsl_power_stats *stats)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ s64 tmp = ktime_to_us(ktime_get());
+
+ if (pwr->time == 0) {
+ pwr->time = tmp;
+ stats->total_time = 0;
+ stats->busy_time = 0;
+ } else {
+ stats->total_time = tmp - pwr->time;
+ pwr->time = tmp;
+ stats->busy_time = tmp - device->on_time;
+ device->on_time = tmp;
+ }
+}
+
+static void z180_irqctrl(struct kgsl_device *device, int state)
+{
+ /* Control interrupts for Z180 and the Z180 MMU */
+
+ if (state) {
+ z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 3);
+ z180_regwrite(device, MH_INTERRUPT_MASK,
+ kgsl_mmu_get_int_mask());
+ } else {
+ z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
+ z180_regwrite(device, MH_INTERRUPT_MASK, 0);
+ }
+}
+
+static unsigned int z180_gpuid(struct kgsl_device *device, unsigned int *chipid)
+{
+ if (chipid != NULL)
+ *chipid = 0;
+
+ /* Standard KGSL gpuid format:
+ * top word is 0x0002 for 2D or 0x0003 for 3D
+ * Bottom word is core specific identifer
+ */
+
+ return (0x0002 << 16) | 180;
+}
+
+static const struct kgsl_functable z180_functable = {
+ /* Mandatory functions */
+ .regread = z180_regread,
+ .regwrite = z180_regwrite,
+ .idle = z180_idle,
+ .isidle = z180_isidle,
+ .suspend_context = z180_suspend_context,
+ .init = z180_init,
+ .start = z180_start,
+ .stop = z180_stop,
+ .getproperty = z180_getproperty,
+ .waittimestamp = z180_waittimestamp,
+ .readtimestamp = z180_readtimestamp,
+ .issueibcmds = z180_cmdstream_issueibcmds,
+ .setup_pt = z180_setup_pt,
+ .cleanup_pt = z180_cleanup_pt,
+ .power_stats = z180_power_stats,
+ .irqctrl = z180_irqctrl,
+ .gpuid = z180_gpuid,
+ .irq_handler = z180_irq_handler,
+ .drain = z180_idle, /* drain == idle for the z180 */
+ /* Optional functions */
+ .drawctxt_create = z180_drawctxt_create,
+ .drawctxt_detach = z180_drawctxt_detach,
+ .drawctxt_destroy = z180_drawctxt_destroy,
+ .ioctl = NULL,
+ .postmortem_dump = z180_dump,
+};
+
+static struct platform_device_id z180_id_table[] = {
+ { DEVICE_2D0_NAME, (kernel_ulong_t)&device_2d0.dev, },
+ { DEVICE_2D1_NAME, (kernel_ulong_t)&device_2d1.dev, },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, z180_id_table);
+
+static struct platform_driver z180_platform_driver = {
+ .probe = z180_probe,
+ .remove = __devexit_p(z180_remove),
+ .suspend = kgsl_suspend_driver,
+ .resume = kgsl_resume_driver,
+ .id_table = z180_id_table,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVICE_2D_NAME,
+ .pm = &kgsl_pm_ops,
+ }
+};
+
+static int __init kgsl_2d_init(void)
+{
+ return platform_driver_register(&z180_platform_driver);
+}
+
+static void __exit kgsl_2d_exit(void)
+{
+ platform_driver_unregister(&z180_platform_driver);
+}
+
+module_init(kgsl_2d_init);
+module_exit(kgsl_2d_exit);
+
+MODULE_DESCRIPTION("2D Graphics driver");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kgsl_2d");
diff --git a/drivers/gpu/msm2/z180.h b/drivers/gpu/msm2/z180.h
new file mode 100644
index 0000000..a36e92d
--- /dev/null
+++ b/drivers/gpu/msm2/z180.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __Z180_H
+#define __Z180_H
+
+#include "kgsl_device.h"
+
+#define DEVICE_2D_NAME "kgsl-2d"
+#define DEVICE_2D0_NAME "kgsl-2d0"
+#define DEVICE_2D1_NAME "kgsl-2d1"
+
+#define Z180_PACKET_SIZE 15
+#define Z180_PACKET_COUNT 8
+#define Z180_RB_SIZE (Z180_PACKET_SIZE*Z180_PACKET_COUNT \
+ *sizeof(uint32_t))
+#define Z180_DEVICE(device) \
+ KGSL_CONTAINER_OF(device, struct z180_device, dev)
+
+#define Z180_DEFAULT_PWRSCALE_POLICY NULL
+
+/* Wait a maximum of 10 seconds when trying to idle the core */
+#define Z180_IDLE_TIMEOUT (20 * 1000)
+
+struct z180_ringbuffer {
+ unsigned int prevctx;
+ struct kgsl_memdesc cmdbufdesc;
+};
+
+struct z180_device {
+ struct kgsl_device dev; /* Must be first field in this struct */
+ int current_timestamp;
+ int timestamp;
+ struct z180_ringbuffer ringbuffer;
+ spinlock_t cmdwin_lock;
+};
+
+int z180_dump(struct kgsl_device *, int);
+int z180_idle(struct kgsl_device *);
+
+#endif /* __Z180_H */
diff --git a/drivers/gpu/msm2/z180_postmortem.c b/drivers/gpu/msm2/z180_postmortem.c
new file mode 100644
index 0000000..bc53c0e
--- /dev/null
+++ b/drivers/gpu/msm2/z180_postmortem.c
@@ -0,0 +1,234 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+#include "z180.h"
+#include "z180_reg.h"
+
+#define Z180_STREAM_PACKET_CALL 0x7C000275
+
+/* Postmortem Dump formatted Output parameters */
+
+/* Number of Words per dump data line */
+#define WORDS_PER_LINE 8
+
+/* Number of spaces per dump data line */
+#define NUM_SPACES (WORDS_PER_LINE - 1)
+
+/*
+ * Output dump data is formatted as string, hence number of chars
+ * per line for line string allocation
+ */
+#define CHARS_PER_LINE \
+ ((WORDS_PER_LINE * (2*sizeof(unsigned int))) + NUM_SPACES + 1)
+
+/* Z180 registers (byte offsets) to be dumped */
+static const unsigned int regs_to_dump[] = {
+ ADDR_VGC_VERSION,
+ ADDR_VGC_SYSSTATUS,
+ ADDR_VGC_IRQSTATUS,
+ ADDR_VGC_IRQENABLE,
+ ADDR_VGC_IRQ_ACTIVE_CNT,
+ ADDR_VGC_CLOCKEN,
+ ADDR_VGC_MH_DATA_ADDR,
+ ADDR_VGC_GPR0,
+ ADDR_VGC_GPR1,
+ ADDR_VGC_BUSYCNT,
+ ADDR_VGC_FIFOFREE,
+};
+
+/**
+ * z180_dump_regs - Dumps all of Z180 external registers. Prints the word offset
+ * of the register in each output line.
+ * @device: kgsl_device pointer to the Z180 core
+ */
+static void z180_dump_regs(struct kgsl_device *device)
+{
+ unsigned int i;
+ unsigned int reg_val;
+
+ z180_idle(device);
+
+ KGSL_LOG_DUMP(device, "Z180 Register Dump\n");
+ for (i = 0; i < ARRAY_SIZE(regs_to_dump); i++) {
+ kgsl_regread(device,
+ regs_to_dump[i]/sizeof(unsigned int), ®_val);
+ KGSL_LOG_DUMP(device, "REG: %04X: %08X\n",
+ regs_to_dump[i]/sizeof(unsigned int), reg_val);
+ }
+}
+
+/**
+ * z180_dump_ringbuffer - Dumps the Z180 core's ringbuffer contents
+ * @device: kgsl_device pointer to the z180 core
+ */
+static void z180_dump_ringbuffer(struct kgsl_device *device)
+{
+ unsigned int rb_size;
+ unsigned int *rb_hostptr;
+ unsigned int rb_words;
+ unsigned int rb_gpuaddr;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ unsigned int i;
+ char linebuf[CHARS_PER_LINE];
+
+ KGSL_LOG_DUMP(device, "Z180 ringbuffer dump\n");
+
+ rb_hostptr = (unsigned int *) z180_dev->ringbuffer.cmdbufdesc.hostptr;
+
+ rb_size = Z180_RB_SIZE;
+ rb_gpuaddr = z180_dev->ringbuffer.cmdbufdesc.gpuaddr;
+
+ rb_words = rb_size/sizeof(unsigned int);
+
+ KGSL_LOG_DUMP(device, "ringbuffer size: %u\n", rb_size);
+
+ KGSL_LOG_DUMP(device, "rb_words: %d\n", rb_words);
+
+ for (i = 0; i < rb_words; i += WORDS_PER_LINE) {
+ hex_dump_to_buffer(rb_hostptr+i,
+ rb_size - i*sizeof(unsigned int),
+ WORDS_PER_LINE*sizeof(unsigned int),
+ sizeof(unsigned int), linebuf,
+ sizeof(linebuf), false);
+ KGSL_LOG_DUMP(device, "RB: %04X: %s\n",
+ rb_gpuaddr + i*sizeof(unsigned int), linebuf);
+ }
+}
+
+
+static void z180_dump_ib(struct kgsl_device *device)
+{
+ unsigned int rb_size;
+ unsigned int *rb_hostptr;
+ unsigned int rb_words;
+ unsigned int rb_gpuaddr;
+ unsigned int ib_gpuptr = 0;
+ unsigned int ib_size = 0;
+ void *ib_hostptr = NULL;
+ int rb_slot_num = -1;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ struct kgsl_mem_entry *entry = NULL;
+ phys_addr_t pt_base;
+ unsigned int i;
+ unsigned int j;
+ char linebuf[CHARS_PER_LINE];
+ unsigned int current_ib_slot;
+ unsigned int len;
+ unsigned int rowsize;
+ KGSL_LOG_DUMP(device, "Z180 IB dump\n");
+
+ rb_hostptr = (unsigned int *) z180_dev->ringbuffer.cmdbufdesc.hostptr;
+
+ rb_size = Z180_RB_SIZE;
+ rb_gpuaddr = z180_dev->ringbuffer.cmdbufdesc.gpuaddr;
+
+ rb_words = rb_size/sizeof(unsigned int);
+
+ KGSL_LOG_DUMP(device, "Ringbuffer size (bytes): %u\n", rb_size);
+
+ KGSL_LOG_DUMP(device, "rb_words: %d\n", rb_words);
+
+ pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
+
+ /* Dump the current IB */
+ for (i = 0; i < rb_words; i++) {
+ if (rb_hostptr[i] == Z180_STREAM_PACKET_CALL) {
+
+ rb_slot_num++;
+ current_ib_slot =
+ z180_dev->current_timestamp % Z180_PACKET_COUNT;
+ if (rb_slot_num != current_ib_slot)
+ continue;
+
+ ib_gpuptr = rb_hostptr[i+1];
+
+ entry = kgsl_get_mem_entry(device, pt_base, ib_gpuptr,
+ 1);
+
+ if (entry == NULL) {
+ KGSL_LOG_DUMP(device,
+ "IB mem entry not found for ringbuffer slot#: %d\n",
+ rb_slot_num);
+ continue;
+ }
+
+ ib_hostptr = kgsl_memdesc_map(&entry->memdesc);
+
+ if (ib_hostptr == NULL) {
+ KGSL_LOG_DUMP(device,
+ "Could not map IB to kernel memory, Ringbuffer Slot: %d\n",
+ rb_slot_num);
+ kgsl_mem_entry_put(entry);
+ continue;
+ }
+
+ ib_size = entry->memdesc.size;
+ KGSL_LOG_DUMP(device,
+ "IB size: %dbytes, IB size in words: %d\n",
+ ib_size,
+ ib_size/sizeof(unsigned int));
+
+ for (j = 0; j < ib_size; j += WORDS_PER_LINE) {
+ len = ib_size - j*sizeof(unsigned int);
+ rowsize = WORDS_PER_LINE*sizeof(unsigned int);
+ hex_dump_to_buffer(ib_hostptr+j, len, rowsize,
+ sizeof(unsigned int), linebuf,
+ sizeof(linebuf), false);
+ KGSL_LOG_DUMP(device, "IB%d: %04X: %s\n",
+ rb_slot_num,
+ (rb_gpuaddr +
+ j*sizeof(unsigned int)),
+ linebuf);
+ }
+ KGSL_LOG_DUMP(device, "IB Dump Finished\n");
+ kgsl_mem_entry_put(entry);
+ }
+ }
+}
+
+
+/**
+ * z180_dump - Dumps the Z180 ringbuffer and registers (and IBs if asked for)
+ * for postmortem
+ * analysis.
+ * @device: kgsl_device pointer to the Z180 core
+ */
+int z180_dump(struct kgsl_device *device, int manual)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ mb();
+
+ KGSL_LOG_DUMP(device, "Retired Timestamp: %d\n", z180_dev->timestamp);
+ KGSL_LOG_DUMP(device,
+ "Current Timestamp: %d\n", z180_dev->current_timestamp);
+
+ /* Dump ringbuffer */
+ z180_dump_ringbuffer(device);
+
+ /* Dump registers */
+ z180_dump_regs(device);
+
+ /* Dump IBs, if asked for */
+ if (device->pm_ib_enabled)
+ z180_dump_ib(device);
+
+ /* Get the stack trace if the dump was automatic */
+ if (!manual)
+ BUG_ON(1);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/msm2/z180_reg.h b/drivers/gpu/msm2/z180_reg.h
new file mode 100644
index 0000000..81f1fdc
--- /dev/null
+++ b/drivers/gpu/msm2/z180_reg.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __Z80_REG_H
+#define __Z80_REG_H
+
+#define REG_VGC_IRQSTATUS__MH_MASK 0x00000001L
+#define REG_VGC_IRQSTATUS__G2D_MASK 0x00000002L
+#define REG_VGC_IRQSTATUS__FIFO_MASK 0x00000004L
+
+#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
+#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
+#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
+#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
+#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
+#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
+#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
+#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
+#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
+#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
+#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
+#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
+
+#define ADDR_VGC_MH_READ_ADDR 0x0510
+#define ADDR_VGC_MH_DATA_ADDR 0x0518
+#define ADDR_VGC_COMMANDSTREAM 0x0000
+#define ADDR_VGC_IRQENABLE 0x0438
+#define ADDR_VGC_IRQSTATUS 0x0418
+#define ADDR_VGC_IRQ_ACTIVE_CNT 0x04E0
+#define ADDR_VGC_MMUCOMMANDSTREAM 0x03FC
+#define ADDR_VGV3_CONTROL 0x0070
+#define ADDR_VGV3_LAST 0x007F
+#define ADDR_VGV3_MODE 0x0071
+#define ADDR_VGV3_NEXTADDR 0x0075
+#define ADDR_VGV3_NEXTCMD 0x0076
+#define ADDR_VGV3_WRITEADDR 0x0072
+#define ADDR_VGC_VERSION 0x400
+#define ADDR_VGC_SYSSTATUS 0x410
+#define ADDR_VGC_CLOCKEN 0x508
+#define ADDR_VGC_GPR0 0x520
+#define ADDR_VGC_GPR1 0x528
+#define ADDR_VGC_BUSYCNT 0x530
+#define ADDR_VGC_FIFOFREE 0x7c0
+
+#endif /* __Z180_REG_H */
diff --git a/drivers/gpu/msm2/z180_trace.c b/drivers/gpu/msm2/z180_trace.c
new file mode 100644
index 0000000..9d971ee
--- /dev/null
+++ b/drivers/gpu/msm2/z180_trace.c
@@ -0,0 +1,20 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "z180.h"
+#include "z180_reg.h"
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "z180_trace.h"
diff --git a/drivers/gpu/msm2/z180_trace.h b/drivers/gpu/msm2/z180_trace.h
new file mode 100644
index 0000000..4f65b9b
--- /dev/null
+++ b/drivers/gpu/msm2/z180_trace.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_Z180_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _Z180_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE z180_trace
+
+#include <linux/tracepoint.h>
+
+struct kgsl_device;
+
+/*
+ * Tracepoint for z180 irq. Includes status info
+ */
+TRACE_EVENT(kgsl_z180_irq_status,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int status),
+
+ TP_ARGS(device, status),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->status = status;
+ ),
+
+ TP_printk(
+ "d_name=%s status=%s",
+ __get_str(device_name),
+ __entry->status ? __print_flags(__entry->status, "|",
+ { REG_VGC_IRQSTATUS__MH_MASK, "MH" },
+ { REG_VGC_IRQSTATUS__G2D_MASK, "G2D" },
+ { REG_VGC_IRQSTATUS__FIFO_MASK, "FIFO" }) : "None"
+ )
+);
+
+#endif /* _Z180_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>