Initial commit from HTC m7ul-3.4.10-jb-crc-ddcfb8c
diff --git a/drivers/gpu/msm/Kconfig b/drivers/gpu/msm/Kconfig
new file mode 100644
index 0000000..0b293a7
--- /dev/null
+++ b/drivers/gpu/msm/Kconfig
@@ -0,0 +1,123 @@
+config MSM_KGSL
+ tristate "MSM 3D Graphics driver"
+ default n
+ depends on ARCH_MSM && !ARCH_MSM7X00A && !ARCH_MSM7X25
+ select GENERIC_ALLOCATOR
+ select FW_LOADER
+ ---help---
+ 3D graphics driver. Required to use hardware accelerated
+ OpenGL ES 2.0 and 1.1.
+
+config MSM_KGSL_CFF_DUMP
+ bool "Enable KGSL Common File Format (CFF) Dump Feature [Use with caution]"
+ default n
+ depends on MSM_KGSL
+ select RELAY
+ ---help---
+ This is an analysis and diagnostic feature only, and should only be
+ turned on during KGSL GPU diagnostics; it will slow down KGSL
+ performance significantly, hence *do not use in production builds*.
+ When enabled, CFF Dump is on at boot. It can be turned off at runtime
+ via 'echo 0 > /d/kgsl/cff_dump'. The log can be captured via
+ /d/kgsl-cff/cpu[0|1].
+
+config MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
+ bool "When selected will disable KGSL CFF Dump for context switches"
+ default n
+ depends on MSM_KGSL_CFF_DUMP
+ ---help---
+ Dumping all the memory for every context switch can produce very
+ large log files; turn this feature on to reduce their size.
+
+config MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+ bool "Disable human readable CP_STAT fields in post-mortem dump"
+ default n
+ depends on MSM_KGSL
+ ---help---
+ For a more compact kernel log the human readable output of
+ CP_STAT can be turned off with this option.
+
+config MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
+ bool "Disable dumping current IB1 and IB2 in post-mortem dump"
+ default n
+ depends on MSM_KGSL
+ ---help---
+ For a more compact kernel log the IB1 and IB2 embedded dump
+ can be turned off with this option. Some IB dumps take up
+ so much space that other vital information gets cut from the
+ post-mortem dump.
+
+config MSM_KGSL_PSTMRTMDMP_RB_HEX
+ bool "Use hex version for ring-buffer in post-mortem dump"
+ default n
+ depends on MSM_KGSL
+ ---help---
+ Use hex version for the ring-buffer in the post-mortem dump, instead
+ of the human readable version.
+
+config MSM_KGSL_2D
+ tristate "MSM 2D graphics driver. Required for OpenVG"
+ default y
+ depends on MSM_KGSL && !ARCH_MSM7X27 && !ARCH_MSM7X27A && !(ARCH_QSD8X50 && !MSM_SOC_REV_A)
+
+config MSM_KGSL_DRM
+ bool "Build a DRM interface for the MSM_KGSL driver"
+ depends on MSM_KGSL && DRM
+
+config KGSL_PER_PROCESS_PAGE_TABLE
+ bool "Enable Per Process page tables for the KGSL driver"
+ default n
+ depends on !MSM_KGSL_DRM
+ ---help---
+ The MMU will use per process pagetables when enabled.
+
+config MSM_KGSL_PAGE_TABLE_SIZE
+ hex "Size of pagetables"
+ default 0xFFF0000
+ ---help---
+ Sets the pagetable size used by the MMU. The max value
+ is 0xFFF0000 or (256M - 64K).
+
+config MSM_KGSL_PAGE_TABLE_SIZE_FOR_IOMMU
+ hex "Size of pagetables for iommu"
+ default 0x1FE00000
+ ---help---
+ Sets the pagetable size used by the IOMMU. The max value
+ is 0x1FE00000 or (512M - 1536K - a small reserved interval).
+
+config MSM_KGSL_PAGE_TABLE_COUNT
+ int "Minimum of concurrent pagetables to support"
+ default 8
+ depends on KGSL_PER_PROCESS_PAGE_TABLE
+ ---help---
+ Specify the number of pagetables to allocate at init time.
+ This is the number of concurrent processes that are guaranteed
+ to run at any time. Additional processes can be created
+ dynamically, assuming there is enough contiguous memory to
+ allocate the pagetables.
+
+config MSM_KGSL_MMU_PAGE_FAULT
+ bool "Force the GPU MMU to page fault for unmapped regions"
+ default y
+
+config MSM_KGSL_DISABLE_SHADOW_WRITES
+ bool "Disable register shadow writes for context switches"
+ default n
+ depends on MSM_KGSL
+
+config MSM_KGSL_GPU_USAGE
+ bool "Enable sysfs node of GPU usage per process"
+ default n
+
+config MSM_KGSL_DEFAULT_GPUMMU
+ bool "Prefer gpummu than iommu"
+ default n
+
+config MSM_KGSL_GPU_USAGE_SYSTRACE
+ bool "Enable kgsl_usage node for ftrace gpu usage event"
+ default y
+
+config MSM_KGSL_KILL_HANG_PROCESS
+ bool "Enable killing recoverable gpu hang process routine"
+ default y
+ ---help---
+ We only enable this config in the CRC branch.
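
Each symbol above surfaces in driver C code as a CONFIG_* preprocessor macro
(emitted into autoconf.h at build time). As a minimal illustrative sketch,
not part of this patch, and with kgsl_pt_size() as a hypothetical helper,
the hex option could be consumed like this:

    /* Sketch only: a Kconfig "hex" symbol becomes a plain macro. */
    static unsigned int kgsl_pt_size(void)
    {
    #ifdef CONFIG_MSM_KGSL_PAGE_TABLE_SIZE
            return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;  /* e.g. 0xFFF0000 */
    #else
            return SZ_64K;  /* hypothetical fallback; SZ_64K is from <linux/sizes.h> */
    #endif
    }
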
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
new file mode 100644
index 0000000..6cdb5f1
--- /dev/null
+++ b/drivers/gpu/msm/Makefile
@@ -0,0 +1,46 @@
+ccflags-y := -Iinclude/drm -Idrivers/gpu/msm
+
+msm_kgsl_core-y = \
+ kgsl.o \
+ kgsl_trace.o \
+ kgsl_sharedmem.o \
+ kgsl_pwrctrl.o \
+ kgsl_pwrscale.o \
+ kgsl_mmu.o \
+ kgsl_gpummu.o \
+ kgsl_iommu.o \
+ kgsl_snapshot.o
+
+msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
+msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
+msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
+msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS_DEVICE) += kgsl_pwrscale_idlestats.o
+msm_kgsl_core-$(CONFIG_MSM_DCVS) += kgsl_pwrscale_msm.o
+
+msm_adreno-y += \
+ adreno_ringbuffer.o \
+ adreno_drawctxt.o \
+ adreno_postmortem.o \
+ adreno_snapshot.o \
+ adreno_a2xx.o \
+ adreno_a2xx_trace.o \
+ adreno_a2xx_snapshot.o \
+ adreno_a3xx.o \
+ adreno_a3xx_trace.o \
+ adreno_a3xx_snapshot.o \
+ adreno.o
+
+msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
+
+msm_z180-y += \
+ z180.o \
+ z180_trace.o
+
+msm_kgsl_core-objs = $(msm_kgsl_core-y)
+msm_adreno-objs = $(msm_adreno-y)
+msm_z180-objs = $(msm_z180-y)
+
+obj-$(CONFIG_MSM_KGSL) += msm_kgsl_core.o
+obj-$(CONFIG_MSM_KGSL) += msm_adreno.o
+obj-$(CONFIG_MSM_KGSL_2D) += msm_z180.o
diff --git a/drivers/gpu/msm/a2xx_reg.h b/drivers/gpu/msm/a2xx_reg.h
new file mode 100644
index 0000000..bde8784
--- /dev/null
+++ b/drivers/gpu/msm/a2xx_reg.h
@@ -0,0 +1,435 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A200_REG_H
+#define __A200_REG_H
+
+enum VGT_EVENT_TYPE {
+ VS_DEALLOC = 0,
+ PS_DEALLOC = 1,
+ VS_DONE_TS = 2,
+ PS_DONE_TS = 3,
+ CACHE_FLUSH_TS = 4,
+ CONTEXT_DONE = 5,
+ CACHE_FLUSH = 6,
+ VIZQUERY_START = 7,
+ VIZQUERY_END = 8,
+ SC_WAIT_WC = 9,
+ RST_PIX_CNT = 13,
+ RST_VTX_CNT = 14,
+ TILE_FLUSH = 15,
+ CACHE_FLUSH_AND_INV_TS_EVENT = 20,
+ ZPASS_DONE = 21,
+ CACHE_FLUSH_AND_INV_EVENT = 22,
+ PERFCOUNTER_START = 23,
+ PERFCOUNTER_STOP = 24,
+ VS_FETCH_DONE = 27,
+ FACENESS_FLUSH = 28,
+};
+
+enum COLORFORMATX {
+ COLORX_4_4_4_4 = 0,
+ COLORX_1_5_5_5 = 1,
+ COLORX_5_6_5 = 2,
+ COLORX_8 = 3,
+ COLORX_8_8 = 4,
+ COLORX_8_8_8_8 = 5,
+ COLORX_S8_8_8_8 = 6,
+ COLORX_16_FLOAT = 7,
+ COLORX_16_16_FLOAT = 8,
+ COLORX_16_16_16_16_FLOAT = 9,
+ COLORX_32_FLOAT = 10,
+ COLORX_32_32_FLOAT = 11,
+ COLORX_32_32_32_32_FLOAT = 12,
+ COLORX_2_3_3 = 13,
+ COLORX_8_8_8 = 14,
+};
+
+enum SURFACEFORMAT {
+ FMT_1_REVERSE = 0,
+ FMT_1 = 1,
+ FMT_8 = 2,
+ FMT_1_5_5_5 = 3,
+ FMT_5_6_5 = 4,
+ FMT_6_5_5 = 5,
+ FMT_8_8_8_8 = 6,
+ FMT_2_10_10_10 = 7,
+ FMT_8_A = 8,
+ FMT_8_B = 9,
+ FMT_8_8 = 10,
+ FMT_Cr_Y1_Cb_Y0 = 11,
+ FMT_Y1_Cr_Y0_Cb = 12,
+ FMT_5_5_5_1 = 13,
+ FMT_8_8_8_8_A = 14,
+ FMT_4_4_4_4 = 15,
+ FMT_10_11_11 = 16,
+ FMT_11_11_10 = 17,
+ FMT_DXT1 = 18,
+ FMT_DXT2_3 = 19,
+ FMT_DXT4_5 = 20,
+ FMT_24_8 = 22,
+ FMT_24_8_FLOAT = 23,
+ FMT_16 = 24,
+ FMT_16_16 = 25,
+ FMT_16_16_16_16 = 26,
+ FMT_16_EXPAND = 27,
+ FMT_16_16_EXPAND = 28,
+ FMT_16_16_16_16_EXPAND = 29,
+ FMT_16_FLOAT = 30,
+ FMT_16_16_FLOAT = 31,
+ FMT_16_16_16_16_FLOAT = 32,
+ FMT_32 = 33,
+ FMT_32_32 = 34,
+ FMT_32_32_32_32 = 35,
+ FMT_32_FLOAT = 36,
+ FMT_32_32_FLOAT = 37,
+ FMT_32_32_32_32_FLOAT = 38,
+ FMT_32_AS_8 = 39,
+ FMT_32_AS_8_8 = 40,
+ FMT_16_MPEG = 41,
+ FMT_16_16_MPEG = 42,
+ FMT_8_INTERLACED = 43,
+ FMT_32_AS_8_INTERLACED = 44,
+ FMT_32_AS_8_8_INTERLACED = 45,
+ FMT_16_INTERLACED = 46,
+ FMT_16_MPEG_INTERLACED = 47,
+ FMT_16_16_MPEG_INTERLACED = 48,
+ FMT_DXN = 49,
+ FMT_8_8_8_8_AS_16_16_16_16 = 50,
+ FMT_DXT1_AS_16_16_16_16 = 51,
+ FMT_DXT2_3_AS_16_16_16_16 = 52,
+ FMT_DXT4_5_AS_16_16_16_16 = 53,
+ FMT_2_10_10_10_AS_16_16_16_16 = 54,
+ FMT_10_11_11_AS_16_16_16_16 = 55,
+ FMT_11_11_10_AS_16_16_16_16 = 56,
+ FMT_32_32_32_FLOAT = 57,
+ FMT_DXT3A = 58,
+ FMT_DXT5A = 59,
+ FMT_CTX1 = 60,
+ FMT_DXT3A_AS_1_1_1_1 = 61
+};
+
+#define REG_PERF_MODE_CNT 0x0
+#define REG_PERF_STATE_RESET 0x0
+#define REG_PERF_STATE_ENABLE 0x1
+#define REG_PERF_STATE_FREEZE 0x2
+
+#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE 4
+#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE 2
+#define RB_EDRAM_INFO_UNUSED0_SIZE 8
+#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE 18
+
+struct rb_edram_info_t {
+ unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
+ unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
+ unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
+ unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
+};
+
+union reg_rb_edram_info {
+ unsigned int val;
+ struct rb_edram_info_t f;
+};
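
This struct/union pair is the register-overlay idiom used throughout the
header: the bit-field struct names the fields of the 32-bit RB_EDRAM_INFO
register, and the union converts between the raw value and the decoded view.
A minimal sketch (decode_edram_size() is a hypothetical helper; bit-field
layout is compiler-dependent, which is tolerable here because the driver
targets a single toolchain):

    unsigned int decode_edram_size(unsigned int raw)
    {
            union reg_rb_edram_info info;

            info.val = raw;                 /* raw register read */
            return info.f.edram_size;       /* low 4 bits */
    }
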
+
+#define RBBM_READ_ERROR_ADDRESS_MASK 0x0001fffc
+#define RBBM_READ_ERROR_REQUESTER (1<<30)
+#define RBBM_READ_ERROR_ERROR (1<<31)
+
+#define CP_RB_CNTL_RB_BUFSZ_SIZE 6
+#define CP_RB_CNTL_UNUSED0_SIZE 2
+#define CP_RB_CNTL_RB_BLKSZ_SIZE 6
+#define CP_RB_CNTL_UNUSED1_SIZE 2
+#define CP_RB_CNTL_BUF_SWAP_SIZE 2
+#define CP_RB_CNTL_UNUSED2_SIZE 2
+#define CP_RB_CNTL_RB_POLL_EN_SIZE 1
+#define CP_RB_CNTL_UNUSED3_SIZE 6
+#define CP_RB_CNTL_RB_NO_UPDATE_SIZE 1
+#define CP_RB_CNTL_UNUSED4_SIZE 3
+#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE 1
+
+struct cp_rb_cntl_t {
+ unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
+ unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
+ unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
+ unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
+ unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
+ unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
+ unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
+ unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
+ unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
+ unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
+ unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
+};
+
+union reg_cp_rb_cntl {
+ unsigned int val:32;
+ struct cp_rb_cntl_t f;
+};
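
The same idiom works in the write direction: a CP_RB_CNTL value can be
composed field by field and then written out as a single word. A sketch with
hypothetical field values (the real ringbuffer code derives the log2 sizes
from the allocated buffer):

    unsigned int build_rb_cntl(void)
    {
            union reg_cp_rb_cntl cntl;

            cntl.val = 0;
            cntl.f.rb_bufsz = 10;     /* hypothetical: log2 ring size */
            cntl.f.rb_blksz = 7;      /* hypothetical: report interval */
            cntl.f.rb_no_update = 0;  /* let the CP update the rptr copy */
            return cntl.val;
    }
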
+
+#define RB_COLOR_INFO__COLOR_FORMAT_MASK 0x0000000fL
+#define RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT 0x00000004
+
+
+#define SQ_INT_CNTL__PS_WATCHDOG_MASK 0x00000001L
+#define SQ_INT_CNTL__VS_WATCHDOG_MASK 0x00000002L
+
+#define RBBM_INT_CNTL__RDERR_INT_MASK 0x00000001L
+#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK 0x00000002L
+#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK 0x00080000L
+
+#define RBBM_STATUS__CMDFIFO_AVAIL_MASK 0x0000001fL
+#define RBBM_STATUS__TC_BUSY_MASK 0x00000020L
+#define RBBM_STATUS__HIRQ_PENDING_MASK 0x00000100L
+#define RBBM_STATUS__CPRQ_PENDING_MASK 0x00000200L
+#define RBBM_STATUS__CFRQ_PENDING_MASK 0x00000400L
+#define RBBM_STATUS__PFRQ_PENDING_MASK 0x00000800L
+#define RBBM_STATUS__VGT_BUSY_NO_DMA_MASK 0x00001000L
+#define RBBM_STATUS__RBBM_WU_BUSY_MASK 0x00004000L
+#define RBBM_STATUS__CP_NRT_BUSY_MASK 0x00010000L
+#define RBBM_STATUS__MH_BUSY_MASK 0x00040000L
+#define RBBM_STATUS__MH_COHERENCY_BUSY_MASK 0x00080000L
+#define RBBM_STATUS__SX_BUSY_MASK 0x00200000L
+#define RBBM_STATUS__TPC_BUSY_MASK 0x00400000L
+#define RBBM_STATUS__SC_CNTX_BUSY_MASK 0x01000000L
+#define RBBM_STATUS__PA_BUSY_MASK 0x02000000L
+#define RBBM_STATUS__VGT_BUSY_MASK 0x04000000L
+#define RBBM_STATUS__SQ_CNTX17_BUSY_MASK 0x08000000L
+#define RBBM_STATUS__SQ_CNTX0_BUSY_MASK 0x10000000L
+#define RBBM_STATUS__RB_CNTX_BUSY_MASK 0x40000000L
+#define RBBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
+
+#define CP_INT_CNTL__SW_INT_MASK 0x00080000L
+#define CP_INT_CNTL__T0_PACKET_IN_IB_MASK 0x00800000L
+#define CP_INT_CNTL__OPCODE_ERROR_MASK 0x01000000L
+#define CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK 0x02000000L
+#define CP_INT_CNTL__RESERVED_BIT_ERROR_MASK 0x04000000L
+#define CP_INT_CNTL__IB_ERROR_MASK 0x08000000L
+#define CP_INT_CNTL__IB2_INT_MASK 0x20000000L
+#define CP_INT_CNTL__IB1_INT_MASK 0x40000000L
+#define CP_INT_CNTL__RB_INT_MASK 0x80000000L
+
+#define MASTER_INT_SIGNAL__MH_INT_STAT 0x00000020L
+#define MASTER_INT_SIGNAL__SQ_INT_STAT 0x04000000L
+#define MASTER_INT_SIGNAL__CP_INT_STAT 0x40000000L
+#define MASTER_INT_SIGNAL__RBBM_INT_STAT 0x80000000L
+
+#define RB_EDRAM_INFO__EDRAM_SIZE_MASK 0x0000000fL
+#define RB_EDRAM_INFO__EDRAM_RANGE_MASK 0xffffc000L
+
+#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
+#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
+#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
+#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
+#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
+#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
+#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
+#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
+#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
+#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
+#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
+#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
+
+#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
+#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
+#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014
+#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
+
+#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT 0x00000000
+#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT 0x00000004
+#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT 0x0000000e
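
The *__SHIFT constants pair with the *_MASK constants defined earlier for
open-coded field access where the bit-field structs are not used. Given a
raw RB_EDRAM_INFO read in a variable named raw, a sketch:

    /* edram_range occupies bits [31:14] of RB_EDRAM_INFO */
    unsigned int range = (raw & RB_EDRAM_INFO__EDRAM_RANGE_MASK) >>
                            RB_EDRAM_INFO__EDRAM_RANGE__SHIFT;
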
+
+#define REG_CP_CSQ_IB1_STAT 0x01FE
+#define REG_CP_CSQ_IB2_STAT 0x01FF
+#define REG_CP_CSQ_RB_STAT 0x01FD
+#define REG_CP_DEBUG 0x01FC
+#define REG_CP_IB1_BASE 0x0458
+#define REG_CP_IB1_BUFSZ 0x0459
+#define REG_CP_IB2_BASE 0x045A
+#define REG_CP_IB2_BUFSZ 0x045B
+#define REG_CP_INT_ACK 0x01F4
+#define REG_CP_INT_CNTL 0x01F2
+#define REG_CP_INT_STATUS 0x01F3
+#define REG_CP_ME_CNTL 0x01F6
+#define REG_CP_ME_RAM_DATA 0x01FA
+#define REG_CP_ME_RAM_WADDR 0x01F8
+#define REG_CP_ME_RAM_RADDR 0x01F9
+#define REG_CP_ME_STATUS 0x01F7
+#define REG_CP_PFP_UCODE_ADDR 0x00C0
+#define REG_CP_PFP_UCODE_DATA 0x00C1
+#define REG_CP_QUEUE_THRESHOLDS 0x01D5
+#define REG_CP_RB_BASE 0x01C0
+#define REG_CP_RB_CNTL 0x01C1
+#define REG_CP_RB_RPTR 0x01C4
+#define REG_CP_RB_RPTR_ADDR 0x01C3
+#define REG_CP_RB_RPTR_WR 0x01C7
+#define REG_CP_RB_WPTR 0x01C5
+#define REG_CP_RB_WPTR_BASE 0x01C8
+#define REG_CP_RB_WPTR_DELAY 0x01C6
+#define REG_CP_STAT 0x047F
+#define REG_CP_STATE_DEBUG_DATA 0x01ED
+#define REG_CP_STATE_DEBUG_INDEX 0x01EC
+#define REG_CP_ST_BASE 0x044D
+#define REG_CP_ST_BUFSZ 0x044E
+
+#define REG_CP_PERFMON_CNTL 0x0444
+#define REG_CP_PERFCOUNTER_SELECT 0x0445
+#define REG_CP_PERFCOUNTER_LO 0x0446
+#define REG_CP_PERFCOUNTER_HI 0x0447
+
+#define REG_RBBM_PERFCOUNTER1_SELECT 0x0395
+#define REG_RBBM_PERFCOUNTER1_HI 0x0398
+#define REG_RBBM_PERFCOUNTER1_LO 0x0397
+
+#define REG_MASTER_INT_SIGNAL 0x03B7
+
+#define REG_PA_CL_VPORT_XSCALE 0x210F
+#define REG_PA_CL_VPORT_ZOFFSET 0x2114
+#define REG_PA_CL_VPORT_ZSCALE 0x2113
+#define REG_PA_CL_CLIP_CNTL 0x2204
+#define REG_PA_CL_VTE_CNTL 0x2206
+#define REG_PA_SC_AA_MASK 0x2312
+#define REG_PA_SC_LINE_CNTL 0x2300
+#define REG_PA_SC_SCREEN_SCISSOR_BR 0x200F
+#define REG_PA_SC_SCREEN_SCISSOR_TL 0x200E
+#define REG_PA_SC_VIZ_QUERY 0x2293
+#define REG_PA_SC_VIZ_QUERY_STATUS 0x0C44
+#define REG_PA_SC_WINDOW_OFFSET 0x2080
+#define REG_PA_SC_WINDOW_SCISSOR_BR 0x2082
+#define REG_PA_SC_WINDOW_SCISSOR_TL 0x2081
+#define REG_PA_SU_FACE_DATA 0x0C86
+#define REG_PA_SU_POINT_SIZE 0x2280
+#define REG_PA_SU_LINE_CNTL 0x2282
+#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383
+#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380
+#define REG_PA_SU_SC_MODE_CNTL 0x2205
+
+#define REG_PC_INDEX_OFFSET 0x2102
+
+#define REG_RBBM_CNTL 0x003B
+#define REG_RBBM_INT_ACK 0x03B6
+#define REG_RBBM_INT_CNTL 0x03B4
+#define REG_RBBM_INT_STATUS 0x03B5
+#define REG_RBBM_PATCH_RELEASE 0x0001
+#define REG_RBBM_PERIPHID1 0x03F9
+#define REG_RBBM_PERIPHID2 0x03FA
+#define REG_RBBM_DEBUG 0x039B
+#define REG_RBBM_DEBUG_OUT 0x03A0
+#define REG_RBBM_DEBUG_CNTL 0x03A1
+#define REG_RBBM_PM_OVERRIDE1 0x039C
+#define REG_RBBM_PM_OVERRIDE2 0x039D
+#define REG_RBBM_READ_ERROR 0x03B3
+#define REG_RBBM_SOFT_RESET 0x003C
+#define REG_RBBM_STATUS 0x05D0
+
+#define REG_RB_COLORCONTROL 0x2202
+#define REG_RB_COLOR_DEST_MASK 0x2326
+#define REG_RB_COLOR_MASK 0x2104
+#define REG_RB_COPY_CONTROL 0x2318
+#define REG_RB_DEPTHCONTROL 0x2200
+#define REG_RB_EDRAM_INFO 0x0F02
+#define REG_RB_MODECONTROL 0x2208
+#define REG_RB_SURFACE_INFO 0x2000
+#define REG_RB_SAMPLE_POS 0x220a
+
+#define REG_SCRATCH_ADDR 0x01DD
+#define REG_SCRATCH_REG0 0x0578
+#define REG_SCRATCH_REG2 0x057A
+#define REG_SCRATCH_UMSK 0x01DC
+
+#define REG_SQ_CF_BOOLEANS 0x4900
+#define REG_SQ_CF_LOOP 0x4908
+#define REG_SQ_GPR_MANAGEMENT 0x0D00
+#define REG_SQ_FLOW_CONTROL 0x0D01
+#define REG_SQ_INST_STORE_MANAGMENT 0x0D02
+#define REG_SQ_INT_ACK 0x0D36
+#define REG_SQ_INT_CNTL 0x0D34
+#define REG_SQ_INT_STATUS 0x0D35
+#define REG_SQ_PROGRAM_CNTL 0x2180
+#define REG_SQ_PS_PROGRAM 0x21F6
+#define REG_SQ_VS_PROGRAM 0x21F7
+#define REG_SQ_WRAPPING_0 0x2183
+#define REG_SQ_WRAPPING_1 0x2184
+
+#define REG_VGT_ENHANCE 0x2294
+#define REG_VGT_INDX_OFFSET 0x2102
+#define REG_VGT_MAX_VTX_INDX 0x2100
+#define REG_VGT_MIN_VTX_INDX 0x2101
+
+#define REG_TP0_CHICKEN 0x0E1E
+#define REG_TC_CNTL_STATUS 0x0E00
+#define REG_PA_SC_AA_CONFIG 0x2301
+#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL 0x2316
+#define REG_SQ_INTERPOLATOR_CNTL 0x2182
+#define REG_RB_DEPTH_INFO 0x2002
+#define REG_COHER_DEST_BASE_0 0x2006
+#define REG_RB_FOG_COLOR 0x2109
+#define REG_RB_STENCILREFMASK_BF 0x210C
+#define REG_PA_SC_LINE_STIPPLE 0x2283
+#define REG_SQ_PS_CONST 0x2308
+#define REG_RB_DEPTH_CLEAR 0x231D
+#define REG_RB_SAMPLE_COUNT_CTL 0x2324
+#define REG_SQ_CONSTANT_0 0x4000
+#define REG_SQ_FETCH_0 0x4800
+
+#define REG_COHER_BASE_PM4 0xA2A
+#define REG_COHER_STATUS_PM4 0xA2B
+#define REG_COHER_SIZE_PM4 0xA29
+
+#define REG_A220_PC_INDX_OFFSET REG_VGT_INDX_OFFSET
+#define REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
+#define REG_A220_PC_MAX_VTX_INDX REG_VGT_MAX_VTX_INDX
+#define REG_A220_RB_LRZ_VSC_CONTROL 0x2209
+#define REG_A220_GRAS_CONTROL 0x2210
+#define REG_A220_VSC_BIN_SIZE 0x0C01
+#define REG_A220_VSC_PIPE_DATA_LENGTH_7 0x0C1D
+
+#define REG_A225_RB_COLOR_INFO3 0x2005
+#define REG_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x2103
+#define REG_A225_GRAS_UCP0X 0x2340
+#define REG_A225_GRAS_UCP5W 0x2357
+#define REG_A225_GRAS_UCP_ENABLED 0x2360
+
+#define REG_PA_SU_DEBUG_CNTL 0x0C80
+#define REG_PA_SU_DEBUG_DATA 0x0C81
+#define REG_RB_DEBUG_CNTL 0x0F26
+#define REG_RB_DEBUG_DATA 0x0F27
+#define REG_PC_DEBUG_CNTL 0x0C38
+#define REG_PC_DEBUG_DATA 0x0C39
+#define REG_GRAS_DEBUG_CNTL 0x0C80
+#define REG_GRAS_DEBUG_DATA 0x0C81
+#define REG_SQ_DEBUG_MISC 0x0D05
+#define REG_SQ_DEBUG_INPUT_FSM 0x0DAE
+#define REG_SQ_DEBUG_CONST_MGR_FSM 0x0DAF
+#define REG_SQ_DEBUG_EXP_ALLOC 0x0DB3
+#define REG_SQ_DEBUG_FSM_ALU_0 0x0DB1
+#define REG_SQ_DEBUG_FSM_ALU_1 0x0DB2
+#define REG_SQ_DEBUG_PTR_BUFF 0x0DB4
+#define REG_SQ_DEBUG_GPR_VTX 0x0DB5
+#define REG_SQ_DEBUG_GPR_PIX 0x0DB6
+#define REG_SQ_DEBUG_TB_STATUS_SEL 0x0DB7
+#define REG_SQ_DEBUG_VTX_TB_0 0x0DB8
+#define REG_SQ_DEBUG_VTX_TB_1 0x0DB9
+#define REG_SQ_DEBUG_VTX_TB_STATE_MEM 0x0DBB
+#define REG_SQ_DEBUG_TP_FSM 0x0DB0
+#define REG_SQ_DEBUG_VTX_TB_STATUS_REG 0x0DBA
+#define REG_SQ_DEBUG_PIX_TB_0 0x0DBC
+#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x0DBD
+#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x0DBE
+#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x0DBF
+#define REG_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x0DC0
+#define REG_SQ_DEBUG_PIX_TB_STATE_MEM 0x0DC1
+#define REG_SQ_DEBUG_MISC_0 0x2309
+#define REG_SQ_DEBUG_MISC_1 0x230A
+
+#endif
diff --git a/drivers/gpu/msm/a3xx_reg.h b/drivers/gpu/msm/a3xx_reg.h
new file mode 100644
index 0000000..77bd1d0
--- /dev/null
+++ b/drivers/gpu/msm/a3xx_reg.h
@@ -0,0 +1,510 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _A300_REG_H
+#define _A300_REG_H
+
+
+#define A3XX_INT_RBBM_GPU_IDLE 0
+#define A3XX_INT_RBBM_AHB_ERROR 1
+#define A3XX_INT_RBBM_REG_TIMEOUT 2
+#define A3XX_INT_RBBM_ME_MS_TIMEOUT 3
+#define A3XX_INT_RBBM_PFP_MS_TIMEOUT 4
+#define A3XX_INT_RBBM_ATB_BUS_OVERFLOW 5
+#define A3XX_INT_VFD_ERROR 6
+#define A3XX_INT_CP_SW_INT 7
+#define A3XX_INT_CP_T0_PACKET_IN_IB 8
+#define A3XX_INT_CP_OPCODE_ERROR 9
+#define A3XX_INT_CP_RESERVED_BIT_ERROR 10
+#define A3XX_INT_CP_HW_FAULT 11
+#define A3XX_INT_CP_DMA 12
+#define A3XX_INT_CP_IB2_INT 13
+#define A3XX_INT_CP_IB1_INT 14
+#define A3XX_INT_CP_RB_INT 15
+#define A3XX_INT_CP_REG_PROTECT_FAULT 16
+#define A3XX_INT_CP_RB_DONE_TS 17
+#define A3XX_INT_CP_VS_DONE_TS 18
+#define A3XX_INT_CP_PS_DONE_TS 19
+#define A3XX_INT_CACHE_FLUSH_TS 20
+#define A3XX_INT_CP_AHB_ERROR_HALT 21
+#define A3XX_INT_MISC_HANG_DETECT 24
+#define A3XX_INT_UCHE_OOB_ACCESS 25
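
Unlike the A2XX *_MASK constants, these A3XX_INT_* values are bit positions
in the RBBM_INT_0_MASK/RBBM_INT_0_STATUS registers, so enable masks are
built by shifting. A sketch only; the actual set of interrupts the driver
unmasks is chosen in adreno_a3xx.c:

    #define A3XX_INT_MASK_EXAMPLE \
            ((1 << A3XX_INT_RBBM_AHB_ERROR) | \
             (1 << A3XX_INT_CP_OPCODE_ERROR) | \
             (1 << A3XX_INT_CP_RB_INT))
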
+
+
+#define A3XX_RBBM_HW_VERSION 0x000
+#define A3XX_RBBM_HW_RELEASE 0x001
+#define A3XX_RBBM_HW_CONFIGURATION 0x002
+#define A3XX_RBBM_CLOCK_CTL 0x010
+#define A3XX_RBBM_SP_HYST_CNT 0x012
+#define A3XX_RBBM_SW_RESET_CMD 0x018
+#define A3XX_RBBM_AHB_CTL0 0x020
+#define A3XX_RBBM_AHB_CTL1 0x021
+#define A3XX_RBBM_AHB_CMD 0x022
+#define A3XX_RBBM_AHB_ERROR_STATUS 0x027
+#define A3XX_RBBM_GPR0_CTL 0x02E
+#define A3XX_RBBM_STATUS 0x030
+#define A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x33
+#define A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x50
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x51
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x54
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x57
+#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x5A
+#define A3XX_RBBM_INT_CLEAR_CMD 0x061
+#define A3XX_RBBM_INT_0_MASK 0x063
+#define A3XX_RBBM_INT_0_STATUS 0x064
+#define A3XX_RBBM_GPU_BUSY_MASKED 0x88
+#define A3XX_RBBM_RBBM_CTL 0x100
+#define A3XX_RBBM_PERFCTR_PWR_1_LO 0x0EC
+#define A3XX_RBBM_PERFCTR_PWR_1_HI 0x0ED
+#define A3XX_RBBM_DEBUG_BUS_CTL 0x111
+#define A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x112
+#define A3XX_CP_PFP_UCODE_ADDR 0x1C9
+#define A3XX_CP_PFP_UCODE_DATA 0x1CA
+#define A3XX_CP_ROQ_ADDR 0x1CC
+#define A3XX_CP_ROQ_DATA 0x1CD
+#define A3XX_CP_MEQ_ADDR 0x1DA
+#define A3XX_CP_MEQ_DATA 0x1DB
+#define A3XX_CP_HW_FAULT 0x45C
+#define A3XX_CP_AHB_FAULT 0x54D
+#define A3XX_CP_PROTECT_CTRL 0x45E
+#define A3XX_CP_PROTECT_STATUS 0x45F
+#define A3XX_CP_PROTECT_REG_0 0x460
+#define A3XX_CP_PROTECT_REG_1 0x461
+#define A3XX_CP_PROTECT_REG_2 0x462
+#define A3XX_CP_PROTECT_REG_3 0x463
+#define A3XX_CP_PROTECT_REG_4 0x464
+#define A3XX_CP_PROTECT_REG_5 0x465
+#define A3XX_CP_PROTECT_REG_6 0x466
+#define A3XX_CP_PROTECT_REG_7 0x467
+#define A3XX_CP_PROTECT_REG_8 0x468
+#define A3XX_CP_PROTECT_REG_9 0x469
+#define A3XX_CP_PROTECT_REG_A 0x46A
+#define A3XX_CP_PROTECT_REG_B 0x46B
+#define A3XX_CP_PROTECT_REG_C 0x46C
+#define A3XX_CP_PROTECT_REG_D 0x46D
+#define A3XX_CP_PROTECT_REG_E 0x46E
+#define A3XX_CP_PROTECT_REG_F 0x46F
+#define A3XX_CP_SCRATCH_REG2 0x57A
+#define A3XX_CP_SCRATCH_REG3 0x57B
+#define A3XX_VSC_BIN_SIZE 0xC01
+#define A3XX_VSC_SIZE_ADDRESS 0xC02
+#define A3XX_VSC_PIPE_CONFIG_0 0xC06
+#define A3XX_VSC_PIPE_DATA_ADDRESS_0 0xC07
+#define A3XX_VSC_PIPE_DATA_LENGTH_0 0xC08
+#define A3XX_VSC_PIPE_CONFIG_1 0xC09
+#define A3XX_VSC_PIPE_DATA_ADDRESS_1 0xC0A
+#define A3XX_VSC_PIPE_DATA_LENGTH_1 0xC0B
+#define A3XX_VSC_PIPE_CONFIG_2 0xC0C
+#define A3XX_VSC_PIPE_DATA_ADDRESS_2 0xC0D
+#define A3XX_VSC_PIPE_DATA_LENGTH_2 0xC0E
+#define A3XX_VSC_PIPE_CONFIG_3 0xC0F
+#define A3XX_VSC_PIPE_DATA_ADDRESS_3 0xC10
+#define A3XX_VSC_PIPE_DATA_LENGTH_3 0xC11
+#define A3XX_VSC_PIPE_CONFIG_4 0xC12
+#define A3XX_VSC_PIPE_DATA_ADDRESS_4 0xC13
+#define A3XX_VSC_PIPE_DATA_LENGTH_4 0xC14
+#define A3XX_VSC_PIPE_CONFIG_5 0xC15
+#define A3XX_VSC_PIPE_DATA_ADDRESS_5 0xC16
+#define A3XX_VSC_PIPE_DATA_LENGTH_5 0xC17
+#define A3XX_VSC_PIPE_CONFIG_6 0xC18
+#define A3XX_VSC_PIPE_DATA_ADDRESS_6 0xC19
+#define A3XX_VSC_PIPE_DATA_LENGTH_6 0xC1A
+#define A3XX_VSC_PIPE_CONFIG_7 0xC1B
+#define A3XX_VSC_PIPE_DATA_ADDRESS_7 0xC1C
+#define A3XX_VSC_PIPE_DATA_LENGTH_7 0xC1D
+#define A3XX_GRAS_CL_USER_PLANE_X0 0xCA0
+#define A3XX_GRAS_CL_USER_PLANE_Y0 0xCA1
+#define A3XX_GRAS_CL_USER_PLANE_Z0 0xCA2
+#define A3XX_GRAS_CL_USER_PLANE_W0 0xCA3
+#define A3XX_GRAS_CL_USER_PLANE_X1 0xCA4
+#define A3XX_GRAS_CL_USER_PLANE_Y1 0xCA5
+#define A3XX_GRAS_CL_USER_PLANE_Z1 0xCA6
+#define A3XX_GRAS_CL_USER_PLANE_W1 0xCA7
+#define A3XX_GRAS_CL_USER_PLANE_X2 0xCA8
+#define A3XX_GRAS_CL_USER_PLANE_Y2 0xCA9
+#define A3XX_GRAS_CL_USER_PLANE_Z2 0xCAA
+#define A3XX_GRAS_CL_USER_PLANE_W2 0xCAB
+#define A3XX_GRAS_CL_USER_PLANE_X3 0xCAC
+#define A3XX_GRAS_CL_USER_PLANE_Y3 0xCAD
+#define A3XX_GRAS_CL_USER_PLANE_Z3 0xCAE
+#define A3XX_GRAS_CL_USER_PLANE_W3 0xCAF
+#define A3XX_GRAS_CL_USER_PLANE_X4 0xCB0
+#define A3XX_GRAS_CL_USER_PLANE_Y4 0xCB1
+#define A3XX_GRAS_CL_USER_PLANE_Z4 0xCB2
+#define A3XX_GRAS_CL_USER_PLANE_W4 0xCB3
+#define A3XX_GRAS_CL_USER_PLANE_X5 0xCB4
+#define A3XX_GRAS_CL_USER_PLANE_Y5 0xCB5
+#define A3XX_GRAS_CL_USER_PLANE_Z5 0xCB6
+#define A3XX_GRAS_CL_USER_PLANE_W5 0xCB7
+#define A3XX_VFD_PERFCOUNTER0_SELECT 0xE44
+#define A3XX_VPC_VPC_DEBUG_RAM_SEL 0xE61
+#define A3XX_VPC_VPC_DEBUG_RAM_READ 0xE62
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG 0xEA0
+#define A3XX_GRAS_CL_CLIP_CNTL 0x2040
+#define A3XX_GRAS_CL_GB_CLIP_ADJ 0x2044
+#define A3XX_GRAS_CL_VPORT_XOFFSET 0x2048
+#define A3XX_GRAS_CL_VPORT_ZOFFSET 0x204C
+#define A3XX_GRAS_CL_VPORT_ZSCALE 0x204D
+#define A3XX_GRAS_SU_POINT_MINMAX 0x2068
+#define A3XX_GRAS_SU_POINT_SIZE 0x2069
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x206C
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x206D
+#define A3XX_GRAS_SU_MODE_CONTROL 0x2070
+#define A3XX_GRAS_SC_CONTROL 0x2072
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x2074
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x2075
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x2079
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x207A
+#define A3XX_RB_MODE_CONTROL 0x20C0
+#define A3XX_RB_RENDER_CONTROL 0x20C1
+#define A3XX_RB_MSAA_CONTROL 0x20C2
+#define A3XX_RB_MRT_CONTROL0 0x20C4
+#define A3XX_RB_MRT_BUF_INFO0 0x20C5
+#define A3XX_RB_MRT_BLEND_CONTROL0 0x20C7
+#define A3XX_RB_MRT_BLEND_CONTROL1 0x20CB
+#define A3XX_RB_MRT_BLEND_CONTROL2 0x20CF
+#define A3XX_RB_MRT_BLEND_CONTROL3 0x20D3
+#define A3XX_RB_BLEND_RED 0x20E4
+#define A3XX_RB_COPY_CONTROL 0x20EC
+#define A3XX_RB_COPY_DEST_INFO 0x20EF
+#define A3XX_RB_DEPTH_CONTROL 0x2100
+#define A3XX_RB_STENCIL_CONTROL 0x2104
+#define A3XX_PC_VSTREAM_CONTROL 0x21E4
+#define A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x21EA
+#define A3XX_PC_PRIM_VTX_CNTL 0x21EC
+#define A3XX_PC_RESTART_INDEX 0x21ED
+#define A3XX_HLSQ_CONTROL_0_REG 0x2200
+#define A3XX_HLSQ_VS_CONTROL_REG 0x2204
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x2207
+#define A3XX_HLSQ_CL_NDRANGE_0_REG 0x220A
+#define A3XX_HLSQ_CL_NDRANGE_2_REG 0x220C
+#define A3XX_HLSQ_CL_CONTROL_0_REG 0x2211
+#define A3XX_HLSQ_CL_CONTROL_1_REG 0x2212
+#define A3XX_HLSQ_CL_KERNEL_CONST_REG 0x2214
+#define A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x2215
+#define A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x2217
+#define A3XX_HLSQ_CL_WG_OFFSET_REG 0x221A
+#define A3XX_VFD_CONTROL_0 0x2240
+#define A3XX_VFD_INDEX_MIN 0x2242
+#define A3XX_VFD_INDEX_MAX 0x2243
+#define A3XX_VFD_FETCH_INSTR_0_0 0x2246
+#define A3XX_VFD_FETCH_INSTR_0_4 0x224E
+#define A3XX_VFD_FETCH_INSTR_1_F 0x2265
+#define A3XX_VFD_DECODE_INSTR_0 0x2266
+#define A3XX_VFD_VS_THREADING_THRESHOLD 0x227E
+#define A3XX_VPC_ATTR 0x2280
+#define A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x228B
+#define A3XX_SP_SP_CTRL_REG 0x22C0
+#define A3XX_SP_VS_CTRL_REG0 0x22C4
+#define A3XX_SP_VS_CTRL_REG1 0x22C5
+#define A3XX_SP_VS_PARAM_REG 0x22C6
+#define A3XX_SP_VS_OUT_REG_7 0x22CE
+#define A3XX_SP_VS_VPC_DST_REG_0 0x22D0
+#define A3XX_SP_VS_OBJ_OFFSET_REG 0x22D4
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG 0x22D7
+#define A3XX_SP_VS_PVT_MEM_SIZE_REG 0x22D8
+#define A3XX_SP_VS_LENGTH_REG 0x22DF
+#define A3XX_SP_FS_CTRL_REG0 0x22E0
+#define A3XX_SP_FS_CTRL_REG1 0x22E1
+#define A3XX_SP_FS_OBJ_OFFSET_REG 0x22E2
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG 0x22E5
+#define A3XX_SP_FS_PVT_MEM_SIZE_REG 0x22E6
+#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x22E8
+#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x22E9
+#define A3XX_SP_FS_OUTPUT_REG 0x22EC
+#define A3XX_SP_FS_MRT_REG_0 0x22F0
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_0 0x22F4
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_3 0x22F7
+#define A3XX_SP_FS_LENGTH_REG 0x22FF
+#define A3XX_TPL1_TP_VS_TEX_OFFSET 0x2340
+#define A3XX_TPL1_TP_FS_TEX_OFFSET 0x2342
+#define A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x2343
+#define A3XX_VBIF_FIXED_SORT_EN 0x300C
+#define A3XX_VBIF_FIXED_SORT_SEL0 0x300D
+#define A3XX_VBIF_FIXED_SORT_SEL1 0x300E
+#define A3XX_VBIF_ABIT_SORT 0x301C
+#define A3XX_VBIF_ABIT_SORT_CONF 0x301D
+#define A3XX_VBIF_GATE_OFF_WRREQ_EN 0x302A
+#define A3XX_VBIF_IN_RD_LIM_CONF0 0x302C
+#define A3XX_VBIF_IN_RD_LIM_CONF1 0x302D
+#define A3XX_VBIF_IN_WR_LIM_CONF0 0x3030
+#define A3XX_VBIF_IN_WR_LIM_CONF1 0x3031
+#define A3XX_VBIF_OUT_RD_LIM_CONF0 0x3034
+#define A3XX_VBIF_OUT_WR_LIM_CONF0 0x3035
+#define A3XX_VBIF_DDR_OUT_MAX_BURST 0x3036
+#define A3XX_VBIF_ARB_CTL 0x303C
+#define A3XX_VBIF_OUT_AXI_AOOO_EN 0x305E
+#define A3XX_VBIF_OUT_AXI_AOOO 0x305F
+
+#define RBBM_RBBM_CTL_RESET_PWR_CTR1 (1 << 1)
+#define RBBM_RBBM_CTL_ENABLE_PWR_CTR1 (1 << 17)
+
+
+#define SP_MULTI 0
+#define SP_BUFFER_MODE 1
+#define SP_TWO_VTX_QUADS 0
+#define SP_PIXEL_BASED 0
+#define SP_R8G8B8A8_UNORM 8
+#define SP_FOUR_PIX_QUADS 1
+
+#define HLSQ_DIRECT 0
+#define HLSQ_BLOCK_ID_SP_VS 4
+#define HLSQ_SP_VS_INSTR 0
+#define HLSQ_SP_FS_INSTR 0
+#define HLSQ_BLOCK_ID_SP_FS 6
+#define HLSQ_TWO_PIX_QUADS 0
+#define HLSQ_TWO_VTX_QUADS 0
+#define HLSQ_BLOCK_ID_TP_TEX 2
+#define HLSQ_TP_TEX_SAMPLERS 0
+#define HLSQ_TP_TEX_MEMOBJ 1
+#define HLSQ_BLOCK_ID_TP_MIPMAP 3
+#define HLSQ_TP_MIPMAP_BASE 1
+#define HLSQ_FOUR_PIX_QUADS 1
+
+#define RB_FACTOR_ONE 1
+#define RB_BLEND_OP_ADD 0
+#define RB_FACTOR_ZERO 0
+#define RB_DITHER_DISABLE 0
+#define RB_DITHER_ALWAYS 1
+#define RB_FRAG_NEVER 0
+#define RB_ENDIAN_NONE 0
+#define RB_R8G8B8A8_UNORM 8
+#define RB_RESOLVE_PASS 2
+#define RB_CLEAR_MODE_RESOLVE 1
+#define RB_TILINGMODE_LINEAR 0
+#define RB_REF_NEVER 0
+#define RB_FRAG_LESS 1
+#define RB_REF_ALWAYS 7
+#define RB_STENCIL_KEEP 0
+#define RB_RENDERING_PASS 0
+#define RB_TILINGMODE_32X32 2
+
+#define PC_DRAW_TRIANGLES 2
+#define PC_DI_PT_RECTLIST 8
+#define PC_DI_SRC_SEL_AUTO_INDEX 2
+#define PC_DI_INDEX_SIZE_16_BIT 0
+#define PC_DI_IGNORE_VISIBILITY 0
+#define PC_DI_PT_TRILIST 4
+#define PC_DI_SRC_SEL_IMMEDIATE 1
+#define PC_DI_INDEX_SIZE_32_BIT 1
+
+#define UCHE_ENTIRE_CACHE 1
+#define UCHE_OP_INVALIDATE 1
+
+
+#define GRAS_CL_CLIP_CNTL_CLIP_DISABLE 16
+#define GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 12
+#define GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 21
+#define GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 19
+#define GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 20
+#define GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 17
+#define GRAS_CL_VPORT_XSCALE_VPORT_XSCALE 0
+#define GRAS_CL_VPORT_YSCALE_VPORT_YSCALE 0
+#define GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE 0
+#define GRAS_SC_CONTROL_RASTER_MODE 12
+#define GRAS_SC_CONTROL_RENDER_MODE 4
+#define GRAS_SC_SCREEN_SCISSOR_BR_BR_X 0
+#define GRAS_SC_SCREEN_SCISSOR_BR_BR_Y 16
+#define GRAS_SC_WINDOW_SCISSOR_BR_BR_X 0
+#define GRAS_SC_WINDOW_SCISSOR_BR_BR_Y 16
+#define GRAS_SU_CTRLMODE_LINEHALFWIDTH 3
+#define HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY 16
+#define HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY 0
+#define HLSQ_CTRL0REG_CHUNKDISABLE 26
+#define HLSQ_CTRL0REG_CONSTSWITCHMODE 27
+#define HLSQ_CTRL0REG_FSSUPERTHREADENABLE 6
+#define HLSQ_CTRL0REG_FSTHREADSIZE 4
+#define HLSQ_CTRL0REG_LAZYUPDATEDISABLE 28
+#define HLSQ_CTRL0REG_RESERVED2 10
+#define HLSQ_CTRL0REG_SPCONSTFULLUPDATE 29
+#define HLSQ_CTRL0REG_SPSHADERRESTART 9
+#define HLSQ_CTRL0REG_TPFULLUPDATE 30
+#define HLSQ_CTRL1REG_RESERVED1 9
+#define HLSQ_CTRL1REG_VSSUPERTHREADENABLE 8
+#define HLSQ_CTRL1REG_VSTHREADSIZE 6
+#define HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD 26
+#define HLSQ_FSCTRLREG_FSCONSTLENGTH 0
+#define HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET 12
+#define HLSQ_FSCTRLREG_FSINSTRLENGTH 24
+#define HLSQ_VSCTRLREG_VSINSTRLENGTH 24
+#define PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE 8
+#define PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE 5
+#define PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST 25
+#define PC_PRIM_VTX_CONTROL_STRIDE_IN_VPC 0
+#define PC_DRAW_INITIATOR_PRIM_TYPE 0
+#define PC_DRAW_INITIATOR_SOURCE_SELECT 6
+#define PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE 9
+#define PC_DRAW_INITIATOR_INDEX_SIZE 0x0B
+#define PC_DRAW_INITIATOR_SMALL_INDEX 0x0D
+#define PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x0E
+#define RB_COPYCONTROL_COPY_GMEM_BASE 14
+#define RB_COPYCONTROL_RESOLVE_CLEAR_MODE 4
+#define RB_COPYDESTBASE_COPY_DEST_BASE 4
+#define RB_COPYDESTINFO_COPY_COMPONENT_ENABLE 14
+#define RB_COPYDESTINFO_COPY_DEST_ENDIAN 18
+#define RB_COPYDESTINFO_COPY_DEST_FORMAT 2
+#define RB_COPYDESTINFO_COPY_DEST_TILE 0
+#define RB_COPYDESTPITCH_COPY_DEST_PITCH 0
+#define RB_DEPTHCONTROL_Z_TEST_FUNC 4
+#define RB_MODECONTROL_RENDER_MODE 8
+#define RB_MODECONTROL_MARB_CACHE_SPLIT_MODE 15
+#define RB_MODECONTROL_PACKER_TIMER_ENABLE 16
+#define RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE 21
+#define RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR 24
+#define RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR 16
+#define RB_MRTBLENDCONTROL_CLAMP_ENABLE 29
+#define RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE 5
+#define RB_MRTBLENDCONTROL_RGB_DEST_FACTOR 8
+#define RB_MRTBLENDCONTROL_RGB_SRC_FACTOR 0
+#define RB_MRTBUFBASE_COLOR_BUF_BASE 4
+#define RB_MRTBUFINFO_COLOR_BUF_PITCH 17
+#define RB_MRTBUFINFO_COLOR_FORMAT 0
+#define RB_MRTBUFINFO_COLOR_TILE_MODE 6
+#define RB_MRTCONTROL_COMPONENT_ENABLE 24
+#define RB_MRTCONTROL_DITHER_MODE 12
+#define RB_MRTCONTROL_READ_DEST_ENABLE 3
+#define RB_MRTCONTROL_ROP_CODE 8
+#define RB_MSAACONTROL_MSAA_DISABLE 10
+#define RB_MSAACONTROL_SAMPLE_MASK 16
+#define RB_RENDERCONTROL_ALPHA_TEST_FUNC 24
+#define RB_RENDERCONTROL_BIN_WIDTH 4
+#define RB_RENDERCONTROL_DISABLE_COLOR_PIPE 12
+#define RB_STENCILCONTROL_STENCIL_FAIL 11
+#define RB_STENCILCONTROL_STENCIL_FAIL_BF 23
+#define RB_STENCILCONTROL_STENCIL_FUNC 8
+#define RB_STENCILCONTROL_STENCIL_FUNC_BF 20
+#define RB_STENCILCONTROL_STENCIL_ZFAIL 17
+#define RB_STENCILCONTROL_STENCIL_ZFAIL_BF 29
+#define RB_STENCILCONTROL_STENCIL_ZPASS 14
+#define RB_STENCILCONTROL_STENCIL_ZPASS_BF 26
+#define SP_FSCTRLREG0_FSFULLREGFOOTPRINT 10
+#define SP_FSCTRLREG0_FSHALFREGFOOTPRINT 4
+#define SP_FSCTRLREG0_FSICACHEINVALID 2
+#define SP_FSCTRLREG0_FSINOUTREGOVERLAP 18
+#define SP_FSCTRLREG0_FSINSTRBUFFERMODE 1
+#define SP_FSCTRLREG0_FSLENGTH 24
+#define SP_FSCTRLREG0_FSSUPERTHREADMODE 21
+#define SP_FSCTRLREG0_FSTHREADMODE 0
+#define SP_FSCTRLREG0_FSTHREADSIZE 20
+#define SP_FSCTRLREG0_PIXLODENABLE 22
+#define SP_FSCTRLREG1_FSCONSTLENGTH 0
+#define SP_FSCTRLREG1_FSINITIALOUTSTANDING 20
+#define SP_FSCTRLREG1_HALFPRECVAROFFSET 24
+#define SP_FSMRTREG_REGID 0
+#define SP_FSMRTREG_PRECISION 8
+#define SP_FSOUTREG_PAD0 2
+#define SP_IMAGEOUTPUTREG_MRTFORMAT 0
+#define SP_IMAGEOUTPUTREG_DEPTHOUTMODE 3
+#define SP_IMAGEOUTPUTREG_PAD0 6
+#define SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET 16
+#define SP_OBJOFFSETREG_SHADEROBJOFFSETINIC 25
+#define SP_SHADERLENGTH_LEN 0
+#define SP_SPCTRLREG_CONSTMODE 18
+#define SP_SPCTRLREG_LOMODE 22
+#define SP_SPCTRLREG_SLEEPMODE 20
+#define SP_VSCTRLREG0_VSFULLREGFOOTPRINT 10
+#define SP_VSCTRLREG0_VSICACHEINVALID 2
+#define SP_VSCTRLREG0_VSINSTRBUFFERMODE 1
+#define SP_VSCTRLREG0_VSLENGTH 24
+#define SP_VSCTRLREG0_VSSUPERTHREADMODE 21
+#define SP_VSCTRLREG0_VSTHREADMODE 0
+#define SP_VSCTRLREG0_VSTHREADSIZE 20
+#define SP_VSCTRLREG1_VSINITIALOUTSTANDING 24
+#define SP_VSOUTREG_COMPMASK0 9
+#define SP_VSPARAMREG_POSREGID 0
+#define SP_VSPARAMREG_PSIZEREGID 8
+#define SP_VSPARAMREG_TOTALVSOUTVAR 20
+#define SP_VSVPCDSTREG_OUTLOC0 0
+#define TPL1_TPTEXOFFSETREG_BASETABLEPTR 16
+#define TPL1_TPTEXOFFSETREG_MEMOBJOFFSET 8
+#define TPL1_TPTEXOFFSETREG_SAMPLEROFFSET 0
+#define UCHE_INVALIDATE1REG_OPCODE 0x1C
+#define UCHE_INVALIDATE1REG_ALLORPORTION 0x1F
+#define VFD_BASEADDR_BASEADDR 0
+#define VFD_CTRLREG0_PACKETSIZE 18
+#define VFD_CTRLREG0_STRMDECINSTRCNT 22
+#define VFD_CTRLREG0_STRMFETCHINSTRCNT 27
+#define VFD_CTRLREG0_TOTALATTRTOVS 0
+#define VFD_CTRLREG1_MAXSTORAGE 0
+#define VFD_CTRLREG1_REGID4INST 24
+#define VFD_CTRLREG1_REGID4VTX 16
+#define VFD_DECODEINSTRUCTIONS_CONSTFILL 4
+#define VFD_DECODEINSTRUCTIONS_FORMAT 6
+#define VFD_DECODEINSTRUCTIONS_LASTCOMPVALID 29
+#define VFD_DECODEINSTRUCTIONS_REGID 12
+#define VFD_DECODEINSTRUCTIONS_SHIFTCNT 24
+#define VFD_DECODEINSTRUCTIONS_SWITCHNEXT 30
+#define VFD_DECODEINSTRUCTIONS_WRITEMASK 0
+#define VFD_FETCHINSTRUCTIONS_BUFSTRIDE 7
+#define VFD_FETCHINSTRUCTIONS_FETCHSIZE 0
+#define VFD_FETCHINSTRUCTIONS_INDEXDECODE 18
+#define VFD_FETCHINSTRUCTIONS_STEPRATE 24
+#define VFD_FETCHINSTRUCTIONS_SWITCHNEXT 17
+#define VFD_THREADINGTHRESHOLD_REGID_VTXCNT 8
+#define VFD_THREADINGTHRESHOLD_REGID_THRESHOLD 0
+#define VFD_THREADINGTHRESHOLD_RESERVED6 4
+#define VPC_VPCATTR_LMSIZE 28
+#define VPC_VPCATTR_THRHDASSIGN 12
+#define VPC_VPCATTR_TOTALATTR 0
+#define VPC_VPCPACK_NUMFPNONPOSVAR 8
+#define VPC_VPCPACK_NUMNONPOSVSVAR 16
+#define VPC_VPCVARPSREPLMODE_COMPONENT08 0
+#define VPC_VPCVARPSREPLMODE_COMPONENT09 2
+#define VPC_VPCVARPSREPLMODE_COMPONENT0A 4
+#define VPC_VPCVARPSREPLMODE_COMPONENT0B 6
+#define VPC_VPCVARPSREPLMODE_COMPONENT0C 8
+#define VPC_VPCVARPSREPLMODE_COMPONENT0D 10
+#define VPC_VPCVARPSREPLMODE_COMPONENT0E 12
+#define VPC_VPCVARPSREPLMODE_COMPONENT0F 14
+#define VPC_VPCVARPSREPLMODE_COMPONENT10 16
+#define VPC_VPCVARPSREPLMODE_COMPONENT11 18
+#define VPC_VPCVARPSREPLMODE_COMPONENT12 20
+#define VPC_VPCVARPSREPLMODE_COMPONENT13 22
+#define VPC_VPCVARPSREPLMODE_COMPONENT14 24
+#define VPC_VPCVARPSREPLMODE_COMPONENT15 26
+#define VPC_VPCVARPSREPLMODE_COMPONENT16 28
+#define VPC_VPCVARPSREPLMODE_COMPONENT17 30
+
+#define RBBM_BLOCK_ID_NONE 0x0
+#define RBBM_BLOCK_ID_CP 0x1
+#define RBBM_BLOCK_ID_RBBM 0x2
+#define RBBM_BLOCK_ID_VBIF 0x3
+#define RBBM_BLOCK_ID_HLSQ 0x4
+#define RBBM_BLOCK_ID_UCHE 0x5
+#define RBBM_BLOCK_ID_PC 0x8
+#define RBBM_BLOCK_ID_VFD 0x9
+#define RBBM_BLOCK_ID_VPC 0xa
+#define RBBM_BLOCK_ID_TSE 0xb
+#define RBBM_BLOCK_ID_RAS 0xc
+#define RBBM_BLOCK_ID_VSC 0xd
+#define RBBM_BLOCK_ID_SP_0 0x10
+#define RBBM_BLOCK_ID_SP_1 0x11
+#define RBBM_BLOCK_ID_SP_2 0x12
+#define RBBM_BLOCK_ID_SP_3 0x13
+#define RBBM_BLOCK_ID_TPL1_0 0x18
+#define RBBM_BLOCK_ID_TPL1_1 0x19
+#define RBBM_BLOCK_ID_TPL1_2 0x1a
+#define RBBM_BLOCK_ID_TPL1_3 0x1b
+#define RBBM_BLOCK_ID_RB_0 0x20
+#define RBBM_BLOCK_ID_RB_1 0x21
+#define RBBM_BLOCK_ID_RB_2 0x22
+#define RBBM_BLOCK_ID_RB_3 0x23
+#define RBBM_BLOCK_ID_MARB_0 0x28
+#define RBBM_BLOCK_ID_MARB_1 0x29
+#define RBBM_BLOCK_ID_MARB_2 0x2a
+#define RBBM_BLOCK_ID_MARB_3 0x2b
+
+#define A3XX_RBBM_CLOCK_CTL_DEFAULT 0xBFFFFFFF
+
+#endif
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
new file mode 100644
index 0000000..3b3fba1
--- /dev/null
+++ b/drivers/gpu/msm/adreno.c
@@ -0,0 +1,1974 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+
+#include <mach/socinfo.h>
+#include <mach/board.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_iommu.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_debugfs.h"
+#include "adreno_postmortem.h"
+
+#include "a2xx_reg.h"
+#include "a3xx_reg.h"
+
+#define DRIVER_VERSION_MAJOR 3
+#define DRIVER_VERSION_MINOR 1
+
+#define ADRENO_CFG_MHARB \
+ (0x10 \
+ | (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
+ | (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
+ | (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
+ | (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
+
+#define ADRENO_MMU_CONFIG \
+ (0x01 \
+ | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
+
+static const struct kgsl_functable adreno_functable;
+static volatile int adreno_regwrite_footprint = 0;
+static volatile unsigned int *adreno_regwrite_reg;
+static volatile unsigned int adreno_regwrite_val;
+
+static struct adreno_device device_3d0 = {
+ .dev = {
+ KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
+ .name = DEVICE_3D0_NAME,
+ .id = KGSL_DEVICE_3D0,
+ .mh = {
+ .mharb = ADRENO_CFG_MHARB,
+ .mh_intf_cfg1 = 0x00032f07,
+ .mpu_base = 0x00000000,
+ .mpu_range = 0xFFFFF000,
+ },
+ .mmu = {
+ .config = ADRENO_MMU_CONFIG,
+ },
+ .pwrctrl = {
+ .irq_name = KGSL_3D0_IRQ,
+ },
+ .iomemname = KGSL_3D0_REG_MEMORY,
+ .ftbl = &adreno_functable,
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ .display_off = {
+ .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+ .suspend = kgsl_early_suspend_driver,
+ .resume = kgsl_late_resume_driver,
+ },
+#endif
+ },
+ .gmem_base = 0,
+ .gmem_size = SZ_256K,
+ .pfp_fw = NULL,
+ .pm4_fw = NULL,
+ .wait_timeout = 0,
+ .ib_check_level = 0,
+};
+
+unsigned int hang_detect_regs[] = {
+ A3XX_RBBM_STATUS,
+ REG_CP_RB_RPTR,
+ REG_CP_IB1_BASE,
+ REG_CP_IB1_BUFSZ,
+ REG_CP_IB2_BASE,
+ REG_CP_IB2_BUFSZ,
+};
+
+const unsigned int hang_detect_regs_count = ARRAY_SIZE(hang_detect_regs);
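
hang_detect_regs is the sample set for software hang detection: these
registers are polled while waiting on the GPU, and a device that stays busy
while none of the sampled values change is presumed hung. A minimal sketch
of the comparison this implies (hang_regs_unchanged() and the prev[]
snapshot array are hypothetical names; the real logic lives in the
ringbuffer wait path):

    static int hang_regs_unchanged(struct kgsl_device *device,
                                    unsigned int *prev)
    {
            unsigned int i, val;
            int unchanged = 1;

            for (i = 0; i < hang_detect_regs_count; i++) {
                    adreno_regread(device, hang_detect_regs[i], &val);
                    if (val != prev[i])
                            unchanged = 0;
                    prev[i] = val;  /* snapshot for the next poll */
            }
            return unchanged;
    }
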
+
+
+#define ANY_ID (~0)
+#define NO_VER (~0)
+
+static const struct {
+ enum adreno_gpurev gpurev;
+ unsigned int core, major, minor, patchid;
+ const char *pm4fw;
+ const char *pfpfw;
+ struct adreno_gpudev *gpudev;
+ unsigned int istore_size;
+ unsigned int pix_shader_start;
+
+ unsigned int instruction_size;
+
+ unsigned int gmem_size;
+ unsigned int sync_lock_pm4_ver;
+ unsigned int sync_lock_pfp_ver;
+} adreno_gpulist[] = {
+ { ADRENO_REV_A200, 0, 2, ANY_ID, ANY_ID,
+ "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
+ 512, 384, 3, SZ_256K, NO_VER, NO_VER },
+ { ADRENO_REV_A203, 0, 1, 1, ANY_ID,
+ "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
+ 512, 384, 3, SZ_256K, NO_VER, NO_VER },
+ { ADRENO_REV_A205, 0, 1, 0, ANY_ID,
+ "yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
+ 512, 384, 3, SZ_256K, NO_VER, NO_VER },
+ { ADRENO_REV_A220, 2, 1, ANY_ID, ANY_ID,
+ "leia_pm4_470.fw", "leia_pfp_470.fw", &adreno_a2xx_gpudev,
+ 512, 384, 3, SZ_512K, NO_VER, NO_VER },
+ { ADRENO_REV_A225, 2, 2, 0, 5,
+ "a225p5_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
+ 1536, 768, 3, SZ_512K, NO_VER, NO_VER },
+ { ADRENO_REV_A225, 2, 2, 0, 6,
+ "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
+ 1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
+ { ADRENO_REV_A225, 2, 2, ANY_ID, ANY_ID,
+ "a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
+ 1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
+
+ { ADRENO_REV_A305, 3, 0, 5, ANY_ID,
+ "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_256K, 0x3FF037, 0x3FF016 },
+
+ { ADRENO_REV_A320, 3, 2, 0, ANY_ID,
+ "a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
+ 512, 0, 2, SZ_512K, 0x3FF037, 0x3FF016 },
+};
+
+struct kgsl_process_name {
+ char name[TASK_COMM_LEN+1];
+};
+
+static const struct kgsl_process_name kgsl_blocking_process_tbl[] = {
+ {"SurfaceFlinger"},
+ {"surfaceflinger"},
+ {"ndroid.systemui"},
+ {"droid.htcdialer"},
+ {"mediaserver"},
+};
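
These comm names (SurfaceFlinger, systemui, the HTC dialer, mediaserver)
identify system-critical clients that the CONFIG_MSM_KGSL_KILL_HANG_PROCESS
recovery path treats specially rather than killing outright. A sketch of the
lookup the table implies (kgsl_process_is_blocked() is a hypothetical name):

    static bool kgsl_process_is_blocked(const char *comm)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(kgsl_blocking_process_tbl); i++)
                    if (!strncmp(comm, kgsl_blocking_process_tbl[i].name,
                                    TASK_COMM_LEN))
                            return true;
            return false;
    }
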
+
+static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
+{
+ irqreturn_t result;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ result = adreno_dev->gpudev->irq_handler(adreno_dev);
+
+ if (device->requested_state == KGSL_STATE_NONE) {
+ if (device->pwrctrl.nap_allowed == true) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ queue_work(device->work_queue, &device->idle_check_ws);
+ } else if (device->pwrscale.policy != NULL) {
+ queue_work(device->work_queue, &device->idle_check_ws);
+ }
+ }
+
+
+ mod_timer_pending(&device->idle_timer,
+ jiffies + device->pwrctrl.interval_timeout);
+ return result;
+}
+
+static void adreno_cleanup_pt(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
+
+ kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
+
+ kgsl_mmu_unmap(pagetable, &device->memstore);
+
+ kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
+}
+
+static int adreno_setup_pt(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable)
+{
+ int result = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc,
+ GSL_PT_PAGE_RV);
+ if (result)
+ goto error;
+
+ result = kgsl_mmu_map_global(pagetable, &rb->memptrs_desc,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ if (result)
+ goto unmap_buffer_desc;
+
+ result = kgsl_mmu_map_global(pagetable, &device->memstore,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ if (result)
+ goto unmap_memptrs_desc;
+
+ result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ if (result)
+ goto unmap_memstore_desc;
+
+ return result;
+
+unmap_memstore_desc:
+ kgsl_mmu_unmap(pagetable, &device->memstore);
+
+unmap_memptrs_desc:
+ kgsl_mmu_unmap(pagetable, &rb->memptrs_desc);
+
+unmap_buffer_desc:
+ kgsl_mmu_unmap(pagetable, &rb->buffer_desc);
+
+error:
+ return result;
+}
+
+static void adreno_iommu_setstate(struct kgsl_device *device,
+ unsigned int context_id,
+ uint32_t flags)
+{
+ unsigned int pt_val, reg_pt_val;
+ unsigned int link[250];
+ unsigned int *cmds = &link[0];
+ int sizedwords = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_memdesc **reg_map_desc;
+ void *reg_map_array = NULL;
+ int num_iommu_units, i;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_ctx = NULL;
+
+ if (!adreno_dev->drawctxt_active)
+ return kgsl_mmu_device_setstate(&device->mmu, flags);
+ num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
+ &reg_map_array);
+
+ context = idr_find(&device->context_idr, context_id);
+ adreno_ctx = context->devctxt;
+
+ reg_map_desc = reg_map_array;
+
+ if (kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER))
+ goto done;
+
+ if (cpu_is_msm8960())
+ cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ else
+ cmds += adreno_add_bank_change_cmds(cmds,
+ KGSL_IOMMU_CONTEXT_USER,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+
+ cmds += kgsl_mmu_sync_lock(&device->mmu, cmds);
+
+ pt_val = kgsl_mmu_pt_get_base_addr(device->mmu.hwpagetable);
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ for (i = 0; i < num_iommu_units; i++) {
+ reg_pt_val = (pt_val &
+ (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT)) +
+ kgsl_mmu_get_pt_lsb(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER);
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_TTBR0;
+ *cmds++ = reg_pt_val;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ cmds += adreno_add_read_cmds(device, cmds,
+ reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_TTBR0,
+ reg_pt_val,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ }
+ }
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ for (i = 0; i < num_iommu_units; i++) {
+ reg_pt_val = (pt_val &
+ (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT)) +
+ kgsl_mmu_get_pt_lsb(&device->mmu, i,
+ KGSL_IOMMU_CONTEXT_USER);
+
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = (reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) +
+ KGSL_IOMMU_CTX_TLBIALL);
+ *cmds++ = 1;
+
+ cmds += __adreno_add_idle_indirect_cmds(cmds,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ cmds += adreno_add_read_cmds(device, cmds,
+ reg_map_desc[i]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER <<
+ KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_TTBR0,
+ reg_pt_val,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ }
+ }
+
+
+ cmds += kgsl_mmu_sync_unlock(&device->mmu, cmds);
+
+ if (cpu_is_msm8960())
+ cmds += adreno_add_change_mh_phys_limit_cmds(cmds,
+ reg_map_desc[num_iommu_units - 1]->gpuaddr - PAGE_SIZE,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+ else
+ cmds += adreno_add_bank_change_cmds(cmds,
+ KGSL_IOMMU_CONTEXT_PRIV,
+ device->mmu.setstate_memory.gpuaddr +
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET);
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ sizedwords += (cmds - &link[0]);
+ if (sizedwords) {
+
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x7fff;
+ sizedwords += 2;
+ *cmds++ = cp_type3_packet(CP_INTERRUPT, 1);
+ *cmds++ = CP_INT_CNTL__RB_INT_MASK;
+ sizedwords += 2;
+ adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ KGSL_CMD_FLAGS_PMODE,
+ &link[0], sizedwords);
+ kgsl_mmu_disable_clk_on_ts(&device->mmu,
+ adreno_dev->ringbuffer.timestamp[KGSL_MEMSTORE_GLOBAL], true);
+ }
+ if (sizedwords > (sizeof(link)/sizeof(unsigned int))) {
+ KGSL_DRV_ERR(device, "Temp command buffer overflow\n");
+ BUG();
+ }
+done:
+ if (num_iommu_units)
+ kfree(reg_map_array);
+}
+
+static void adreno_gpummu_setstate(struct kgsl_device *device,
+ unsigned int context_id,
+ uint32_t flags)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int link[32];
+ unsigned int *cmds = &link[0];
+ int sizedwords = 0;
+ unsigned int mh_mmu_invalidate = 0x00000003;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_ctx = NULL;
+
+ if (adreno_is_a20x(adreno_dev))
+ flags |= KGSL_MMUFLAGS_TLBFLUSH;
+ if (!kgsl_cff_dump_enable && adreno_dev->drawctxt_active) {
+ context = idr_find(&device->context_idr, context_id);
+ adreno_ctx = context->devctxt;
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+
+ *cmds++ = cp_type0_packet(MH_MMU_PT_BASE, 1);
+ *cmds++ = kgsl_mmu_pt_get_base_addr(
+ device->mmu.hwpagetable);
+ sizedwords += 4;
+ }
+
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) {
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE,
+ 1);
+ *cmds++ = 0x00000000;
+ sizedwords += 2;
+ }
+ *cmds++ = cp_type0_packet(MH_MMU_INVALIDATE, 1);
+ *cmds++ = mh_mmu_invalidate;
+ sizedwords += 2;
+ }
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE &&
+ adreno_is_a20x(adreno_dev)) {
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = (0x4 << 16) |
+ (REG_PA_SU_SC_MODE_CNTL - 0x2000);
+ *cmds++ = 0;
+ *cmds++ = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
+ *cmds++ = device->mmu.setstate_memory.gpuaddr;
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
+ *cmds++ = 0;
+ *cmds++ = 0x0003C004;
+ *cmds++ = 0;
+ *cmds++ = 3;
+ *cmds++ =
+ device->mmu.setstate_memory.gpuaddr;
+ *cmds++ = 6;
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
+ *cmds++ = 0;
+ *cmds++ = 0x0003C004;
+ *cmds++ = 0;
+ *cmds++ = 3;
+
+ *cmds++ = device->mmu.setstate_memory.gpuaddr;
+ *cmds++ = 6;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ sizedwords += 21;
+ }
+
+
+ if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) {
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x7fff;
+ sizedwords += 2;
+ }
+
+ adreno_ringbuffer_issuecmds(device, adreno_ctx,
+ KGSL_CMD_FLAGS_PMODE,
+ &link[0], sizedwords);
+ } else {
+ kgsl_mmu_device_setstate(&device->mmu, flags);
+ }
+}
+
+static void adreno_setstate(struct kgsl_device *device,
+ unsigned int context_id,
+ uint32_t flags)
+{
+
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
+ return adreno_gpummu_setstate(device, context_id, flags);
+ else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ return adreno_iommu_setstate(device, context_id, flags);
+}
+
+static unsigned int
+a3xx_getchipid(struct kgsl_device *device)
+{
+ unsigned int majorid = 0, minorid = 0, patchid = 0;
+
+
+ unsigned int version = socinfo_get_version();
+
+ if (cpu_is_apq8064() || cpu_is_apq8064ab()) {
+
+
+ majorid = 2;
+ minorid = 0;
+
+
+ if (SOCINFO_VERSION_MAJOR(version) == 2) {
+ patchid = 2;
+ } else {
+ if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
+ (SOCINFO_VERSION_MINOR(version) == 1))
+ patchid = 1;
+ else
+ patchid = 0;
+ }
+ } else if (cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()) {
+
+
+ majorid = 0;
+ minorid = 5;
+
+
+ if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
+ (SOCINFO_VERSION_MINOR(version) == 2))
+ patchid = 2;
+ else
+ patchid = 0;
+ }
+
+ return (0x03 << 24) | (majorid << 16) | (minorid << 8) | patchid;
+}
+
+static unsigned int
+a2xx_getchipid(struct kgsl_device *device)
+{
+ unsigned int chipid = 0;
+ unsigned int coreid, majorid, minorid, patchid, revid;
+ uint32_t soc_platform_version = socinfo_get_version();
+
+ adreno_regread(device, REG_RBBM_PERIPHID1, &coreid);
+ adreno_regread(device, REG_RBBM_PERIPHID2, &majorid);
+ adreno_regread(device, REG_RBBM_PATCH_RELEASE, &revid);
+
+ if (cpu_is_msm8960() || cpu_is_msm8x60())
+ chipid = 2 << 24;
+ else
+ chipid = (coreid & 0xF) << 24;
+
+ chipid |= ((majorid >> 4) & 0xF) << 16;
+
+ minorid = ((revid >> 0) & 0xFF);
+
+ patchid = ((revid >> 16) & 0xFF);
+
+
+
+
+ if (cpu_is_qsd8x50())
+ patchid = 1;
+ else if (cpu_is_msm8960() &&
+ SOCINFO_VERSION_MAJOR(soc_platform_version) == 3)
+ patchid = 6;
+ else if (cpu_is_msm8625() && minorid == 0)
+ minorid = 1;
+
+ chipid |= (minorid << 8) | patchid;
+
+ return chipid;
+}
+
+static unsigned int
+adreno_getchipid(struct kgsl_device *device)
+{
+ if (cpu_is_apq8064() || cpu_is_apq8064ab() || cpu_is_msm8930() ||
+ cpu_is_msm8930aa() || cpu_is_msm8627())
+ return a3xx_getchipid(device);
+ else
+ return a2xx_getchipid(device);
+}
+
+static inline bool _rev_match(unsigned int id, unsigned int entry)
+{
+ return (entry == ANY_ID || entry == id);
+}
+
+static void
+adreno_identify_gpu(struct adreno_device *adreno_dev)
+{
+ unsigned int i, core, major, minor, patchid;
+
+ adreno_dev->chip_id = adreno_getchipid(&adreno_dev->dev);
+
+ core = (adreno_dev->chip_id >> 24) & 0xff;
+ major = (adreno_dev->chip_id >> 16) & 0xff;
+ minor = (adreno_dev->chip_id >> 8) & 0xff;
+ patchid = (adreno_dev->chip_id & 0xff);
+
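+ /*
+ * Look the chip ID up in adreno_gpulist; the major, minor and patchid
+ * entries may be wildcards (ANY_ID).
+ */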
+ for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
+ if (core == adreno_gpulist[i].core &&
+ _rev_match(major, adreno_gpulist[i].major) &&
+ _rev_match(minor, adreno_gpulist[i].minor) &&
+ _rev_match(patchid, adreno_gpulist[i].patchid))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(adreno_gpulist)) {
+ adreno_dev->gpurev = ADRENO_REV_UNKNOWN;
+ return;
+ }
+
+ adreno_dev->gpurev = adreno_gpulist[i].gpurev;
+ adreno_dev->gpudev = adreno_gpulist[i].gpudev;
+ adreno_dev->pfp_fwfile = adreno_gpulist[i].pfpfw;
+ adreno_dev->pm4_fwfile = adreno_gpulist[i].pm4fw;
+ adreno_dev->istore_size = adreno_gpulist[i].istore_size;
+ adreno_dev->pix_shader_start = adreno_gpulist[i].pix_shader_start;
+ adreno_dev->instruction_size = adreno_gpulist[i].instruction_size;
+ adreno_dev->gmem_size = adreno_gpulist[i].gmem_size;
+ adreno_dev->gpulist_index = i;
+
+}
+
+static int __devinit
+adreno_probe(struct platform_device *pdev)
+{
+ struct kgsl_device *device;
+ struct adreno_device *adreno_dev;
+ int status = -EINVAL;
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+ adreno_dev = ADRENO_DEVICE(device);
+ device->parentdev = &pdev->dev;
+
+ status = adreno_ringbuffer_init(device);
+ if (status != 0)
+ goto error;
+
+ status = kgsl_device_platform_probe(device);
+ if (status)
+ goto error_close_rb;
+
+ adreno_debugfs_init(device);
+
+ kgsl_pwrscale_init(device);
+ kgsl_pwrscale_attach_policy(device, ADRENO_DEFAULT_PWRSCALE_POLICY);
+
+ device->flags &= ~KGSL_FLAGS_SOFT_RESET;
+ return 0;
+
+error_close_rb:
+ adreno_ringbuffer_close(&adreno_dev->ringbuffer);
+error:
+ device->parentdev = NULL;
+ return status;
+}
+
+static int __devexit adreno_remove(struct platform_device *pdev)
+{
+ struct kgsl_device *device;
+ struct adreno_device *adreno_dev;
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+ adreno_dev = ADRENO_DEVICE(device);
+
+ kgsl_pwrscale_detach_policy(device);
+ kgsl_pwrscale_close(device);
+
+ adreno_ringbuffer_close(&adreno_dev->ringbuffer);
+ kgsl_device_platform_remove(device);
+
+ return 0;
+}
+
+static int adreno_start(struct kgsl_device *device, unsigned int init_ram)
+{
+ int status = -EINVAL;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (KGSL_STATE_DUMP_AND_RECOVER != device->state)
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+
+
+ kgsl_pwrctrl_enable(device);
+
+
+ adreno_identify_gpu(adreno_dev);
+
+ if (adreno_ringbuffer_read_pm4_ucode(device)) {
+ KGSL_DRV_ERR(device, "Reading pm4 microcode failed %s\n",
+ adreno_dev->pm4_fwfile);
+ BUG_ON(1);
+ }
+
+ if (adreno_ringbuffer_read_pfp_ucode(device)) {
+ KGSL_DRV_ERR(device, "Reading pfp microcode failed %s\n",
+ adreno_dev->pfp_fwfile);
+ BUG_ON(1);
+ }
+
+ if (adreno_dev->gpurev == ADRENO_REV_UNKNOWN) {
+ KGSL_DRV_ERR(device, "Unknown chip ID %x\n",
+ adreno_dev->chip_id);
+ goto error_clk_off;
+ }
+
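+ /*
+ * Enable the IOMMU sync lock only if both microcode images are new
+ * enough to support it.
+ */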
+ if ((adreno_dev->pm4_fw_version >=
+ adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pm4_ver) &&
+ (adreno_dev->pfp_fw_version >=
+ adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pfp_ver))
+ device->mmu.flags |= KGSL_MMU_FLAGS_IOMMU_SYNC;
+
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ if (adreno_is_a20x(adreno_dev)) {
+ device->mh.mh_intf_cfg1 = 0;
+ device->mh.mh_intf_cfg2 = 0;
+ }
+
+ kgsl_mh_start(device);
+ }
+
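+ /* The first hang detect register is the per-target RBBM status */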
+ hang_detect_regs[0] = adreno_dev->gpudev->reg_rbbm_status;
+
+ status = kgsl_mmu_start(device);
+ if (status)
+ goto error_clk_off;
+
+
+ adreno_dev->gpudev->start(adreno_dev);
+
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ device->ftbl->irqctrl(device, 1);
+
+ status = adreno_ringbuffer_start(&adreno_dev->ringbuffer, init_ram);
+ if (status == 0) {
+ if (KGSL_STATE_DUMP_AND_RECOVER != device->state)
+ mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+ return 0;
+ }
+
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+ kgsl_mmu_stop(&device->mmu);
+error_clk_off:
+ kgsl_pwrctrl_disable(device);
+
+ return status;
+}
+
+static int adreno_stop(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ adreno_dev->drawctxt_active = NULL;
+
+ adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
+
+ device->ftbl->irqctrl(device, 0);
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+ del_timer_sync(&device->idle_timer);
+
+ kgsl_mmu_stop(&device->mmu);
+
+ kgsl_pwrctrl_disable(device);
+
+ return 0;
+}
+
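+/*
+ * Flag every context with the outcome of recovery so user space can tell
+ * whether it was guilty of the reset or an innocent victim.
+ */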
+static void adreno_mark_context_status(struct kgsl_device *device,
+ int recovery_status)
+{
+ struct kgsl_context *context;
+ int next = 0;
+ while ((context = idr_get_next(&device->context_idr, &next))) {
+ struct adreno_context *adreno_context = context->devctxt;
+ if (recovery_status) {
+ context->reset_status =
+ KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+ } else if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
+ context->reset_status) {
+ if (adreno_context->flags & (CTXT_FLAGS_GPU_HANG |
+ CTXT_FLAGS_GPU_HANG_RECOVERED))
+ context->reset_status =
+ KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT;
+ else
+ context->reset_status =
+ KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
+ }
+ next = next + 1;
+ }
+}
+
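+/*
+ * Fast-forward the memstore timestamps of hung contexts so that any
+ * waiters blocked on them are released.
+ */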
+static void adreno_set_max_ts_for_bad_ctxs(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *temp_adreno_context;
+ int next = 0;
+
+ while ((context = idr_get_next(&device->context_idr, &next))) {
+ temp_adreno_context = context->devctxt;
+ if (temp_adreno_context->flags & CTXT_FLAGS_GPU_HANG) {
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id,
+ soptimestamp),
+ rb->timestamp[context->id]);
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context->id,
+ eoptimestamp),
+ rb->timestamp[context->id]);
+ }
+ next = next + 1;
+ }
+}
+
+static void adreno_destroy_recovery_data(struct adreno_recovery_data *rec_data)
+{
+ vfree(rec_data->rb_buffer);
+ vfree(rec_data->bad_rb_buffer);
+}
+
+static int adreno_setup_recovery_data(struct kgsl_device *device,
+ struct adreno_recovery_data *rec_data)
+{
+ int ret = 0;
+ unsigned int ib1_sz, ib2_sz;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ memset(rec_data, 0, sizeof(*rec_data));
+
+ adreno_regread(device, REG_CP_IB1_BUFSZ, &ib1_sz);
+ adreno_regread(device, REG_CP_IB2_BUFSZ, &ib2_sz);
+ if (ib1_sz || ib2_sz)
+ adreno_regread(device, REG_CP_IB1_BASE, &rec_data->ib1);
+
+ kgsl_sharedmem_readl(&device->memstore, &rec_data->context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+
+ kgsl_sharedmem_readl(&device->memstore,
+ &rec_data->global_eop,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp));
+
+ rec_data->rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!rec_data->rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ return -ENOMEM;
+ }
+
+ rec_data->bad_rb_buffer = vmalloc(rb->buffer_desc.size);
+ if (!rec_data->bad_rb_buffer) {
+ KGSL_MEM_ERR(device, "vmalloc(%d) failed\n",
+ rb->buffer_desc.size);
+ ret = -ENOMEM;
+ goto done;
+ }
+
+done:
+ if (ret) {
+ vfree(rec_data->rb_buffer);
+ vfree(rec_data->bad_rb_buffer);
+ }
+ return ret;
+}
+
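+/*
+ * Core of hang recovery: restart the device and replay the ringbuffer.
+ * The commands from the hung context are retried once (when
+ * try_bad_commands is set); if they hang again, or if there are none,
+ * only the known-good commands are replayed.
+ */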
+static int
+_adreno_recover_hang(struct kgsl_device *device,
+ struct adreno_recovery_data *rec_data,
+ bool try_bad_commands)
+{
+ int ret;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context = NULL;
+ struct adreno_context *last_active_ctx = adreno_dev->drawctxt_active;
+
+ context = idr_find(&device->context_idr, rec_data->context_id);
+ if (context == NULL) {
+ KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
+ rec_data->context_id);
+ } else {
+ adreno_context = context->devctxt;
+ adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
+ }
+
+ ret = adreno_ringbuffer_extract(rb, rec_data);
+ if (ret)
+ goto done;
+
+
+ ret = adreno_stop(device);
+ if (ret) {
+ KGSL_DRV_ERR(device, "Device stop failed in recovery\n");
+ goto done;
+ }
+
+ ret = adreno_start(device, true);
+ if (ret) {
+ KGSL_DRV_ERR(device, "Device start failed in recovery\n");
+ goto done;
+ }
+
+ if (context)
+ kgsl_mmu_setstate(&device->mmu, adreno_context->pagetable,
+ KGSL_MEMSTORE_GLOBAL);
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ ret = kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER);
+ if (ret)
+ goto done;
+ }
+
+ if (!try_bad_commands)
+ rec_data->bad_rb_size = 0;
+
+ if (rec_data->bad_rb_size) {
+ int idle_ret;
+ adreno_ringbuffer_restore(rb, rec_data->bad_rb_buffer,
+ rec_data->bad_rb_size);
+ idle_ret = adreno_idle(device);
+ if (idle_ret) {
+ ret = adreno_stop(device);
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Device stop failed in recovery\n");
+ goto done;
+ }
+ ret = adreno_start(device, true);
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Device start failed in recovery\n");
+ goto done;
+ }
+ if (context)
+ kgsl_mmu_setstate(&device->mmu,
+ adreno_context->pagetable,
+ KGSL_MEMSTORE_GLOBAL);
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ ret = kgsl_mmu_enable_clk(&device->mmu,
+ KGSL_IOMMU_CONTEXT_USER);
+ if (ret)
+ goto done;
+ }
+
+ ret = idle_ret;
+ KGSL_DRV_ERR(device,
+ "Bad context commands hung in recovery\n");
+ } else {
+ KGSL_DRV_ERR(device,
+ "Bad context commands succeeded in recovery\n");
+ if (adreno_context)
+ adreno_context->flags = (adreno_context->flags &
+ ~CTXT_FLAGS_GPU_HANG) |
+ CTXT_FLAGS_GPU_HANG_RECOVERED;
+ adreno_dev->drawctxt_active = last_active_ctx;
+ }
+ }
+
+ if (ret || !rec_data->bad_rb_size) {
+ adreno_ringbuffer_restore(rb, rec_data->rb_buffer,
+ rec_data->rb_size);
+ ret = adreno_idle(device);
+ if (ret) {
+ ret = -EAGAIN;
+ goto done;
+ }
+ if (rec_data->last_valid_ctx_id) {
+ struct kgsl_context *last_ctx =
+ idr_find(&device->context_idr,
+ rec_data->last_valid_ctx_id);
+ if (last_ctx)
+ adreno_dev->drawctxt_active = last_ctx->devctxt;
+ }
+ }
+done:
+
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
+ return ret;
+}
+
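+/*
+ * Retry recovery while the replay itself times out (-EAGAIN); the bad
+ * context commands are only attempted on the first pass.
+ */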
+static int
+adreno_recover_hang(struct kgsl_device *device,
+ struct adreno_recovery_data *rec_data)
+{
+ int ret = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned int timestamp;
+
+ KGSL_DRV_ERR(device,
+ "Starting recovery from 3D GPU hang. Recovery parameters: IB1: 0x%X, "
+ "Bad context_id: %u, global_eop: 0x%x\n",
+ rec_data->ib1, rec_data->context_id, rec_data->global_eop);
+
+ timestamp = rb->timestamp[KGSL_MEMSTORE_GLOBAL];
+ KGSL_DRV_ERR(device, "Last issued global timestamp: %x\n", timestamp);
+
+ while (true) {
+ if (!ret)
+ ret = _adreno_recover_hang(device, rec_data, true);
+ else
+ ret = _adreno_recover_hang(device, rec_data, false);
+
+ if (-EAGAIN == ret) {
+ adreno_destroy_recovery_data(rec_data);
+ adreno_setup_recovery_data(device, rec_data);
+ KGSL_DRV_ERR(device,
+ "Retry recovery from 3D GPU hang. Recovery parameters: "
+ "IB1: 0x%X, Bad context_id: %u, global_eop: 0x%x\n",
+ rec_data->ib1, rec_data->context_id,
+ rec_data->global_eop);
+ } else {
+ break;
+ }
+ }
+
+ if (ret)
+ goto done;
+
+
+ if (adreno_dev->drawctxt_active)
+ device->mmu.hwpagetable =
+ adreno_dev->drawctxt_active->pagetable;
+ else
+ device->mmu.hwpagetable = device->mmu.defaultpagetable;
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL] = timestamp;
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp),
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
+done:
+ adreno_set_max_ts_for_bad_ctxs(device);
+ adreno_mark_context_status(device, ret);
+
+ if (!ret)
+ KGSL_DRV_ERR(device, "Recovery succeeded\n");
+ else
+ KGSL_DRV_ERR(device, "Recovery failed\n");
+ return ret;
+}
+
+static int adreno_kill_suspect(struct kgsl_device *device)
+{
+ int ret = 1;
+#ifdef CONFIG_MSM_KGSL_KILL_HANG_PROCESS
+ int cankill = 1;
+ char suspect_task_comm[TASK_COMM_LEN+1];
+ char suspect_task_parent_comm[TASK_COMM_LEN+1];
+ int suspect_tgid;
+ struct task_struct *suspect_task = get_current();
+ struct task_struct *suspect_parent_task = suspect_task->group_leader;
+ int i = 0;
+
+ suspect_tgid = task_tgid_nr(suspect_task);
+ get_task_comm(suspect_task_comm, suspect_task);
+
+ if (suspect_parent_task)
+ get_task_comm(suspect_task_parent_comm, suspect_parent_task);
+ else
+ suspect_task_parent_comm[0] = '\0';
+
+
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_blocking_process_tbl); i++) {
+ if (!((strncmp(suspect_task_comm,
+ kgsl_blocking_process_tbl[i].name, TASK_COMM_LEN)) &&
+ (strncmp(suspect_task_parent_comm,
+ kgsl_blocking_process_tbl[i].name, TASK_COMM_LEN)))) {
+ cankill = 0;
+ break;
+ }
+ }
+
+ if (cankill) {
+ KGSL_DRV_ERR(device, "We need to kill suspect process "
+ "causing gpu hung, tgid=%d, name=%s, pname=%s\n",
+ suspect_tgid, suspect_task_comm, suspect_task_parent_comm);
+
+ do_send_sig_info(SIGKILL,
+ SEND_SIG_FORCED, suspect_task, true);
+ ret = 0;
+ }
+#endif
+ return ret;
+}
+
+int
+adreno_dump_and_recover(struct kgsl_device *device)
+{
+ int result = -ETIMEDOUT;
+ struct adreno_recovery_data rec_data;
+
+ if (device->state == KGSL_STATE_HUNG)
+ goto done;
+ if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->recovery_gate);
+ mutex_lock(&device->mutex);
+ if (device->state != KGSL_STATE_HUNG)
+ result = 0;
+ } else {
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_DUMP_AND_RECOVER);
+ INIT_COMPLETION(device->recovery_gate);
+
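+ /* Capture the recovery data before dumping any state */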
+ result = adreno_setup_recovery_data(device, &rec_data);
+ adreno_postmortem_dump(device, 0);
+
+ kgsl_device_snapshot(device, 1);
+
+ result = adreno_recover_hang(device, &rec_data);
+ adreno_destroy_recovery_data(&rec_data);
+ if (result) {
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
+ } else {
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+ }
+ complete_all(&device->recovery_gate);
+
+
+ if (!device->snapshot_no_panic) {
+ if (result) {
+ msleep(10000);
+ panic("GPU Hang");
+ } else {
+ if (board_mfg_mode() ||
+ adreno_kill_suspect(device)) {
+ msleep(10000);
+ panic("Recoverable GPU Hang");
+ }
+ }
+ }
+ }
+done:
+ return result;
+}
+EXPORT_SYMBOL(adreno_dump_and_recover);
+
+static int adreno_getproperty(struct kgsl_device *device,
+ enum kgsl_property_type type,
+ void *value,
+ unsigned int sizebytes)
+{
+ int status = -EINVAL;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ switch (type) {
+ case KGSL_PROP_DEVICE_INFO:
+ {
+ struct kgsl_devinfo devinfo;
+
+ if (sizebytes != sizeof(devinfo)) {
+ status = -EINVAL;
+ break;
+ }
+
+ memset(&devinfo, 0, sizeof(devinfo));
+ devinfo.device_id = device->id+1;
+ devinfo.chip_id = adreno_dev->chip_id;
+ devinfo.mmu_enabled = kgsl_mmu_enabled();
+ devinfo.gpu_id = adreno_dev->gpurev;
+ devinfo.gmem_gpubaseaddr = adreno_dev->gmem_base;
+ devinfo.gmem_sizebytes = adreno_dev->gmem_size;
+
+ if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
+ 0) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ case KGSL_PROP_DEVICE_SHADOW:
+ {
+ struct kgsl_shadowprop shadowprop;
+
+ if (sizebytes != sizeof(shadowprop)) {
+ status = -EINVAL;
+ break;
+ }
+ memset(&shadowprop, 0, sizeof(shadowprop));
+ if (device->memstore.hostptr) {
+ shadowprop.gpuaddr = device->memstore.physaddr;
+ shadowprop.size = device->memstore.size;
+ shadowprop.flags = KGSL_FLAGS_INITIALIZED |
+ KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS;
+ }
+ if (copy_to_user(value, &shadowprop,
+ sizeof(shadowprop))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ case KGSL_PROP_MMU_ENABLE:
+ {
+ int mmu_prop = kgsl_mmu_enabled();
+
+ if (sizebytes != sizeof(int)) {
+ status = -EINVAL;
+ break;
+ }
+ if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ case KGSL_PROP_INTERRUPT_WAITS:
+ {
+ int int_waits = 1;
+ if (sizebytes != sizeof(int)) {
+ status = -EINVAL;
+ break;
+ }
+ if (copy_to_user(value, &int_waits, sizeof(int))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ default:
+ status = -EINVAL;
+ }
+
+ return status;
+}
+
+static int adreno_setproperty(struct kgsl_device *device,
+ enum kgsl_property_type type,
+ void *value,
+ unsigned int sizebytes)
+{
+ int status = -EINVAL;
+
+ switch (type) {
+ case KGSL_PROP_PWRCTRL: {
+ unsigned int enable;
+ struct kgsl_device_platform_data *pdata =
+ kgsl_device_get_drvdata(device);
+
+ if (sizebytes != sizeof(enable))
+ break;
+
+ if (copy_from_user(&enable, (void __user *) value,
+ sizeof(enable))) {
+ status = -EFAULT;
+ break;
+ }
+
+ if (enable) {
+ if (pdata->nap_allowed)
+ device->pwrctrl.nap_allowed = true;
+
+ kgsl_pwrscale_enable(device);
+ } else {
+ device->pwrctrl.nap_allowed = false;
+ kgsl_pwrscale_disable(device);
+ }
+
+ status = 0;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
+
+static inline void adreno_poke(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ adreno_regwrite(device, REG_CP_RB_WPTR, adreno_dev->ringbuffer.wptr);
+}
+
+static int adreno_ringbuffer_drain(struct kgsl_device *device,
+ unsigned int *regs)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned long wait;
+ unsigned long timeout = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+
+ if (!(rb->flags & KGSL_FLAGS_STARTED))
+ return 0;
+
+
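+ /* Re-poke the wptr and check for a hang after 100 ms, then every
+ * KGSL_TIMEOUT_PART until the ringbuffer drains or times out.
+ */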
+ wait = jiffies + msecs_to_jiffies(100);
+
+ adreno_poke(device);
+
+ do {
+ if (time_after(jiffies, wait)) {
+ adreno_poke(device);
+
+
+ if (adreno_hang_detect(device, regs))
+ return -ETIMEDOUT;
+
+ wait = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
+ }
+ GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+ if (time_after(jiffies, timeout)) {
+ KGSL_DRV_ERR(device, "rptr: %x, wptr: %x\n",
+ rb->rptr, rb->wptr);
+ return -ETIMEDOUT;
+ }
+ } while (rb->rptr != rb->wptr);
+
+ return 0;
+}
+
+int adreno_idle(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int rbbm_status;
+ unsigned long wait_time;
+ unsigned long wait_time_part;
+ unsigned int prev_reg_val[hang_detect_regs_count];
+
+ memset(prev_reg_val, 0, sizeof(prev_reg_val));
+
+ kgsl_cffdump_regpoll(device->id,
+ adreno_dev->gpudev->reg_rbbm_status << 2,
+ 0x00000000, 0x80000000);
+
+retry:
+
+ if (adreno_ringbuffer_drain(device, prev_reg_val))
+ goto err;
+
+
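+ /* Now poll the RBBM status until the core reports idle */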
+ wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+ wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
+
+ while (time_before(jiffies, wait_time)) {
+ adreno_regread(device, adreno_dev->gpudev->reg_rbbm_status,
+ &rbbm_status);
+ if (adreno_is_a2xx(adreno_dev)) {
+ if (rbbm_status == 0x110)
+ return 0;
+ } else {
+ if (!(rbbm_status & 0x80000000))
+ return 0;
+ }
+
+ if (time_after(jiffies, wait_time_part)) {
+ wait_time_part = jiffies +
+ msecs_to_jiffies(KGSL_TIMEOUT_PART);
+ if ((adreno_hang_detect(device, prev_reg_val)))
+ goto err;
+ }
+
+ }
+
+err:
+ KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
+ if (KGSL_STATE_DUMP_AND_RECOVER != device->state &&
+ !adreno_dump_and_recover(device)) {
+ wait_time = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+ goto retry;
+ }
+ return -ETIMEDOUT;
+}
+
+static bool is_adreno_rbbm_status_idle(struct kgsl_device *device)
+{
+ unsigned int reg_rbbm_status;
+ bool status = false;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+
+ adreno_regread(device,
+ adreno_dev->gpudev->reg_rbbm_status,
+ &reg_rbbm_status);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ if (reg_rbbm_status == 0x110)
+ status = true;
+ } else {
+ if (!(reg_rbbm_status & 0x80000000))
+ status = true;
+ }
+ return status;
+}
+
+static unsigned int adreno_isidle(struct kgsl_device *device)
+{
+ int status = false;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ WARN_ON(device->state == KGSL_STATE_INIT);
+
+ if (device->state == KGSL_STATE_ACTIVE) {
+
+ GSL_RB_GET_READPTR(rb, &rb->rptr);
+ if (!device->active_cnt && (rb->rptr == rb->wptr)) {
+
+ status = is_adreno_rbbm_status_idle(device);
+ }
+ } else {
+ status = true;
+ }
+ return status;
+}
+
+static int adreno_suspend_context(struct kgsl_device *device)
+{
+ int status = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+
+ if (adreno_dev->drawctxt_active != NULL) {
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ device->current_process_priv = NULL;
+#endif
+ adreno_drawctxt_switch(adreno_dev, NULL, 0);
+ status = adreno_idle(device);
+ }
+
+ return status;
+}
+
+
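+/*
+ * Search every context belonging to the given pagetable for a static
+ * allocation (gpustate or GMEM shadow) containing the address range.
+ */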
+struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
+ unsigned int pt_base, unsigned int gpuaddr, unsigned int size)
+{
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context = NULL;
+ int next = 0;
+
+ while (1) {
+ context = idr_get_next(&device->context_idr, &next);
+ if (context == NULL)
+ break;
+
+ adreno_context = (struct adreno_context *)context->devctxt;
+
+ if (kgsl_mmu_pt_equal(adreno_context->pagetable, pt_base)) {
+ struct kgsl_memdesc *desc;
+
+ desc = &adreno_context->gpustate;
+ if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
+ return desc;
+
+ desc = &adreno_context->context_gmem_shadow.gmemshadow;
+ if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
+ return desc;
+ }
+ next = next + 1;
+ }
+
+ return NULL;
+}
+
+struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
+ unsigned int pt_base,
+ unsigned int gpuaddr,
+ unsigned int size)
+{
+ struct kgsl_mem_entry *entry;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *ringbuffer = &adreno_dev->ringbuffer;
+
+ if (kgsl_gpuaddr_in_memdesc(&ringbuffer->buffer_desc, gpuaddr, size))
+ return &ringbuffer->buffer_desc;
+
+ if (kgsl_gpuaddr_in_memdesc(&ringbuffer->memptrs_desc, gpuaddr, size))
+ return &ringbuffer->memptrs_desc;
+
+ if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr, size))
+ return &device->memstore;
+
+ if (kgsl_gpuaddr_in_memdesc(&device->mmu.setstate_memory, gpuaddr,
+ size))
+ return &device->mmu.setstate_memory;
+
+ entry = kgsl_get_mem_entry(pt_base, gpuaddr, size);
+
+ if (entry)
+ return &entry->memdesc;
+
+ return adreno_find_ctxtmem(device, pt_base, gpuaddr, size);
+}
+
+uint8_t *adreno_convertaddr(struct kgsl_device *device, unsigned int pt_base,
+ unsigned int gpuaddr, unsigned int size)
+{
+ struct kgsl_memdesc *memdesc;
+
+ memdesc = adreno_find_region(device, pt_base, gpuaddr, size);
+
+ return memdesc ? kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr) : NULL;
+}
+
+void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+ unsigned int *value)
+{
+ unsigned int *reg;
+ BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
+ reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
+
+ if (!in_interrupt())
+ kgsl_pre_hwaccess(device);
+
+ *value = __raw_readl(reg);
+ rmb();
+}
+
+void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
+ unsigned int value)
+{
+ unsigned int *reg;
+
+ BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
+
+ if (!in_interrupt())
+ kgsl_pre_hwaccess(device);
+
+ kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
+ reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
+
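+ /*
+ * The footprint globals bracket the raw write so a post-mortem dump
+ * can identify a register write that hung the bus (this appears to be
+ * vendor debug instrumentation).
+ */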
+ wmb();
+ adreno_regwrite_footprint = 1;
+ adreno_regwrite_reg = reg;
+ adreno_regwrite_val = value;
+ dsb();
+ __raw_writel(value, reg);
+ adreno_regwrite_footprint = 0;
+ dsb();
+}
+
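+/*
+ * Return the memstore slot used for a context's timestamps: its own id
+ * if per-context timestamps are enabled, the global slot otherwise, or
+ * KGSL_CONTEXT_INVALID for a detached context.
+ */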
+static unsigned int _get_context_id(struct kgsl_context *k_ctxt)
+{
+ unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
+ if (k_ctxt != NULL) {
+ struct adreno_context *a_ctxt = k_ctxt->devctxt;
+ if (k_ctxt->id == KGSL_CONTEXT_INVALID || a_ctxt == NULL)
+ context_id = KGSL_CONTEXT_INVALID;
+ else if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+ context_id = k_ctxt->id;
+ }
+
+ return context_id;
+}
+
+static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp)
+{
+ int status;
+ unsigned int ref_ts, enableflag;
+ unsigned int context_id;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ mutex_lock(&device->mutex);
+ context_id = _get_context_id(context);
+ if (context_id == KGSL_CONTEXT_INVALID) {
+ KGSL_DRV_WARN(device, "context was detached");
+ status = -EINVAL;
+ goto unlock;
+ }
+
+ status = kgsl_check_timestamp(device, context, timestamp);
+ if (!status) {
+ kgsl_sharedmem_readl(&device->memstore, &enableflag,
+ KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable));
+ mb();
+
+ if (enableflag) {
+ kgsl_sharedmem_readl(&device->memstore, &ref_ts,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ref_wait_ts));
+ mb();
+ if (timestamp_cmp(ref_ts, timestamp) >= 0) {
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ref_wait_ts), timestamp);
+ wmb();
+ }
+ } else {
+ unsigned int cmds[2];
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ref_wait_ts), timestamp);
+ enableflag = 1;
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ts_cmp_enable), enableflag);
+ wmb();
+ cmds[0] = cp_type3_packet(CP_NOP, 1);
+ cmds[1] = 0;
+
+ if (adreno_dev->drawctxt_active)
+ adreno_ringbuffer_issuecmds(device,
+ adreno_dev->drawctxt_active,
+ KGSL_CMD_FLAGS_NONE, &cmds[0], 2);
+ else
+ BUG();
+ }
+ }
+unlock:
+ mutex_unlock(&device->mutex);
+
+ return status;
+}
+
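+/* Sleep as an io-bound or normal waiter depending on the io flag */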
+#define kgsl_wait_event_interruptible_timeout(wq, condition, timeout, io)\
+({ \
+ long __ret = timeout; \
+ if (io) \
+ __wait_io_event_interruptible_timeout(wq, condition, __ret);\
+ else \
+ __wait_event_interruptible_timeout(wq, condition, __ret);\
+ __ret; \
+})
+
+
+
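+/*
+ * Fast hang detection: with the core reporting busy, sample the hang
+ * detect registers and declare a hang only if none of them has changed
+ * since the previous sample.
+ */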
+unsigned int adreno_hang_detect(struct kgsl_device *device,
+ unsigned int *prev_reg_val)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int curr_reg_val[hang_detect_regs_count];
+ unsigned int hang_detected = 1;
+ unsigned int i;
+
+ if (!adreno_dev->fast_hang_detect)
+ return 0;
+
+ if (is_adreno_rbbm_status_idle(device))
+ return 0;
+
+ for (i = 0; i < hang_detect_regs_count; i++) {
+ adreno_regread(device, hang_detect_regs[i],
+ &curr_reg_val[i]);
+ if (curr_reg_val[i] != prev_reg_val[i]) {
+ prev_reg_val[i] = curr_reg_val[i];
+ hang_detected = 0;
+ }
+ }
+
+ return hang_detected;
+}
+
+
+static int adreno_waittimestamp(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
+{
+ long status = 0;
+ uint io = 1;
+ static uint io_cnt;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int retries = 0;
+ unsigned int ts_issued;
+ unsigned int context_id = _get_context_id(context);
+ unsigned int time_elapsed = 0;
+ unsigned int prev_reg_val[hang_detect_regs_count];
+ unsigned int wait;
+
+ memset(prev_reg_val, 0, sizeof(prev_reg_val));
+
+ ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
+
+
+ if (msecs == KGSL_TIMEOUT_DEFAULT)
+ msecs = adreno_dev->wait_timeout;
+
+ if (timestamp_cmp(timestamp, ts_issued) > 0) {
+ KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, "
+ "last issued ts <%d:0x%x>\n",
+ context_id, timestamp, context_id, ts_issued);
+ status = -EINVAL;
+ goto done;
+ }
+
+
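+ /* Use a short first wait so quickly retiring timestamps return fast;
+ * later iterations wait in KGSL_TIMEOUT_PART slices.
+ */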
+ if (msecs == 0 || msecs >= 100)
+ wait = 100;
+ else
+ wait = 20;
+
+ do {
+ if (context_id == KGSL_CONTEXT_INVALID) {
+ KGSL_DRV_WARN(device, "context was detached");
+ status = -EINVAL;
+ goto done;
+ }
+ if (kgsl_check_timestamp(device, context, timestamp)) {
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ status = 0;
+ goto done;
+ }
+ adreno_poke(device);
+ io_cnt = (io_cnt + 1) % 100;
+ if (io_cnt <
+ pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
+ io = 0;
+
+ if ((retries > 0) &&
+ (adreno_hang_detect(device, prev_reg_val)))
+ goto hang_dump;
+
+ mutex_unlock(&device->mutex);
+ status = kgsl_wait_event_interruptible_timeout(
+ device->wait_queue,
+ kgsl_check_interrupt_timestamp(device,
+ context, timestamp),
+ msecs_to_jiffies(wait), io);
+
+ mutex_lock(&device->mutex);
+
+ if (status > 0) {
+
+ status = 0;
+ goto done;
+ } else if (status < 0) {
+
+ goto done;
+ }
+
+
+ time_elapsed += wait;
+ wait = KGSL_TIMEOUT_PART;
+
+ retries++;
+
+ } while (!msecs || time_elapsed < msecs);
+
+hang_dump:
+ if (kgsl_check_timestamp(device, context, timestamp))
+ goto done;
+ status = -ETIMEDOUT;
+ KGSL_DRV_ERR(device,
+ "Device hang detected while waiting for timestamp: "
+ "<%d:0x%x>, last submitted timestamp: <%d:0x%x>, "
+ "wptr: 0x%x\n",
+ context_id, timestamp, context_id, ts_issued,
+ adreno_dev->ringbuffer.wptr);
+ if (!adreno_dump_and_recover(device)) {
+ status = 0;
+ }
+done:
+ return (int)status;
+}
+
+static unsigned int adreno_readtimestamp(struct kgsl_device *device,
+ struct kgsl_context *context, enum kgsl_timestamp_type type)
+{
+ unsigned int timestamp = 0;
+ unsigned int context_id = _get_context_id(context);
+
+ if (context_id == KGSL_CONTEXT_INVALID) {
+ KGSL_DRV_WARN(device, "context was detached");
+ return timestamp;
+ }
+ switch (type) {
+ case KGSL_TIMESTAMP_QUEUED: {
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ timestamp = rb->timestamp[context_id];
+ break;
+ }
+ case KGSL_TIMESTAMP_CONSUMED:
+ adreno_regread(device, REG_CP_TIMESTAMP, &timestamp);
+ break;
+ case KGSL_TIMESTAMP_RETIRED:
+ kgsl_sharedmem_readl(&device->memstore, &timestamp,
+ KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp));
+ break;
+ }
+
+ rmb();
+
+ return timestamp;
+}
+
+static long adreno_ioctl(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_drawctxt_set_bin_base_offset *binbase;
+ struct kgsl_context *context;
+
+ switch (cmd) {
+ case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET:
+ binbase = data;
+
+ context = kgsl_find_context(dev_priv, binbase->drawctxt_id);
+ if (context) {
+ adreno_drawctxt_set_bin_base_offset(
+ dev_priv->device, context, binbase->offset);
+ } else {
+ result = -EINVAL;
+ KGSL_DRV_ERR(dev_priv->device,
+ "invalid drawctxt drawctxt_id %d "
+ "device_id=%d\n",
+ binbase->drawctxt_id, dev_priv->device->id);
+ }
+ break;
+
+ default:
+ KGSL_DRV_INFO(dev_priv->device,
+ "invalid ioctl code %08x\n", cmd);
+ result = -ENOIOCTLCMD;
+ break;
+ }
+ return result;
+}
+
+static inline s64 adreno_ticks_to_us(u32 ticks, u32 gpu_freq)
+{
+ gpu_freq /= 1000000;
+ return ticks / gpu_freq;
+}
+
+static void adreno_power_stats(struct kgsl_device *device,
+ struct kgsl_power_stats *stats)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ unsigned int cycles;
+
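+ /* Cycles the GPU has been busy since the last call */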
+ cycles = adreno_dev->gpudev->busy_cycles(adreno_dev);
+
+ if (pwr->time != 0) {
+ s64 tmp = ktime_to_us(ktime_get());
+ stats->total_time = tmp - pwr->time;
+ pwr->time = tmp;
+ stats->busy_time = adreno_ticks_to_us(cycles, device->pwrctrl.
+ pwrlevels[device->pwrctrl.active_pwrlevel].
+ gpu_freq);
+
+
+ stats->busy_time = (stats->busy_time > stats->total_time) ?
+ stats->total_time : stats->busy_time;
+ device->gputime.total += stats->total_time;
+ device->gputime.busy += stats->busy_time;
+ device->gputime_in_state[device->pwrctrl.active_pwrlevel].total +=
+ stats->total_time;
+ device->gputime_in_state[device->pwrctrl.active_pwrlevel].busy +=
+ stats->busy_time;
+
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ if (device->current_process_priv != NULL) {
+ device->current_process_priv->gputime.total +=
+ stats->total_time;
+ device->current_process_priv->gputime.busy +=
+ stats->busy_time;
+ device->current_process_priv->gputime_in_state[
+ device->pwrctrl.active_pwrlevel].total += stats->total_time;
+ device->current_process_priv->gputime_in_state[
+ device->pwrctrl.active_pwrlevel].busy += stats->busy_time;
+ } else
+ printk(KERN_INFO "current_process_priv = NULL, skip gpu usage record.\n");
+#endif
+ } else {
+ stats->total_time = 0;
+ stats->busy_time = 0;
+ pwr->time = ktime_to_us(ktime_get());
+ }
+}
+
+void adreno_irqctrl(struct kgsl_device *device, int state)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ adreno_dev->gpudev->irq_control(adreno_dev, state);
+}
+
+static unsigned int adreno_gpuid(struct kgsl_device *device,
+ unsigned int *chipid)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+
+ if (chipid != NULL)
+ *chipid = adreno_dev->chip_id;
+
+
+ return (0x0003 << 16) | ((int) adreno_dev->gpurev);
+}
+
+static const struct kgsl_functable adreno_functable = {
+
+ .regread = adreno_regread,
+ .regwrite = adreno_regwrite,
+ .idle = adreno_idle,
+ .isidle = adreno_isidle,
+ .suspend_context = adreno_suspend_context,
+ .start = adreno_start,
+ .stop = adreno_stop,
+ .getproperty = adreno_getproperty,
+ .waittimestamp = adreno_waittimestamp,
+ .readtimestamp = adreno_readtimestamp,
+ .issueibcmds = adreno_ringbuffer_issueibcmds,
+ .ioctl = adreno_ioctl,
+ .setup_pt = adreno_setup_pt,
+ .cleanup_pt = adreno_cleanup_pt,
+ .power_stats = adreno_power_stats,
+ .irqctrl = adreno_irqctrl,
+ .gpuid = adreno_gpuid,
+ .snapshot = adreno_snapshot,
+ .irq_handler = adreno_irq_handler,
+
+ .setstate = adreno_setstate,
+ .drawctxt_create = adreno_drawctxt_create,
+ .drawctxt_destroy = adreno_drawctxt_destroy,
+ .setproperty = adreno_setproperty,
+};
+
+static struct platform_device_id adreno_id_table[] = {
+ { DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, adreno_id_table);
+
+static struct platform_driver adreno_platform_driver = {
+ .probe = adreno_probe,
+ .remove = __devexit_p(adreno_remove),
+ .suspend = kgsl_suspend_driver,
+ .resume = kgsl_resume_driver,
+ .id_table = adreno_id_table,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVICE_3D_NAME,
+ .pm = &kgsl_pm_ops,
+ }
+};
+
+static int __init kgsl_3d_init(void)
+{
+ return platform_driver_register(&adreno_platform_driver);
+}
+
+static void __exit kgsl_3d_exit(void)
+{
+ platform_driver_unregister(&adreno_platform_driver);
+}
+
+module_init(kgsl_3d_init);
+module_exit(kgsl_3d_exit);
+
+MODULE_DESCRIPTION("3D Graphics driver");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kgsl_3d");
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
new file mode 100644
index 0000000..3cfacd8
--- /dev/null
+++ b/drivers/gpu/msm/adreno.h
@@ -0,0 +1,312 @@
+/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_H
+#define __ADRENO_H
+
+#include "kgsl_device.h"
+#include "adreno_drawctxt.h"
+#include "adreno_ringbuffer.h"
+#include "kgsl_iommu.h"
+
+#define DEVICE_3D_NAME "kgsl-3d"
+#define DEVICE_3D0_NAME "kgsl-3d0"
+
+#define ADRENO_DEVICE(device) \
+ KGSL_CONTAINER_OF(device, struct adreno_device, dev)
+
+#define KGSL_CMD_FLAGS_NONE 0x00000000
+#define KGSL_CMD_FLAGS_PMODE 0x00000001
+#define KGSL_CMD_FLAGS_NO_TS_CMP 0x00000002
+
+#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
+#define KGSL_CMD_IDENTIFIER 0x2EEDFACE
+#define KGSL_START_OF_IB_IDENTIFIER 0x2EADEABE
+#define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD
+
+#ifdef CONFIG_MSM_SCM
+#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz)
+#elif defined CONFIG_MSM_SLEEP_STATS_DEVICE
+#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_idlestats)
+#else
+#define ADRENO_DEFAULT_PWRSCALE_POLICY NULL
+#endif
+
+#define ADRENO_ISTORE_START 0x5000
+
+#define ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW 50
+
+
+#define ADRENO_IDLE_TIMEOUT (20 * 1000)
+
+enum adreno_gpurev {
+ ADRENO_REV_UNKNOWN = 0,
+ ADRENO_REV_A200 = 200,
+ ADRENO_REV_A203 = 203,
+ ADRENO_REV_A205 = 205,
+ ADRENO_REV_A220 = 220,
+ ADRENO_REV_A225 = 225,
+ ADRENO_REV_A305 = 305,
+ ADRENO_REV_A320 = 320,
+};
+
+struct adreno_gpudev;
+
+struct adreno_device {
+ struct kgsl_device dev;
+ unsigned int chip_id;
+ enum adreno_gpurev gpurev;
+ unsigned long gmem_base;
+ unsigned int gmem_size;
+ struct adreno_context *drawctxt_active;
+ const char *pfp_fwfile;
+ unsigned int *pfp_fw;
+ size_t pfp_fw_size;
+ unsigned int pfp_fw_version;
+ const char *pm4_fwfile;
+ unsigned int *pm4_fw;
+ size_t pm4_fw_size;
+ unsigned int pm4_fw_version;
+ struct adreno_ringbuffer ringbuffer;
+ unsigned int mharb;
+ struct adreno_gpudev *gpudev;
+ unsigned int wait_timeout;
+ unsigned int istore_size;
+ unsigned int pix_shader_start;
+ unsigned int instruction_size;
+ unsigned int ib_check_level;
+ unsigned int fast_hang_detect;
+ unsigned int gpulist_index;
+};
+
+struct adreno_gpudev {
+ unsigned int reg_rbbm_status;
+ unsigned int reg_cp_pfp_ucode_data;
+ unsigned int reg_cp_pfp_ucode_addr;
+
+ int ctx_switches_since_last_draw;
+
+
+ int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
+ void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
+ void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
+ void (*ctxt_draw_workaround)(struct adreno_device *,
+ struct adreno_context *);
+ irqreturn_t (*irq_handler)(struct adreno_device *);
+ void (*irq_control)(struct adreno_device *, int);
+ void * (*snapshot)(struct adreno_device *, void *, int *, int);
+ void (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
+ void (*start)(struct adreno_device *);
+ unsigned int (*busy_cycles)(struct adreno_device *);
+};
+
+struct adreno_recovery_data {
+ unsigned int ib1;
+ unsigned int context_id;
+ unsigned int global_eop;
+ unsigned int *rb_buffer;
+ unsigned int rb_size;
+ unsigned int *bad_rb_buffer;
+ unsigned int bad_rb_size;
+ unsigned int last_valid_ctx_id;
+};
+
+extern struct adreno_gpudev adreno_a2xx_gpudev;
+extern struct adreno_gpudev adreno_a3xx_gpudev;
+
+extern const unsigned int a200_registers[];
+extern const unsigned int a220_registers[];
+extern const unsigned int a225_registers[];
+extern const unsigned int a200_registers_count;
+extern const unsigned int a220_registers_count;
+extern const unsigned int a225_registers_count;
+
+extern const unsigned int a3xx_registers[];
+extern const unsigned int a3xx_registers_count;
+
+extern const unsigned int a3xx_hlsq_registers[];
+extern const unsigned int a3xx_hlsq_registers_count;
+
+extern unsigned int hang_detect_regs[];
+extern const unsigned int hang_detect_regs_count;
+
+
+int adreno_idle(struct kgsl_device *device);
+void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
+ unsigned int *value);
+void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
+ unsigned int value);
+
+struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
+ unsigned int pt_base,
+ unsigned int gpuaddr,
+ unsigned int size);
+
+uint8_t *adreno_convertaddr(struct kgsl_device *device,
+ unsigned int pt_base, unsigned int gpuaddr, unsigned int size);
+
+struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
+ unsigned int pt_base, unsigned int gpuaddr, unsigned int size);
+
+void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
+ int hang);
+
+int adreno_dump_and_recover(struct kgsl_device *device);
+
+unsigned int adreno_hang_detect(struct kgsl_device *device,
+ unsigned int *prev_reg_val);
+
+static inline int adreno_is_a200(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A200);
+}
+
+static inline int adreno_is_a203(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A203);
+}
+
+static inline int adreno_is_a205(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A205);
+}
+
+static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev <= 209);
+}
+
+static inline int adreno_is_a220(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A220);
+}
+
+static inline int adreno_is_a225(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A220 ||
+ adreno_dev->gpurev == ADRENO_REV_A225);
+}
+
+static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev <= 299);
+}
+
+static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev >= 300);
+}
+
+static inline int adreno_is_a305(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A305);
+}
+
+static inline int adreno_is_a320(struct adreno_device *adreno_dev)
+{
+ return (adreno_dev->gpurev == ADRENO_REV_A320);
+}
+
+static inline int adreno_rb_ctxtswitch(unsigned int *cmd)
+{
+ return (cmd[0] == cp_nop_packet(1) &&
+ cmd[1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER);
+}
+
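+/*
+ * Encode the instruction store size into bits 31:29 (log2 of the size
+ * minus 5); the A225 istore is effectively split three ways, hence the
+ * divide by 3.
+ */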
+static inline int adreno_encode_istore_size(struct adreno_device *adreno_dev)
+{
+ unsigned int size;
+ if (adreno_is_a225(adreno_dev))
+ size = adreno_dev->istore_size/3;
+ else
+ size = adreno_dev->istore_size;
+
+ return (ilog2(size) - 5) << 29;
+}
+
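+/* Submit a two-dword NOP IB and wait for idle; returns dwords emitted */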
+static inline int __adreno_add_idle_indirect_cmds(unsigned int *cmds,
+ unsigned int nop_gpuaddr)
+{
+ *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+ *cmds++ = nop_gpuaddr;
+ *cmds++ = 2;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ return 5;
+}
+
+static inline int adreno_add_change_mh_phys_limit_cmds(unsigned int *cmds,
+ unsigned int new_phys_limit,
+ unsigned int nop_gpuaddr)
+{
+ unsigned int *start = cmds;
+
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ *cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
+ *cmds++ = new_phys_limit;
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ return cmds - start;
+}
+
+static inline int adreno_add_bank_change_cmds(unsigned int *cmds,
+ int cur_ctx_bank,
+ unsigned int nop_gpuaddr)
+{
+ unsigned int *start = cmds;
+
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ *cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
+ *cmds++ = (cur_ctx_bank ? 0 : 0x20);
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ return cmds - start;
+}
+
+static inline int adreno_add_read_cmds(struct kgsl_device *device,
+ unsigned int *cmds, unsigned int addr,
+ unsigned int val, unsigned int nop_gpuaddr)
+{
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
+
+ *cmds++ = 0x13;
+ *cmds++ = addr;
+ *cmds++ = val;
+ *cmds++ = 0xFFFFFFFF;
+ *cmds++ = 0xFFFFFFFF;
+ cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
+ return cmds - start;
+}
+
+static inline int adreno_add_idle_cmds(struct adreno_device *adreno_dev,
+ unsigned int *cmds)
+{
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ if ((adreno_dev->gpurev == ADRENO_REV_A305) ||
+ (adreno_dev->gpurev == ADRENO_REV_A320)) {
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
+ *cmds++ = 0x00000000;
+ }
+
+ return cmds - start;
+}
+
+#endif
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
new file mode 100644
index 0000000..d224a21
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -0,0 +1,1770 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "adreno.h"
+#include "adreno_a2xx_trace.h"
+
+
+const unsigned int a200_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+ 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+ 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+ 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+ 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+ 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+ 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+ 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+ 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+ 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+ 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+ 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+};
+
+const unsigned int a220_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+ 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+ 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+ 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+ 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+ 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+ 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+ 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+ 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+ 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+ 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+ 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+ 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+ 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+ 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+ 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+ 0x4900, 0x4900, 0x4908, 0x4908,
+};
+
+const unsigned int a225_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+ 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+ 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+ 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+ 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+ 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+ 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+ 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+ 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+ 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+ 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+ 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+ 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+ 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+ 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+ 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+ 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+ 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+ 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+ 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+ 0x4908, 0x4908,
+};
+
+const unsigned int a200_registers_count = ARRAY_SIZE(a200_registers) / 2;
+const unsigned int a220_registers_count = ARRAY_SIZE(a220_registers) / 2;
+const unsigned int a225_registers_count = ARRAY_SIZE(a225_registers) / 2;
+
+
+
+#define ALU_CONSTANTS 2048
+#define NUM_REGISTERS 1024
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+#define CMD_BUFFER_LEN 9216
+#else
+#define CMD_BUFFER_LEN 3072
+#endif
+#define TEX_CONSTANTS (32*6)
+#define BOOL_CONSTANTS 8
+#define LOOP_CONSTANTS 56
+
+#define LCC_SHADOW_SIZE 0x2000
+
+#define ALU_SHADOW_SIZE LCC_SHADOW_SIZE
+#define REG_SHADOW_SIZE 0x1000
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+#define CMD_BUFFER_SIZE 0x9000
+#else
+#define CMD_BUFFER_SIZE 0x3000
+#endif
+#define TEX_SHADOW_SIZE (TEX_CONSTANTS*4)
+
+#define REG_OFFSET LCC_SHADOW_SIZE
+#define CMD_OFFSET (REG_OFFSET + REG_SHADOW_SIZE)
+#define TEX_OFFSET (CMD_OFFSET + CMD_BUFFER_SIZE)
+#define SHADER_OFFSET ((TEX_OFFSET + TEX_SHADOW_SIZE + 32) & ~31)
+
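+/*
+ * Per-context shadow layout: ALU constants, registers, command buffer,
+ * texture constants, then three shader shadows (shared/vertex/pixel).
+ */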
+static inline int _shader_shadow_size(struct adreno_device *adreno_dev)
+{
+ return adreno_dev->istore_size *
+ (adreno_dev->instruction_size * sizeof(unsigned int));
+}
+
+static inline int _context_size(struct adreno_device *adreno_dev)
+{
+ return SHADER_OFFSET + 3*_shader_shadow_size(adreno_dev);
+}
+
+
+static struct tmp_ctx {
+ unsigned int *start;
+ unsigned int *cmd;
+
+
+ uint32_t bool_shadow;
+ uint32_t loop_shadow;
+
+ uint32_t shader_shared;
+ uint32_t shader_vertex;
+ uint32_t shader_pixel;
+
+ uint32_t reg_values[33];
+ uint32_t chicken_restore;
+
+ uint32_t gmem_base;
+
+} tmp_ctx;
+
+
+#define GMEM2SYS_VTX_PGM_LEN 0x12
+
+static unsigned int gmem2sys_vtx_pgm[GMEM2SYS_VTX_PGM_LEN] = {
+ 0x00011003, 0x00001000, 0xc2000000,
+ 0x00001004, 0x00001000, 0xc4000000,
+ 0x00001005, 0x00002000, 0x00000000,
+ 0x1cb81000, 0x00398a88, 0x00000003,
+ 0x140f803e, 0x00000000, 0xe2010100,
+ 0x14000000, 0x00000000, 0xe2000000
+};
+
+
+#define GMEM2SYS_FRAG_PGM_LEN 0x0c
+
+static unsigned int gmem2sys_frag_pgm[GMEM2SYS_FRAG_PGM_LEN] = {
+ 0x00000000, 0x1002c400, 0x10000000,
+ 0x00001003, 0x00002000, 0x00000000,
+ 0x140f8000, 0x00000000, 0x22000000,
+ 0x14000000, 0x00000000, 0xe2000000
+};
+
+
+#define SYS2GMEM_VTX_PGM_LEN 0x18
+
+static unsigned int sys2gmem_vtx_pgm[SYS2GMEM_VTX_PGM_LEN] = {
+ 0x00052003, 0x00001000, 0xc2000000, 0x00001005,
+ 0x00001000, 0xc4000000, 0x00001006, 0x10071000,
+ 0x20000000, 0x18981000, 0x0039ba88, 0x00000003,
+ 0x12982000, 0x40257b08, 0x00000002, 0x140f803e,
+ 0x00000000, 0xe2010100, 0x140f8000, 0x00000000,
+ 0xe2020200, 0x14000000, 0x00000000, 0xe2000000
+};
+
+
+#define SYS2GMEM_FRAG_PGM_LEN 0x0f
+
+static unsigned int sys2gmem_frag_pgm[SYS2GMEM_FRAG_PGM_LEN] = {
+ 0x00011002, 0x00001000, 0xc4000000, 0x00001003,
+ 0x10041000, 0x20000000, 0x10000001, 0x1ffff688,
+ 0x00000002, 0x140f8000, 0x00000000, 0xe2000000,
+ 0x14000000, 0x00000000, 0xe2000000
+};
+
+#define SYS2GMEM_TEX_CONST_LEN 6
+
+static unsigned int sys2gmem_tex_const[SYS2GMEM_TEX_CONST_LEN] = {
+ 0x00000002,
+
+ 0x00000800,
+
+
+ 0,
+
+ 0 << 1 | 1 << 4 | 2 << 7 | 3 << 10 | 2 << 23,
+
+ 0,
+
+ 1 << 9
+};
+
+#define NUM_COLOR_FORMATS 13
+
+static enum SURFACEFORMAT surface_format_table[NUM_COLOR_FORMATS] = {
+ FMT_4_4_4_4,
+ FMT_1_5_5_5,
+ FMT_5_6_5,
+ FMT_8,
+ FMT_8_8,
+ FMT_8_8_8_8,
+ FMT_8_8_8_8,
+ FMT_16_FLOAT,
+ FMT_16_16_FLOAT,
+ FMT_16_16_16_16_FLOAT,
+ FMT_32_FLOAT,
+ FMT_32_32_FLOAT,
+ FMT_32_32_32_32_FLOAT,
+};
+
+static unsigned int format2bytesperpixel[NUM_COLOR_FORMATS] = {
+ 2,
+ 2,
+ 2,
+ 1,
+ 2,
+ 4,
+ 4,
+ 2,
+ 4,
+ 8,
+ 4,
+ 8,
+ 16,
+};
+
+#define SHADER_CONST_ADDR (11 * 6 + 3)
+
+
+static unsigned int *program_shader(unsigned int *cmds, int vtxfrag,
+ unsigned int *shader_pgm, int dwords)
+{
+
+ *cmds++ = cp_type3_packet(CP_IM_LOAD_IMMEDIATE, 2 + dwords);
+
+ *cmds++ = vtxfrag;
+
+ *cmds++ = ((0 << 16) | dwords);
+
+ memcpy(cmds, shader_pgm, dwords << 2);
+ cmds += dwords;
+
+ return cmds;
+}
+
+static unsigned int *reg_to_mem(unsigned int *cmds, uint32_t dst,
+ uint32_t src, int dwords)
+{
+ while (dwords-- > 0) {
+ *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmds++ = src++;
+ *cmds++ = dst;
+ dst += 4;
+ }
+
+ return cmds;
+}
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+
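+/*
+ * Emit CP_REG_TO_MEM packets copying each register in [start, end] to
+ * its slot in the register shadow (registers are indexed from 0x2000).
+ */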
+static void build_reg_to_mem_range(unsigned int start, unsigned int end,
+ unsigned int **cmd,
+ struct adreno_context *drawctxt)
+{
+ unsigned int i = start;
+
+ for (i = start; i <= end; i++) {
+ *(*cmd)++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *(*cmd)++ = i;
+ *(*cmd)++ =
+ ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) +
+ (i - 0x2000) * 4;
+ }
+}
+
+#endif
+
+static unsigned int *build_chicken_restore_cmds(
+ struct adreno_context *drawctxt)
+{
+ unsigned int *start = tmp_ctx.cmd;
+ unsigned int *cmds = start;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+
+ *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
+ tmp_ctx.chicken_restore = virt2gpu(cmds, &drawctxt->gpustate);
+ *cmds++ = 0x00000000;
+
+
+ create_ib1(drawctxt, drawctxt->chicken_restore, start, cmds);
+
+ return cmds;
+}
+
+
+static const unsigned int register_ranges_a20x[] = {
+ REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
+ REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+ REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+ REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+ REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+ REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+ REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+ REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+ REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+ REG_VGT_MAX_VTX_INDX, REG_RB_FOG_COLOR,
+ REG_RB_DEPTHCONTROL, REG_RB_MODECONTROL,
+ REG_PA_SU_POINT_SIZE, REG_PA_SC_LINE_STIPPLE,
+ REG_PA_SC_VIZ_QUERY, REG_PA_SC_VIZ_QUERY,
+ REG_VGT_VERTEX_REUSE_BLOCK_CNTL, REG_RB_DEPTH_CLEAR
+};
+
+static const unsigned int register_ranges_a220[] = {
+ REG_RB_SURFACE_INFO, REG_RB_DEPTH_INFO,
+ REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+ REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+ REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+ REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+ REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+ REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+ REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+ REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+ REG_A220_PC_MAX_VTX_INDX, REG_A220_PC_INDX_OFFSET,
+ REG_RB_COLOR_MASK, REG_RB_FOG_COLOR,
+ REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL,
+ REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL,
+ REG_RB_MODECONTROL, REG_RB_SAMPLE_POS,
+ REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL,
+ REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
+ REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
+ REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR
+};
+
+static const unsigned int register_ranges_a225[] = {
+ REG_RB_SURFACE_INFO, REG_A225_RB_COLOR_INFO3,
+ REG_COHER_DEST_BASE_0, REG_PA_SC_SCREEN_SCISSOR_BR,
+ REG_PA_SC_WINDOW_OFFSET, REG_PA_SC_WINDOW_SCISSOR_BR,
+ REG_RB_STENCILREFMASK_BF, REG_PA_CL_VPORT_ZOFFSET,
+ REG_SQ_PROGRAM_CNTL, REG_SQ_WRAPPING_1,
+ REG_PA_SC_LINE_CNTL, REG_SQ_PS_CONST,
+ REG_PA_SC_AA_MASK, REG_PA_SC_AA_MASK,
+ REG_RB_SAMPLE_COUNT_CTL, REG_RB_COLOR_DEST_MASK,
+ REG_PA_SU_POLY_OFFSET_FRONT_SCALE, REG_PA_SU_POLY_OFFSET_BACK_OFFSET,
+ REG_A220_PC_MAX_VTX_INDX, REG_A225_PC_MULTI_PRIM_IB_RESET_INDX,
+ REG_RB_COLOR_MASK, REG_RB_FOG_COLOR,
+ REG_RB_DEPTHCONTROL, REG_RB_COLORCONTROL,
+ REG_PA_CL_CLIP_CNTL, REG_PA_CL_VTE_CNTL,
+ REG_RB_MODECONTROL, REG_RB_SAMPLE_POS,
+ REG_PA_SU_POINT_SIZE, REG_PA_SU_LINE_CNTL,
+ REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
+ REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL,
+ REG_RB_COPY_CONTROL, REG_RB_DEPTH_CLEAR,
+ REG_A225_GRAS_UCP0X, REG_A225_GRAS_UCP5W,
+ REG_A225_GRAS_UCP_ENABLED, REG_A225_GRAS_UCP_ENABLED
+};
+
+
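+/*
+ * Build the register-save IB: capture the context register ranges (with
+ * explicit CP_REG_TO_MEM writes when CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ * is set, otherwise by letting the CP synchronize its register shadow
+ * through CP_LOAD_CONSTANT_CONTEXT), then the ALU and texture constants,
+ * a few individual registers, and the boolean and loop constants.
+ */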
+static void build_regsave_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *start = tmp_ctx.cmd;
+ unsigned int *cmd = start;
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ *cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
+ *cmd++ = 0;
+
+ {
+ unsigned int i = 0;
+ unsigned int reg_array_size = 0;
+ const unsigned int *ptr_register_ranges;
+
+
+ if (adreno_is_a220(adreno_dev)) {
+ ptr_register_ranges = register_ranges_a220;
+ reg_array_size = ARRAY_SIZE(register_ranges_a220);
+ } else if (adreno_is_a225(adreno_dev)) {
+ ptr_register_ranges = register_ranges_a225;
+ reg_array_size = ARRAY_SIZE(register_ranges_a225);
+ } else {
+ ptr_register_ranges = register_ranges_a20x;
+ reg_array_size = ARRAY_SIZE(register_ranges_a20x);
+ }
+
+
+
+ for (i = 0; i < (reg_array_size/2) ; i++) {
+ build_reg_to_mem_range(ptr_register_ranges[i*2],
+ ptr_register_ranges[i*2+1],
+ &cmd, drawctxt);
+ }
+ }
+
+
+ cmd =
+ reg_to_mem(cmd, (drawctxt->gpustate.gpuaddr) & 0xFFFFE000,
+ REG_SQ_CONSTANT_0, ALU_CONSTANTS);
+
+
+ cmd =
+ reg_to_mem(cmd,
+ (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000,
+ REG_SQ_FETCH_0, TEX_CONSTANTS);
+#else
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+ *cmd++ = 4 << 16;
+ *cmd++ = 0x0;
+
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+ *cmd++ = 0 << 16;
+ *cmd++ = 0x0;
+
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+ *cmd++ = 1 << 16;
+ *cmd++ = 0x0;
+#endif
+
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_SQ_GPR_MANAGEMENT;
+ *cmd++ = tmp_ctx.reg_values[0];
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_TP0_CHICKEN;
+ *cmd++ = tmp_ctx.reg_values[1];
+
+ if (adreno_is_a22x(adreno_dev)) {
+ unsigned int i;
+ unsigned int j = 2;
+ for (i = REG_A220_VSC_BIN_SIZE; i <=
+ REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = i;
+ *cmd++ = tmp_ctx.reg_values[j];
+ j++;
+ }
+ }
+
+
+ cmd = reg_to_mem(cmd, tmp_ctx.bool_shadow, REG_SQ_CF_BOOLEANS,
+ BOOL_CONSTANTS);
+
+
+ cmd = reg_to_mem(cmd, tmp_ctx.loop_shadow,
+ REG_SQ_CF_LOOP, LOOP_CONSTANTS);
+
+
+ create_ib1(drawctxt, drawctxt->reg_save, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
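+/*
+ * Build the IB that saves GMEM into the per-context shadow buffer: program
+ * the gmem2sys vertex/pixel shaders, point REG_RB_COPY_CONTROL at
+ * shadow->gmemshadow, and draw a full-screen quad with the RB mode set
+ * below, which appears to select the copy/resolve path into system memory.
+ */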
+static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = shadow->gmem_save_commands;
+ unsigned int *start = cmds;
+
+ unsigned int bytesperpixel = format2bytesperpixel[shadow->format];
+ unsigned int addr = shadow->gmemshadow.gpuaddr;
+ unsigned int offset = (addr - (addr & 0xfffff000)) / bytesperpixel;
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+
+ *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmds++ = REG_TP0_CHICKEN;
+
+ *cmds++ = tmp_ctx.chicken_restore;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+ }
+
+
+ *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
+ *cmds++ = 0x00000000;
+
+
+ *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+ *cmds++ = 0x00000000;
+
+
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
+ *cmds++ = (0x1 << 16) | SHADER_CONST_ADDR;
+ *cmds++ = 0;
+
+ *cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
+
+ *cmds++ = 0x00000030;
+
+
+ *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1);
+ *cmds++ = 0x1;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
+ *cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX);
+ *cmds++ = 0x00ffffff;
+ *cmds++ = 0x0;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SC_AA_MASK);
+ *cmds++ = 0x0000ffff;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLORCONTROL);
+ *cmds++ = 0x00000c20;
+
+
+ *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+ *cmds++ = adreno_dev->pix_shader_start;
+
+
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x00003F00;
+
+ *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
+ *cmds++ = adreno_encode_istore_size(adreno_dev)
+ | adreno_dev->pix_shader_start;
+
+
+ cmds = program_shader(cmds, 0, gmem2sys_vtx_pgm, GMEM2SYS_VTX_PGM_LEN);
+
+
+ cmds =
+ program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN);
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL);
+ if (adreno_is_a22x(adreno_dev))
+ *cmds++ = 0x10018001;
+ else
+ *cmds++ = 0x10010001;
+ *cmds++ = 0x00000008;
+
+
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_CL_VTE_CNTL);
+
+ *cmds++ = 0x00000b00;
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_RB_SURFACE_INFO);
+ *cmds++ = shadow->gmem_pitch;
+
+
+ BUG_ON(tmp_ctx.gmem_base & 0xFFF);
+ *cmds++ =
+ (shadow->
+ format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base;
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_DEPTHCONTROL);
+ if (adreno_is_a22x(adreno_dev))
+ *cmds++ = 0x08;
+ else
+ *cmds++ = 0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL);
+ *cmds++ = 0x00080240;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
+ *cmds++ = (0 << 16) | 0;
+ *cmds++ = (0x1fff << 16) | (0x1fff);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
+ *cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
+ *cmds++ = (0x1fff << 16) | (0x1fff);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE);
+ *cmds++ = 0xbf800000;
+ *cmds++ = 0x0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLOR_MASK);
+ *cmds++ = 0x0000000f;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK);
+ *cmds++ = 0xffffffff;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_SQ_WRAPPING_0);
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
+ *cmds++ = CP_REG(REG_RB_COPY_CONTROL);
+ *cmds++ = 0;
+ *cmds++ = addr & 0xfffff000;
+ *cmds++ = shadow->pitch >> 5;
+
+ *cmds++ = 0x0003c008 |
+ (shadow->format << RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT);
+
+ BUG_ON(offset & 0xfffff000);
+ *cmds++ = offset;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_MODECONTROL);
+ *cmds++ = 0x6;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL);
+ *cmds++ = 0x00010000;
+
+ if (adreno_is_a22x(adreno_dev)) {
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
+ *cmds++ = 0x0000000;
+
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
+ *cmds++ = 0;
+
+ *cmds++ = 0x00004088;
+ *cmds++ = 3;
+ } else {
+
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2);
+ *cmds++ = 0;
+
+ *cmds++ = 0x00030088;
+ }
+
+
+ create_ib1(drawctxt, shadow->gmem_save, start, cmds);
+
+ return cmds;
+}
+
+
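+/*
+ * Build the inverse IB that restores GMEM from the shadow buffer: draw a
+ * textured full-screen quad whose texture constants (patched below with the
+ * shadow address, pitch and format) sample the shadow back into GMEM.
+ */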
+static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = shadow->gmem_restore_commands;
+ unsigned int *start = cmds;
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+
+ *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmds++ = REG_TP0_CHICKEN;
+ *cmds++ = tmp_ctx.chicken_restore;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0;
+ }
+
+
+ *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
+ *cmds++ = 0x00000000;
+
+
+ *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+ *cmds++ = 0x00000000;
+
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
+
+ *cmds++ = (0x1 << 16) | (9 * 6);
+
+ *cmds++ = shadow->quad_vertices.gpuaddr | 0x3;
+
+ *cmds++ = 0x00000030;
+
+ *cmds++ = shadow->quad_texcoords.gpuaddr | 0x3;
+
+ *cmds++ = 0x00000020;
+ *cmds++ = 0;
+ *cmds++ = 0;
+
+
+ *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1);
+ *cmds++ = 0x1;
+
+ cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN);
+
+
+ *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+ *cmds++ = adreno_dev->pix_shader_start;
+
+
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmds++ = 0x00000300;
+
+ *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
+ *cmds++ = adreno_encode_istore_size(adreno_dev)
+ | adreno_dev->pix_shader_start;
+
+
+ cmds =
+ program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN);
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL);
+ *cmds++ = 0x10030002;
+ *cmds++ = 0x00000008;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SC_AA_MASK);
+ *cmds++ = 0x0000ffff;
+
+ if (!adreno_is_a22x(adreno_dev)) {
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SC_VIZ_QUERY);
+ *cmds++ = 0x0;
+ }
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLORCONTROL);
+ *cmds++ = 0x00000c20;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
+ *cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX);
+ *cmds++ = 0x00ffffff;
+ *cmds++ = 0x0;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_VGT_VERTEX_REUSE_BLOCK_CNTL);
+ *cmds++ = 0x00000002;
+ *cmds++ = 0x00000002;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_SQ_INTERPOLATOR_CNTL);
+ *cmds++ = 0xffffffff;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SC_AA_CONFIG);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL);
+ *cmds++ = 0x00080240;
+
+
+ *cmds++ =
+ cp_type3_packet(CP_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
+ *cmds++ = (0x1 << 16) | (0 * 6);
+ memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN << 2);
+ cmds[0] |= (shadow->pitch >> 5) << 22;
+ cmds[1] |=
+ shadow->gmemshadow.gpuaddr | surface_format_table[shadow->format];
+ cmds[2] |= (shadow->width - 1) | (shadow->height - 1) << 13;
+ cmds += SYS2GMEM_TEX_CONST_LEN;
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_RB_SURFACE_INFO);
+ *cmds++ = shadow->gmem_pitch;
+
+ *cmds++ =
+ (shadow->
+ format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base;
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_DEPTHCONTROL);
+
+ if (adreno_is_a22x(adreno_dev))
+ *cmds++ = 8;
+ else
+ *cmds++ = 0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
+ *cmds++ = (0 << 16) | 0;
+ *cmds++ = ((0x1fff) << 16) | 0x1fff;
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
+ *cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
+ *cmds++ = ((0x1fff) << 16) | 0x1fff;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_CL_VTE_CNTL);
+
+ *cmds++ = 0x00000b00;
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE);
+ *cmds++ = 0xbf800000;
+ *cmds++ = 0x0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLOR_MASK);
+ *cmds++ = 0x0000000f;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK);
+ *cmds++ = 0xffffffff;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(REG_SQ_WRAPPING_0);
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_RB_MODECONTROL);
+
+ *cmds++ = 0x4;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL);
+ *cmds++ = 0x00010000;
+
+ if (adreno_is_a22x(adreno_dev)) {
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(REG_A220_RB_LRZ_VSC_CONTROL);
+ *cmds++ = 0x0000000;
+
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
+ *cmds++ = 0;
+
+ *cmds++ = 0x00004088;
+ *cmds++ = 3;
+ } else {
+
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2);
+ *cmds++ = 0;
+
+ *cmds++ = 0x00030088;
+ }
+
+
+ create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
+
+ return cmds;
+}
+
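+/*
+ * Build the register-restore IB. The restore is one large
+ * CP_LOAD_CONSTANT_CONTEXT whose header cannot be emitted up front: a dword
+ * is reserved below and patched via start[2] once the register-range list
+ * has been laid down and its length is known, with mode flags OR'ed into
+ * start[4] afterwards.
+ */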
+static void build_regrestore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *start = tmp_ctx.cmd;
+ unsigned int *cmd = start;
+
+ unsigned int i = 0;
+ unsigned int reg_array_size = 0;
+ const unsigned int *ptr_register_ranges;
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+
+
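+ /*
+ * Reserve a dword for the CP_LOAD_CONSTANT_CONTEXT header; it is
+ * filled in through start[2] once the list length is known.
+ */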
+ cmd++;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
+#else
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+#endif
+
+
+ if (adreno_is_a220(adreno_dev)) {
+ ptr_register_ranges = register_ranges_a220;
+ reg_array_size = ARRAY_SIZE(register_ranges_a220);
+ } else if (adreno_is_a225(adreno_dev)) {
+ ptr_register_ranges = register_ranges_a225;
+ reg_array_size = ARRAY_SIZE(register_ranges_a225);
+ } else {
+ ptr_register_ranges = register_ranges_a20x;
+ reg_array_size = ARRAY_SIZE(register_ranges_a20x);
+ }
+
+
+ for (i = 0; i < (reg_array_size/2); i++) {
+ cmd = reg_range(cmd, ptr_register_ranges[i*2],
+ ptr_register_ranges[i*2+1]);
+ }
+
+ start[2] =
+ cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ start[4] |= (0 << 24) | (4 << 16);
+#else
+ start[4] |= (1 << 24) | (4 << 16);
+#endif
+
+
+ *cmd++ = cp_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
+ tmp_ctx.reg_values[0] = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0x00040400;
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+ *cmd++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
+ tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0x00000000;
+
+ if (adreno_is_a22x(adreno_dev)) {
+ unsigned int i;
+ unsigned int j = 2;
+ for (i = REG_A220_VSC_BIN_SIZE; i <=
+ REG_A220_VSC_PIPE_DATA_LENGTH_7; i++) {
+ *cmd++ = cp_type0_packet(i, 1);
+ tmp_ctx.reg_values[j] = virt2gpu(cmd,
+ &drawctxt->gpustate);
+ *cmd++ = 0x00000000;
+ j++;
+ }
+ }
+
+
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ *cmd++ = (0 << 24) | (0 << 16) | 0;
+#else
+ *cmd++ = (1 << 24) | (0 << 16) | 0;
+#endif
+ *cmd++ = ALU_CONSTANTS;
+
+
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+
+ *cmd++ = (0 << 24) | (1 << 16) | 0;
+#else
+ *cmd++ = (1 << 24) | (1 << 16) | 0;
+#endif
+ *cmd++ = TEX_CONSTANTS;
+
+
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + BOOL_CONSTANTS);
+ *cmd++ = (2 << 16) | 0;
+
+ tmp_ctx.bool_shadow = virt2gpu(cmd, &drawctxt->gpustate);
+ cmd += BOOL_CONSTANTS;
+
+
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + LOOP_CONSTANTS);
+ *cmd++ = (3 << 16) | 0;
+
+ tmp_ctx.loop_shadow = virt2gpu(cmd, &drawctxt->gpustate);
+ cmd += LOOP_CONSTANTS;
+
+
+ create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
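+/*
+ * Build the three shader IBs: shader_restore reloads the vertex, pixel and
+ * shared instruction partitions with CP_IM_LOAD; shader_fixup patches the
+ * SET_SHADER_BASES payload through a scratch-register RMW (a separate IB,
+ * since the PM4 code is self-modifying); shader_save stores the partitions
+ * back with CP_IM_STORE. The start/size dwords left blank in the restore IB
+ * are filled in at save time through the virt2gpu() addresses captured here.
+ */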
+static void
+build_shader_save_restore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *save, *restore, *fixup;
+ unsigned int *startSizeVtx, *startSizePix, *startSizeShared;
+ unsigned int *partition1;
+ unsigned int *shaderBases, *partition2;
+
+
+ tmp_ctx.shader_vertex = drawctxt->gpustate.gpuaddr + SHADER_OFFSET;
+ tmp_ctx.shader_pixel = tmp_ctx.shader_vertex
+ + _shader_shadow_size(adreno_dev);
+ tmp_ctx.shader_shared = tmp_ctx.shader_pixel
+ + _shader_shadow_size(adreno_dev);
+
+
+
+ restore = cmd;
+
+
+ *cmd++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
+ *cmd++ = 0x00000300;
+
+
+ *cmd++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
+ shaderBases = cmd++;
+
+
+ *cmd++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+ partition1 = cmd++;
+
+
+ *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
+ *cmd++ = tmp_ctx.shader_vertex + 0x0;
+ startSizeVtx = cmd++;
+
+
+ *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
+ *cmd++ = tmp_ctx.shader_pixel + 0x1;
+ startSizePix = cmd++;
+
+
+ *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
+ *cmd++ = tmp_ctx.shader_shared + 0x2;
+ startSizeShared = cmd++;
+
+
+ create_ib1(drawctxt, drawctxt->shader_restore, restore, cmd);
+
+ /*
+ * fixup SET_SHADER_BASES data
+ *
+ * since self-modifying PM4 code is being used here, a separate
+ * command buffer is used for this fixup operation, to ensure the
+ * commands are not read by the PM4 engine before the data fields
+ * have been written.
+ */
+
+ fixup = cmd;
+
+
+ *cmd++ = cp_type0_packet(REG_SCRATCH_REG2, 1);
+ partition2 = cmd++;
+
+
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = REG_SCRATCH_REG2;
+
+ *cmd++ = 0x0FFF0FFF;
+
+ *cmd++ = adreno_encode_istore_size(adreno_dev);
+
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_SCRATCH_REG2;
+
+ *cmd++ = virt2gpu(shaderBases, &drawctxt->gpustate);
+
+
+ create_ib1(drawctxt, drawctxt->shader_fixup, fixup, cmd);
+
+
+
+ save = cmd;
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_SQ_INST_STORE_MANAGMENT;
+
+ *cmd++ = virt2gpu(partition1, &drawctxt->gpustate);
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = REG_SQ_INST_STORE_MANAGMENT;
+
+ *cmd++ = virt2gpu(partition2, &drawctxt->gpustate);
+
+
+
+ *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
+ *cmd++ = tmp_ctx.shader_vertex + 0x0;
+
+ *cmd++ = virt2gpu(startSizeVtx, &drawctxt->gpustate);
+
+
+ *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
+ *cmd++ = tmp_ctx.shader_pixel + 0x1;
+
+ *cmd++ = virt2gpu(startSizePix, &drawctxt->gpustate);
+
+
+
+ *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
+ *cmd++ = tmp_ctx.shader_shared + 0x2;
+
+ *cmd++ = virt2gpu(startSizeShared, &drawctxt->gpustate);
+
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+
+ create_ib1(drawctxt, drawctxt->shader_save, save, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+static int a2xx_create_gpustate_shadow(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
+
+
+ build_regrestore_cmds(adreno_dev, drawctxt);
+ build_regsave_cmds(adreno_dev, drawctxt);
+
+ build_shader_save_restore_cmds(adreno_dev, drawctxt);
+
+ return 0;
+}
+
+static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ int result;
+
+ calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
+ tmp_ctx.gmem_base = adreno_dev->gmem_base;
+
+ result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
+ drawctxt->pagetable, drawctxt->context_gmem_shadow.size);
+
+ if (result)
+ return result;
+
+
+ drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;
+
+
+ kgsl_sharedmem_set(&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
+ drawctxt->context_gmem_shadow.size);
+
+
+ build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
+ &tmp_ctx.cmd);
+
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE))
+ tmp_ctx.cmd = build_chicken_restore_cmds(drawctxt);
+
+
+ drawctxt->context_gmem_shadow.gmem_save_commands = tmp_ctx.cmd;
+ tmp_ctx.cmd =
+ build_gmem2sys_cmds(adreno_dev, drawctxt,
+ &drawctxt->context_gmem_shadow);
+ drawctxt->context_gmem_shadow.gmem_restore_commands = tmp_ctx.cmd;
+ tmp_ctx.cmd =
+ build_sys2gmem_cmds(adreno_dev, drawctxt,
+ &drawctxt->context_gmem_shadow);
+
+ kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
+ KGSL_CACHE_OP_FLUSH);
+
+ kgsl_cffdump_syncmem(NULL,
+ &drawctxt->context_gmem_shadow.gmemshadow,
+ drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
+ drawctxt->context_gmem_shadow.gmemshadow.size, false);
+
+ return 0;
+}
+
+static int a2xx_drawctxt_create(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ int ret;
+
+
+ ret = kgsl_allocate(&drawctxt->gpustate,
+ drawctxt->pagetable, _context_size(adreno_dev));
+
+ if (ret)
+ return ret;
+
+ kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0,
+ _context_size(adreno_dev));
+
+ tmp_ctx.cmd = tmp_ctx.start
+ = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+ ret = a2xx_create_gpustate_shadow(adreno_dev, drawctxt);
+ if (ret)
+ goto done;
+
+ drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
+ }
+
+ if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC)) {
+ ret = a2xx_create_gmem_shadow(adreno_dev, drawctxt);
+ if (ret)
+ goto done;
+ }
+
+
+
+ kgsl_cache_range_op(&drawctxt->gpustate,
+ KGSL_CACHE_OP_FLUSH);
+
+ kgsl_cffdump_syncmem(NULL, &drawctxt->gpustate,
+ drawctxt->gpustate.gpuaddr,
+ drawctxt->gpustate.size, false);
+
+done:
+ if (ret)
+ kgsl_sharedmem_free(&drawctxt->gpustate);
+
+ return ret;
+}
+
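+/*
+ * Context-switch draw workaround: on A225, after
+ * ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW consecutive switches without a
+ * draw, issue a tiny dummy draw to settle the hardware; other A2xx parts
+ * just reprogram the shader instruction-store bases.
+ */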
+static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int cmd[11];
+ unsigned int *cmds = &cmd[0];
+
+ if (adreno_is_a225(adreno_dev)) {
+ adreno_dev->gpudev->ctx_switches_since_last_draw++;
+ if (adreno_dev->gpudev->ctx_switches_since_last_draw >
+ ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW)
+ adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
+ else
+ return;
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = (0x4 << 16) | (REG_PA_SU_SC_MODE_CNTL - 0x2000);
+ *cmds++ = 0;
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 5);
+ *cmds++ = 0;
+ *cmds++ = 1<<14;
+ *cmds++ = 0;
+ *cmds++ = device->mmu.setstate_memory.gpuaddr;
+ *cmds++ = 0;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+ } else {
+ *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
+ *cmds++ = adreno_encode_istore_size(adreno_dev)
+ | adreno_dev->pix_shader_start;
+ }
+
+ adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
+ &cmd[0], cmds - cmd);
+}
+
+static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (context == NULL)
+ return;
+
+ if (context->flags & CTXT_FLAGS_GPU_HANG)
+ KGSL_CTXT_WARN(device,
+ "Current active context has caused gpu hang\n");
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+
+
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->reg_save, 3);
+
+ if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
+
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->shader_save, 3);
+
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->shader_fixup, 3);
+
+ context->flags |= CTXT_FLAGS_SHADER_RESTORE;
+ }
+ }
+
+ if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
+ (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->context_gmem_shadow.gmem_save, 3);
+
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->chicken_restore, 3);
+ }
+ adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
+
+ context->flags |= CTXT_FLAGS_GMEM_RESTORE;
+ } else if (adreno_is_a2xx(adreno_dev))
+ a2xx_drawctxt_draw_workaround(adreno_dev, context);
+}
+
+static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int cmds[5];
+
+ if (context == NULL) {
+
+ kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+ adreno_dev->drawctxt_active->id);
+ return;
+ }
+
+ KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);
+
+ cmds[0] = cp_nop_packet(1);
+ cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+ cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
+ cmds[3] = device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
+ cmds[4] = context->id;
+ adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ cmds, 5);
+ kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
+
+#ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
+ kgsl_cffdump_syncmem(NULL, &context->gpustate,
+ context->gpustate.gpuaddr, LCC_SHADOW_SIZE +
+ REG_SHADOW_SIZE + CMD_BUFFER_SIZE + TEX_SHADOW_SIZE, false);
+#endif
+
+ if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->context_gmem_shadow.gmem_restore, 3);
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->chicken_restore, 3);
+ }
+
+ context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
+ }
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+
+
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+
+
+ if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->shader_restore, 3);
+ }
+ }
+
+ if (adreno_is_a20x(adreno_dev)) {
+ cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
+ cmds[1] = context->bin_base_offset;
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, cmds, 2);
+ }
+}
+
+
+#define RBBM_INT_MASK RBBM_INT_CNTL__RDERR_INT_MASK
+
+#define CP_INT_MASK \
+ (CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \
+ CP_INT_CNTL__OPCODE_ERROR_MASK | \
+ CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK | \
+ CP_INT_CNTL__RESERVED_BIT_ERROR_MASK | \
+ CP_INT_CNTL__IB_ERROR_MASK | \
+ CP_INT_CNTL__IB1_INT_MASK | \
+ CP_INT_CNTL__RB_INT_MASK)
+
+#define VALID_STATUS_COUNT_MAX 10
+
+static struct {
+ unsigned int mask;
+ const char *message;
+} kgsl_cp_error_irqs[] = {
+ { CP_INT_CNTL__T0_PACKET_IN_IB_MASK,
+ "ringbuffer TO packet in IB interrupt" },
+ { CP_INT_CNTL__OPCODE_ERROR_MASK,
+ "ringbuffer opcode error interrupt" },
+ { CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK,
+ "ringbuffer protected mode error interrupt" },
+ { CP_INT_CNTL__RESERVED_BIT_ERROR_MASK,
+ "ringbuffer reserved bit error interrupt" },
+ { CP_INT_CNTL__IB_ERROR_MASK,
+ "ringbuffer IB error interrupt" },
+};
+
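+/*
+ * CP interrupt handler. REG_CP_INT_STATUS can transiently read back as zero
+ * while MASTER_INT_SIGNAL still reports a pending CP interrupt, so the read
+ * is retried up to VALID_STATUS_COUNT_MAX times before the interrupt is
+ * treated as unreadable or spurious.
+ */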
+static void a2xx_cp_intrcallback(struct kgsl_device *device)
+{
+ unsigned int status = 0, num_reads = 0, master_status = 0;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ int i;
+
+ adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
+ while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
+ (master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
+ adreno_regread(device, REG_CP_INT_STATUS, &status);
+ adreno_regread(device, REG_MASTER_INT_SIGNAL,
+ &master_status);
+ num_reads++;
+ }
+ if (num_reads > 1)
+ KGSL_DRV_WARN(device,
+ "Looped %d times to read REG_CP_INT_STATUS\n",
+ num_reads);
+
+ trace_kgsl_a2xx_irq_status(device, master_status, status);
+
+ if (!status) {
+ if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
+ KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
+ wake_up_interruptible_all(&device->wait_queue);
+ } else
+ KGSL_DRV_WARN(device, "Spurious interrput detected\n");
+ return;
+ }
+
+ if (status & CP_INT_CNTL__RB_INT_MASK) {
+
+ unsigned int context_id;
+ kgsl_sharedmem_readl(&device->memstore,
+ &context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+ if (context_id < KGSL_MEMSTORE_MAX) {
+ kgsl_sharedmem_writel(&rb->device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ts_cmp_enable), 0);
+ wmb();
+ }
+ KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
+ }
+
+ for (i = 0; i < ARRAY_SIZE(kgsl_cp_error_irqs); i++) {
+ if (status & kgsl_cp_error_irqs[i].mask) {
+ KGSL_CMD_CRIT(rb->device, "%s\n",
+ kgsl_cp_error_irqs[i].message);
+
+ kgsl_pwrctrl_irq(rb->device, KGSL_PWRFLAGS_OFF);
+ }
+ }
+
+
+ status &= CP_INT_MASK;
+ adreno_regwrite(device, REG_CP_INT_ACK, status);
+
+ if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
+ KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ wake_up_interruptible_all(&device->wait_queue);
+ atomic_notifier_call_chain(&(device->ts_notifier_list),
+ device->id,
+ NULL);
+ }
+}
+
+static void a2xx_rbbm_intrcallback(struct kgsl_device *device)
+{
+ unsigned int status = 0;
+ unsigned int rderr = 0;
+ unsigned int addr = 0;
+ const char *source;
+
+ adreno_regread(device, REG_RBBM_INT_STATUS, &status);
+
+ if (status & RBBM_INT_CNTL__RDERR_INT_MASK) {
+ adreno_regread(device, REG_RBBM_READ_ERROR, &rderr);
+ source = (rderr & RBBM_READ_ERROR_REQUESTER)
+ ? "host" : "cp";
+
+ addr = (rderr & RBBM_READ_ERROR_ADDRESS_MASK) >> 2;
+
+ if (addr == REG_CP_INT_STATUS &&
+ rderr & RBBM_READ_ERROR_ERROR &&
+ rderr & RBBM_READ_ERROR_REQUESTER)
+ KGSL_DRV_WARN(device,
+ "rbbm read error interrupt: %s reg: %04X\n",
+ source, addr);
+ else
+ KGSL_DRV_CRIT(device,
+ "rbbm read error interrupt: %s reg: %04X\n",
+ source, addr);
+ }
+
+ status &= RBBM_INT_MASK;
+ adreno_regwrite(device, REG_RBBM_INT_ACK, status);
+}
+
+irqreturn_t a2xx_irq_handler(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ irqreturn_t result = IRQ_NONE;
+ unsigned int status;
+
+ adreno_regread(device, REG_MASTER_INT_SIGNAL, &status);
+
+ if (status & MASTER_INT_SIGNAL__MH_INT_STAT) {
+ kgsl_mh_intrcallback(device);
+ result = IRQ_HANDLED;
+ }
+
+ if (status & MASTER_INT_SIGNAL__CP_INT_STAT) {
+ a2xx_cp_intrcallback(device);
+ result = IRQ_HANDLED;
+ }
+
+ if (status & MASTER_INT_SIGNAL__RBBM_INT_STAT) {
+ a2xx_rbbm_intrcallback(device);
+ result = IRQ_HANDLED;
+ }
+
+ return result;
+}
+
+static void a2xx_irq_control(struct adreno_device *adreno_dev, int state)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (state) {
+ adreno_regwrite(device, REG_RBBM_INT_CNTL, RBBM_INT_MASK);
+ adreno_regwrite(device, REG_CP_INT_CNTL, CP_INT_MASK);
+ adreno_regwrite(device, MH_INTERRUPT_MASK,
+ kgsl_mmu_get_int_mask());
+ } else {
+ adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
+ adreno_regwrite(device, REG_CP_INT_CNTL, 0);
+ adreno_regwrite(device, MH_INTERRUPT_MASK, 0);
+ }
+
+
+ wmb();
+}
+
+static void a2xx_rb_init(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ unsigned int *cmds, cmds_gpu;
+
+
+ cmds = adreno_ringbuffer_allocspace(rb, 19);
+ cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
+
+ GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 18));
+
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
+
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+ GSL_RB_WRITE(cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
+ GSL_RB_WRITE(cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
+ GSL_RB_WRITE(cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
+ GSL_RB_WRITE(cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
+ GSL_RB_WRITE(cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
+ GSL_RB_WRITE(cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
+ GSL_RB_WRITE(cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
+ GSL_RB_WRITE(cmds, cmds_gpu,
+ SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
+
+
+ GSL_RB_WRITE(cmds, cmds_gpu,
+ (adreno_encode_istore_size(adreno_dev)
+ | adreno_dev->pix_shader_start));
+
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ GSL_RB_WRITE(cmds, cmds_gpu, 0);
+ else
+ GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
+
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+ adreno_ringbuffer_submit(rb);
+}
+
+static unsigned int a2xx_busy_cycles(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int reg, val;
+
+
+ adreno_regwrite(device, REG_CP_PERFMON_CNTL,
+ REG_PERF_MODE_CNT | REG_PERF_STATE_FREEZE);
+
+
+ adreno_regread(device, REG_RBBM_PERFCOUNTER1_LO, &val);
+
+
+ adreno_regwrite(device, REG_CP_PERFMON_CNTL,
+ REG_PERF_MODE_CNT | REG_PERF_STATE_RESET);
+
+
+ adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &reg);
+ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, (reg | 0x40));
+ adreno_regwrite(device, REG_RBBM_PERFCOUNTER1_SELECT, 0x1);
+ adreno_regwrite(device, REG_CP_PERFMON_CNTL,
+ REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
+
+ return val;
+}
+
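+/*
+ * Program the GMEM (EDRAM) aperture. The size field is log2 of the GMEM
+ * size in 16KB units: for a 256KB GMEM, 256K >> 14 == 16, and four shifts
+ * reduce that to 1, so edram_size becomes 4. The base address is likewise
+ * expressed in 16KB units.
+ */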
+static void a2xx_gmeminit(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ union reg_rb_edram_info rb_edram_info;
+ unsigned int gmem_size;
+ unsigned int edram_value = 0;
+
+
+ gmem_size = (adreno_dev->gmem_size >> 14);
+ while (gmem_size >>= 1)
+ edram_value++;
+
+ rb_edram_info.val = 0;
+
+ rb_edram_info.f.edram_size = edram_value;
+ rb_edram_info.f.edram_mapping_mode = 0;
+
+
+ rb_edram_info.f.edram_range = (adreno_dev->gmem_base >> 14);
+
+ adreno_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val);
+}
+
+static void a2xx_start(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0xfffffffe);
+ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+ if (!(device->flags & KGSL_FLAGS_SOFT_RESET) ||
+ !adreno_is_a22x(adreno_dev)) {
+ adreno_regwrite(device, REG_RBBM_SOFT_RESET,
+ 0xFFFFFFFF);
+ device->flags |= KGSL_FLAGS_SOFT_RESET;
+ } else {
+ adreno_regwrite(device, REG_RBBM_SOFT_RESET,
+ 0x00000001);
+ }
+ msleep(30);
+
+ adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0x00000000);
+
+ if (adreno_is_a225(adreno_dev)) {
+
+ adreno_regwrite(device, REG_SQ_FLOW_CONTROL,
+ 0x18000000);
+ }
+
+ if (adreno_is_a203(adreno_dev))
+ adreno_regwrite(device, REG_RBBM_CNTL, 0x0000FFFF);
+ else
+ adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
+
+ adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
+ adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
+
+ if (cpu_is_msm8960())
+ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0x200);
+ else
+ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0);
+
+ if (!adreno_is_a22x(adreno_dev))
+ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0);
+ else
+ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80);
+
+ adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000);
+
+
+ adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
+ adreno_regwrite(device, REG_CP_INT_CNTL, 0);
+ adreno_regwrite(device, REG_SQ_INT_CNTL, 0);
+
+ a2xx_gmeminit(adreno_dev);
+}
+
+void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
+ int *remain, int hang);
+
+struct adreno_gpudev adreno_a2xx_gpudev = {
+ .reg_rbbm_status = REG_RBBM_STATUS,
+ .reg_cp_pfp_ucode_addr = REG_CP_PFP_UCODE_ADDR,
+ .reg_cp_pfp_ucode_data = REG_CP_PFP_UCODE_DATA,
+
+ .ctxt_create = a2xx_drawctxt_create,
+ .ctxt_save = a2xx_drawctxt_save,
+ .ctxt_restore = a2xx_drawctxt_restore,
+ .ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
+ .irq_handler = a2xx_irq_handler,
+ .irq_control = a2xx_irq_control,
+ .snapshot = a2xx_snapshot,
+ .rb_init = a2xx_rb_init,
+ .busy_cycles = a2xx_busy_cycles,
+ .start = a2xx_start,
+};
diff --git a/drivers/gpu/msm/adreno_a2xx_snapshot.c b/drivers/gpu/msm/adreno_a2xx_snapshot.c
new file mode 100644
index 0000000..e1cf325
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a2xx_snapshot.c
@@ -0,0 +1,324 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_snapshot.h"
+
+#define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \
+ + sizeof(struct kgsl_snapshot_debug))
+
+
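+/*
+ * Debug snapshot sections. Each helper below verifies that enough of the
+ * remaining buffer is available, fills in a kgsl_snapshot_debug header,
+ * then streams data out of the selected debug bus through
+ * REG_RBBM_DEBUG_CNTL / REG_RBBM_DEBUG_OUT (or reads the SQ debug
+ * registers directly).
+ */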
+#define SXDEBUG_COUNT 0x1B
+
+static int a2xx_snapshot_sxdebug(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(SXDEBUG_COUNT)) {
+ SNAPSHOT_ERR_NOMEM(device, "SX DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_SX;
+ header->size = SXDEBUG_COUNT;
+
+ for (i = 0; i < SXDEBUG_COUNT; i++) {
+ adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1B00 | i);
+ adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
+ }
+
+ adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+
+ return DEBUG_SECTION_SZ(SXDEBUG_COUNT);
+}
+
+#define CPDEBUG_COUNT 0x20
+
+static int a2xx_snapshot_cpdebug(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(CPDEBUG_COUNT)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP;
+ header->size = CPDEBUG_COUNT;
+
+ for (i = 0; i < CPDEBUG_COUNT; i++) {
+ adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628);
+ adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
+ }
+
+ adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+
+ return DEBUG_SECTION_SZ(CPDEBUG_COUNT);
+}
+
+
+#define SQ_DEBUG_WRITE(_device, _reg, _data, _offset) \
+ do { _data[(_offset)++] = (_reg); \
+ adreno_regread(_device, (_reg), &_data[(_offset)++]); } while (0)
+
+#define SQ_DEBUG_BANK_SIZE 23
+
+static int a2xx_snapshot_sqdebug(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, offset = 0;
+ int size = SQ_DEBUG_BANK_SIZE * 2 * 2;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "SQ Debug");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_SQ;
+ header->size = size;
+
+ for (i = 0; i < 2; i++) {
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_CONST_MGR_FSM+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_EXP_ALLOC+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_FSM_ALU_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_FSM_ALU_1+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_GPR_PIX+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_GPR_VTX+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_INPUT_FSM+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_MISC_1+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATE_MEM+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device,
+ REG_SQ_DEBUG_PIX_TB_STATUS_REG_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device,
+ REG_SQ_DEBUG_PIX_TB_STATUS_REG_1+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device,
+ REG_SQ_DEBUG_PIX_TB_STATUS_REG_2+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device,
+ REG_SQ_DEBUG_PIX_TB_STATUS_REG_3+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PTR_BUFF+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_TB_STATUS_SEL+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_TP_FSM+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_0+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_1+i*0x1000,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATE_MEM+i*0x1000,
+ data, offset);
+ }
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+#define SQ_DEBUG_THREAD_SIZE 7
+
+static int a2xx_snapshot_sqthreaddebug(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, offset = 0;
+ int size = SQ_DEBUG_THREAD_SIZE * 2 * 16;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "SQ THREAD DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_SQTHREAD;
+ header->size = size;
+
+ for (i = 0; i < 16; i++) {
+ adreno_regwrite(device, REG_SQ_DEBUG_TB_STATUS_SEL,
+ i | (6<<4) | (i<<7) | (1<<11) | (1<<12)
+ | (i<<16) | (6<<20) | (i<<23));
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATE_MEM,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_VTX_TB_STATUS_REG,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATE_MEM,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_0,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_1,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_2,
+ data, offset);
+ SQ_DEBUG_WRITE(device, REG_SQ_DEBUG_PIX_TB_STATUS_REG_3,
+ data, offset);
+ }
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+#define MIUDEBUG_COUNT 0x10
+
+static int a2xx_snapshot_miudebug(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(MIUDEBUG_COUNT)) {
+ SNAPSHOT_ERR_NOMEM(device, "MIU DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_MIU;
+ header->size = MIUDEBUG_COUNT;
+
+ for (i = 0; i < MIUDEBUG_COUNT; i++) {
+ adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1600 | i);
+ adreno_regread(device, REG_RBBM_DEBUG_OUT, &data[i]);
+ }
+
+ adreno_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
+
+ return DEBUG_SECTION_SZ(MIUDEBUG_COUNT);
+}
+
+
+void *a2xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
+ int *remain, int hang)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_snapshot_registers regs;
+ unsigned int pmoverride;
+
+
+
+ if (adreno_is_a20x(adreno_dev)) {
+ regs.regs = (unsigned int *) a200_registers;
+ regs.count = a200_registers_count;
+ } else if (adreno_is_a220(adreno_dev)) {
+ regs.regs = (unsigned int *) a220_registers;
+ regs.count = a220_registers_count;
+ } else if (adreno_is_a225(adreno_dev)) {
+ regs.regs = (unsigned int *) a225_registers;
+ regs.count = a225_registers_count;
+ }
+
+
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
+ kgsl_snapshot_dump_regs, &regs);
+
+
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_CP_STATE_DEBUG_INDEX,
+ REG_CP_STATE_DEBUG_DATA, 0x0, 0x14);
+
+
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
+ 64, 44);
+
+
+ adreno_regread(device, REG_RBBM_PM_OVERRIDE2, &pmoverride);
+ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF);
+
+
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_sxdebug, NULL);
+
+
+ if (!adreno_is_a22x(adreno_dev))
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_PA_SU_DEBUG_CNTL,
+ REG_PA_SU_DEBUG_DATA,
+ 0, 0x1B);
+
+
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_cpdebug, NULL);
+
+
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, MH_DEBUG_CTRL, MH_DEBUG_DATA, 0x0, 0x40);
+
+
+ if (adreno_is_a22x(adreno_dev)) {
+
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA, 0, 8);
+
+
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_RB_DEBUG_CNTL, REG_RB_DEBUG_DATA + 0x1000,
+ 0, 8);
+
+
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_PC_DEBUG_CNTL, REG_PC_DEBUG_DATA, 0, 8);
+
+
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_GRAS_DEBUG_CNTL, REG_GRAS_DEBUG_DATA, 0, 4);
+
+
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_miudebug, NULL);
+
+
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_sqdebug, NULL);
+
+
+ if (hang) {
+
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a2xx_snapshot_sqthreaddebug, NULL);
+ }
+ }
+
+
+ adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, pmoverride);
+
+ return snapshot;
+}
diff --git a/drivers/gpu/msm/adreno_a2xx_trace.c b/drivers/gpu/msm/adreno_a2xx_trace.c
new file mode 100644
index 0000000..b398c74
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a2xx_trace.c
@@ -0,0 +1,18 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "adreno.h"
+
+#define CREATE_TRACE_POINTS
+#include "adreno_a2xx_trace.h"
diff --git a/drivers/gpu/msm/adreno_a2xx_trace.h b/drivers/gpu/msm/adreno_a2xx_trace.h
new file mode 100644
index 0000000..b4fb47d
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a2xx_trace.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_ADRENO_A2XX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ADRENO_A2XX_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE adreno_a2xx_trace
+
+#include <linux/tracepoint.h>
+
+struct kgsl_device;
+
+TRACE_EVENT(kgsl_a2xx_irq_status,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int master_status,
+ unsigned int status),
+
+ TP_ARGS(device, master_status, status),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, master_status)
+ __field(unsigned int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->master_status = master_status;
+ __entry->status = status;
+ ),
+
+ TP_printk(
+ "d_name=%s master=%s status=%s",
+ __get_str(device_name),
+ __entry->master_status ? __print_flags(__entry->master_status,
+ "|",
+ { MASTER_INT_SIGNAL__MH_INT_STAT, "MH" },
+ { MASTER_INT_SIGNAL__SQ_INT_STAT, "SQ" },
+ { MASTER_INT_SIGNAL__CP_INT_STAT, "CP" },
+ { MASTER_INT_SIGNAL__RBBM_INT_STAT, "RBBM" }) : "None",
+ __entry->status ? __print_flags(__entry->status, "|",
+ { CP_INT_CNTL__SW_INT_MASK, "SW" },
+ { CP_INT_CNTL__T0_PACKET_IN_IB_MASK,
+ "T0_PACKET_IN_IB" },
+ { CP_INT_CNTL__OPCODE_ERROR_MASK, "OPCODE_ERROR" },
+ { CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK,
+ "PROTECTED_MODE_ERROR" },
+ { CP_INT_CNTL__RESERVED_BIT_ERROR_MASK,
+ "RESERVED_BIT_ERROR" },
+ { CP_INT_CNTL__IB_ERROR_MASK, "IB_ERROR" },
+ { CP_INT_CNTL__IB2_INT_MASK, "IB2" },
+ { CP_INT_CNTL__IB1_INT_MASK, "IB1" },
+ { CP_INT_CNTL__RB_INT_MASK, "RB" }) : "None"
+ )
+);
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
new file mode 100644
index 0000000..d550c62
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -0,0 +1,2583 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "a3xx_reg.h"
+#include "adreno_a3xx_trace.h"
+
+
+const unsigned int a3xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
+ 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
+ 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
+ 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
+ 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
+ 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
+ 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
+ 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
+ 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
+ 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
+ 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
+ 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5,
+ 0x0e41, 0x0e45, 0x0e64, 0x0e65,
+ 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
+ 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
+ 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
+ 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
+ 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
+ 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
+ 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
+ 0x2240, 0x227e,
+ 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
+ 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
+ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
+ 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
+ 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
+ 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
+ 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
+ 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
+ 0x25f0, 0x25f0,
+ 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
+ 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
+ 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
+ 0x2750, 0x2756, 0x2760, 0x2760, 0x300C, 0x300E, 0x301C, 0x301D,
+ 0x302A, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031, 0x3034, 0x3036,
+ 0x303C, 0x303C, 0x305E, 0x305F,
+};
+
+const unsigned int a3xx_registers_count = ARRAY_SIZE(a3xx_registers) / 2;
+
+const unsigned int a3xx_hlsq_registers[] = {
+ 0x0e00, 0x0e05, 0x0e0c, 0x0e0c, 0x0e22, 0x0e23,
+ 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a,
+ 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
+};
+
+const unsigned int a3xx_hlsq_registers_count =
+ ARRAY_SIZE(a3xx_hlsq_registers) / 2;
+
+
+#define _SET(_shift, _val) ((_val) << (_shift))
+
+
+#define ALU_SHADOW_SIZE (8*1024)
+#define REG_SHADOW_SIZE (4*1024)
+#define CMD_BUFFER_SIZE (5*1024)
+#define TEX_SIZE_MEM_OBJECTS 896
+#define TEX_SIZE_MIPMAP 1936
+#define TEX_SIZE_SAMPLER_OBJ 256
+#define TEX_SHADOW_SIZE \
+ ((TEX_SIZE_MEM_OBJECTS + TEX_SIZE_MIPMAP + \
+ TEX_SIZE_SAMPLER_OBJ)*2)
+#define SHADER_SHADOW_SIZE (8*1024)
+
+#define CONTEXT_SIZE \
+ (ALU_SHADOW_SIZE+REG_SHADOW_SIZE + \
+ CMD_BUFFER_SIZE+SHADER_SHADOW_SIZE + \
+ TEX_SHADOW_SIZE)
+
+#define REG_OFFSET ALU_SHADOW_SIZE
+#define CMD_OFFSET (REG_OFFSET+REG_SHADOW_SIZE)
+#define SHADER_OFFSET (CMD_OFFSET+CMD_BUFFER_SIZE)
+#define TEX_OFFSET (SHADER_OFFSET+SHADER_SHADOW_SIZE)
+#define VS_TEX_OFFSET_MEM_OBJECTS TEX_OFFSET
+#define VS_TEX_OFFSET_MIPMAP (VS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS)
+#define VS_TEX_OFFSET_SAMPLER_OBJ (VS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP)
+#define FS_TEX_OFFSET_MEM_OBJECTS \
+ (VS_TEX_OFFSET_SAMPLER_OBJ+TEX_SIZE_SAMPLER_OBJ)
+#define FS_TEX_OFFSET_MIPMAP (FS_TEX_OFFSET_MEM_OBJECTS+TEX_SIZE_MEM_OBJECTS)
+#define FS_TEX_OFFSET_SAMPLER_OBJ (FS_TEX_OFFSET_MIPMAP+TEX_SIZE_MIPMAP)
+
+#define SSIZE (16*1024)
+
+#define HLSQ_SAMPLER_OFFSET 0x000
+#define HLSQ_MEMOBJ_OFFSET 0x400
+#define HLSQ_MIPMAP_OFFSET 0x800
+
+#define HLSQ_SHADOW_BASE (0x10000+SSIZE*2)
+
+#define REG_TO_MEM_LOOP_COUNT_SHIFT 18
+
+#define BUILD_PC_DRAW_INITIATOR(prim_type, source_select, index_size, \
+ vis_cull_mode) \
+ (((prim_type) << PC_DRAW_INITIATOR_PRIM_TYPE) | \
+ ((source_select) << PC_DRAW_INITIATOR_SOURCE_SELECT) | \
+ ((index_size & 1) << PC_DRAW_INITIATOR_INDEX_SIZE) | \
+ ((index_size >> 1) << PC_DRAW_INITIATOR_SMALL_INDEX) | \
+ ((vis_cull_mode) << PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE) | \
+ (1 << PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE))
+
+static const unsigned int context_register_ranges[] = {
+ A3XX_GRAS_CL_CLIP_CNTL, A3XX_GRAS_CL_CLIP_CNTL,
+ A3XX_GRAS_CL_GB_CLIP_ADJ, A3XX_GRAS_CL_GB_CLIP_ADJ,
+ A3XX_GRAS_CL_VPORT_XOFFSET, A3XX_GRAS_CL_VPORT_ZSCALE,
+ A3XX_GRAS_SU_POINT_MINMAX, A3XX_GRAS_SU_POINT_SIZE,
+ A3XX_GRAS_SU_POLY_OFFSET_SCALE, A3XX_GRAS_SU_POLY_OFFSET_OFFSET,
+ A3XX_GRAS_SU_MODE_CONTROL, A3XX_GRAS_SU_MODE_CONTROL,
+ A3XX_GRAS_SC_CONTROL, A3XX_GRAS_SC_CONTROL,
+ A3XX_GRAS_SC_SCREEN_SCISSOR_TL, A3XX_GRAS_SC_SCREEN_SCISSOR_BR,
+ A3XX_GRAS_SC_WINDOW_SCISSOR_TL, A3XX_GRAS_SC_WINDOW_SCISSOR_BR,
+ A3XX_RB_MODE_CONTROL, A3XX_RB_MRT_BLEND_CONTROL3,
+ A3XX_RB_BLEND_RED, A3XX_RB_COPY_DEST_INFO,
+ A3XX_RB_DEPTH_CONTROL, A3XX_RB_DEPTH_CONTROL,
+ A3XX_PC_VSTREAM_CONTROL, A3XX_PC_VSTREAM_CONTROL,
+ A3XX_PC_VERTEX_REUSE_BLOCK_CNTL, A3XX_PC_VERTEX_REUSE_BLOCK_CNTL,
+ A3XX_PC_PRIM_VTX_CNTL, A3XX_PC_RESTART_INDEX,
+ A3XX_HLSQ_CONTROL_0_REG, A3XX_HLSQ_CONST_FSPRESV_RANGE_REG,
+ A3XX_HLSQ_CL_NDRANGE_0_REG, A3XX_HLSQ_CL_NDRANGE_0_REG,
+ A3XX_HLSQ_CL_NDRANGE_2_REG, A3XX_HLSQ_CL_CONTROL_1_REG,
+ A3XX_HLSQ_CL_KERNEL_CONST_REG, A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG,
+ A3XX_HLSQ_CL_WG_OFFSET_REG, A3XX_HLSQ_CL_WG_OFFSET_REG,
+ A3XX_VFD_CONTROL_0, A3XX_VFD_VS_THREADING_THRESHOLD,
+ A3XX_SP_SP_CTRL_REG, A3XX_SP_SP_CTRL_REG,
+ A3XX_SP_VS_CTRL_REG0, A3XX_SP_VS_OUT_REG_7,
+ A3XX_SP_VS_VPC_DST_REG_0, A3XX_SP_VS_PVT_MEM_SIZE_REG,
+ A3XX_SP_VS_LENGTH_REG, A3XX_SP_FS_PVT_MEM_SIZE_REG,
+ A3XX_SP_FS_FLAT_SHAD_MODE_REG_0, A3XX_SP_FS_FLAT_SHAD_MODE_REG_1,
+ A3XX_SP_FS_OUTPUT_REG, A3XX_SP_FS_OUTPUT_REG,
+ A3XX_SP_FS_MRT_REG_0, A3XX_SP_FS_IMAGE_OUTPUT_REG_3,
+ A3XX_SP_FS_LENGTH_REG, A3XX_SP_FS_LENGTH_REG,
+ A3XX_TPL1_TP_VS_TEX_OFFSET, A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR,
+ A3XX_VPC_ATTR, A3XX_VPC_VARY_CYLWRAP_ENABLE_1,
+};
+
+static const unsigned int global_registers[] = {
+ A3XX_GRAS_CL_USER_PLANE_X0, A3XX_GRAS_CL_USER_PLANE_Y0,
+ A3XX_GRAS_CL_USER_PLANE_Z0, A3XX_GRAS_CL_USER_PLANE_W0,
+ A3XX_GRAS_CL_USER_PLANE_X1, A3XX_GRAS_CL_USER_PLANE_Y1,
+ A3XX_GRAS_CL_USER_PLANE_Z1, A3XX_GRAS_CL_USER_PLANE_W1,
+ A3XX_GRAS_CL_USER_PLANE_X2, A3XX_GRAS_CL_USER_PLANE_Y2,
+ A3XX_GRAS_CL_USER_PLANE_Z2, A3XX_GRAS_CL_USER_PLANE_W2,
+ A3XX_GRAS_CL_USER_PLANE_X3, A3XX_GRAS_CL_USER_PLANE_Y3,
+ A3XX_GRAS_CL_USER_PLANE_Z3, A3XX_GRAS_CL_USER_PLANE_W3,
+ A3XX_GRAS_CL_USER_PLANE_X4, A3XX_GRAS_CL_USER_PLANE_Y4,
+ A3XX_GRAS_CL_USER_PLANE_Z4, A3XX_GRAS_CL_USER_PLANE_W4,
+ A3XX_GRAS_CL_USER_PLANE_X5, A3XX_GRAS_CL_USER_PLANE_Y5,
+ A3XX_GRAS_CL_USER_PLANE_Z5, A3XX_GRAS_CL_USER_PLANE_W5,
+ A3XX_VSC_BIN_SIZE,
+ A3XX_VSC_PIPE_CONFIG_0, A3XX_VSC_PIPE_CONFIG_1,
+ A3XX_VSC_PIPE_CONFIG_2, A3XX_VSC_PIPE_CONFIG_3,
+ A3XX_VSC_PIPE_CONFIG_4, A3XX_VSC_PIPE_CONFIG_5,
+ A3XX_VSC_PIPE_CONFIG_6, A3XX_VSC_PIPE_CONFIG_7,
+ A3XX_VSC_PIPE_DATA_ADDRESS_0, A3XX_VSC_PIPE_DATA_ADDRESS_1,
+ A3XX_VSC_PIPE_DATA_ADDRESS_2, A3XX_VSC_PIPE_DATA_ADDRESS_3,
+ A3XX_VSC_PIPE_DATA_ADDRESS_4, A3XX_VSC_PIPE_DATA_ADDRESS_5,
+ A3XX_VSC_PIPE_DATA_ADDRESS_6, A3XX_VSC_PIPE_DATA_ADDRESS_7,
+ A3XX_VSC_PIPE_DATA_LENGTH_0, A3XX_VSC_PIPE_DATA_LENGTH_1,
+ A3XX_VSC_PIPE_DATA_LENGTH_2, A3XX_VSC_PIPE_DATA_LENGTH_3,
+ A3XX_VSC_PIPE_DATA_LENGTH_4, A3XX_VSC_PIPE_DATA_LENGTH_5,
+ A3XX_VSC_PIPE_DATA_LENGTH_6, A3XX_VSC_PIPE_DATA_LENGTH_7,
+ A3XX_VSC_SIZE_ADDRESS
+};
+
+#define GLOBAL_REGISTER_COUNT ARRAY_SIZE(global_registers)
+
+static struct tmp_ctx {
+ unsigned int *cmd; /* Next available dword for building commands */
+
+ /* GPU addresses of the dwords where the global register values are saved */
+ uint32_t reg_values[GLOBAL_REGISTER_COUNT];
+ uint32_t gmem_base; /* Base GPU address of GMEM */
+} tmp_ctx;
+
+#ifndef GSL_CONTEXT_SWITCH_CPU_SYNC
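+/* Emit commands computing dest = ((reg & and) ROL rol) | or via a scratch reg */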
+static unsigned int *rmw_regtomem(unsigned int *cmd,
+ unsigned int reg, unsigned int and,
+ unsigned int rol, unsigned int or,
+ unsigned int dest)
+{
+ /* SCRATCH_REG2 = (SCRATCH_REG2 & 0x00000000) | reg */
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
+ *cmd++ = 0x00000000; /* AND value */
+ *cmd++ = reg; /* OR address */
+
+ /* SCRATCH_REG2 = ((SCRATCH_REG2 & and) ROL rol) | or */
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (rol << 24) | A3XX_CP_SCRATCH_REG2;
+ *cmd++ = and; /* AND value */
+ *cmd++ = or; /* OR value */
+
+ /* Copy the result out to dest */
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_CP_SCRATCH_REG2;
+ *cmd++ = dest;
+
+ return cmd;
+}
+#endif
+
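+/* Build the IB that saves context registers and shader constants to the shadow */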
+static void build_regconstantsave_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start;
+ unsigned int i;
+
+ drawctxt->constant_save_commands[0].hostptr = cmd;
+ drawctxt->constant_save_commands[0].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ cmd++;
+
+ start = cmd;
+
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+
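+ /* Point the CP at the register shadow; HW register writes are mirrored there */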
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+ *cmd++ = 4 << 16;
+ *cmd++ = 0x0;
+
+#else
+
+
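+ /* Shadow writes disabled: copy each context register range out by hand */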
+ for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) {
+ unsigned int start = context_register_ranges[i * 2];
+ unsigned int end = context_register_ranges[i * 2 + 1];
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = ((end - start + 1) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ start;
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET)
+ & 0xFFFFE000) + (start - 0x2000) * 4;
+ }
+#endif
+
+
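+ /* Save the global registers to the addresses reserved in the restore IB */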
+ for (i = 0; i < ARRAY_SIZE(global_registers); i++) {
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = global_registers[i];
+ *cmd++ = tmp_ctx.reg_values[i];
+ }
+
+
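+ /* Save vertex shader constants; skipped when the constant length is zero */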
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+ *cmd++ = 0x0000FFFF;
+ *cmd++ = 3;
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ drawctxt->constant_save_commands[1].hostptr = cmd;
+ drawctxt->constant_save_commands[1].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+ *cmd++ = drawctxt->gpustate.gpuaddr & 0xfffffffc;
+
+
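+ /* Save fragment shader constants; skipped when the constant length is zero */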
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+ *cmd++ = 0x0000FFFF;
+ *cmd++ = 3;
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ drawctxt->constant_save_commands[2].hostptr = cmd;
+ drawctxt->constant_save_commands[2].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+ *cmd++ = 0;
+
+
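+ /* Save vertex shader texture memory objects */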
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr +
+ VS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc;
+
+
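+ /* Save vertex shader texture mipmap pointers */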
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP) & 0xfffffffc;
+
+
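+ /* Save vertex shader texture sampler objects */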
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr +
+ VS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc;
+
+
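+ /* Save fragment shader texture memory objects */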
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_MEM_OBJECTS / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_MEMOBJ_OFFSET + SSIZE) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr +
+ FS_TEX_OFFSET_MEM_OBJECTS) & 0xfffffffc;
+
+
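+ /* Save fragment shader texture mipmap pointers */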
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_MIPMAP / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_MIPMAP_OFFSET + SSIZE) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP) & 0xfffffffc;
+
+
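+ /* Save fragment shader texture sampler objects */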
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ =
+ ((TEX_SIZE_SAMPLER_OBJ / 4) << REG_TO_MEM_LOOP_COUNT_SHIFT) |
+ ((HLSQ_SHADOW_BASE + HLSQ_SAMPLER_OFFSET + SSIZE) / 4);
+ *cmd++ =
+ (drawctxt->gpustate.gpuaddr +
+ FS_TEX_OFFSET_SAMPLER_OBJ) & 0xfffffffc;
+
+
+ create_ib1(drawctxt, drawctxt->regconstant_save, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
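+/* Build the IB that copies GMEM out to the shadow buffer in system memory */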
+static unsigned int *build_gmem2sys_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = tmp_ctx.cmd;
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
+ *cmds++ = A3XX_RBBM_CLOCK_CTL_DEFAULT;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
+
+
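+ /* RB_MODE_CONTROL: resolve pass */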
+ *cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RESOLVE_PASS) |
+ _SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1) |
+ _SET(RB_MODECONTROL_PACKER_TIMER_ENABLE, 1);
+
+ *cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
+ _SET(RB_RENDERCONTROL_DISABLE_COLOR_PIPE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_RB_COPY_CONTROL);
+
+ *cmds++ = _SET(RB_COPYCONTROL_RESOLVE_CLEAR_MODE,
+ RB_CLEAR_MODE_RESOLVE) |
+ _SET(RB_COPYCONTROL_COPY_GMEM_BASE,
+ tmp_ctx.gmem_base >> 14);
+
+ *cmds++ = _SET(RB_COPYDESTBASE_COPY_DEST_BASE,
+ shadow->gmemshadow.gpuaddr >> 5);
+
+ *cmds++ = _SET(RB_COPYDESTPITCH_COPY_DEST_PITCH,
+ (shadow->pitch * 4) / 32);
+
+ *cmds++ = _SET(RB_COPYDESTINFO_COPY_DEST_TILE,
+ RB_TILINGMODE_LINEAR) |
+ _SET(RB_COPYDESTINFO_COPY_DEST_FORMAT, RB_R8G8B8A8_UNORM) |
+ _SET(RB_COPYDESTINFO_COPY_COMPONENT_ENABLE, 0x0F) |
+ _SET(RB_COPYDESTINFO_COPY_DEST_ENDIAN, RB_ENDIAN_NONE);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
+
+ *cmds++ = _SET(GRAS_SC_CONTROL_RENDER_MODE, 2);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
+
+ *cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 4) |
+ _SET(VFD_CTRLREG0_PACKETSIZE, 2) |
+ _SET(VFD_CTRLREG0_STRMDECINSTRCNT, 1) |
+ _SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 1);
+
+ *cmds++ = _SET(VFD_CTRLREG1_MAXSTORAGE, 1) |
+ _SET(VFD_CTRLREG1_REGID4VTX, 252) |
+ _SET(VFD_CTRLREG1_REGID4INST, 252);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
+
+ *cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
+ _SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
+ _SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
+
+ *cmds++ = _SET(VFD_BASEADDR_BASEADDR,
+ shadow->quad_vertices.gpuaddr);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
+
+ *cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
+ _SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
+ _SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
+ _SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+
+ *cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
+ _SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
+ _SET(HLSQ_CTRL0REG_RESERVED2, 1) |
+ _SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1);
+
+ *cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
+ _SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1);
+
+ *cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
+
+ *cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
+
+ *cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
+ _SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 128) |
+ _SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 1);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY, 32) |
+ _SET(HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY, 32);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
+
+ *cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
+
+ *cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1) |
+ _SET(SP_SPCTRLREG_LOMODE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
+ *cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
+
+ *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+ _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
+ _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 1) |
+ _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+ _SET(SP_VSCTRLREG0_VSSUPERTHREADMODE, 1) |
+ _SET(SP_VSCTRLREG0_VSLENGTH, 1);
+
+ *cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 4);
+
+ *cmds++ = _SET(SP_VSPARAMREG_PSIZEREGID, 252);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
+ *cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
+ *cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
+
+ *cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
+
+ *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+ _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+ _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+ _SET(SP_FSCTRLREG0_FSLENGTH, 1);
+
+ *cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
+ _SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
+
+ *cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128) |
+ _SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 127);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
+
+ *cmds++ = _SET(SP_IMAGEOUTPUTREG_DEPTHOUTMODE, SP_PIXEL_BASED);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
+
+ *cmds++ = _SET(SP_FSMRTREG_PRECISION, 1);
+
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
+ *cmds++ = CP_REG(A3XX_VPC_ATTR);
+
+ *cmds++ = _SET(VPC_VPCATTR_THRHDASSIGN, 1) |
+ _SET(VPC_VPCATTR_LMSIZE, 1);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
+ *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+
+
+ *cmds++ = 0x00000000; *cmds++ = 0x13001000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
+ *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+
+
+ *cmds++ = 0x00000000; *cmds++ = 0x30201b00;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x03000000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
+
+ *cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
+ _SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
+
+ *cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_NEVER);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
+
+ *cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_NEVER) |
+ _SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_NEVER) |
+ _SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
+
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
+
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
+
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
+
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
+
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x155;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
+
+ *cmds++ = _SET(VFD_THREADINGTHRESHOLD_REGID_THRESHOLD, 15) |
+ _SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
+
+ *cmds++ = 0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
+
+ *cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
+ _SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
+ _SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_PC_PRIM_VTX_CNTL);
+
+ *cmds++ = _SET(PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE,
+ PC_DRAW_TRIANGLES) |
+ _SET(PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE,
+ PC_DRAW_TRIANGLES) |
+ _SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
+ _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
+ _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3f800000);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3f800000);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3f800000);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_CLIP_CNTL);
+
+ *cmds++ = _SET(GRAS_CL_CLIP_CNTL_CLIP_DISABLE, 1) |
+ _SET(GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE, 1) |
+ _SET(GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE, 1) |
+ _SET(GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE, 1) |
+ _SET(GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_GB_CLIP_ADJ);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000;
+
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+ _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 1) |
+ _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+ _SET(SP_VSCTRLREG0_VSSUPERTHREADMODE, 1) |
+ _SET(SP_VSCTRLREG0_VSLENGTH, 1);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+ _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+ _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+ _SET(SP_FSCTRLREG0_FSLENGTH, 1);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000;
+
+
+
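+ /* Resolve the quad as two triangles: indices (0,1,2) then (2,1,3) */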
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
+ *cmds++ = 0x00000000;
+ *cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
+ PC_DI_SRC_SEL_IMMEDIATE,
+ PC_DI_INDEX_SIZE_32_BIT,
+ PC_DI_IGNORE_VISIBILITY);
+ *cmds++ = 0x00000003;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000001;
+ *cmds++ = 0x00000002;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_2, 6);
+ *cmds++ = 0x00000000;
+ *cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_TRILIST,
+ PC_DI_SRC_SEL_IMMEDIATE,
+ PC_DI_INDEX_SIZE_32_BIT,
+ PC_DI_IGNORE_VISIBILITY);
+ *cmds++ = 0x00000003;
+ *cmds++ = 0x00000002;
+ *cmds++ = 0x00000001;
+ *cmds++ = 0x00000003;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_HLSQ_CL_CONTROL_0_REG);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+
+ create_ib1(drawctxt, shadow->gmem_save, start, cmds);
+
+ return cmds;
+}
+
+/* Build the IB that saves shader instruction memory */
+static void build_shader_save_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start;
+
+
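+ /* Reserve flag dwords and command slots for the save fixup IB to patch */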
+ drawctxt->cond_execs[0].hostptr = cmd;
+ drawctxt->cond_execs[0].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ drawctxt->cond_execs[1].hostptr = cmd;
+ drawctxt->cond_execs[1].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+ drawctxt->shader_save_commands[0].hostptr = cmd;
+ drawctxt->shader_save_commands[0].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ drawctxt->shader_save_commands[1].hostptr = cmd;
+ drawctxt->shader_save_commands[1].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+ start = cmd;
+
+
+
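+ /* Conditionally save vertex shader instruction memory */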
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+ *cmd++ = 0x0000FFFF;
+ *cmd++ = 3;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ drawctxt->shader_save_commands[2].hostptr = cmd;
+ drawctxt->shader_save_commands[2].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;
+
+
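+ /* Conditionally save fragment shader instruction memory */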
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+ *cmd++ = 0x0000FFFF;
+ *cmd++ = 3;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ drawctxt->shader_save_commands[3].hostptr = cmd;
+ drawctxt->shader_save_commands[3].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
+ + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;
+
+
+ create_ib1(drawctxt, drawctxt->shader_save, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+
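+/* Build the IB that patches the save IBs with the live shader state sizes */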
+static void build_save_fixup_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+
+
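+ /* Flush HLSQ lazy updates */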
+ *cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
+ *cmd++ = 0x7;
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
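+ /* Invalidate the entire UCHE */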
+ *cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
+ *cmd++ = 0x00000000;
+ *cmd++ = (unsigned int)
+ UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
+ UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
+ 0;
+
+
+ *cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
+ *cmd++ = 0;
+
+#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
+
+
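+ /* Copy the live register values into the reserved save command slots */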
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG0;
+ *cmd++ = drawctxt->shader_save_commands[2].gpuaddr;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG0;
+ *cmd++ = drawctxt->shader_save_commands[3].gpuaddr;
+
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+ *cmd++ = drawctxt->shader_save_commands[1].gpuaddr;
+
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG1;
+ *cmd++ = drawctxt->constant_save_commands[1].gpuaddr;
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG1;
+ *cmd++ = drawctxt->constant_save_commands[2].gpuaddr;
+
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+ *cmd++ = drawctxt->constant_save_commands[0].gpuaddr;
+
+
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG0;
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr;
+
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG0;
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr;
+#else
+
+
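+ /* Derive the VS instruction-save count from SP_VS_CTRL_REG0 */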
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000,
+ 11+REG_TO_MEM_LOOP_COUNT_SHIFT,
+ (HLSQ_SHADOW_BASE + 0x1000) / 4,
+ drawctxt->shader_save_commands[2].gpuaddr);
+
+
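+ /* Likewise for the FS, reading from the second shadow bank */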
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
+ *cmd++ = 0x00000000;
+ *cmd++ = A3XX_SP_FS_CTRL_REG0;
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = ((11 + REG_TO_MEM_LOOP_COUNT_SHIFT) << 24) |
+ A3XX_CP_SCRATCH_REG2;
+ *cmd++ = 0x7f000000;
+ *cmd++ = (HLSQ_SHADOW_BASE + 0x1000 + SSIZE) / 4;
+
+
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG3;
+ *cmd++ = 0x00000000;
+ *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = A3XX_CP_SCRATCH_REG3;
+ *cmd++ = 0xfe000000;
+ *cmd++ = 0x00000000;
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
+ *cmd++ = (1 << 30) | A3XX_CP_SCRATCH_REG2;
+ *cmd++ = 0xffffffff;
+ *cmd++ = A3XX_CP_SCRATCH_REG3;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_CP_SCRATCH_REG2;
+ *cmd++ = drawctxt->shader_save_commands[3].gpuaddr;
+
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+ 2 + REG_TO_MEM_LOOP_COUNT_SHIFT,
+ (HLSQ_SHADOW_BASE + 0x2000) / 4,
+ drawctxt->constant_save_commands[1].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+ 2 + REG_TO_MEM_LOOP_COUNT_SHIFT,
+ (HLSQ_SHADOW_BASE + 0x2000 + SSIZE) / 4,
+ drawctxt->constant_save_commands[2].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000,
+ 18, drawctxt->gpustate.gpuaddr & 0xfffffe00,
+ drawctxt->constant_save_commands[2].gpuaddr
+ + sizeof(unsigned int));
+
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+ 0, 0, drawctxt->cond_execs[2].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+ 0, 0, drawctxt->cond_execs[3].gpuaddr);
+
+
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x00000002,
+ 31, 0, drawctxt->cond_execs[0].gpuaddr);
+
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x00000002,
+ 31, 0, drawctxt->cond_execs[1].gpuaddr);
+
+#endif
+
+ create_ib1(drawctxt, drawctxt->save_fixup, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+
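+/* Build the IB that copies the shadow buffer in system memory back into GMEM */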
+static unsigned int *build_sys2gmem_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow)
+{
+ unsigned int *cmds = tmp_ctx.cmd;
+ unsigned int *start = cmds;
+
+ *cmds++ = cp_type0_packet(A3XX_RBBM_CLOCK_CTL, 1);
+ *cmds++ = A3XX_RBBM_CLOCK_CTL_DEFAULT;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+
+ *cmds++ = _SET(HLSQ_CTRL0REG_FSTHREADSIZE, HLSQ_FOUR_PIX_QUADS) |
+ _SET(HLSQ_CTRL0REG_FSSUPERTHREADENABLE, 1) |
+ _SET(HLSQ_CTRL0REG_SPSHADERRESTART, 1) |
+ _SET(HLSQ_CTRL0REG_CHUNKDISABLE, 1) |
+ _SET(HLSQ_CTRL0REG_SPCONSTFULLUPDATE, 1);
+
+ *cmds++ = _SET(HLSQ_CTRL1REG_VSTHREADSIZE, HLSQ_TWO_VTX_QUADS) |
+ _SET(HLSQ_CTRL1REG_VSSUPERTHREADENABLE, 1);
+
+ *cmds++ = _SET(HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD, 31);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BUF_INFO0);
+
+ *cmds++ = _SET(RB_MRTBUFINFO_COLOR_FORMAT, RB_R8G8B8A8_UNORM) |
+ _SET(RB_MRTBUFINFO_COLOR_TILE_MODE, RB_TILINGMODE_32X32) |
+ _SET(RB_MRTBUFINFO_COLOR_BUF_PITCH,
+ (shadow->gmem_pitch * 4 * 8) / 256);
+
+ *cmds++ = _SET(RB_MRTBUFBASE_COLOR_BUF_BASE, tmp_ctx.gmem_base >> 5);
+
+
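+ /* Load the texture sampler used for the GMEM copy */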
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 4);
+ *cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_TP_TEX_SAMPLERS << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ *cmds++ = 0x00000240;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+
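+ /* Load the texture memory object: dimensions and pitch of the shadow */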
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 6);
+ *cmds++ = (16 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_TP_TEX << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_TP_TEX_MEMOBJ << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ *cmds++ = 0x4cc06880;
+ *cmds++ = shadow->height | (shadow->width << 14);
+ *cmds++ = (shadow->pitch*4*8) << 9;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+
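+ /* Load the mipmap base pointers; level 0 is the shadow buffer */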
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 16);
+ *cmds++ = (224 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_TP_MIPMAP << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (14 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_TP_MIPMAP_BASE << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+ *cmds++ = shadow->gmemshadow.gpuaddr;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_HLSQ_VS_CONTROL_REG);
+
+ *cmds++ = _SET(HLSQ_VSCTRLREG_VSINSTRLENGTH, 1);
+
+ *cmds++ = _SET(HLSQ_FSCTRLREG_FSCONSTLENGTH, 1) |
+ _SET(HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET, 128) |
+ _SET(HLSQ_FSCTRLREG_FSINSTRLENGTH, 2);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_LENGTH_REG);
+
+ *cmds++ = _SET(SP_SHADERLENGTH_LEN, 2);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 12);
+ *cmds++ = CP_REG(A3XX_SP_VS_CTRL_REG0);
+
+ *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+ _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_VSCTRLREG0_VSICACHEINVALID, 1) |
+ _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 2) |
+ _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+ _SET(SP_VSCTRLREG0_VSLENGTH, 1);
+
+ *cmds++ = _SET(SP_VSCTRLREG1_VSINITIALOUTSTANDING, 8);
+
+ *cmds++ = _SET(SP_VSPARAMREG_POSREGID, 4) |
+ _SET(SP_VSPARAMREG_PSIZEREGID, 252) |
+ _SET(SP_VSPARAMREG_TOTALVSOUTVAR, 1);
+
+ *cmds++ = _SET(SP_VSOUTREG_COMPMASK0, 3);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
+ *cmds++ = CP_REG(A3XX_SP_VS_VPC_DST_REG_0);
+
+ *cmds++ = _SET(SP_VSVPCDSTREG_OUTLOC0, 8);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
+ *cmds++ = CP_REG(A3XX_SP_VS_LENGTH_REG);
+
+ *cmds++ = _SET(SP_SHADERLENGTH_LEN, 1);
+
+ *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+ _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_FSCTRLREG0_FSICACHEINVALID, 1) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+ _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+ _SET(SP_FSCTRLREG0_PIXLODENABLE, 1) |
+ _SET(SP_FSCTRLREG0_FSLENGTH, 2);
+
+ *cmds++ = _SET(SP_FSCTRLREG1_FSCONSTLENGTH, 1) |
+ _SET(SP_FSCTRLREG1_FSINITIALOUTSTANDING, 2) |
+ _SET(SP_FSCTRLREG1_HALFPRECVAROFFSET, 63);
+
+ *cmds++ = _SET(SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET, 128) |
+ _SET(SP_OBJOFFSETREG_SHADEROBJOFFSETINIC, 126);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_SP_FS_FLAT_SHAD_MODE_REG_0);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_OUTPUT_REG);
+
+ *cmds++ = _SET(SP_FSOUTREG_PAD0, SP_PIXEL_BASED);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_SP_FS_MRT_REG_0);
+
+ *cmds++ = _SET(SP_FSMRTREG_PRECISION, 1);
+
+ *cmds++ = 0;
+
+ *cmds++ = 0;
+
+ *cmds++ = 0;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 11);
+ *cmds++ = CP_REG(A3XX_VPC_ATTR);
+
+ *cmds++ = _SET(VPC_VPCATTR_TOTALATTR, 2) |
+ _SET(VPC_VPCATTR_THRHDASSIGN, 1) |
+ _SET(VPC_VPCATTR_LMSIZE, 1);
+
+ *cmds++ = _SET(VPC_VPCPACK_NUMFPNONPOSVAR, 2) |
+ _SET(VPC_VPCPACK_NUMNONPOSVSVAR, 2);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+
+ *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+
+ *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+
+ *cmds++ = _SET(VPC_VPCVARPSREPLMODE_COMPONENT08, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT09, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0A, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0B, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0C, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0D, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0E, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT0F, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT10, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT11, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT12, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT13, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT14, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT15, 2) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT16, 1) |
+ _SET(VPC_VPCVARPSREPLMODE_COMPONENT17, 2);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_SP_CTRL_REG);
+
+ *cmds++ = _SET(SP_SPCTRLREG_SLEEPMODE, 1) |
+ _SET(SP_SPCTRLREG_LOMODE, 1);
+
+
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 10);
+ *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_SP_VS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (1 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_SP_VS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+
+ *cmds++ = 0x00000000; *cmds++ = 0x13001000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+
+
+ *cmds++ = cp_type3_packet(CP_LOAD_STATE, 18);
+ *cmds++ = (0 << CP_LOADSTATE_DSTOFFSET_SHIFT)
+ | (HLSQ_DIRECT << CP_LOADSTATE_STATESRC_SHIFT)
+ | (HLSQ_BLOCK_ID_SP_FS << CP_LOADSTATE_STATEBLOCKID_SHIFT)
+ | (2 << CP_LOADSTATE_NUMOFUNITS_SHIFT);
+ *cmds++ = (HLSQ_SP_FS_INSTR << CP_LOADSTATE_STATETYPE_SHIFT)
+ | (0 << CP_LOADSTATE_EXTSRCADDR_SHIFT);
+
+ *cmds++ = 0x00002000; *cmds++ = 0x57309902;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000500;
+
+ *cmds++ = 0x00000005; *cmds++ = 0xa0c01f00;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x30040b00;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x03000000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000; *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_VFD_PERFCOUNTER0_SELECT, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_VFD_CONTROL_0);
+
+ *cmds++ = _SET(VFD_CTRLREG0_TOTALATTRTOVS, 8) |
+ _SET(VFD_CTRLREG0_PACKETSIZE, 2) |
+ _SET(VFD_CTRLREG0_STRMDECINSTRCNT, 2) |
+ _SET(VFD_CTRLREG0_STRMFETCHINSTRCNT, 2);
+
+ *cmds++ = _SET(VFD_CTRLREG1_MAXSTORAGE, 2) |
+ _SET(VFD_CTRLREG1_REGID4VTX, 252) |
+ _SET(VFD_CTRLREG1_REGID4INST, 252);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_VFD_FETCH_INSTR_0_0);
+
+ *cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 7) |
+ _SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 8) |
+ _SET(VFD_FETCHINSTRUCTIONS_SWITCHNEXT, 1) |
+ _SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
+
+ *cmds++ = _SET(VFD_BASEADDR_BASEADDR,
+ shadow->quad_vertices_restore.gpuaddr);
+
+ *cmds++ = _SET(VFD_FETCHINSTRUCTIONS_FETCHSIZE, 11) |
+ _SET(VFD_FETCHINSTRUCTIONS_BUFSTRIDE, 12) |
+ _SET(VFD_FETCHINSTRUCTIONS_INDEXDECODE, 1) |
+ _SET(VFD_FETCHINSTRUCTIONS_STEPRATE, 1);
+
+ *cmds++ = _SET(VFD_BASEADDR_BASEADDR,
+ shadow->quad_vertices_restore.gpuaddr + 16);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_VFD_DECODE_INSTR_0);
+
+ *cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
+ _SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_FORMAT, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 8) |
+ _SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_SWITCHNEXT, 1);
+
+ *cmds++ = _SET(VFD_DECODEINSTRUCTIONS_WRITEMASK, 0x0F) |
+ _SET(VFD_DECODEINSTRUCTIONS_CONSTFILL, 1) |
+ _SET(VFD_DECODEINSTRUCTIONS_FORMAT, 2) |
+ _SET(VFD_DECODEINSTRUCTIONS_REGID, 4) |
+ _SET(VFD_DECODEINSTRUCTIONS_SHIFTCNT, 12) |
+ _SET(VFD_DECODEINSTRUCTIONS_LASTCOMPVALID, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_DEPTH_CONTROL);
+
+ *cmds++ = _SET(RB_DEPTHCONTROL_Z_TEST_FUNC, RB_FRAG_LESS);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_STENCIL_CONTROL);
+
+ *cmds++ = _SET(RB_STENCILCONTROL_STENCIL_FUNC, RB_REF_ALWAYS) |
+ _SET(RB_STENCILCONTROL_STENCIL_FAIL, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZPASS, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZFAIL, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_FUNC_BF, RB_REF_ALWAYS) |
+ _SET(RB_STENCILCONTROL_STENCIL_FAIL_BF, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZPASS_BF, RB_STENCIL_KEEP) |
+ _SET(RB_STENCILCONTROL_STENCIL_ZFAIL_BF, RB_STENCIL_KEEP);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MODE_CONTROL);
+
+ *cmds++ = _SET(RB_MODECONTROL_RENDER_MODE, RB_RENDERING_PASS) |
+ _SET(RB_MODECONTROL_MARB_CACHE_SPLIT_MODE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_RENDER_CONTROL);
+
+ *cmds++ = _SET(RB_RENDERCONTROL_BIN_WIDTH, shadow->width >> 5) |
+ _SET(RB_RENDERCONTROL_ALPHA_TEST_FUNC, 7);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MSAA_CONTROL);
+
+ *cmds++ = _SET(RB_MSAACONTROL_MSAA_DISABLE, 1) |
+ _SET(RB_MSAACONTROL_SAMPLE_MASK, 0xFFFF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MRT_CONTROL0);
+
+ *cmds++ = _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_DISABLE) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL0);
+
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL1);
+
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL2);
+
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = _SET(RB_MRTCONTROL_READ_DEST_ENABLE, 1) |
+ _SET(RB_MRTCONTROL_ROP_CODE, 12) |
+ _SET(RB_MRTCONTROL_DITHER_MODE, RB_DITHER_ALWAYS) |
+ _SET(RB_MRTCONTROL_COMPONENT_ENABLE, 0xF);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_RB_MRT_BLEND_CONTROL3);
+
+ *cmds++ = _SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
+ _SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO) |
+ _SET(RB_MRTBLENDCONTROL_CLAMP_ENABLE, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_VFD_INDEX_MIN);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 340;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_VFD_VS_THREADING_THRESHOLD);
+
+ *cmds++ = _SET(VFD_THREADINGTHRESHOLD_REGID_THRESHOLD, 15) |
+ _SET(VFD_THREADINGTHRESHOLD_REGID_VTXCNT, 252);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_TPL1_TP_VS_TEX_OFFSET);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_TPL1_TP_FS_TEX_OFFSET);
+
+ *cmds++ = _SET(TPL1_TPTEXOFFSETREG_SAMPLEROFFSET, 16) |
+ _SET(TPL1_TPTEXOFFSETREG_MEMOBJOFFSET, 16) |
+ _SET(TPL1_TPTEXOFFSETREG_BASETABLEPTR, 224);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_CONTROL);
+
+ *cmds++ = 0x04001000;
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_SU_MODE_CONTROL);
+
+ *cmds++ = _SET(GRAS_SU_CTRLMODE_LINEHALFWIDTH, 2);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_WINDOW_SCISSOR_TL);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_X, shadow->width - 1) |
+ _SET(GRAS_SC_WINDOW_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_SC_SCREEN_SCISSOR_TL);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_X, shadow->width - 1) |
+ _SET(GRAS_SC_SCREEN_SCISSOR_BR_BR_Y, shadow->height - 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 5);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_XOFFSET);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_CL_VPORT_XSCALE_VPORT_XSCALE, 0x3F800000);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_CL_VPORT_YSCALE_VPORT_YSCALE, 0x3F800000);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_VPORT_ZOFFSET);
+
+ *cmds++ = 0x00000000;
+
+ *cmds++ = _SET(GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE, 0x3F800000);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_GRAS_CL_CLIP_CNTL);
+
+ *cmds++ = _SET(GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER, 1);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_SP_FS_IMAGE_OUTPUT_REG_0);
+
+ *cmds++ = _SET(SP_IMAGEOUTPUTREG_MRTFORMAT, SP_R8G8B8A8_UNORM);
+
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmds++ = CP_REG(A3XX_PC_PRIM_VTX_CNTL);
+
+ *cmds++ = _SET(PC_PRIM_VTX_CONTROL_STRIDE_IN_VPC, 2) |
+ _SET(PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE,
+ PC_DRAW_TRIANGLES) |
+ _SET(PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE,
+ PC_DRAW_TRIANGLES) |
+ _SET(PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST, 1);
+
+
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = 0x00000400;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00008000;
+
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_VSCTRLREG0_VSTHREADMODE, SP_MULTI) |
+ _SET(SP_VSCTRLREG0_VSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_VSCTRLREG0_VSFULLREGFOOTPRINT, 2) |
+ _SET(SP_VSCTRLREG0_VSTHREADSIZE, SP_TWO_VTX_QUADS) |
+ _SET(SP_VSCTRLREG0_VSLENGTH, 1);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_CTRL_REG0, 1);
+ *cmds++ = _SET(SP_FSCTRLREG0_FSTHREADMODE, SP_MULTI) |
+ _SET(SP_FSCTRLREG0_FSINSTRBUFFERMODE, SP_BUFFER_MODE) |
+ _SET(SP_FSCTRLREG0_FSHALFREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSFULLREGFOOTPRINT, 1) |
+ _SET(SP_FSCTRLREG0_FSINOUTREGOVERLAP, 1) |
+ _SET(SP_FSCTRLREG0_FSTHREADSIZE, SP_FOUR_PIX_QUADS) |
+ _SET(SP_FSCTRLREG0_FSSUPERTHREADMODE, 1) |
+ _SET(SP_FSCTRLREG0_FSLENGTH, 2);
+
+ *cmds++ = cp_type0_packet(A3XX_SP_VS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000;
+
+ *cmds++ = cp_type0_packet(A3XX_SP_FS_PVT_MEM_SIZE_REG, 1);
+ *cmds++ = 0x00000000;
+
+
+
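+ /* Draw a rectlist that streams the shadow texture back into GMEM */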
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
+ *cmds++ = 0x00000000;
+ *cmds++ = BUILD_PC_DRAW_INITIATOR(PC_DI_PT_RECTLIST,
+ PC_DI_SRC_SEL_AUTO_INDEX,
+ PC_DI_INDEX_SIZE_16_BIT,
+ PC_DI_IGNORE_VISIBILITY);
+ *cmds++ = 0x00000002;
+
+
+ create_ib1(drawctxt, shadow->gmem_restore, start, cmds);
+
+ return cmds;
+}
+
+
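+/* Build the IB that restores context registers from the shadow */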
+static void build_regrestore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *start = tmp_ctx.cmd;
+ unsigned int *cmd = start;
+ unsigned int *lcc_start;
+
+ int i;
+
+
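+ /* Flush HLSQ lazy updates */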
+ *cmd++ = cp_type3_packet(CP_EVENT_WRITE, 1);
+ *cmd++ = 0x7;
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
+ *cmd++ = 0;
+
+ *cmd++ = cp_type0_packet(A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
+ *cmd++ = 0x00000000;
+ *cmd++ = (unsigned int)
+ UCHE_ENTIRE_CACHE << UCHE_INVALIDATE1REG_ALLORPORTION |
+ UCHE_OP_INVALIDATE << UCHE_INVALIDATE1REG_OPCODE |
+ 0;
+
+ lcc_start = cmd;
+
+
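+ /* Skip the LOAD_CONSTANT_CONTEXT header; it is filled in once the size is known */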
+ cmd++;
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+
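+ /* Set bit 0 to force a mismatch with the shadow */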
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) | 1;
+#else
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(context_register_ranges) / 2; i++) {
+ cmd = reg_range(cmd, context_register_ranges[i * 2],
+ context_register_ranges[i * 2 + 1]);
+ }
+
+ lcc_start[0] = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT,
+ (cmd - lcc_start) - 1);
+
+#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ lcc_start[2] |= (0 << 24) | (4 << 16);
+#else
+ lcc_start[2] |= (1 << 24) | (4 << 16);
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(global_registers); i++) {
+ *cmd++ = cp_type0_packet(global_registers[i], 1);
+ tmp_ctx.reg_values[i] = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0x00000000;
+ }
+
+ create_ib1(drawctxt, drawctxt->reg_restore, start, cmd);
+ tmp_ctx.cmd = cmd;
+}
+
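+/* Build the IB that restores shader constants and texture state */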
+static void build_constantrestore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+ unsigned int mode = 4; /* Indirect state source */
+ unsigned int stateblock;
+ unsigned int numunits;
+ unsigned int statetype;
+
+ drawctxt->cond_execs[2].hostptr = cmd;
+ drawctxt->cond_execs[2].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ drawctxt->cond_execs[3].hostptr = cmd;
+ drawctxt->cond_execs[3].gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+ *cmd++ = 4 << 16;
+ *cmd++ = 0x0;
+#endif
+
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+ *cmd++ = 0x68000240;
+
+#ifndef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
+
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
+ *cmd++ = (4 << 16) | (1 << 24);
+ *cmd++ = 0x0;
+#endif
+
+
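+ /* Conditionally load the vertex shader constants */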
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[2].gpuaddr >> 2;
+ *cmd++ = 0x0000ffff;
+ *cmd++ = 3;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ drawctxt->constant_load_commands[0].hostptr = cmd;
+ drawctxt->constant_load_commands[0].gpuaddr = virt2gpu(cmd,
+ &drawctxt->gpustate);
+
+
+ *cmd++ = 0;
+ *cmd++ = ((drawctxt->gpustate.gpuaddr) & 0xfffffffc) | 1;
+
+
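+ /* Conditionally load the fragment shader constants */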
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[3].gpuaddr >> 2;
+ *cmd++ = 0x0000ffff;
+ *cmd++ = 3;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ drawctxt->constant_load_commands[1].hostptr = cmd;
+ drawctxt->constant_load_commands[1].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+
+ *cmd++ = 0;
+ drawctxt->constant_load_commands[2].hostptr = cmd;
+ drawctxt->constant_load_commands[2].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+
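+ /* Load vertex shader texture memory objects */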
+ stateblock = 0;
+ statetype = 1;
+ numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;
+
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MEM_OBJECTS)
+ & 0xfffffffc) | statetype;
+
+
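+ /* Load vertex shader texture mipmap pointers */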
+ stateblock = 1;
+ statetype = 1;
+ numunits = TEX_SIZE_MIPMAP / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_MIPMAP)
+ & 0xfffffffc) | statetype;
+
+
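+ /* Load vertex shader texture sampler objects */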
+ stateblock = 0;
+ statetype = 0;
+ numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + VS_TEX_OFFSET_SAMPLER_OBJ)
+ & 0xfffffffc) | statetype;
+
+
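+ /* Load fragment shader texture memory objects */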
+ stateblock = 2;
+ statetype = 1;
+ numunits = (TEX_SIZE_MEM_OBJECTS / 7) / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MEM_OBJECTS)
+ & 0xfffffffc) | statetype;
+
+
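+ /* Load fragment shader texture mipmap pointers */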
+ stateblock = 3;
+ statetype = 1;
+ numunits = TEX_SIZE_MIPMAP / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_MIPMAP)
+ & 0xfffffffc) | statetype;
+
+
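+ /* Load fragment shader texture sampler objects */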
+ stateblock = 2;
+ statetype = 0;
+ numunits = (TEX_SIZE_SAMPLER_OBJ / 2) / 4;
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ *cmd++ = (numunits << 22) | (stateblock << 19) | (mode << 16);
+ *cmd++ = ((drawctxt->gpustate.gpuaddr + FS_TEX_OFFSET_SAMPLER_OBJ)
+ & 0xfffffffc) | statetype;
+
+ create_ib1(drawctxt, drawctxt->constant_restore, start, cmd);
+ tmp_ctx.cmd = cmd;
+}
+
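+/* Build the IB that restores shader instruction memory */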
+static void build_shader_restore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+
+
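+ /* Conditionally restore vertex shader instruction memory */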
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[0].gpuaddr >> 2;
+ *cmd++ = 1;
+ *cmd++ = 3;
+
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ drawctxt->shader_load_commands[0].hostptr = cmd;
+ drawctxt->shader_load_commands[0].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET) & 0xfffffffc;
+
+
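+ /* Conditionally restore fragment shader instruction memory */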
+ *cmd++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+ *cmd++ = drawctxt->cond_execs[1].gpuaddr >> 2;
+ *cmd++ = 1;
+ *cmd++ = 3;
+
+ *cmd++ = cp_type3_packet(CP_LOAD_STATE, 2);
+ drawctxt->shader_load_commands[1].hostptr = cmd;
+ drawctxt->shader_load_commands[1].gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+ *cmd++ = (drawctxt->gpustate.gpuaddr + SHADER_OFFSET
+ + (SHADER_SHADOW_SIZE / 2)) & 0xfffffffc;
+
+ create_ib1(drawctxt, drawctxt->shader_restore, start, cmd);
+ tmp_ctx.cmd = cmd;
+}
+
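+/* Build the IB that restores HLSQ_CONTROL_0 from a value patched in at switch time */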
+static void build_hlsqcontrol_restore_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 2);
+ *cmd++ = CP_REG(A3XX_HLSQ_CONTROL_0_REG);
+ drawctxt->hlsqcontrol_restore_commands[0].hostptr = cmd;
+ drawctxt->hlsqcontrol_restore_commands[0].gpuaddr
+ = virt2gpu(cmd, &drawctxt->gpustate);
+ *cmd++ = 0;
+
+
+ create_ib1(drawctxt, drawctxt->hlsqcontrol_restore, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
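+/* Build the IB that patches the restore IBs with the saved state sizes */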
+static void build_restore_fixup_cmds(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ unsigned int *cmd = tmp_ctx.cmd;
+ unsigned int *start = cmd;
+
+#ifdef GSL_CONTEXT_SWITCH_CPU_SYNC
+
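+ /* Copy the live register values into the reserved load command slots */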
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG0;
+ *cmd++ = drawctxt->shader_load_commands[0].gpuaddr;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG0;
+ *cmd++ = drawctxt->shader_load_commands[1].gpuaddr;
+
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_VS_CTRL_REG1;
+ *cmd++ = drawctxt->constant_load_commands[0].gpuaddr;
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_CTRL_REG1;
+ *cmd++ = drawctxt->constant_load_commands[1].gpuaddr;
+
+
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
+ *cmd++ = A3XX_SP_FS_OBJ_OFFSET_REG;
+ *cmd++ = drawctxt->constant_load_commands[2].gpuaddr;
+#else
+
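+ /* Use the CP to rebuild the load commands from the live register values */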
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG0, 0x7f000000,
+ 30, (4 << 19) | (4 << 16),
+ drawctxt->shader_load_commands[0].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG0, 0x7f000000,
+ 30, (6 << 19) | (4 << 16),
+ drawctxt->shader_load_commands[1].gpuaddr);
+
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+ 23, (4 << 19) | (4 << 16),
+ drawctxt->constant_load_commands[0].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+ 23, (6 << 19) | (4 << 16),
+ drawctxt->constant_load_commands[1].gpuaddr);
+
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_VS_CTRL_REG1, 0x000003ff,
+ 0, 0, drawctxt->cond_execs[2].gpuaddr);
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_CTRL_REG1, 0x000003ff,
+ 0, 0, drawctxt->cond_execs[3].gpuaddr);
+
+
+ cmd = rmw_regtomem(cmd, A3XX_SP_FS_OBJ_OFFSET_REG, 0x00ff0000,
+ 18, (drawctxt->gpustate.gpuaddr & 0xfffffe00) | 1,
+ drawctxt->constant_load_commands[2].gpuaddr);
+#endif
+
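+	/* Capture HLSQ_CONTROL_0 (masked) into the hlsqcontrol restore
+	 * command */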
+ cmd = rmw_regtomem(cmd, A3XX_HLSQ_CONTROL_0_REG, 0x9ffffdff,
+ 0, 0, drawctxt->hlsqcontrol_restore_commands[0].gpuaddr);
+
+ create_ib1(drawctxt, drawctxt->restore_fixup, start, cmd);
+
+ tmp_ctx.cmd = cmd;
+}
+
+static int a3xx_create_gpustate_shadow(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ drawctxt->flags |= CTXT_FLAGS_STATE_SHADOW;
+
+ build_regrestore_cmds(adreno_dev, drawctxt);
+ build_constantrestore_cmds(adreno_dev, drawctxt);
+ build_hlsqcontrol_restore_cmds(adreno_dev, drawctxt);
+ build_regconstantsave_cmds(adreno_dev, drawctxt);
+ build_shader_save_cmds(adreno_dev, drawctxt);
+ build_shader_restore_cmds(adreno_dev, drawctxt);
+ build_restore_fixup_cmds(adreno_dev, drawctxt);
+ build_save_fixup_cmds(adreno_dev, drawctxt);
+
+ return 0;
+}
+
+static int a3xx_create_gmem_shadow(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ int result;
+
+ calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
+ tmp_ctx.gmem_base = adreno_dev->gmem_base;
+
+ result = kgsl_allocate(&drawctxt->context_gmem_shadow.gmemshadow,
+ drawctxt->pagetable, drawctxt->context_gmem_shadow.size);
+
+ if (result)
+ return result;
+
+ build_quad_vtxbuff(drawctxt, &drawctxt->context_gmem_shadow,
+ &tmp_ctx.cmd);
+
+ tmp_ctx.cmd = build_gmem2sys_cmds(adreno_dev, drawctxt,
+ &drawctxt->context_gmem_shadow);
+ tmp_ctx.cmd = build_sys2gmem_cmds(adreno_dev, drawctxt,
+ &drawctxt->context_gmem_shadow);
+
+ kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
+ KGSL_CACHE_OP_FLUSH);
+
+ drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;
+
+ return 0;
+}
+
+static int a3xx_drawctxt_create(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt)
+{
+ int ret;
+
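+	/* Allocate the shadow buffer that holds all saved context state */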
+ ret = kgsl_allocate(&drawctxt->gpustate,
+ drawctxt->pagetable, CONTEXT_SIZE);
+
+ if (ret)
+ return ret;
+
+ kgsl_sharedmem_set(&drawctxt->gpustate, 0, 0, CONTEXT_SIZE);
+ tmp_ctx.cmd = drawctxt->gpustate.hostptr + CMD_OFFSET;
+
+ if (!(drawctxt->flags & CTXT_FLAGS_PREAMBLE)) {
+ ret = a3xx_create_gpustate_shadow(adreno_dev, drawctxt);
+ if (ret)
+ goto done;
+
+ drawctxt->flags |= CTXT_FLAGS_SHADER_SAVE;
+ }
+
+ if (!(drawctxt->flags & CTXT_FLAGS_NOGMEMALLOC))
+ ret = a3xx_create_gmem_shadow(adreno_dev, drawctxt);
+
+done:
+ if (ret)
+ kgsl_sharedmem_free(&drawctxt->gpustate);
+
+ return ret;
+}
+
+static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (context == NULL)
+ return;
+
+ if (context->flags & CTXT_FLAGS_GPU_HANG)
+ KGSL_CTXT_WARN(device,
+ "Current active context has caused gpu hang\n");
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+
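+		/* Fixup self-modifying IBs for save operations */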
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
+
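+		/* Save registers and constants */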
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->regconstant_save, 3);
+
+ if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
+
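+			/* Save shader instructions */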
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
+
+ context->flags |= CTXT_FLAGS_SHADER_RESTORE;
+ }
+ }
+
+ if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
+ (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
+
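+		/* Save GMEM to the shadow buffer */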
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->context_gmem_shadow.
+ gmem_save, 3);
+ context->flags |= CTXT_FLAGS_GMEM_RESTORE;
+ }
+}
+
+static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
+ struct adreno_context *context)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int cmds[5];
+
+ if (context == NULL) {
+
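+		/* No context: switch to the default pagetable */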
+ kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+ adreno_dev->drawctxt_active->id);
+ return;
+ }
+
+ KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);
+
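+	/* Record the incoming context id in memstore */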
+ cmds[0] = cp_nop_packet(1);
+ cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
+ cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
+ cmds[3] = device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
+ cmds[4] = context->id;
+ adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+ cmds, 5);
+ kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
+
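+	/* Restore GMEM from the shadow if it was saved */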
+ if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_PMODE,
+ context->context_gmem_shadow.
+ gmem_restore, 3);
+ context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
+ }
+
+ if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
+
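+		/* Fixup self-modifying IBs for restore operations */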
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->restore_fixup, 3);
+
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->constant_restore, 3);
+
+ if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->shader_restore, 3);
+
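+		/* Restore the HLSQ_CONTROL_0 register */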
+ adreno_ringbuffer_issuecmds(device, context,
+ KGSL_CMD_FLAGS_NONE,
+ context->hlsqcontrol_restore, 3);
+ }
+}
+
+static void a3xx_rb_init(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ unsigned int *cmds, cmds_gpu;
+ cmds = adreno_ringbuffer_allocspace(rb, 18);
+ cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * (rb->wptr - 18);
+
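+	/* Submit the CP_ME_INIT ordinals to bring up the micro engine */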
+ GSL_RB_WRITE(cmds, cmds_gpu, cp_type3_packet(CP_ME_INIT, 17));
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x000003f7);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000080);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000100);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000180);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00006600);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000150);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x0000014e);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000154);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+ GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
+
+ adreno_ringbuffer_submit(rb);
+}
+
+static void a3xx_err_callback(struct adreno_device *adreno_dev, int bit)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ const char *err = "";
+
+ switch (bit) {
+ case A3XX_INT_RBBM_AHB_ERROR: {
+ unsigned int reg;
+
+		adreno_regread(device, A3XX_RBBM_AHB_ERROR_STATUS, &reg);
+
+ KGSL_DRV_CRIT(device,
+ "RBBM | AHB bus error | %s | addr=%x | ports=%x:%x\n",
+ reg & (1 << 28) ? "WRITE" : "READ",
+ (reg & 0xFFFFF) >> 2, (reg >> 20) & 0x3,
+ (reg >> 24) & 0x3);
+
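+		/* Clear the error */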
+ adreno_regwrite(device, A3XX_RBBM_AHB_CMD, (1 << 3));
+ return;
+ }
+ case A3XX_INT_RBBM_REG_TIMEOUT:
+ err = "RBBM: AHB register timeout";
+ break;
+ case A3XX_INT_RBBM_ME_MS_TIMEOUT:
+ err = "RBBM: ME master split timeout";
+ break;
+ case A3XX_INT_RBBM_PFP_MS_TIMEOUT:
+ err = "RBBM: PFP master split timeout";
+ break;
+ case A3XX_INT_RBBM_ATB_BUS_OVERFLOW:
+		err = "RBBM: ATB bus overflow";
+ break;
+ case A3XX_INT_VFD_ERROR:
+ err = "VFD: Out of bounds access";
+ break;
+ case A3XX_INT_CP_T0_PACKET_IN_IB:
+		err = "ringbuffer T0 packet in IB interrupt";
+ break;
+ case A3XX_INT_CP_OPCODE_ERROR:
+ err = "ringbuffer opcode error interrupt";
+ break;
+ case A3XX_INT_CP_RESERVED_BIT_ERROR:
+ err = "ringbuffer reserved bit error interrupt";
+ break;
+ case A3XX_INT_CP_HW_FAULT:
+ err = "ringbuffer hardware fault";
+ break;
+ case A3XX_INT_CP_REG_PROTECT_FAULT:
+ err = "ringbuffer protected mode error interrupt";
+ break;
+ case A3XX_INT_CP_AHB_ERROR_HALT:
+ err = "ringbuffer AHB error interrupt";
+ break;
+ case A3XX_INT_MISC_HANG_DETECT:
+ err = "MISC: GPU hang detected";
+ break;
+ case A3XX_INT_UCHE_OOB_ACCESS:
+ err = "UCHE: Out of bounds access";
+ break;
+ }
+
+ KGSL_DRV_CRIT(device, "%s\n", err);
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+}
+
+static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (irq == A3XX_INT_CP_RB_INT) {
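+		/* Disable timestamp comparison for the current context */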
+ unsigned int context_id;
+ kgsl_sharedmem_readl(&device->memstore, &context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+ if (context_id < KGSL_MEMSTORE_MAX) {
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(context_id,
+ ts_cmp_enable), 0);
+ wmb();
+ }
+ KGSL_CMD_WARN(device, "ringbuffer rb interrupt\n");
+ }
+
+ wake_up_interruptible_all(&device->wait_queue);
+
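+	/* Schedule processing of expired timestamps */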
+ queue_work(device->work_queue, &device->ts_expired_ws);
+
+ atomic_notifier_call_chain(&device->ts_notifier_list,
+ device->id, NULL);
+}
+
+#define A3XX_IRQ_CALLBACK(_c) { .func = _c }
+
+#define A3XX_INT_MASK \
+ ((1 << A3XX_INT_RBBM_AHB_ERROR) | \
+ (1 << A3XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
+ (1 << A3XX_INT_CP_T0_PACKET_IN_IB) | \
+ (1 << A3XX_INT_CP_OPCODE_ERROR) | \
+ (1 << A3XX_INT_CP_RESERVED_BIT_ERROR) | \
+ (1 << A3XX_INT_CP_HW_FAULT) | \
+ (1 << A3XX_INT_CP_IB1_INT) | \
+ (1 << A3XX_INT_CP_IB2_INT) | \
+ (1 << A3XX_INT_CP_RB_INT) | \
+ (1 << A3XX_INT_CP_REG_PROTECT_FAULT) | \
+ (1 << A3XX_INT_CP_AHB_ERROR_HALT) | \
+ (1 << A3XX_INT_UCHE_OOB_ACCESS))
+
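+/* Handlers indexed by RBBM_INT_0 status bit; a NULL entry marks an
+ * interrupt that is not expected to fire */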
+static struct {
+ void (*func)(struct adreno_device *, int);
+} a3xx_irq_funcs[] = {
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(a3xx_cp_callback),
+ A3XX_IRQ_CALLBACK(a3xx_cp_callback),
+ A3XX_IRQ_CALLBACK(a3xx_cp_callback),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(NULL),
+ A3XX_IRQ_CALLBACK(a3xx_err_callback),
+};
+
+static irqreturn_t a3xx_irq_handler(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int status, tmp;
+ int i;
+
+	adreno_regread(device, A3XX_RBBM_INT_0_STATUS, &status);
+
+ for (tmp = status, i = 0; tmp && i < ARRAY_SIZE(a3xx_irq_funcs); i++) {
+ if (tmp & 1) {
+ if (a3xx_irq_funcs[i].func != NULL) {
+ a3xx_irq_funcs[i].func(adreno_dev, i);
+ ret = IRQ_HANDLED;
+ } else {
+ KGSL_DRV_CRIT(device,
+ "Unhandled interrupt bit %x\n", i);
+ }
+ }
+
+ tmp >>= 1;
+ }
+
+ trace_kgsl_a3xx_irq_status(device, status);
+
+	if (status)
+		adreno_regwrite(device, A3XX_RBBM_INT_CLEAR_CMD, status);
+ return ret;
+}
+
+static void a3xx_irq_control(struct adreno_device *adreno_dev, int state)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (state)
+ adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, A3XX_INT_MASK);
+ else
+ adreno_regwrite(device, A3XX_RBBM_INT_0_MASK, 0);
+}
+
+static unsigned int a3xx_busy_cycles(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ unsigned int reg, val;
+
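+	/* Freeze the power counter so it can be read */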
+	adreno_regread(device, A3XX_RBBM_RBBM_CTL, &reg);
+ reg &= ~RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
+ adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+
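+	/* Read the busy cycle count */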
+ adreno_regread(device, A3XX_RBBM_PERFCTR_PWR_1_LO, &val);
+
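+	/* Reset the counter */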
+ reg |= RBBM_RBBM_CTL_RESET_PWR_CTR1;
+ adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+
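+	/* Re-enable the counter */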
+ reg &= ~RBBM_RBBM_CTL_RESET_PWR_CTR1;
+ reg |= RBBM_RBBM_CTL_ENABLE_PWR_CTR1;
+ adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, reg);
+
+ return val;
+}
+
+struct a3xx_vbif_data {
+ unsigned int reg;
+ unsigned int val;
+};
+
+static struct a3xx_vbif_data a305_vbif[] = {
+
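+	/* Set up 16-deep read/write request queues */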
+ { A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
+ { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+ { A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
+
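+	/* Enable WR-REQ */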
+ { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
+
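+	/* Set up round robin arbitration between both AXI ports */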
+ { A3XX_VBIF_ARB_CTL, 0x00000030 },
+
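+	/* Set up AOOO */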
+ { A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
+ { A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
+ {0, 0},
+};
+
+static struct a3xx_vbif_data a320_vbif[] = {
+
+ { A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010 },
+ { A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303 },
+ { A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010 },
+ { A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010 },
+
+ { A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000FF },
+
+ { A3XX_VBIF_ARB_CTL, 0x00000030 },
+
+ { A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003C },
+ { A3XX_VBIF_OUT_AXI_AOOO, 0x003C003C },
+
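+	/* Enable 1K sort */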
+ { A3XX_VBIF_ABIT_SORT, 0x000000FF },
+ { A3XX_VBIF_ABIT_SORT_CONF, 0x000000A4 },
+ {0, 0},
+};
+
+static void a3xx_start(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct a3xx_vbif_data *vbif = NULL;
+
+ if (adreno_is_a305(adreno_dev))
+ vbif = a305_vbif;
+ else if (adreno_is_a320(adreno_dev))
+ vbif = a320_vbif;
+
+ BUG_ON(vbif == NULL);
+
+ while (vbif->reg != 0) {
+ adreno_regwrite(device, vbif->reg, vbif->val);
+ vbif++;
+ }
+
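+	/* Make all blocks contribute to the GPU busy performance counter */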
+ adreno_regwrite(device, A3XX_RBBM_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
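+	/* Tune the hysteresis counters for SP and CP idle detection */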
+ adreno_regwrite(device, A3XX_RBBM_SP_HYST_CNT, 0x10);
+ adreno_regwrite(device, A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
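+	/* Enable the RBBM error reporting bits */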
+ adreno_regwrite(device, A3XX_RBBM_AHB_CTL0, 0x00000001);
+
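+	/* Enable AHB error reporting */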
+ adreno_regwrite(device, A3XX_RBBM_AHB_CTL1, 0xA6FFFFFF);
+
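+	/* Turn on the power counters */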
+ adreno_regwrite(device, A3XX_RBBM_RBBM_CTL, 0x00030000);
+
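+	/* Turn on hang detection */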
+ adreno_regwrite(device, A3XX_RBBM_INTERFACE_HANG_INT_CTL,
+ (1 << 16) | 0xFFF);
+
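+	/* Enable clock gating */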
+ adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+ A3XX_RBBM_CLOCK_CTL_DEFAULT);
+
+}
+
+void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
+ int *remain, int hang);
+
+struct adreno_gpudev adreno_a3xx_gpudev = {
+ .reg_rbbm_status = A3XX_RBBM_STATUS,
+ .reg_cp_pfp_ucode_addr = A3XX_CP_PFP_UCODE_ADDR,
+ .reg_cp_pfp_ucode_data = A3XX_CP_PFP_UCODE_DATA,
+
+ .ctxt_create = a3xx_drawctxt_create,
+ .ctxt_save = a3xx_drawctxt_save,
+ .ctxt_restore = a3xx_drawctxt_restore,
+ .ctxt_draw_workaround = NULL,
+ .rb_init = a3xx_rb_init,
+ .irq_control = a3xx_irq_control,
+ .irq_handler = a3xx_irq_handler,
+ .busy_cycles = a3xx_busy_cycles,
+ .start = a3xx_start,
+ .snapshot = a3xx_snapshot,
+};
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
new file mode 100644
index 0000000..14cdaaa
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -0,0 +1,317 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "adreno.h"
+#include "kgsl_snapshot.h"
+#include "a3xx_reg.h"
+
+#define DEBUG_SECTION_SZ(_dwords) (((_dwords) * sizeof(unsigned int)) \
+ + sizeof(struct kgsl_snapshot_debug))
+
+#define SHADER_MEMORY_SIZE 0x4000
+
+static int a3xx_snapshot_shader_memory(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE)) {
+ SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_SHADER_MEMORY;
+ header->size = SHADER_MEMORY_SIZE;
+
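+	/* Shader memory is mapped into the register space at 0x4000 */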
+ for (i = 0; i < SHADER_MEMORY_SIZE; i++)
+ adreno_regread(device, 0x4000 + i, &data[i]);
+
+ return DEBUG_SECTION_SZ(SHADER_MEMORY_SIZE);
+}
+
+#define VPC_MEMORY_BANKS 4
+#define VPC_MEMORY_SIZE 512
+
+static int a3xx_snapshot_vpc_memory(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int size = VPC_MEMORY_BANKS * VPC_MEMORY_SIZE;
+ int bank, addr, i = 0;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "VPC MEMORY");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_VPC_MEMORY;
+ header->size = size;
+
+ for (bank = 0; bank < VPC_MEMORY_BANKS; bank++) {
+ for (addr = 0; addr < VPC_MEMORY_SIZE; addr++) {
+ unsigned int val = bank | (addr << 4);
+ adreno_regwrite(device,
+ A3XX_VPC_VPC_DEBUG_RAM_SEL, val);
+ adreno_regread(device,
+ A3XX_VPC_VPC_DEBUG_RAM_READ, &data[i++]);
+ }
+ }
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+#define CP_MEQ_SIZE 16
+static int a3xx_snapshot_cp_meq(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(CP_MEQ_SIZE)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP MEQ DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP_MEQ;
+ header->size = CP_MEQ_SIZE;
+
+ adreno_regwrite(device, A3XX_CP_MEQ_ADDR, 0x0);
+ for (i = 0; i < CP_MEQ_SIZE; i++)
+ adreno_regread(device, A3XX_CP_MEQ_DATA, &data[i]);
+
+ return DEBUG_SECTION_SZ(CP_MEQ_SIZE);
+}
+
+static int a3xx_snapshot_cp_pm4_ram(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, size = adreno_dev->pm4_fw_size - 1;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP PM4 RAM DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP_PM4_RAM;
+ header->size = size;
+
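+	/* Read the PM4 microcode out of the ME RAM */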
+ adreno_regwrite(device, REG_CP_ME_RAM_RADDR, 0x0);
+ for (i = 0; i < size; i++)
+ adreno_regread(device, REG_CP_ME_RAM_DATA, &data[i]);
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+static int a3xx_snapshot_cp_pfp_ram(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i, size = adreno_dev->pfp_fw_size - 1;
+
+ if (remain < DEBUG_SECTION_SZ(size)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP PFP RAM DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP_PFP_RAM;
+ header->size = size;
+
+	adreno_regwrite(device, A3XX_CP_PFP_UCODE_ADDR, 0x0);
+ for (i = 0; i < size; i++)
+ adreno_regread(device, A3XX_CP_PFP_UCODE_DATA, &data[i]);
+
+ return DEBUG_SECTION_SZ(size);
+}
+
+#define CP_ROQ_SIZE 128
+
+static int a3xx_snapshot_cp_roq(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_debug *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < DEBUG_SECTION_SZ(CP_ROQ_SIZE)) {
+ SNAPSHOT_ERR_NOMEM(device, "CP ROQ DEBUG");
+ return 0;
+ }
+
+ header->type = SNAPSHOT_DEBUG_CP_ROQ;
+ header->size = CP_ROQ_SIZE;
+
+ adreno_regwrite(device, A3XX_CP_ROQ_ADDR, 0x0);
+ for (i = 0; i < CP_ROQ_SIZE; i++)
+ adreno_regread(device, A3XX_CP_ROQ_DATA, &data[i]);
+
+ return DEBUG_SECTION_SZ(CP_ROQ_SIZE);
+}
+
+#define DEBUGFS_BLOCK_SIZE 0x40
+
+static int a3xx_snapshot_debugbus_block(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct kgsl_snapshot_debugbus *header = snapshot;
+ unsigned int id = (unsigned int) priv;
+ unsigned int val;
+ int i;
+ unsigned int *data = snapshot + sizeof(*header);
+ int size =
+ (DEBUGFS_BLOCK_SIZE * sizeof(unsigned int)) + sizeof(*header);
+
+ if (remain < size) {
+ SNAPSHOT_ERR_NOMEM(device, "DEBUGBUS");
+ return 0;
+ }
+
+ val = (id << 8) | (1 << 16);
+
+ header->id = id;
+ header->count = DEBUGFS_BLOCK_SIZE;
+
+ for (i = 0; i < DEBUGFS_BLOCK_SIZE; i++) {
+ adreno_regwrite(device, A3XX_RBBM_DEBUG_BUS_CTL, val | i);
+ adreno_regread(device, A3XX_RBBM_DEBUG_BUS_DATA_STATUS,
+ &data[i]);
+ }
+
+ return size;
+}
+
+static unsigned int debugbus_blocks[] = {
+ RBBM_BLOCK_ID_CP,
+ RBBM_BLOCK_ID_RBBM,
+ RBBM_BLOCK_ID_VBIF,
+ RBBM_BLOCK_ID_HLSQ,
+ RBBM_BLOCK_ID_UCHE,
+ RBBM_BLOCK_ID_PC,
+ RBBM_BLOCK_ID_VFD,
+ RBBM_BLOCK_ID_VPC,
+ RBBM_BLOCK_ID_TSE,
+ RBBM_BLOCK_ID_RAS,
+ RBBM_BLOCK_ID_VSC,
+ RBBM_BLOCK_ID_SP_0,
+ RBBM_BLOCK_ID_SP_1,
+ RBBM_BLOCK_ID_SP_2,
+ RBBM_BLOCK_ID_SP_3,
+ RBBM_BLOCK_ID_TPL1_0,
+ RBBM_BLOCK_ID_TPL1_1,
+ RBBM_BLOCK_ID_TPL1_2,
+ RBBM_BLOCK_ID_TPL1_3,
+ RBBM_BLOCK_ID_RB_0,
+ RBBM_BLOCK_ID_RB_1,
+ RBBM_BLOCK_ID_RB_2,
+ RBBM_BLOCK_ID_RB_3,
+ RBBM_BLOCK_ID_MARB_0,
+ RBBM_BLOCK_ID_MARB_1,
+ RBBM_BLOCK_ID_MARB_2,
+ RBBM_BLOCK_ID_MARB_3,
+};
+
+static void *a3xx_snapshot_debugbus(struct kgsl_device *device,
+ void *snapshot, int *remain)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugbus_blocks); i++) {
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUGBUS, snapshot, remain,
+ a3xx_snapshot_debugbus_block,
+ (void *) debugbus_blocks[i]);
+ }
+
+ return snapshot;
+}
+
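+/*
+ * a3xx_snapshot - dump A3XX hardware state into the snapshot buffer,
+ * section by section, and return the updated write pointer.
+ */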
+void *a3xx_snapshot(struct adreno_device *adreno_dev, void *snapshot,
+ int *remain, int hang)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+ struct kgsl_snapshot_registers regs;
+
+ regs.regs = (unsigned int *) a3xx_registers;
+ regs.count = a3xx_registers_count;
+
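+	/* Disable clock gating so the debug state can be read */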
+ adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL, 0x00);
+
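+	/* Master set of (non-debug) registers */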
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_REGS, snapshot, remain,
+		kgsl_snapshot_dump_regs, &regs);
+
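+	/* CP_STATE_DEBUG indexed registers */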
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_CP_STATE_DEBUG_INDEX,
+ REG_CP_STATE_DEBUG_DATA, 0x0, 0x14);
+
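+	/* CP_ME indexed registers */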
+ snapshot = kgsl_snapshot_indexed_registers(device, snapshot,
+ remain, REG_CP_ME_CNTL, REG_CP_ME_STATUS,
+ 64, 44);
+
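+	/* VPC memory */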
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_vpc_memory, NULL);
+
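+	/* CP MEQ */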
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_cp_meq, NULL);
+
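+	/* Shader working/shadow memory */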
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_shader_memory, NULL);
+
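+	/*
+	 * Reading the microcode RAMs can wedge a running GPU, so only
+	 * dump them when the GPU is already hung.
+	 */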
+ if (hang) {
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_cp_pfp_ram, NULL);
+
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_cp_pm4_ram, NULL);
+ }
+
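+	/* CP ROQ */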
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUG, snapshot, remain,
+ a3xx_snapshot_cp_roq, NULL);
+
+ snapshot = a3xx_snapshot_debugbus(device, snapshot, remain);
+
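+	/* Re-enable clock gating */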
+ adreno_regwrite(device, A3XX_RBBM_CLOCK_CTL,
+ A3XX_RBBM_CLOCK_CTL_DEFAULT);
+
+ return snapshot;
+}
diff --git a/drivers/gpu/msm/adreno_a3xx_trace.c b/drivers/gpu/msm/adreno_a3xx_trace.c
new file mode 100644
index 0000000..80756c6
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a3xx_trace.c
@@ -0,0 +1,19 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "adreno.h"
+
+#define CREATE_TRACE_POINTS
+#include "a3xx_reg.h"
+#include "adreno_a3xx_trace.h"
diff --git a/drivers/gpu/msm/adreno_a3xx_trace.h b/drivers/gpu/msm/adreno_a3xx_trace.h
new file mode 100644
index 0000000..e4b4d11
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a3xx_trace.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_ADRENO_A3XX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ADRENO_A3XX_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE adreno_a3xx_trace
+
+#include <linux/tracepoint.h>
+
+struct kgsl_device;
+
+TRACE_EVENT(kgsl_a3xx_irq_status,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int status),
+
+ TP_ARGS(device, status),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->status = status;
+ ),
+
+ TP_printk(
+ "d_name=%s status=%s",
+ __get_str(device_name),
+ __entry->status ? __print_flags(__entry->status, "|",
+		{ 1 << A3XX_INT_RBBM_GPU_IDLE, "RBBM_GPU_IDLE" },
+ { 1 << A3XX_INT_RBBM_AHB_ERROR, "RBBM_AHB_ERR" },
+ { 1 << A3XX_INT_RBBM_REG_TIMEOUT, "RBBM_REG_TIMEOUT" },
+ { 1 << A3XX_INT_RBBM_ME_MS_TIMEOUT,
+ "RBBM_ME_MS_TIMEOUT" },
+ { 1 << A3XX_INT_RBBM_PFP_MS_TIMEOUT,
+ "RBBM_PFP_MS_TIMEOUT" },
+ { 1 << A3XX_INT_RBBM_ATB_BUS_OVERFLOW,
+ "RBBM_ATB_BUS_OVERFLOW" },
+ { 1 << A3XX_INT_VFD_ERROR, "RBBM_VFD_ERROR" },
+ { 1 << A3XX_INT_CP_SW_INT, "CP_SW" },
+ { 1 << A3XX_INT_CP_T0_PACKET_IN_IB,
+ "CP_T0_PACKET_IN_IB" },
+ { 1 << A3XX_INT_CP_OPCODE_ERROR, "CP_OPCODE_ERROR" },
+ { 1 << A3XX_INT_CP_RESERVED_BIT_ERROR,
+ "CP_RESERVED_BIT_ERROR" },
+ { 1 << A3XX_INT_CP_HW_FAULT, "CP_HW_FAULT" },
+ { 1 << A3XX_INT_CP_DMA, "CP_DMA" },
+ { 1 << A3XX_INT_CP_IB2_INT, "CP_IB2_INT" },
+ { 1 << A3XX_INT_CP_IB1_INT, "CP_IB1_INT" },
+ { 1 << A3XX_INT_CP_RB_INT, "CP_RB_INT" },
+ { 1 << A3XX_INT_CP_REG_PROTECT_FAULT,
+ "CP_REG_PROTECT_FAULT" },
+ { 1 << A3XX_INT_CP_RB_DONE_TS, "CP_RB_DONE_TS" },
+ { 1 << A3XX_INT_CP_VS_DONE_TS, "CP_VS_DONE_TS" },
+ { 1 << A3XX_INT_CP_PS_DONE_TS, "CP_PS_DONE_TS" },
+ { 1 << A3XX_INT_CACHE_FLUSH_TS, "CACHE_FLUSH_TS" },
+ { 1 << A3XX_INT_CP_AHB_ERROR_HALT,
+ "CP_AHB_ERROR_HALT" },
+ { 1 << A3XX_INT_MISC_HANG_DETECT, "MISC_HANG_DETECT" },
+ { 1 << A3XX_INT_UCHE_OOB_ACCESS, "UCHE_OOB_ACCESS" })
+ : "None"
+ )
+);
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
new file mode 100644
index 0000000..70eb2db
--- /dev/null
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -0,0 +1,138 @@
+/* Copyright (c) 2002,2008-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "kgsl.h"
+#include "adreno_postmortem.h"
+#include "adreno.h"
+
+#include "a2xx_reg.h"
+
+unsigned int kgsl_cff_dump_enable;
+int adreno_pm_regs_enabled;
+int adreno_pm_ib_enabled;
+
+static struct dentry *pm_d_debugfs;
+
+static int pm_dump_set(void *data, u64 val)
+{
+ struct kgsl_device *device = data;
+
+ if (val) {
+ mutex_lock(&device->mutex);
+ adreno_postmortem_dump(device, 1);
+ mutex_unlock(&device->mutex);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_dump_fops,
+ NULL,
+ pm_dump_set, "%llu\n");
+
+static int pm_regs_enabled_set(void *data, u64 val)
+{
+ adreno_pm_regs_enabled = val ? 1 : 0;
+ return 0;
+}
+
+static int pm_regs_enabled_get(void *data, u64 *val)
+{
+ *val = adreno_pm_regs_enabled;
+ return 0;
+}
+
+static int pm_ib_enabled_set(void *data, u64 val)
+{
+ adreno_pm_ib_enabled = val ? 1 : 0;
+ return 0;
+}
+
+static int pm_ib_enabled_get(void *data, u64 *val)
+{
+ *val = adreno_pm_ib_enabled;
+ return 0;
+}
+
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops,
+ pm_regs_enabled_get,
+ pm_regs_enabled_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_ib_enabled_fops,
+ pm_ib_enabled_get,
+ pm_ib_enabled_set, "%llu\n");
+
+
+static int kgsl_cff_dump_enable_set(void *data, u64 val)
+{
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+ kgsl_cff_dump_enable = (val != 0);
+ return 0;
+#else
+ return -EINVAL;
+#endif
+}
+
+static int kgsl_cff_dump_enable_get(void *data, u64 *val)
+{
+ *val = kgsl_cff_dump_enable;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kgsl_cff_dump_enable_fops, kgsl_cff_dump_enable_get,
+ kgsl_cff_dump_enable_set, "%llu\n");
+
+typedef void (*reg_read_init_t)(struct kgsl_device *device);
+typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
+ unsigned int *vals, int linec);
+
+void adreno_debugfs_init(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (!device->d_debugfs || IS_ERR(device->d_debugfs))
+ return;
+
+ debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
+ &kgsl_cff_dump_enable_fops);
+ debugfs_create_u32("wait_timeout", 0644, device->d_debugfs,
+ &adreno_dev->wait_timeout);
+ debugfs_create_u32("ib_check", 0644, device->d_debugfs,
+ &adreno_dev->ib_check_level);
+
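+	/* Fast hang detection is enabled by default */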
+ adreno_dev->fast_hang_detect = 1;
+ debugfs_create_u32("fast_hang_detect", 0644, device->d_debugfs,
+ &adreno_dev->fast_hang_detect);
+
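+	/* Create post-mortem control files */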
+ pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
+
+ if (IS_ERR(pm_d_debugfs))
+ return;
+
+ debugfs_create_file("dump", 0600, pm_d_debugfs, device,
+ &pm_dump_fops);
+ debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device,
+ &pm_regs_enabled_fops);
+ debugfs_create_file("ib_enabled", 0644, pm_d_debugfs, device,
+ &pm_ib_enabled_fops);
+}
diff --git a/drivers/gpu/msm/adreno_debugfs.h b/drivers/gpu/msm/adreno_debugfs.h
new file mode 100644
index 0000000..1c97ebb
--- /dev/null
+++ b/drivers/gpu/msm/adreno_debugfs.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2002,2008-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_DEBUGFS_H
+#define __ADRENO_DEBUGFS_H
+
+#ifdef CONFIG_DEBUG_FS
+
+void adreno_debugfs_init(struct kgsl_device *device);
+
+extern int adreno_pm_regs_enabled;
+extern int adreno_pm_ib_enabled;
+
+static inline int is_adreno_pm_regs_enabled(void)
+{
+ return adreno_pm_regs_enabled;
+}
+
+static inline int is_adreno_pm_ib_enabled(void)
+{
+ return adreno_pm_ib_enabled;
+}
+
+#else
+static inline void adreno_debugfs_init(struct kgsl_device *device)
+{
+}
+
+static inline int is_adreno_pm_regs_enabled(void)
+{
+	/* If debugfs is compiled out, always print registers */
+	return 1;
+}
+
+static inline int is_adreno_pm_ib_enabled(void)
+{
+	return 1;
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
new file mode 100644
index 0000000..4db7258
--- /dev/null
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -0,0 +1,241 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "adreno.h"
+
+#define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF
+
+#define QUAD_LEN 12
+#define QUAD_RESTORE_LEN 14
+
+static unsigned int gmem_copy_quad[QUAD_LEN] = {
+ 0x00000000, 0x00000000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x3f800000
+};
+
+static unsigned int gmem_restore_quad[QUAD_RESTORE_LEN] = {
+ 0x00000000, 0x3f800000, 0x3f800000,
+ 0x00000000, 0x00000000, 0x00000000,
+ 0x3f800000, 0x00000000, 0x00000000,
+ 0x3f800000, 0x00000000, 0x00000000,
+ 0x3f800000, 0x3f800000,
+};
+
+#define TEXCOORD_LEN 8
+
+static unsigned int gmem_copy_texcoord[TEXCOORD_LEN] = {
+ 0x00000000, 0x3f800000,
+ 0x3f800000, 0x3f800000,
+ 0x00000000, 0x00000000,
+ 0x3f800000, 0x00000000
+};
+
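+/*
+ * uint2float - convert an unsigned integer to its IEEE 754
+ * single-precision bit pattern; used to build the quad vertex data.
+ */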
+unsigned int uint2float(unsigned int uintval)
+{
+ unsigned int exp, frac = 0;
+
+ if (uintval == 0)
+ return 0;
+
+ exp = ilog2(uintval);
+
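+	/* Build the 23-bit mantissa from the bits below the leading one */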
+ if (23 > exp)
+ frac = (uintval & (~(1 << exp))) << (23 - exp);
+
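+	/* Bias the exponent by 127 and shift it into place */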
+ exp = (exp + 127) << 23;
+
+ return exp | frac;
+}
+
+static void set_gmem_copy_quad(struct gmem_shadow_t *shadow)
+{
+
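+	/* Patch the quads with the shadow dimensions converted to float */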
+ gmem_copy_quad[1] = uint2float(shadow->height);
+ gmem_copy_quad[3] = uint2float(shadow->width);
+ gmem_copy_quad[4] = uint2float(shadow->height);
+ gmem_copy_quad[9] = uint2float(shadow->width);
+
+ gmem_restore_quad[5] = uint2float(shadow->height);
+ gmem_restore_quad[7] = uint2float(shadow->width);
+
+ memcpy(shadow->quad_vertices.hostptr, gmem_copy_quad, QUAD_LEN << 2);
+ memcpy(shadow->quad_vertices_restore.hostptr, gmem_restore_quad,
+ QUAD_RESTORE_LEN << 2);
+
+ memcpy(shadow->quad_texcoords.hostptr, gmem_copy_texcoord,
+ TEXCOORD_LEN << 2);
+}
+
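+/* Carve the quad vertex and texcoord buffers out of the context shadow */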
+void build_quad_vtxbuff(struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow, unsigned int **incmd)
+{
+ unsigned int *cmd = *incmd;
+
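+	/* Quad vertices */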
+ shadow->quad_vertices.hostptr = cmd;
+ shadow->quad_vertices.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+
+ cmd += QUAD_LEN;
+
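+	/* Restore quad vertices */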
+ shadow->quad_vertices_restore.hostptr = cmd;
+ shadow->quad_vertices_restore.gpuaddr =
+ virt2gpu(cmd, &drawctxt->gpustate);
+
+ cmd += QUAD_RESTORE_LEN;
+
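+	/* Texture coordinates */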
+ shadow->quad_texcoords.hostptr = cmd;
+ shadow->quad_texcoords.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
+
+ cmd += TEXCOORD_LEN;
+
+ set_gmem_copy_quad(shadow);
+ *incmd = cmd;
+}
+
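+/*
+ * adreno_drawctxt_create - allocate per-context state and build the
+ * GPU-specific save/restore command buffers for a new draw context.
+ * Returns 0 on success or a negative error code.
+ */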
+int adreno_drawctxt_create(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable,
+ struct kgsl_context *context, uint32_t flags)
+{
+ struct adreno_context *drawctxt;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int ret;
+
+ drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
+
+ if (drawctxt == NULL)
+ return -ENOMEM;
+
+ drawctxt->pagetable = pagetable;
+ drawctxt->bin_base_offset = 0;
+ drawctxt->id = context->id;
+
+ if (flags & KGSL_CONTEXT_PREAMBLE)
+ drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
+
+ if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
+ drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
+
+ if (flags & KGSL_CONTEXT_PER_CONTEXT_TS)
+ drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
+
+ ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
+ if (ret)
+ goto err;
+
+ kgsl_sharedmem_writel(&device->memstore,
+ KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts),
+ KGSL_INIT_REFTIMESTAMP);
+
+ context->devctxt = drawctxt;
+ return 0;
+err:
+ kfree(drawctxt);
+ return ret;
+}
+
+
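+/*
+ * adreno_drawctxt_destroy - free a draw context, switching away from it
+ * first if it is currently active.
+ */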
+void adreno_drawctxt_destroy(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_context *drawctxt;
+
+ if (context == NULL || context->devctxt == NULL)
+ return;
+
+ drawctxt = context->devctxt;
+
+ if (adreno_dev->drawctxt_active == drawctxt) {
+ drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
+ CTXT_FLAGS_SHADER_SAVE |
+ CTXT_FLAGS_GMEM_SHADOW |
+ CTXT_FLAGS_STATE_SHADOW);
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ device->current_process_priv = NULL;
+#endif
+ adreno_drawctxt_switch(adreno_dev, NULL, 0);
+ }
+
+ if (device->state != KGSL_STATE_HUNG)
+ adreno_idle(device);
+
+ if (adreno_is_a20x(adreno_dev) && adreno_dev->drawctxt_active)
+ kgsl_setstate(&device->mmu, adreno_dev->drawctxt_active->id,
+ KGSL_MMUFLAGS_PTUPDATE);
+
+ kgsl_sharedmem_free(&drawctxt->gpustate);
+ kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
+
+ kfree(drawctxt);
+ context->devctxt = NULL;
+}
+
+
+void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int offset)
+{
+ struct adreno_context *drawctxt = context->devctxt;
+
+ if (drawctxt)
+ drawctxt->bin_base_offset = offset;
+}
+
+
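+/*
+ * adreno_drawctxt_switch - save the active context and restore the next
+ * one; a NULL drawctxt switches back to the default state.
+ */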
+void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ unsigned int flags)
+{
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (drawctxt) {
+ if (flags & KGSL_CONTEXT_SAVE_GMEM)
+ drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
+ else
+
+ drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
+ }
+
+
+ if (adreno_dev->drawctxt_active == drawctxt) {
+ if (adreno_dev->gpudev->ctxt_draw_workaround &&
+ adreno_is_a225(adreno_dev))
+ adreno_dev->gpudev->ctxt_draw_workaround(
+ adreno_dev, drawctxt);
+ return;
+ }
+
+ KGSL_CTXT_INFO(device, "from %p to %p flags %d\n",
+ adreno_dev->drawctxt_active, drawctxt, flags);
+
+
+ adreno_dev->gpudev->ctxt_save(adreno_dev, adreno_dev->drawctxt_active);
+
+
+ adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
+ adreno_dev->drawctxt_active = drawctxt;
+}
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
new file mode 100644
index 0000000..f66dfbb
--- /dev/null
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_DRAWCTXT_H
+#define __ADRENO_DRAWCTXT_H
+
+#include "adreno_pm4types.h"
+#include "a2xx_reg.h"
+
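+/* Draw context flags */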
+#define CTXT_FLAGS_NOT_IN_USE 0x00000000
+#define CTXT_FLAGS_IN_USE 0x00000001
+
+#define CTXT_FLAGS_STATE_SHADOW 0x00000010
+
+#define CTXT_FLAGS_GMEM_SHADOW 0x00000100
+#define CTXT_FLAGS_GMEM_SAVE 0x00000200
+#define CTXT_FLAGS_GMEM_RESTORE 0x00000400
+#define CTXT_FLAGS_PREAMBLE 0x00000800
+#define CTXT_FLAGS_SHADER_SAVE 0x00002000
+#define CTXT_FLAGS_SHADER_RESTORE 0x00004000
+#define CTXT_FLAGS_GPU_HANG 0x00008000
+#define CTXT_FLAGS_NOGMEMALLOC 0x00010000
+#define CTXT_FLAGS_TRASHSTATE 0x00020000
+#define CTXT_FLAGS_PER_CONTEXT_TS 0x00040000
+#define CTXT_FLAGS_GPU_HANG_RECOVERED	0x00080000
+
+struct kgsl_device;
+struct adreno_device;
+struct kgsl_device_private;
+struct kgsl_context;
+
+struct gmem_shadow_t {
+ struct kgsl_memdesc gmemshadow;
+
+
+ enum COLORFORMATX format;
+ unsigned int size;
+ unsigned int width;
+ unsigned int height;
+ unsigned int pitch;
+ unsigned int gmem_pitch;
+ unsigned int *gmem_save_commands;
+ unsigned int *gmem_restore_commands;
+ unsigned int gmem_save[3];
+ unsigned int gmem_restore[3];
+ struct kgsl_memdesc quad_vertices;
+ struct kgsl_memdesc quad_texcoords;
+ struct kgsl_memdesc quad_vertices_restore;
+};
+
+struct adreno_context {
+ unsigned int id;
+ uint32_t flags;
+ struct kgsl_pagetable *pagetable;
+ struct kgsl_memdesc gpustate;
+ unsigned int reg_restore[3];
+ unsigned int shader_save[3];
+ unsigned int shader_restore[3];
+
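+	/* GMEM shadow, created at context create time */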
+ struct gmem_shadow_t context_gmem_shadow;
+
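+	/* A2XX-specific state */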
+ unsigned int reg_save[3];
+ unsigned int shader_fixup[3];
+ unsigned int chicken_restore[3];
+ unsigned int bin_base_offset;
+
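+	/* A3XX-specific state */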
+ unsigned int regconstant_save[3];
+ unsigned int constant_restore[3];
+ unsigned int hlsqcontrol_restore[3];
+ unsigned int save_fixup[3];
+ unsigned int restore_fixup[3];
+ struct kgsl_memdesc shader_load_commands[2];
+ struct kgsl_memdesc shader_save_commands[4];
+ struct kgsl_memdesc constant_save_commands[3];
+ struct kgsl_memdesc constant_load_commands[3];
+ struct kgsl_memdesc cond_execs[4];
+ struct kgsl_memdesc hlsqcontrol_restore_commands[1];
+};
+
+int adreno_drawctxt_create(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable,
+ struct kgsl_context *context,
+ uint32_t flags);
+
+void adreno_drawctxt_destroy(struct kgsl_device *device,
+ struct kgsl_context *context);
+
+void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ unsigned int flags);
+void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int offset);
+
+
+void build_quad_vtxbuff(struct adreno_context *drawctxt,
+ struct gmem_shadow_t *shadow, unsigned int **incmd);
+
+unsigned int uint2float(unsigned int);
+
+static inline unsigned int virt2gpu(unsigned int *cmd,
+ struct kgsl_memdesc *memdesc)
+{
+ return memdesc->gpuaddr + ((char *) cmd - (char *) memdesc->hostptr);
+}
+
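+/* Build a three dword IB1: packet header, GPU address, size in dwords */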
+static inline void create_ib1(struct adreno_context *drawctxt,
+ unsigned int *cmd,
+ unsigned int *start,
+ unsigned int *end)
+{
+ cmd[0] = CP_HDR_INDIRECT_BUFFER_PFD;
+ cmd[1] = virt2gpu(start, &drawctxt->gpustate);
+ cmd[2] = end - start;
+}
+
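+/* Emit a register range: CP_REG start token followed by the register count */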
+static inline unsigned int *reg_range(unsigned int *cmd, unsigned int start,
+ unsigned int end)
+{
+ *cmd++ = CP_REG(start);
+ *cmd++ = end - start + 1;
+ return cmd;
+}
+
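+/* Size the GMEM shadow: smallest 32bpp power-of-two surface covering gmem */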
+static inline void calc_gmemsize(struct gmem_shadow_t *shadow, int gmem_size)
+{
+ int w = 64, h = 64;
+
+ shadow->format = COLORX_8_8_8_8;
+
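+	/* Convert the size from bytes to 32bpp pixels */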
+ gmem_size = (gmem_size + 3) / 4;
+
+ while ((w * h) < gmem_size) {
+ if (w < h)
+ w *= 2;
+ else
+ h *= 2;
+ }
+
+ shadow->pitch = shadow->width = w;
+ shadow->height = h;
+ shadow->gmem_pitch = shadow->pitch;
+ shadow->size = shadow->pitch * shadow->height * 4;
+}
+
+#endif
diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h
new file mode 100644
index 0000000..c6ebed4
--- /dev/null
+++ b/drivers/gpu/msm/adreno_pm4types.h
@@ -0,0 +1,180 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_PM4TYPES_H
+#define __ADRENO_PM4TYPES_H
+
+
+#define CP_PKT_MASK 0xc0000000
+
+#define CP_TYPE0_PKT ((unsigned int)0 << 30)
+#define CP_TYPE1_PKT ((unsigned int)1 << 30)
+#define CP_TYPE2_PKT ((unsigned int)2 << 30)
+#define CP_TYPE3_PKT ((unsigned int)3 << 30)
+
+
+#define CP_ME_INIT 0x48
+
+#define CP_NOP 0x10
+
+#define CP_INDIRECT_BUFFER_PFD 0x37
+
+#define CP_WAIT_FOR_IDLE 0x26
+
+#define CP_WAIT_REG_MEM 0x3c
+
+#define CP_WAIT_REG_EQ 0x52
+
+#define CP_WAT_REG_GTE 0x53
+
+#define CP_WAIT_UNTIL_READ 0x5c
+
+#define CP_WAIT_IB_PFD_COMPLETE 0x5d
+
+#define CP_REG_RMW 0x21
+
+#define CP_SET_BIN_DATA 0x2f
+
+#define CP_REG_TO_MEM 0x3e
+
+#define CP_MEM_WRITE 0x3d
+
+#define CP_MEM_WRITE_CNTR 0x4f
+
+#define CP_COND_EXEC 0x44
+
+#define CP_COND_WRITE 0x45
+
+#define CP_EVENT_WRITE 0x46
+
+#define CP_EVENT_WRITE_SHD 0x58
+
+#define CP_EVENT_WRITE_CFL 0x59
+
+#define CP_EVENT_WRITE_ZPD 0x5b
+
+
+#define CP_DRAW_INDX 0x22
+
+#define CP_DRAW_INDX_2 0x36
+
+#define CP_DRAW_INDX_BIN 0x34
+
+#define CP_DRAW_INDX_2_BIN 0x35
+
+
+#define CP_VIZ_QUERY 0x23
+
+#define CP_SET_STATE 0x25
+
+#define CP_SET_CONSTANT 0x2d
+
+#define CP_IM_LOAD 0x27
+
+#define CP_IM_LOAD_IMMEDIATE 0x2b
+
+#define CP_LOAD_CONSTANT_CONTEXT 0x2e
+
+#define CP_INVALIDATE_STATE 0x3b
+
+
+#define CP_SET_SHADER_BASES 0x4A
+
+#define CP_SET_BIN_MASK 0x50
+
+#define CP_SET_BIN_SELECT 0x51
+
+
+#define CP_CONTEXT_UPDATE 0x5e
+
+#define CP_INTERRUPT 0x40
+
+
+#define CP_IM_STORE 0x2c
+
+#define CP_TEST_TWO_MEMS 0x71
+
+#define CP_WAIT_FOR_ME 0x13
+
+#define CP_SET_BIN_BASE_OFFSET 0x4B
+
+#define CP_SET_DRAW_INIT_FLAGS 0x4B
+
+#define CP_SET_PROTECTED_MODE 0x5f
+
+
+#define CP_LOAD_STATE 0x30
+
+#define CP_COND_INDIRECT_BUFFER_PFE 0x3A
+#define CP_COND_INDIRECT_BUFFER_PFD 0x32
+
+#define CP_INDIRECT_BUFFER_PFE 0x3F
+
+#define CP_LOADSTATE_DSTOFFSET_SHIFT 0x00000000
+#define CP_LOADSTATE_STATESRC_SHIFT 0x00000010
+#define CP_LOADSTATE_STATEBLOCKID_SHIFT 0x00000013
+#define CP_LOADSTATE_NUMOFUNITS_SHIFT 0x00000016
+#define CP_LOADSTATE_STATETYPE_SHIFT 0x00000000
+#define CP_LOADSTATE_EXTSRCADDR_SHIFT 0x00000002
+
+#define cp_type0_packet(regindx, cnt) \
+ (CP_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
+
+#define cp_type0_packet_for_sameregister(regindx, cnt) \
+ ((CP_TYPE0_PKT | (((cnt)-1) << 16) | ((1 << 15) | \
+ ((regindx) & 0x7FFF)))
+
+#define cp_type1_packet(reg0, reg1) \
+ (CP_TYPE1_PKT | ((reg1) << 12) | (reg0))
+
+#define cp_type3_packet(opcode, cnt) \
+ (CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
+
+#define cp_predicated_type3_packet(opcode, cnt) \
+ (CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
+
+#define cp_nop_packet(cnt) \
+ (CP_TYPE3_PKT | (((cnt)-1) << 16) | (CP_NOP << 8))
+
+#define pkt_is_type0(pkt) (((pkt) & 0xC0000000) == CP_TYPE0_PKT)
+
+#define type0_pkt_size(pkt) ((((pkt) >> 16) & 0x3FFF) + 1)
+#define type0_pkt_offset(pkt) ((pkt) & 0x7FFF)
+
+
+#define pkt_is_type3(pkt) \
+ ((((pkt) & 0xC0000000) == CP_TYPE3_PKT) && \
+ (((pkt) & 0x80FE) == 0))
+
+#define cp_type3_opcode(pkt) (((pkt) >> 8) & 0xFF)
+#define type3_pkt_size(pkt) ((((pkt) >> 16) & 0x3FFF) + 1)
+
+#define CP_HDR_ME_INIT cp_type3_packet(CP_ME_INIT, 18)
+#define CP_HDR_INDIRECT_BUFFER_PFD cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2)
+#define CP_HDR_INDIRECT_BUFFER_PFE cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2)
+
+#define SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))
+
+#define CP_REG(reg) ((0x4 << 16) | (SUBBLOCK_OFFSET(reg)))
+
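+/* Return true if the dword is one of the four IB submission packet headers */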
+static inline int adreno_cmd_is_ib(unsigned int cmd)
+{
+ return (cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2) ||
+ cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2) ||
+ cmd == cp_type3_packet(CP_COND_INDIRECT_BUFFER_PFE, 2) ||
+ cmd == cp_type3_packet(CP_COND_INDIRECT_BUFFER_PFD, 2));
+}
+
+#endif
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
new file mode 100644
index 0000000..45286dd
--- /dev/null
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -0,0 +1,954 @@
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_ringbuffer.h"
+#include "adreno_postmortem.h"
+#include "adreno_debugfs.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_pwrctrl.h"
+
+#include "a2xx_reg.h"
+#include "a3xx_reg.h"
+
+#define INVALID_RB_CMD 0xaaaaaaaa
+#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100
+
+struct pm_id_name {
+ uint32_t id;
+ char name[9];
+};
+
+static const struct pm_id_name pm0_types[] = {
+ {REG_PA_SC_AA_CONFIG, "RPASCAAC"},
+ {REG_RBBM_PM_OVERRIDE2, "RRBBPMO2"},
+ {REG_SCRATCH_REG2, "RSCRTRG2"},
+ {REG_SQ_GPR_MANAGEMENT, "RSQGPRMN"},
+ {REG_SQ_INST_STORE_MANAGMENT, "RSQINSTS"},
+ {REG_TC_CNTL_STATUS, "RTCCNTLS"},
+ {REG_TP0_CHICKEN, "RTP0CHCK"},
+ {REG_CP_TIMESTAMP, "CP_TM_ST"},
+};
+
+static const struct pm_id_name pm3_types[] = {
+ {CP_COND_EXEC, "CND_EXEC"},
+ {CP_CONTEXT_UPDATE, "CX__UPDT"},
+ {CP_DRAW_INDX, "DRW_NDX_"},
+ {CP_DRAW_INDX_BIN, "DRW_NDXB"},
+ {CP_EVENT_WRITE, "EVENT_WT"},
+ {CP_IM_LOAD, "IN__LOAD"},
+ {CP_IM_LOAD_IMMEDIATE, "IM_LOADI"},
+ {CP_IM_STORE, "IM_STORE"},
+ {CP_INDIRECT_BUFFER_PFE, "IND_BUF_"},
+ {CP_INDIRECT_BUFFER_PFD, "IND_BUFP"},
+ {CP_INTERRUPT, "PM4_INTR"},
+ {CP_INVALIDATE_STATE, "INV_STAT"},
+ {CP_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
+ {CP_ME_INIT, "ME__INIT"},
+ {CP_NOP, "PM4__NOP"},
+ {CP_REG_RMW, "REG__RMW"},
+ {CP_REG_TO_MEM, "REG2_MEM"},
+ {CP_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
+ {CP_SET_CONSTANT, "ST_CONST"},
+ {CP_SET_PROTECTED_MODE, "ST_PRT_M"},
+ {CP_SET_SHADER_BASES, "ST_SHD_B"},
+ {CP_WAIT_FOR_IDLE, "WAIT4IDL"},
+};
+
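+/* Extract the dword count field from a PM4 packet header */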
+static uint32_t adreno_is_pm4_len(uint32_t word)
+{
+ if (word == INVALID_RB_CMD)
+ return 0;
+
+ return (word >> 16) & 0x3FFF;
+}
+
+static bool adreno_is_pm4_type(uint32_t word)
+{
+ int i;
+
+ if (word == INVALID_RB_CMD)
+ return 1;
+
+ if (adreno_is_pm4_len(word) > 16)
+ return 0;
+
+ if ((word & (3<<30)) == CP_TYPE0_PKT) {
+ for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
+ if ((word & 0x7FFF) == pm0_types[i].id)
+ return 1;
+ }
+ return 0;
+ }
+ if ((word & (3<<30)) == CP_TYPE3_PKT) {
+ for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
+ if ((word & 0xFFFF) == (pm3_types[i].id << 8))
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+static const char *adreno_pm4_name(uint32_t word)
+{
+ int i;
+
+ if (word == INVALID_RB_CMD)
+ return "--------";
+
+ if ((word & (3<<30)) == CP_TYPE0_PKT) {
+ for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
+ if ((word & 0x7FFF) == pm0_types[i].id)
+ return pm0_types[i].name;
+ }
+ return "????????";
+ }
+ if ((word & (3<<30)) == CP_TYPE3_PKT) {
+ for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
+ if ((word & 0xFFFF) == (pm3_types[i].id << 8))
+ return pm3_types[i].name;
+ }
+ return "????????";
+ }
+ return "????????";
+}
+
+static void adreno_dump_regs(struct kgsl_device *device,
+ const int *registers, int size)
+{
+ int range = 0, offset = 0;
+
+ for (range = 0; range < size; range++) {
+
+ int start = registers[range * 2];
+ int end = registers[range * 2 + 1];
+
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+ int linelen, i;
+
+ for (offset = start; offset <= end; offset += linelen) {
+ unsigned int regvals[32/4];
+ linelen = min(end+1-offset, 32/4);
+
+ for (i = 0; i < linelen; ++i)
+ kgsl_regread(device, offset+i, regvals+i);
+
+ hex_dump_to_buffer(regvals, linelen*4, 32, 4,
+ linebuf, sizeof(linebuf), 0);
+ KGSL_LOG_DUMP(device,
+ "REG: %5.5X: %s\n", offset, linebuf);
+ }
+ }
+}
+
+static void dump_ib(struct kgsl_device *device, char *buffId, uint32_t pt_base,
+	uint32_t base_offset, uint32_t ib_base, uint32_t ib_size, bool dump)
+{
+ uint8_t *base_addr = adreno_convertaddr(device, pt_base,
+ ib_base, ib_size*sizeof(uint32_t));
+
+ if (base_addr && dump)
+ print_hex_dump(KERN_ERR, buffId, DUMP_PREFIX_OFFSET,
+ 32, 4, base_addr, ib_size*4, 0);
+ else
+ KGSL_LOG_DUMP(device, "%s base:%8.8X ib_size:%d "
+ "offset:%5.5X%s\n",
+ buffId, ib_base, ib_size*4, base_offset,
+ base_addr ? "" : " [Invalid]");
+}
+
+#define IB_LIST_SIZE 64
+struct ib_list {
+ int count;
+ uint32_t bases[IB_LIST_SIZE];
+ uint32_t sizes[IB_LIST_SIZE];
+ uint32_t offsets[IB_LIST_SIZE];
+};
+
+static void dump_ib1(struct kgsl_device *device, uint32_t pt_base,
+ uint32_t base_offset,
+ uint32_t ib1_base, uint32_t ib1_size,
+ struct ib_list *ib_list, bool dump)
+{
+ int i, j;
+ uint32_t value;
+ uint32_t *ib1_addr;
+
+ dump_ib(device, "IB1:", pt_base, base_offset, ib1_base,
+ ib1_size, dump);
+
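+	/* Map the IB and walk it, collecting every unique IB2 it references */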
+ ib1_addr = (uint32_t *)adreno_convertaddr(device, pt_base,
+ ib1_base, ib1_size*sizeof(uint32_t));
+ if (!ib1_addr)
+ return;
+
+ for (i = 0; i+3 < ib1_size; ) {
+ value = ib1_addr[i++];
+ if (adreno_cmd_is_ib(value)) {
+ uint32_t ib2_base = ib1_addr[i++];
+ uint32_t ib2_size = ib1_addr[i++];
+
+
+ for (j = 0; j < ib_list->count; ++j)
+ if (ib_list->sizes[j] == ib2_size
+ && ib_list->bases[j] == ib2_base)
+ break;
+
+ if (j < ib_list->count || ib_list->count
+ >= IB_LIST_SIZE)
+ continue;
+
+
+ ib_list->sizes[ib_list->count] = ib2_size;
+ ib_list->bases[ib_list->count] = ib2_base;
+ ib_list->offsets[ib_list->count] = i<<2;
+ ++ib_list->count;
+ }
+ }
+}
+
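+/* Format one row of ringbuffer dwords, naming recognized PM4 headers and
+ * bracketing their payloads */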
+static void adreno_dump_rb_buffer(const void *buf, size_t len,
+ char *linebuf, size_t linebuflen, int *argp)
+{
+ const u32 *ptr4 = buf;
+ const int ngroups = len;
+ int lx = 0, j;
+ bool nxsp = 1;
+
+ for (j = 0; j < ngroups; j++) {
+ if (*argp < 0) {
+ lx += scnprintf(linebuf + lx, linebuflen - lx, " <");
+ *argp = -*argp;
+ } else if (nxsp)
+ lx += scnprintf(linebuf + lx, linebuflen - lx, " ");
+ else
+ nxsp = 1;
+ if (!*argp && adreno_is_pm4_type(ptr4[j])) {
+ lx += scnprintf(linebuf + lx, linebuflen - lx,
+ "%s", adreno_pm4_name(ptr4[j]));
+ *argp = -(adreno_is_pm4_len(ptr4[j])+1);
+ } else {
+ lx += scnprintf(linebuf + lx, linebuflen - lx,
+ "%8.8X", ptr4[j]);
+ if (*argp > 1)
+ --*argp;
+ else if (*argp == 1) {
+ *argp = 0;
+ nxsp = 0;
+ lx += scnprintf(linebuf + lx, linebuflen - lx,
+ "> ");
+ }
+ }
+ }
+ linebuf[lx] = '\0';
+}
+
+static bool adreno_rb_use_hex(void)
+{
+#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static void adreno_dump_rb(struct kgsl_device *device, const void *buf,
+ size_t len, int start, int size)
+{
+ const uint32_t *ptr = buf;
+ int i, remaining, args = 0;
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+ const int rowsize = 8;
+
+ len >>= 2;
+ remaining = len;
+ for (i = 0; i < len; i += rowsize) {
+ int linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+
+ if (adreno_rb_use_hex())
+ hex_dump_to_buffer(ptr+i, linelen*4, rowsize*4, 4,
+ linebuf, sizeof(linebuf), 0);
+ else
+ adreno_dump_rb_buffer(ptr+i, linelen, linebuf,
+ sizeof(linebuf), &args);
+ KGSL_LOG_DUMP(device,
+ "RB: %4.4X:%s\n", (start+i)%size, linebuf);
+ }
+}
+
+struct log_field {
+ bool show;
+ const char *display;
+};
+
+static int adreno_dump_fields_line(struct kgsl_device *device,
+ const char *start, char *str, int slen,
+ const struct log_field **lines,
+ int num)
+{
+ const struct log_field *l = *lines;
+ int sptr, count = 0;
+
+ sptr = snprintf(str, slen, "%s", start);
+
+ for ( ; num && sptr < slen; num--, l++) {
+ int ilen = strlen(l->display);
+
+ if (!l->show)
+ continue;
+
+ if (count)
+ ilen += strlen(" | ");
+
+ if (ilen > (slen - sptr))
+ break;
+
+ if (count++)
+ sptr += snprintf(str + sptr, slen - sptr, " | ");
+
+ sptr += snprintf(str + sptr, slen - sptr, "%s", l->display);
+ }
+
+ KGSL_LOG_DUMP(device, "%s\n", str);
+
+ *lines = l;
+ return num;
+}
+
+static void adreno_dump_fields(struct kgsl_device *device,
+ const char *start, const struct log_field *lines,
+ int num)
+{
+ char lb[90];
+ const char *sstr = start;
+
+ lb[sizeof(lb) - 1] = '\0';
+
+ while (num) {
+ int ret = adreno_dump_fields_line(device, sstr, lb,
+ sizeof(lb) - 1, &lines, num);
+
+ if (ret == num)
+ break;
+
+ num = ret;
+ sstr = " ";
+ }
+}
+
+static void adreno_dump_a3xx(struct kgsl_device *device)
+{
+ unsigned int r1, r2, r3, rbbm_status;
+ unsigned int cp_stat, rb_count;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ kgsl_regread(device, adreno_dev->gpudev->reg_rbbm_status, &rbbm_status);
+ KGSL_LOG_DUMP(device, "RBBM: STATUS = %08X\n", rbbm_status);
+
+ {
+ struct log_field lines[] = {
+ {rbbm_status & BIT(0), "HI busy "},
+ {rbbm_status & BIT(1), "CP ME busy "},
+ {rbbm_status & BIT(2), "CP PFP busy "},
+ {rbbm_status & BIT(14), "CP NRT busy "},
+ {rbbm_status & BIT(15), "VBIF busy "},
+ {rbbm_status & BIT(16), "TSE busy "},
+ {rbbm_status & BIT(17), "RAS busy "},
+ {rbbm_status & BIT(18), "RB busy "},
+ {rbbm_status & BIT(19), "PC DCALL bsy"},
+ {rbbm_status & BIT(20), "PC VSD busy "},
+ {rbbm_status & BIT(21), "VFD busy "},
+ {rbbm_status & BIT(22), "VPC busy "},
+ {rbbm_status & BIT(23), "UCHE busy "},
+ {rbbm_status & BIT(24), "SP busy "},
+ {rbbm_status & BIT(25), "TPL1 busy "},
+ {rbbm_status & BIT(26), "MARB busy "},
+ {rbbm_status & BIT(27), "VSC busy "},
+ {rbbm_status & BIT(28), "ARB busy "},
+ {rbbm_status & BIT(29), "HLSQ busy "},
+ {rbbm_status & BIT(30), "GPU bsy noHC"},
+ {rbbm_status & BIT(31), "GPU busy "},
+ };
+ adreno_dump_fields(device, " STATUS=", lines,
+ ARRAY_SIZE(lines));
+ }
+
+ kgsl_regread(device, REG_CP_RB_BASE, &r1);
+ kgsl_regread(device, REG_CP_RB_CNTL, &r2);
+ rb_count = 2 << (r2 & (BIT(6) - 1));
+ kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
+ KGSL_LOG_DUMP(device,
+ "CP_RB: BASE = %08X | CNTL = %08X | RPTR_ADDR = %08X"
+ "| rb_count = %08X\n", r1, r2, r3, rb_count);
+
+ kgsl_regread(device, REG_CP_RB_RPTR, &r1);
+ kgsl_regread(device, REG_CP_RB_WPTR, &r2);
+ kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3);
+ KGSL_LOG_DUMP(device,
+ " RPTR = %08X | WPTR = %08X | RPTR_WR = %08X"
+ "\n", r1, r2, r3);
+
+ kgsl_regread(device, REG_CP_IB1_BASE, &r1);
+ kgsl_regread(device, REG_CP_IB1_BUFSZ, &r2);
+ KGSL_LOG_DUMP(device, "CP_IB1: BASE = %08X | BUFSZ = %d\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_ME_CNTL, &r1);
+ kgsl_regread(device, REG_CP_ME_STATUS, &r2);
+ KGSL_LOG_DUMP(device, "CP_ME: CNTL = %08X | STATUS = %08X\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_STAT, &cp_stat);
+ KGSL_LOG_DUMP(device, "CP_STAT = %08X\n", cp_stat);
+#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(0), "WR_BSY 0"},
+ {cp_stat & BIT(1), "RD_RQ_BSY 1"},
+ {cp_stat & BIT(2), "RD_RTN_BSY 2"},
+ };
+ adreno_dump_fields(device, " MIU=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(5), "RING_BUSY 5"},
+ {cp_stat & BIT(6), "NDRCTS_BSY 6"},
+ {cp_stat & BIT(7), "NDRCT2_BSY 7"},
+ {cp_stat & BIT(9), "ST_BUSY 9"},
+ {cp_stat & BIT(10), "BUSY 10"},
+ };
+ adreno_dump_fields(device, " CSF=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(11), "RNG_Q_BSY 11"},
+ {cp_stat & BIT(12), "NDRCTS_Q_B12"},
+ {cp_stat & BIT(13), "NDRCT2_Q_B13"},
+ {cp_stat & BIT(16), "ST_QUEUE_B16"},
+ {cp_stat & BIT(17), "PFP_BUSY 17"},
+ };
+ adreno_dump_fields(device, " RING=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(3), "RBIU_BUSY 3"},
+ {cp_stat & BIT(4), "RCIU_BUSY 4"},
+ {cp_stat & BIT(8), "EVENT_BUSY 8"},
+ {cp_stat & BIT(18), "MQ_RG_BSY 18"},
+ {cp_stat & BIT(19), "MQ_NDRS_BS19"},
+ {cp_stat & BIT(20), "MQ_NDR2_BS20"},
+ {cp_stat & BIT(21), "MIU_WC_STL21"},
+ {cp_stat & BIT(22), "CP_NRT_BSY22"},
+ {cp_stat & BIT(23), "3D_BUSY 23"},
+ {cp_stat & BIT(26), "ME_BUSY 26"},
+ {cp_stat & BIT(27), "RB_FFO_BSY27"},
+ {cp_stat & BIT(28), "CF_FFO_BSY28"},
+ {cp_stat & BIT(29), "PS_FFO_BSY29"},
+ {cp_stat & BIT(30), "VS_FFO_BSY30"},
+ {cp_stat & BIT(31), "CP_BUSY 31"},
+ };
+ adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns));
+ }
+#endif
+
+ kgsl_regread(device, A3XX_RBBM_INT_0_STATUS, &r1);
+ KGSL_LOG_DUMP(device, "MSTR_INT_SGNL = %08X\n", r1);
+ {
+ struct log_field ints[] = {
+ {r1 & BIT(0), "RBBM_GPU_IDLE 0"},
+ {r1 & BIT(1), "RBBM_AHB_ERROR 1"},
+ {r1 & BIT(2), "RBBM_REG_TIMEOUT 2"},
+ {r1 & BIT(3), "RBBM_ME_MS_TIMEOUT 3"},
+ {r1 & BIT(4), "RBBM_PFP_MS_TIMEOUT 4"},
+ {r1 & BIT(5), "RBBM_ATB_BUS_OVERFLOW 5"},
+ {r1 & BIT(6), "VFD_ERROR 6"},
+ {r1 & BIT(7), "CP_SW_INT 7"},
+ {r1 & BIT(8), "CP_T0_PACKET_IN_IB 8"},
+ {r1 & BIT(9), "CP_OPCODE_ERROR 9"},
+ {r1 & BIT(10), "CP_RESERVED_BIT_ERROR 10"},
+ {r1 & BIT(11), "CP_HW_FAULT 11"},
+ {r1 & BIT(12), "CP_DMA 12"},
+ {r1 & BIT(13), "CP_IB2_INT 13"},
+ {r1 & BIT(14), "CP_IB1_INT 14"},
+ {r1 & BIT(15), "CP_RB_INT 15"},
+ {r1 & BIT(16), "CP_REG_PROTECT_FAULT 16"},
+ {r1 & BIT(17), "CP_RB_DONE_TS 17"},
+ {r1 & BIT(18), "CP_VS_DONE_TS 18"},
+ {r1 & BIT(19), "CP_PS_DONE_TS 19"},
+ {r1 & BIT(20), "CACHE_FLUSH_TS 20"},
+ {r1 & BIT(21), "CP_AHB_ERROR_HALT 21"},
+ {r1 & BIT(24), "MISC_HANG_DETECT 24"},
+ {r1 & BIT(25), "UCHE_OOB_ACCESS 25"},
+ };
+ adreno_dump_fields(device, "INT_SGNL=", ints, ARRAY_SIZE(ints));
+ }
+}
+
+static void adreno_dump_a2xx(struct kgsl_device *device)
+{
+ unsigned int r1, r2, r3, rbbm_status;
+ unsigned int cp_stat, rb_count;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ kgsl_regread(device, adreno_dev->gpudev->reg_rbbm_status, &rbbm_status);
+
+ kgsl_regread(device, REG_RBBM_PM_OVERRIDE1, &r2);
+ kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &r3);
+ KGSL_LOG_DUMP(device, "RBBM: STATUS = %08X | PM_OVERRIDE1 = %08X | "
+ "PM_OVERRIDE2 = %08X\n", rbbm_status, r2, r3);
+
+ kgsl_regread(device, REG_RBBM_INT_CNTL, &r1);
+ kgsl_regread(device, REG_RBBM_INT_STATUS, &r2);
+ kgsl_regread(device, REG_RBBM_READ_ERROR, &r3);
+ KGSL_LOG_DUMP(device, " INT_CNTL = %08X | INT_STATUS = %08X | "
+ "READ_ERROR = %08X\n", r1, r2, r3);
+
+ {
+ char cmdFifo[16];
+ struct log_field lines[] = {
+ {rbbm_status & 0x001F, cmdFifo},
+ {rbbm_status & BIT(5), "TC busy "},
+ {rbbm_status & BIT(8), "HIRQ pending"},
+ {rbbm_status & BIT(9), "CPRQ pending"},
+ {rbbm_status & BIT(10), "CFRQ pending"},
+ {rbbm_status & BIT(11), "PFRQ pending"},
+ {rbbm_status & BIT(12), "VGT 0DMA bsy"},
+ {rbbm_status & BIT(14), "RBBM WU busy"},
+ {rbbm_status & BIT(16), "CP NRT busy "},
+ {rbbm_status & BIT(18), "MH busy "},
+ {rbbm_status & BIT(19), "MH chncy bsy"},
+ {rbbm_status & BIT(21), "SX busy "},
+ {rbbm_status & BIT(22), "TPC busy "},
+ {rbbm_status & BIT(24), "SC CNTX busy"},
+ {rbbm_status & BIT(25), "PA busy "},
+ {rbbm_status & BIT(26), "VGT busy "},
+ {rbbm_status & BIT(27), "SQ cntx1 bsy"},
+ {rbbm_status & BIT(28), "SQ cntx0 bsy"},
+ {rbbm_status & BIT(30), "RB busy "},
+ {rbbm_status & BIT(31), "Grphs pp bsy"},
+ };
+ snprintf(cmdFifo, sizeof(cmdFifo), "CMD FIFO=%01X ",
+ rbbm_status & 0xf);
+ adreno_dump_fields(device, " STATUS=", lines,
+ ARRAY_SIZE(lines));
+ }
+
+ kgsl_regread(device, REG_CP_RB_BASE, &r1);
+ kgsl_regread(device, REG_CP_RB_CNTL, &r2);
+ rb_count = 2 << (r2 & (BIT(6)-1));
+ kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
+ KGSL_LOG_DUMP(device,
+ "CP_RB: BASE = %08X | CNTL = %08X | RPTR_ADDR = %08X"
+ "| rb_count = %08X\n", r1, r2, r3, rb_count);
+ {
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ if (rb->sizedwords != rb_count)
+ rb_count = rb->sizedwords;
+ }
+
+ kgsl_regread(device, REG_CP_RB_RPTR, &r1);
+ kgsl_regread(device, REG_CP_RB_WPTR, &r2);
+ kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3);
+ KGSL_LOG_DUMP(device,
+ " RPTR = %08X | WPTR = %08X | RPTR_WR = %08X"
+ "\n", r1, r2, r3);
+
+ kgsl_regread(device, REG_CP_IB1_BASE, &r1);
+ kgsl_regread(device, REG_CP_IB1_BUFSZ, &r2);
+ KGSL_LOG_DUMP(device, "CP_IB1: BASE = %08X | BUFSZ = %d\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_IB2_BASE, &r1);
+ kgsl_regread(device, REG_CP_IB2_BUFSZ, &r2);
+ KGSL_LOG_DUMP(device, "CP_IB2: BASE = %08X | BUFSZ = %d\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_INT_CNTL, &r1);
+ kgsl_regread(device, REG_CP_INT_STATUS, &r2);
+ KGSL_LOG_DUMP(device, "CP_INT: CNTL = %08X | STATUS = %08X\n", r1, r2);
+
+ kgsl_regread(device, REG_CP_ME_CNTL, &r1);
+ kgsl_regread(device, REG_CP_ME_STATUS, &r2);
+ kgsl_regread(device, REG_MASTER_INT_SIGNAL, &r3);
+ KGSL_LOG_DUMP(device,
+ "CP_ME: CNTL = %08X | STATUS = %08X | MSTR_INT_SGNL = "
+ "%08X\n", r1, r2, r3);
+
+ kgsl_regread(device, REG_CP_STAT, &cp_stat);
+ KGSL_LOG_DUMP(device, "CP_STAT = %08X\n", cp_stat);
+#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(0), "WR_BSY 0"},
+ {cp_stat & BIT(1), "RD_RQ_BSY 1"},
+ {cp_stat & BIT(2), "RD_RTN_BSY 2"},
+ };
+ adreno_dump_fields(device, " MIU=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(5), "RING_BUSY 5"},
+ {cp_stat & BIT(6), "NDRCTS_BSY 6"},
+ {cp_stat & BIT(7), "NDRCT2_BSY 7"},
+ {cp_stat & BIT(9), "ST_BUSY 9"},
+ {cp_stat & BIT(10), "BUSY 10"},
+ };
+ adreno_dump_fields(device, " CSF=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(11), "RNG_Q_BSY 11"},
+ {cp_stat & BIT(12), "NDRCTS_Q_B12"},
+ {cp_stat & BIT(13), "NDRCT2_Q_B13"},
+ {cp_stat & BIT(16), "ST_QUEUE_B16"},
+ {cp_stat & BIT(17), "PFP_BUSY 17"},
+ };
+ adreno_dump_fields(device, " RING=", lns, ARRAY_SIZE(lns));
+ }
+ {
+ struct log_field lns[] = {
+ {cp_stat & BIT(3), "RBIU_BUSY 3"},
+ {cp_stat & BIT(4), "RCIU_BUSY 4"},
+ {cp_stat & BIT(18), "MQ_RG_BSY 18"},
+ {cp_stat & BIT(19), "MQ_NDRS_BS19"},
+ {cp_stat & BIT(20), "MQ_NDR2_BS20"},
+ {cp_stat & BIT(21), "MIU_WC_STL21"},
+ {cp_stat & BIT(22), "CP_NRT_BSY22"},
+ {cp_stat & BIT(23), "3D_BUSY 23"},
+ {cp_stat & BIT(26), "ME_BUSY 26"},
+ {cp_stat & BIT(29), "ME_WC_BSY 29"},
+ {cp_stat & BIT(30), "MIU_FF EM 30"},
+ {cp_stat & BIT(31), "CP_BUSY 31"},
+ };
+ adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns));
+ }
+#endif
+
+ kgsl_regread(device, REG_SCRATCH_REG0, &r1);
+ KGSL_LOG_DUMP(device, "SCRATCH_REG0 = %08X\n", r1);
+
+ kgsl_regread(device, REG_COHER_SIZE_PM4, &r1);
+ kgsl_regread(device, REG_COHER_BASE_PM4, &r2);
+ kgsl_regread(device, REG_COHER_STATUS_PM4, &r3);
+ KGSL_LOG_DUMP(device,
+ "COHER: SIZE_PM4 = %08X | BASE_PM4 = %08X | STATUS_PM4"
+ " = %08X\n", r1, r2, r3);
+
+ kgsl_regread(device, MH_AXI_ERROR, &r1);
+ KGSL_LOG_DUMP(device, "MH: AXI_ERROR = %08X\n", r1);
+
+ kgsl_regread(device, MH_MMU_PAGE_FAULT, &r1);
+ kgsl_regread(device, MH_MMU_CONFIG, &r2);
+ kgsl_regread(device, MH_MMU_MPU_BASE, &r3);
+ KGSL_LOG_DUMP(device,
+ "MH_MMU: PAGE_FAULT = %08X | CONFIG = %08X | MPU_BASE ="
+ " %08X\n", r1, r2, r3);
+
+ kgsl_regread(device, MH_MMU_MPU_END, &r1);
+ kgsl_regread(device, MH_MMU_VA_RANGE, &r2);
+ r3 = kgsl_mmu_get_current_ptbase(&device->mmu);
+ KGSL_LOG_DUMP(device,
+ " MPU_END = %08X | VA_RANGE = %08X | PT_BASE ="
+ " %08X\n", r1, r2, r3);
+
+ KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ",
+ kgsl_mmu_get_ptsize());
+
+ kgsl_regread(device, MH_MMU_TRAN_ERROR, &r1);
+ KGSL_LOG_DUMP(device, " TRAN_ERROR = %08X\n", r1);
+
+ kgsl_regread(device, MH_INTERRUPT_MASK, &r1);
+ kgsl_regread(device, MH_INTERRUPT_STATUS, &r2);
+ KGSL_LOG_DUMP(device,
+ "MH_INTERRUPT: MASK = %08X | STATUS = %08X\n", r1, r2);
+}
+
+static int adreno_dump(struct kgsl_device *device)
+{
+ unsigned int cp_ib1_base, cp_ib1_bufsz;
+ unsigned int cp_ib2_base, cp_ib2_bufsz;
+ unsigned int pt_base, cur_pt_base;
+ unsigned int cp_rb_base, cp_rb_ctrl, rb_count;
+ unsigned int cp_rb_wptr, cp_rb_rptr;
+ unsigned int i;
+ int result = 0;
+ uint32_t *rb_copy;
+ const uint32_t *rb_vaddr;
+ int num_item = 0;
+ int read_idx, write_idx;
+ unsigned int ts_processed = 0xdeaddead;
+ struct kgsl_context *context;
+ unsigned int context_id;
+
+ static struct ib_list ib_list;
+
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ struct kgsl_memdesc **reg_map;
+ void *reg_map_array;
+ int num_iommu_units = 0;
+
+ mb();
+
+ if (adreno_is_a2xx(adreno_dev))
+ adreno_dump_a2xx(device);
+ else if (adreno_is_a3xx(adreno_dev))
+ adreno_dump_a3xx(device);
+
+ pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
+ cur_pt_base = pt_base;
+
+ kgsl_regread(device, REG_CP_RB_BASE, &cp_rb_base);
+ kgsl_regread(device, REG_CP_RB_CNTL, &cp_rb_ctrl);
+ rb_count = 2 << (cp_rb_ctrl & (BIT(6) - 1));
+ kgsl_regread(device, REG_CP_RB_RPTR, &cp_rb_rptr);
+ kgsl_regread(device, REG_CP_RB_WPTR, &cp_rb_wptr);
+ kgsl_regread(device, REG_CP_IB1_BASE, &cp_ib1_base);
+ kgsl_regread(device, REG_CP_IB1_BUFSZ, &cp_ib1_bufsz);
+ kgsl_regread(device, REG_CP_IB2_BASE, &cp_ib2_base);
+ kgsl_regread(device, REG_CP_IB2_BUFSZ, &cp_ib2_bufsz);
+
+ kgsl_sharedmem_readl(&device->memstore,
+ (unsigned int *) &context_id,
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ current_context));
+ context = idr_find(&device->context_idr, context_id);
+ if (context) {
+ ts_processed = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
+ KGSL_LOG_DUMP(device, "CTXT: %d TIMESTM RTRD: %08X\n",
+ context->id, ts_processed);
+ } else
+ KGSL_LOG_DUMP(device, "BAD CTXT: %d\n", context_id);
+
+ num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
+ cp_rb_rptr);
+ if (num_item <= 0)
+ KGSL_LOG_POSTMORTEM_WRITE(device, "Ringbuffer is Empty.\n");
+
+ rb_copy = vmalloc(rb_count<<2);
+ if (!rb_copy) {
+ KGSL_LOG_POSTMORTEM_WRITE(device,
+ "vmalloc(%d) failed\n", rb_count << 2);
+ result = -ENOMEM;
+ goto end;
+ }
+
+ KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x rb_size:%d num_item:%d\n",
+ cp_rb_base, rb_count<<2, num_item);
+
+ if (adreno_dev->ringbuffer.buffer_desc.gpuaddr != cp_rb_base)
+ KGSL_LOG_POSTMORTEM_WRITE(device,
+ "rb address mismatch, should be 0x%08x\n",
+ adreno_dev->ringbuffer.buffer_desc.gpuaddr);
+
+ rb_vaddr = adreno_dev->ringbuffer.buffer_desc.hostptr;
+ if (!rb_vaddr) {
+ KGSL_LOG_POSTMORTEM_WRITE(device,
+ "rb has no kernel mapping!\n");
+ goto error_vfree;
+ }
+
+ read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
+ if (read_idx < 0)
+ read_idx += rb_count;
+ write_idx = (int)cp_rb_wptr + 16;
+ if (write_idx > rb_count)
+ write_idx -= rb_count;
+ num_item += NUM_DWORDS_OF_RINGBUFFER_HISTORY+16;
+ if (num_item > rb_count)
+ num_item = rb_count;
+ if (write_idx >= read_idx)
+ memcpy(rb_copy, rb_vaddr+read_idx, num_item<<2);
+ else {
+ int part1_c = rb_count-read_idx;
+ memcpy(rb_copy, rb_vaddr+read_idx, part1_c<<2);
+ memcpy(rb_copy+part1_c, rb_vaddr, (num_item-part1_c)<<2);
+ }
+
+
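+ /* walk the ringbuffer copy, dumping IBs and tracking pagetable switches */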
+ ib_list.count = 0;
+ i = 0;
+
+ num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
+ &reg_map_array);
+ reg_map = reg_map_array;
+ for (read_idx = 0; read_idx < num_item; ) {
+ uint32_t this_cmd = rb_copy[read_idx++];
+ if (adreno_cmd_is_ib(this_cmd)) {
+ uint32_t ib_addr = rb_copy[read_idx++];
+ uint32_t ib_size = rb_copy[read_idx++];
+ dump_ib1(device, cur_pt_base, (read_idx-3)<<2, ib_addr,
+ ib_size, &ib_list, 0);
+ for (; i < ib_list.count; ++i)
+ dump_ib(device, "IB2:", cur_pt_base,
+ ib_list.offsets[i],
+ ib_list.bases[i],
+ ib_list.sizes[i], 0);
+ } else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1) ||
+ (num_iommu_units && this_cmd == (reg_map[0]->gpuaddr +
+ (KGSL_IOMMU_CONTEXT_USER << KGSL_IOMMU_CTX_SHIFT) +
+ KGSL_IOMMU_TTBR0))) {
+
+ KGSL_LOG_DUMP(device, "Current pagetable: %x\t"
+ "pagetable base: %x\n",
+ kgsl_mmu_get_ptname_from_ptbase(cur_pt_base),
+ cur_pt_base);
+
+
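+ /* the new pagetable base follows the set-pagetable command */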
+ cur_pt_base = rb_copy[read_idx++];
+
+ KGSL_LOG_DUMP(device, "New pagetable: %x\t"
+ "pagetable base: %x\n",
+ kgsl_mmu_get_ptname_from_ptbase(cur_pt_base),
+ cur_pt_base);
+ }
+ }
+ if (num_iommu_units)
+ kfree(reg_map_array);
+
+ cur_pt_base = pt_base;
+
+ read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
+ if (read_idx < 0)
+ read_idx += rb_count;
+ KGSL_LOG_DUMP(device,
+ "RB: addr=%8.8x window:%4.4x-%4.4x, start:%4.4x\n",
+ cp_rb_base, cp_rb_rptr, cp_rb_wptr, read_idx);
+ adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count);
+
+ if (is_adreno_pm_ib_enabled()) {
+ for (read_idx = NUM_DWORDS_OF_RINGBUFFER_HISTORY;
+ read_idx >= 0; --read_idx) {
+ uint32_t this_cmd = rb_copy[read_idx];
+ if (adreno_cmd_is_ib(this_cmd)) {
+ uint32_t ib_addr = rb_copy[read_idx+1];
+ uint32_t ib_size = rb_copy[read_idx+2];
+ if (ib_size && cp_ib1_base == ib_addr) {
+ KGSL_LOG_DUMP(device,
+ "IB1: base:%8.8X "
+ "count:%d\n", ib_addr, ib_size);
+ dump_ib(device, "IB1: ", cur_pt_base,
+ read_idx<<2, ib_addr, ib_size,
+ 1);
+ }
+ }
+ }
+ for (i = 0; i < ib_list.count; ++i) {
+ uint32_t ib_size = ib_list.sizes[i];
+ uint32_t ib_offset = ib_list.offsets[i];
+ if (ib_size && cp_ib2_base == ib_list.bases[i]) {
+ KGSL_LOG_DUMP(device,
+ "IB2: base:%8.8X count:%d\n",
+ cp_ib2_base, ib_size);
+ dump_ib(device, "IB2: ", cur_pt_base, ib_offset,
+ ib_list.bases[i], ib_size, 1);
+ }
+ }
+ }
+
+
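+ /* Dump the registers if the user asked for it */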
+ if (is_adreno_pm_regs_enabled()) {
+ if (adreno_is_a20x(adreno_dev))
+ adreno_dump_regs(device, a200_registers,
+ a200_registers_count);
+ else if (adreno_is_a22x(adreno_dev))
+ adreno_dump_regs(device, a220_registers,
+ a220_registers_count);
+ else if (adreno_is_a225(adreno_dev))
+ adreno_dump_regs(device, a225_registers,
+ a225_registers_count);
+ else if (adreno_is_a3xx(adreno_dev))
+ adreno_dump_regs(device, a3xx_registers,
+ a3xx_registers_count);
+ }
+
+error_vfree:
+ vfree(rb_copy);
+end:
+ return result;
+}
+
+
+int adreno_postmortem_dump(struct kgsl_device *device, int manual)
+{
+ bool saved_nap;
+ struct kgsl_pwrctrl *pwr;
+
+ BUG_ON(device == NULL);
+
+ pwr = &device->pwrctrl;
+
+ kgsl_cffdump_hang(device->id);
+
+
+
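+ /* For a manual dump, make sure that the system is idle */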
+ if (manual) {
+ if (device->active_cnt != 0) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->suspend_gate);
+ mutex_lock(&device->mutex);
+ }
+
+ if (device->state == KGSL_STATE_ACTIVE)
+ kgsl_idle(device);
+
+ }
+ KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
+ pwr->power_flags, pwr->active_pwrlevel);
+
+ KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
+ pwr->interval_timeout);
+
+ KGSL_LOG_DUMP(device, "GRP_CLK = %lu ",
+ kgsl_get_clkrate(pwr->grp_clks[0]));
+
+ KGSL_LOG_DUMP(device, "BUS CLK = %lu ",
+ kgsl_get_clkrate(pwr->ebi1_clk));
+
+
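+ /* Disable the idle timer so we don't get interrupted */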
+ del_timer_sync(&device->idle_timer);
+ mutex_unlock(&device->mutex);
+ flush_workqueue(device->work_queue);
+ mutex_lock(&device->mutex);
+
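+ /* Turn off napping to make sure we have the clocks'
+ * full attention through the following process */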
+ saved_nap = device->pwrctrl.nap_allowed;
+ device->pwrctrl.nap_allowed = false;
+
+
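+ /* Force on the clocks */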
+ kgsl_pwrctrl_wake(device);
+
+
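+ /* Disable the irq */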
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+ adreno_dump(device);
+
+
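+ /* Restore nap mode */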
+ device->pwrctrl.nap_allowed = saved_nap;
+
+
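+ /* On a manual trigger, turn on the interrupts and put
+ * the clocks to sleep; they will recover themselves on
+ * the next event. For a hang, leave things as they are
+ * until recovery kicks in. */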
+ if (manual) {
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+
+
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
+ kgsl_pwrctrl_sleep(device);
+ }
+
+ KGSL_DRV_ERR(device, "Dump Finished\n");
+
+ return 0;
+}
diff --git a/drivers/gpu/msm/adreno_postmortem.h b/drivers/gpu/msm/adreno_postmortem.h
new file mode 100644
index 0000000..7706037
--- /dev/null
+++ b/drivers/gpu/msm/adreno_postmortem.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ADRENO_POSTMORTEM_H
+#define __ADRENO_POSTMORTEM_H
+
+struct kgsl_device;
+
+int adreno_postmortem_dump(struct kgsl_device *device, int manual);
+
+#endif
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
new file mode 100644
index 0000000..90ff642
--- /dev/null
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -0,0 +1,1271 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/log2.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_trace.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "adreno_ringbuffer.h"
+#include "adreno_debugfs.h"
+
+#include "a2xx_reg.h"
+#include "a3xx_reg.h"
+
+#define GSL_RB_NOP_SIZEDWORDS 2
+
+#define CP_DEBUG_DEFAULT 0xA000000
+
+void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
+{
+ BUG_ON(rb->wptr == 0);
+
+ kgsl_pwrscale_busy(rb->device);
+
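+ /* synchronize memory before informing the hardware of
+ * the new commands in the ringbuffer */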
+ mb();
+
+ adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
+}
+
+static void
+adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
+ int wptr_ahead)
+{
+ int nopcount;
+ unsigned int freecmds;
+ unsigned int *cmds;
+ uint cmds_gpu;
+ unsigned long wait_time;
+ unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
+ unsigned long wait_time_part;
+ unsigned int prev_reg_val[hang_detect_regs_count];
+
+ memset(prev_reg_val, 0, sizeof(prev_reg_val));
+
+
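+ /* if wptr is ahead, fill the remaining space with NOPs */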
+ if (wptr_ahead) {
+
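+ /* -1 for the NOP packet header */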
+ nopcount = rb->sizedwords - rb->wptr - 1;
+
+ cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+ cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;
+
+ GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));
+
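+ /* Make sure that rptr is not 0 before submitting
+ * commands at the end of the ringbuffer; rptr and wptr
+ * must not become equal while the buffer is non-empty */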
+ do {
+ GSL_RB_GET_READPTR(rb, &rb->rptr);
+ } while (!rb->rptr);
+
+ rb->wptr++;
+
+ adreno_ringbuffer_submit(rb);
+
+ rb->wptr = 0;
+ }
+
+ wait_time = jiffies + wait_timeout;
+ wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
+
+ while (1) {
+ GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+ freecmds = rb->rptr - rb->wptr;
+
+ if (freecmds == 0 || freecmds > numcmds)
+ break;
+
+ if (time_after(jiffies, wait_time_part)) {
+ wait_time_part = jiffies +
+ msecs_to_jiffies(KGSL_TIMEOUT_PART);
+ if ((adreno_hang_detect(rb->device,
+ prev_reg_val))){
+ KGSL_DRV_ERR(rb->device,
+ "Hang detected while waiting for freespace in"
+ "ringbuffer rptr: 0x%x, wptr: 0x%x\n",
+ rb->rptr, rb->wptr);
+ goto err;
+ }
+ }
+
+ if (time_after(jiffies, wait_time)) {
+ KGSL_DRV_ERR(rb->device,
+ "Timed out while waiting for freespace in ringbuffer "
+ "rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr);
+ goto err;
+ }
+
+ continue;
+
+err:
+ if (!adreno_dump_and_recover(rb->device)) {
+ wait_time = jiffies + wait_timeout;
+ } else {
+
+ BUG();
+ }
+ }
+}
+
+unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+ unsigned int numcmds)
+{
+ unsigned int *ptr = NULL;
+
+ BUG_ON(numcmds >= rb->sizedwords);
+
+ GSL_RB_GET_READPTR(rb, &rb->rptr);
+
+ if (rb->wptr >= rb->rptr) {
+
+
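+ /* wptr ahead or equal to rptr; reserve dwords for the wrap NOP packet */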
+ if ((rb->wptr + numcmds) > (rb->sizedwords -
+ GSL_RB_NOP_SIZEDWORDS))
+ adreno_ringbuffer_waitspace(rb, numcmds, 1);
+ } else {
+
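+ /* wptr behind rptr */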
+ if ((rb->wptr + numcmds) >= rb->rptr)
+ adreno_ringbuffer_waitspace(rb, numcmds, 0);
+
+
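+ /* check the remaining space, reserving dwords for the wrap NOP packet */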
+ if ((rb->wptr + numcmds) > (rb->sizedwords -
+ GSL_RB_NOP_SIZEDWORDS))
+ adreno_ringbuffer_waitspace(rb, numcmds, 1);
+ }
+
+ ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+ rb->wptr += numcmds;
+
+ return ptr;
+}
+
+static int _load_firmware(struct kgsl_device *device, const char *fwfile,
+ void **data, int *len)
+{
+ const struct firmware *fw = NULL;
+ int ret;
+
+ ret = request_firmware(&fw, fwfile, device->dev);
+
+ if (ret) {
+ KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
+ fwfile, ret);
+ return ret;
+ }
+
+ *data = kmalloc(fw->size, GFP_KERNEL);
+
+ if (*data) {
+ memcpy(*data, fw->data, fw->size);
+ *len = fw->size;
+ } else
+ KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);
+
+ release_firmware(fw);
+ return (*data != NULL) ? 0 : -ENOMEM;
+}
+
+int adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int ret = 0;
+
+ if (adreno_dev->pm4_fw == NULL) {
+ int len;
+ void *ptr;
+
+ ret = _load_firmware(device, adreno_dev->pm4_fwfile,
+ &ptr, &len);
+
+ if (ret)
+ goto err;
+
+
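+ /* PM4 size is 3 dword aligned plus 1 dword of version */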
+ if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
+ KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
+ ret = -EINVAL;
+ kfree(ptr);
+ goto err;
+ }
+
+ adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
+ adreno_dev->pm4_fw = ptr;
+ adreno_dev->pm4_fw_version = adreno_dev->pm4_fw[1];
+ }
+
+err:
+ return ret;
+}
+
+
+int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int i;
+
+ if (adreno_dev->pm4_fw == NULL) {
+ int ret = adreno_ringbuffer_read_pm4_ucode(device);
+ if (ret)
+ return ret;
+ }
+
+ KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
+ adreno_dev->pm4_fw_version);
+ if (adreno_is_a3xx(adreno_dev))
+ adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT);
+ else
+ adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
+ adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < adreno_dev->pm4_fw_size; i++)
+ adreno_regwrite(device, REG_CP_ME_RAM_DATA,
+ adreno_dev->pm4_fw[i]);
+
+ return 0;
+}
+
+int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int ret = 0;
+
+ if (adreno_dev->pfp_fw == NULL) {
+ int len;
+ void *ptr;
+
+ ret = _load_firmware(device, adreno_dev->pfp_fwfile,
+ &ptr, &len);
+ if (ret)
+ goto err;
+
+
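+ /* PFP size should be dword aligned */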
+ if (len % sizeof(uint32_t) != 0) {
+ KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
+ ret = -EINVAL;
+ kfree(ptr);
+ goto err;
+ }
+
+ adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
+ adreno_dev->pfp_fw = ptr;
+ adreno_dev->pfp_fw_version = adreno_dev->pfp_fw[5];
+ }
+
+err:
+ return ret;
+}
+
+int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int i;
+
+ if (adreno_dev->pfp_fw == NULL) {
+ int ret = adreno_ringbuffer_read_pfp_ucode(device);
+ if (ret)
+ return ret;
+ }
+
+ KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
+ adreno_dev->pfp_fw_version);
+
+ adreno_regwrite(device, adreno_dev->gpudev->reg_cp_pfp_ucode_addr, 0);
+ for (i = 1; i < adreno_dev->pfp_fw_size; i++)
+ adreno_regwrite(device,
+ adreno_dev->gpudev->reg_cp_pfp_ucode_data,
+ adreno_dev->pfp_fw[i]);
+ return 0;
+}
+
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
+{
+ int status;
+
+ union reg_cp_rb_cntl cp_rb_cntl;
+ unsigned int rb_cntl;
+ struct kgsl_device *device = rb->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ if (rb->flags & KGSL_FLAGS_STARTED)
+ return 0;
+
+ if (init_ram)
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;
+
+ kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
+ sizeof(struct kgsl_rbmemptrs));
+
+ kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
+ (rb->sizedwords << 2));
+
+ if (adreno_is_a2xx(adreno_dev)) {
+ adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
+ (rb->memptrs_desc.gpuaddr
+ + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
+
+
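+ /* setup WPTR delay */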
+ adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0);
+ }
+
+
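+ /* setup REG_CP_RB_CNTL */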
+ adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
+ cp_rb_cntl.val = rb_cntl;
+
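+ /* size of ringbuffer */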
+ cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
+
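+ /* quadwords to read before updating mem RPTR */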
+ cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
+
+ if (adreno_is_a2xx(adreno_dev)) {
+
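+ /* WPTR polling */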
+ cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
+ }
+
+
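+ /* mem RPTR writebacks */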
+ cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;
+
+ adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);
+
+ adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);
+
+ adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
+ rb->memptrs_desc.gpuaddr +
+ GSL_RB_MEMPTRS_RPTR_OFFSET);
+
+ if (adreno_is_a3xx(adreno_dev)) {
+
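+ /* enable access protection to privileged registers */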
+ adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);
+
+
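+ /* RBBM registers */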
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);
+
+
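+ /* CP registers */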
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);
+
+
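+ /* RB registers */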
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);
+
+
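+ /* VBIF registers */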
+ adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
+ }
+
+ if (adreno_is_a2xx(adreno_dev)) {
+
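+ /* explicitly clear all cp interrupts */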
+ adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
+ }
+
+
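+ /* setup scratch/timestamp */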
+ adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ soptimestamp));
+
+ adreno_regwrite(device, REG_SCRATCH_UMSK,
+ GSL_RB_MEMPTRS_SCRATCH_MASK);
+
+
+
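+ /* load the CP ucode */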
+ status = adreno_ringbuffer_load_pm4_ucode(device);
+ if (status != 0)
+ return status;
+
+
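+ /* load the prefetch parser ucode */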
+ status = adreno_ringbuffer_load_pfp_ucode(device);
+ if (status != 0)
+ return status;
+
+
+ if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
+ adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000E0602);
+
+ rb->rptr = 0;
+ rb->wptr = 0;
+
+
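+ /* clear ME_HALT to start the micro engine */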
+ adreno_regwrite(device, REG_CP_ME_CNTL, 0);
+
+
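+ /* ME init is GPU specific, so jump into the sub-function */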
+ adreno_dev->gpudev->rb_init(adreno_dev, rb);
+
+
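+ /* idle the device to validate ME INIT */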
+ status = adreno_idle(device);
+
+ if (status == 0)
+ rb->flags |= KGSL_FLAGS_STARTED;
+
+ return status;
+}
+
+void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
+{
+ if (rb->flags & KGSL_FLAGS_STARTED) {
+
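+ /* ME_HALT */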
+ adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
+ rb->flags &= ~KGSL_FLAGS_STARTED;
+ }
+}
+
+int adreno_ringbuffer_init(struct kgsl_device *device)
+{
+ int status;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ rb->device = device;
+ rb->sizedwords = KGSL_RB_SIZE >> 2;
+
+
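+ /* allocate memory for the ringbuffer itself */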
+ status = kgsl_allocate_contiguous(&rb->buffer_desc,
+ (rb->sizedwords << 2));
+
+ if (status != 0) {
+ adreno_ringbuffer_close(rb);
+ return status;
+ }
+
+
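+ /* allocate memory for polling and timestamps */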
+ status = kgsl_allocate_contiguous(&rb->memptrs_desc,
+ sizeof(struct kgsl_rbmemptrs));
+
+ if (status != 0) {
+ adreno_ringbuffer_close(rb);
+ return status;
+ }
+
+
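+ /* overlay the memptrs structure on the memptrs memory */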
+ rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;
+
+ return 0;
+}
+
+void adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+
+ kgsl_sharedmem_free(&rb->buffer_desc);
+ kgsl_sharedmem_free(&rb->memptrs_desc);
+
+ kfree(adreno_dev->pfp_fw);
+ kfree(adreno_dev->pm4_fw);
+
+ adreno_dev->pfp_fw = NULL;
+ adreno_dev->pm4_fw = NULL;
+
+ memset(rb, 0, sizeof(struct adreno_ringbuffer));
+}
+
+static uint32_t
+adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
+ struct adreno_context *context,
+ unsigned int flags, unsigned int *cmds,
+ int sizedwords)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+ unsigned int *ringcmds;
+ unsigned int timestamp;
+ unsigned int total_sizedwords = sizedwords;
+ unsigned int i;
+ unsigned int rcmd_gpu;
+ unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
+ unsigned int gpuaddr = rb->device->memstore.gpuaddr;
+
+ if (context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS))
+ context_id = context->id;
+
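+ /* reserve space to optionally turn off protected mode
+ * error checking and to do the timestamp compare */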
+ total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
+ total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
+
+ total_sizedwords += 2;
+
+ if (adreno_is_a3xx(adreno_dev))
+ total_sizedwords += 7;
+
+ total_sizedwords += 2;
+ if (context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
+ total_sizedwords += 3;
+ total_sizedwords += 4;
+ total_sizedwords += 3;
+ } else {
+ total_sizedwords += 4;
+ }
+
+ ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+ if (context && (context->flags & CTXT_FLAGS_GPU_HANG)) {
+ KGSL_CTXT_WARN(rb->device,
+ "Context %p caused a gpu hang. Will not accept commands for context %d\n",
+ context, context->id);
+ return rb->timestamp[context_id];
+ }
+
+ rcmd_gpu = rb->buffer_desc.gpuaddr
+ + sizeof(uint)*(rb->wptr-total_sizedwords);
+
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
+
+ if (flags & KGSL_CMD_FLAGS_PMODE) {
+
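+ /* disable protected mode error checking */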
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
+ }
+
+ for (i = 0; i < sizedwords; i++) {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
+ cmds++;
+ }
+
+ if (flags & KGSL_CMD_FLAGS_PMODE) {
+
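+ /* re-enable protected mode error checking */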
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
+ }
+
+
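+ /* always increment the global timestamp, once */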
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
+ if (context) {
+ if (context_id == KGSL_MEMSTORE_GLOBAL)
+ rb->timestamp[context_id] =
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL];
+ else
+ rb->timestamp[context_id]++;
+ }
+ timestamp = rb->timestamp[context_id];
+
+
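+ /* scratchpad timestamp used for recovery */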
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
+
+ if (adreno_is_a3xx(adreno_dev)) {
+
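+ /* flush HLSQ lazy updates so no resources are pending
+ * for indirect loads after this timestamp */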
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_EVENT_WRITE, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x07);
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
+ }
+
+ if (context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
+
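+ /* start-of-pipeline timestamp for this context */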
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(context->id, soptimestamp)));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
+
+
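+ /* end-of-pipeline timestamp for this context */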
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_EVENT_WRITE, 3));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp)));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
+
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp)));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
+ } else {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_EVENT_WRITE, 3));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp)));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
+ }
+
+ if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
+
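+ /* conditional execution based on memory values */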
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_COND_EXEC, 4));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ context_id, ts_cmp_enable)) >> 2);
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(
+ context_id, ref_wait_ts)) >> 2);
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
+
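+ /* # of conditional command DWORDs */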
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_INTERRUPT, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
+ }
+
+ if (adreno_is_a3xx(adreno_dev)) {
+
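+ /* dummy set-constant to trigger context rollover */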
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_CONSTANT, 2));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ (0x4<<16)|(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
+ }
+
+ adreno_ringbuffer_submit(rb);
+
+ return timestamp;
+}
+
+unsigned int
+adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+ struct adreno_context *drawctxt,
+ unsigned int flags,
+ unsigned int *cmds,
+ int sizedwords)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+
+ if (device->state & KGSL_STATE_HUNG)
+ return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
+ KGSL_TIMESTAMP_RETIRED);
+ return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds, sizedwords);
+}
+
+static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
+ int sizedwords);
+
+static bool
+_handle_type3(struct kgsl_device_private *dev_priv, uint *hostaddr)
+{
+ unsigned int opcode = cp_type3_opcode(*hostaddr);
+ switch (opcode) {
+ case CP_INDIRECT_BUFFER_PFD:
+ case CP_INDIRECT_BUFFER_PFE:
+ case CP_COND_INDIRECT_BUFFER_PFE:
+ case CP_COND_INDIRECT_BUFFER_PFD:
+ return _parse_ibs(dev_priv, hostaddr[1], hostaddr[2]);
+ case CP_NOP:
+ case CP_WAIT_FOR_IDLE:
+ case CP_WAIT_REG_MEM:
+ case CP_WAIT_REG_EQ:
+ case CP_WAT_REG_GTE:
+ case CP_WAIT_UNTIL_READ:
+ case CP_WAIT_IB_PFD_COMPLETE:
+ case CP_REG_RMW:
+ case CP_REG_TO_MEM:
+ case CP_MEM_WRITE:
+ case CP_MEM_WRITE_CNTR:
+ case CP_COND_EXEC:
+ case CP_COND_WRITE:
+ case CP_EVENT_WRITE:
+ case CP_EVENT_WRITE_SHD:
+ case CP_EVENT_WRITE_CFL:
+ case CP_EVENT_WRITE_ZPD:
+ case CP_DRAW_INDX:
+ case CP_DRAW_INDX_2:
+ case CP_DRAW_INDX_BIN:
+ case CP_DRAW_INDX_2_BIN:
+ case CP_VIZ_QUERY:
+ case CP_SET_STATE:
+ case CP_SET_CONSTANT:
+ case CP_IM_LOAD:
+ case CP_IM_LOAD_IMMEDIATE:
+ case CP_LOAD_CONSTANT_CONTEXT:
+ case CP_INVALIDATE_STATE:
+ case CP_SET_SHADER_BASES:
+ case CP_SET_BIN_MASK:
+ case CP_SET_BIN_SELECT:
+ case CP_SET_BIN_BASE_OFFSET:
+ case CP_SET_BIN_DATA:
+ case CP_CONTEXT_UPDATE:
+ case CP_INTERRUPT:
+ case CP_IM_STORE:
+ case CP_LOAD_STATE:
+ break;
+
+ case CP_ME_INIT:
+ case CP_SET_PROTECTED_MODE:
+ default:
+ KGSL_CMD_ERR(dev_priv->device, "bad CP opcode %0x\n", opcode);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+_handle_type0(struct kgsl_device_private *dev_priv, uint *hostaddr)
+{
+ unsigned int reg = type0_pkt_offset(*hostaddr);
+ unsigned int cnt = type0_pkt_size(*hostaddr);
+ if (reg < 0x0192 || (reg + cnt) >= 0x8000) {
+ KGSL_CMD_ERR(dev_priv->device, "bad type0 reg: 0x%0x cnt: %d\n",
+ reg, cnt);
+ return false;
+ }
+ return true;
+}
+
+static bool _parse_ibs(struct kgsl_device_private *dev_priv,
+ uint gpuaddr, int sizedwords)
+{
+ static uint level;
+ bool ret = false;
+ uint *hostaddr, *hoststart;
+ int dwords_left = sizedwords;
+ struct kgsl_mem_entry *entry;
+
+ spin_lock(&dev_priv->process_priv->mem_lock);
+ entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
+ gpuaddr, sizedwords * sizeof(uint));
+ spin_unlock(&dev_priv->process_priv->mem_lock);
+ if (entry == NULL) {
+ KGSL_CMD_ERR(dev_priv->device,
+ "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
+ return false;
+ }
+
+ hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr);
+ if (hostaddr == NULL) {
+ KGSL_CMD_ERR(dev_priv->device,
+ "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
+ return false;
+ }
+
+ hoststart = hostaddr;
+
+ level++;
+
+ KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
+ gpuaddr, sizedwords, hostaddr);
+
+ mb();
+ while (dwords_left > 0) {
+ bool cur_ret = true;
+ int count = 0;
+
+ switch (*hostaddr >> 30) {
+ case 0x0:
+ count = (*hostaddr >> 16)+2;
+ cur_ret = _handle_type0(dev_priv, hostaddr);
+ break;
+ case 0x1:
+ count = 2;
+ break;
+ case 0x3:
+ count = ((*hostaddr >> 16) & 0x3fff) + 2;
+ cur_ret = _handle_type3(dev_priv, hostaddr);
+ break;
+ default:
+ KGSL_CMD_ERR(dev_priv->device, "unexpected type: "
+ "type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
+ *hostaddr >> 30, *hostaddr, hostaddr,
+ gpuaddr+4*(sizedwords-dwords_left));
+ cur_ret = false;
+ count = dwords_left;
+ break;
+ }
+
+ if (!cur_ret) {
+ KGSL_CMD_ERR(dev_priv->device,
+ "bad sub-type: #:%d/%d, v:0x%08x"
+ " @ 0x%p[gb:0x%08x], level:%d\n",
+ sizedwords-dwords_left, sizedwords, *hostaddr,
+ hostaddr, gpuaddr+4*(sizedwords-dwords_left),
+ level);
+
+ if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
+ >= 2)
+ print_hex_dump(KERN_ERR,
+ level == 1 ? "IB1:" : "IB2:",
+ DUMP_PREFIX_OFFSET, 32, 4, hoststart,
+ sizedwords*4, 0);
+ goto done;
+ }
+
+
+ dwords_left -= count;
+ hostaddr += count;
+ if (dwords_left < 0) {
+ KGSL_CMD_ERR(dev_priv->device,
+ "bad count: c:%d, #:%d/%d, "
+ "v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
+ count, sizedwords-(dwords_left+count),
+ sizedwords, *(hostaddr-count), hostaddr-count,
+ gpuaddr+4*(sizedwords-(dwords_left+count)),
+ level);
+ if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
+ >= 2)
+ print_hex_dump(KERN_ERR,
+ level == 1 ? "IB1:" : "IB2:",
+ DUMP_PREFIX_OFFSET, 32, 4, hoststart,
+ sizedwords*4, 0);
+ goto done;
+ }
+ }
+
+ ret = true;
+done:
+ if (!ret)
+ KGSL_DRV_ERR(dev_priv->device,
+ "parsing failed: gpuaddr:0x%08x, "
+ "host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);
+
+ level--;
+
+ return ret;
+}
+
+int
+adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ struct kgsl_ibdesc *ibdesc,
+ unsigned int numibs,
+ uint32_t *timestamp,
+ unsigned int flags)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int *link;
+ unsigned int *cmds;
+ unsigned int i;
+ struct adreno_context *drawctxt;
+ unsigned int start_index = 0;
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE_SYSTRACE
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+#endif
+
+ if (device->state & KGSL_STATE_HUNG)
+ return -EBUSY;
+ if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
+ context == NULL || ibdesc == NULL || numibs == 0)
+ return -EINVAL;
+
+ drawctxt = context->devctxt;
+
+ if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
+ KGSL_CTXT_WARN(device, "Context %p caused a gpu hang;"
+ " will not accept commands for context %d\n",
+ drawctxt, drawctxt->id);
+ return -EDEADLK;
+ }
+
+ cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
+ GFP_KERNEL);
+ if (!link) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(unsigned int) * (numibs * 3 + 4));
+ return -ENOMEM;
+ }
+
+
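+ /* When a preamble is enabled, the state-restore commands
+ * live in the first IB of the chain; skip them if no
+ * context switch has occurred */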
+ if (drawctxt->flags & CTXT_FLAGS_PREAMBLE &&
+ adreno_dev->drawctxt_active == drawctxt)
+ start_index = 1;
+
+ if (!start_index) {
+ *cmds++ = cp_nop_packet(1);
+ *cmds++ = KGSL_START_OF_IB_IDENTIFIER;
+ } else {
+ *cmds++ = cp_nop_packet(4);
+ *cmds++ = KGSL_START_OF_IB_IDENTIFIER;
+ *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+ *cmds++ = ibdesc[0].gpuaddr;
+ *cmds++ = ibdesc[0].sizedwords;
+ }
+ for (i = start_index; i < numibs; i++) {
+ if (unlikely(adreno_dev->ib_check_level >= 1 &&
+ !_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
+ ibdesc[i].sizedwords))) {
+ kfree(link);
+ return -EINVAL;
+ }
+ *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
+ *cmds++ = ibdesc[i].gpuaddr;
+ *cmds++ = ibdesc[i].sizedwords;
+ }
+
+ *cmds++ = cp_nop_packet(1);
+ *cmds++ = KGSL_END_OF_IB_IDENTIFIER;
+
+ kgsl_setstate(&device->mmu, context->id,
+ kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
+ device->id));
+
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE_SYSTRACE
+ if (device->id == 0 && device->prev_pid != -1 &&
+ device->prev_pid != task_tgid_nr(current)) {
+ trace_kgsl_usage(device, KGSL_PWRFLAGS_ON,
+ dev_priv->process_priv->pid,
+ device->gputime.total, device->gputime.busy,
+ pwr->active_pwrlevel,
+ pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+ device->prev_pid = task_tgid_nr(current);
+ }
+#endif
+
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ if (device->current_process_priv == NULL ||
+ device->current_process_priv->pid != dev_priv->process_priv->pid)
+ device->current_process_priv = dev_priv->process_priv;
+#endif
+
+ adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
+
+ *timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
+ drawctxt, 0,
+ &link[0], (cmds - link));
+
+ KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
+ context->id, (unsigned int)ibdesc, numibs, *timestamp);
+
+ kfree(link);
+
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
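+ /* wait for idle after every IB1 to keep the CFF capture consistent */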
+ adreno_idle(device);
+#endif
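+ /* If the context hung and was recovered, return an error
+ * so that the application may handle it */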
+ if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_RECOVERED)
+ return -EDEADLK;
+ else
+ return 0;
+
+}
+
+static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
+ unsigned int *ptr,
+ bool inc)
+{
+ int status = -EINVAL;
+ unsigned int val1;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int start_ptr = *ptr;
+
+ while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
+ if (inc)
+ start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
+ size);
+ else
+ start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
+ size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
+ if (KGSL_CMD_IDENTIFIER == val1) {
+ if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
+ start_ptr = adreno_ringbuffer_dec_wrapped(
+ start_ptr, size);
+ *ptr = start_ptr;
+ status = 0;
+ break;
+ }
+ }
+ return status;
+}
+
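+/*
+ * Scan the ringbuffer for the write of the given global eop timestamp and
+ * return the offset of the command sequence that follows it.
+ */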
+static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int global_eop,
+ bool inc)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[3];
+ int i = 0;
+ bool check = false;
+
+ if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
+ return status;
+
+ do {
+ if (!inc)
+ temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+ temp_rb_rptr, size);
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
+ temp_rb_rptr);
+
+ if (check && ((inc && val[i] == global_eop) ||
+ (!inc && (val[i] ==
+ cp_type3_packet(CP_MEM_WRITE, 2) ||
+ val[i] == CACHE_FLUSH_TS)))) {
+ i = (i + 2) % 3;
+ if (val[i] == rb->device->memstore.gpuaddr +
+ KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
+ eoptimestamp)) {
+ int j = ((i + 2) % 3);
+ if ((inc && (val[j] == CACHE_FLUSH_TS ||
+ val[j] == cp_type3_packet(
+ CP_MEM_WRITE, 2))) ||
+ (!inc && val[j] == global_eop)) {
+
+ status = 0;
+ break;
+ }
+ }
+ i = (i + 1) % 3;
+ }
+ if (inc)
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
+ temp_rb_rptr, size);
+
+ i = (i + 1) % 3;
+ if (2 == i)
+ check = true;
+ } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);
+ if (!status) {
+ status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
+ if (!status) {
+ *rb_rptr = temp_rb_rptr;
+ KGSL_DRV_ERR(rb->device,
+ "Offset of cmd sequence after eop timestamp: 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ }
+ }
+ if (status)
+ KGSL_DRV_ERR(rb->device,
+ "Failed to find the command sequence after eop timestamp\n");
+ return status;
+}
+
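+/* Walk forward from rb_rptr looking for the IB1 that matches the hang */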
+static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
+ unsigned int *rb_rptr,
+ unsigned int ib1)
+{
+ int status = -EINVAL;
+ unsigned int temp_rb_rptr = *rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[2];
+ int i = 0;
+ bool check = false;
+ bool ctx_switch = false;
+
+ while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+
+ if (check && val[i] == ib1) {
+
+ i = (i + 1) % 2;
+ if (adreno_cmd_is_ib(val[i])) {
+
+ status = _find_start_of_cmd_seq(rb,
+ &temp_rb_rptr, false);
+ KGSL_DRV_ERR(rb->device,
+ "Found the hanging IB at offset 0x%x\n",
+ temp_rb_rptr / sizeof(unsigned int));
+ break;
+ }
+ i = (i + 1) % 2;
+ }
+ if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+ if (ctx_switch) {
+ KGSL_DRV_ERR(rb->device,
+ "Context switch encountered before bad "
+ "IB found\n");
+ break;
+ }
+ ctx_switch = true;
+ }
+ i = (i + 1) % 2;
+ if (1 == i)
+ check = true;
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+ size);
+ }
+ if (!status)
+ *rb_rptr = temp_rb_rptr;
+ return status;
+}
+
+static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
+ unsigned int rb_rptr)
+{
+ unsigned int temp_rb_rptr = rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int val[2];
+ int i = 0;
+ bool check = false;
+ bool cmd_start = false;
+
+
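+ /* Find the start-of-IB marker; if the commands were
+ * submitted with a skipped preamble (cp_nop_packet(4)),
+ * rewrite it to cp_nop_packet(1) so the preamble runs */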
+ while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
+ if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
+
+ i = (i + 1) % 2;
+ if (val[i] == cp_nop_packet(4)) {
+ temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
+ temp_rb_rptr, size);
+ kgsl_sharedmem_writel(&rb->buffer_desc,
+ temp_rb_rptr, cp_nop_packet(1));
+ }
+ KGSL_DRV_ERR(rb->device,
+ "Turned preamble on at offset 0x%x\n",
+ temp_rb_rptr / 4);
+ break;
+ }
+ if (KGSL_CMD_IDENTIFIER == val[i]) {
+ if (cmd_start)
+ break;
+ cmd_start = true;
+ }
+
+ i = (i + 1) % 2;
+ if (1 == i)
+ check = true;
+ temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
+ size);
+ }
+}
+
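+/*
+ * Copy the ringbuffer into temp_rb_buffer, dropping commands that belong to
+ * the hanging context; bad_rb_buffer receives the unfiltered stream.
+ */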
+static void _copy_valid_rb_content(struct adreno_ringbuffer *rb,
+ unsigned int rb_rptr, unsigned int *temp_rb_buffer,
+ int *rb_size, unsigned int *bad_rb_buffer,
+ int *bad_rb_size,
+ int *last_valid_ctx_id)
+{
+ unsigned int good_rb_idx = 0, cmd_start_idx = 0;
+ unsigned int val1 = 0;
+ struct kgsl_context *k_ctxt;
+ struct adreno_context *a_ctxt;
+ unsigned int bad_rb_idx = 0;
+ int copy_rb_contents = 0;
+ unsigned int temp_rb_rptr;
+ unsigned int size = rb->buffer_desc.size;
+ unsigned int good_cmd_start_idx = 0;
+
+ while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
+
+ if (KGSL_CMD_IDENTIFIER == val1) {
+ cmd_start_idx = bad_rb_idx - 1;
+ if (copy_rb_contents)
+ good_cmd_start_idx = good_rb_idx - 1;
+ }
+
+
+ if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
+ unsigned int temp_idx, val2;
+
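+ /* increment by 3 dwords to get to the context id */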
+ temp_rb_rptr = (rb_rptr + 3 * sizeof(unsigned int)) % size;
+ kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
+ temp_rb_rptr);
+
+ k_ctxt = idr_find(&rb->device->context_idr, val2);
+ if (k_ctxt) {
+ a_ctxt = k_ctxt->devctxt;
+
+ if (!copy_rb_contents && ((k_ctxt &&
+ !(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
+ !k_ctxt)) {
+ for (temp_idx = cmd_start_idx;
+ temp_idx < bad_rb_idx;
+ temp_idx++)
+ temp_rb_buffer[good_rb_idx++] =
+ bad_rb_buffer[temp_idx];
+ *last_valid_ctx_id = val2;
+ copy_rb_contents = 1;
+ } else if (copy_rb_contents && k_ctxt &&
+ (a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {
+ good_rb_idx = good_cmd_start_idx;
+ copy_rb_contents = 0;
+ }
+ }
+ }
+
+ if (copy_rb_contents)
+ temp_rb_buffer[good_rb_idx++] = val1;
+ bad_rb_buffer[bad_rb_idx++] = val1;
+
+ rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
+ }
+ *rb_size = good_rb_idx;
+ *bad_rb_size = bad_rb_idx;
+}
+
+int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+ struct adreno_recovery_data *rec_data)
+{
+ int status;
+ struct kgsl_device *device = rb->device;
+ unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
+ struct kgsl_context *context;
+ struct adreno_context *adreno_context;
+
+ context = idr_find(&device->context_idr, rec_data->context_id);
+
+
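+ /* Look for the command stream right after the last retired global eop */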
+ status = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
+ rec_data->global_eop + 1, false);
+ if (status)
+ goto done;
+
+ if (context) {
+ adreno_context = context->devctxt;
+
+ if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
+ if (rec_data->ib1) {
+ status = _find_hanging_ib_sequence(rb, &rb_rptr,
+ rec_data->ib1);
+ if (status)
+ goto copy_rb_contents;
+ }
+ _turn_preamble_on_for_ib_seq(rb, rb_rptr);
+ } else {
+ status = -EINVAL;
+ }
+ }
+
+copy_rb_contents:
+ _copy_valid_rb_content(rb, rb_rptr, rec_data->rb_buffer,
+ &rec_data->rb_size,
+ rec_data->bad_rb_buffer,
+ &rec_data->bad_rb_size,
+ &rec_data->last_valid_ctx_id);
+ if (status) {
+ rec_data->bad_rb_size = 0;
+ status = 0;
+ }
+ if (!context)
+ rec_data->rb_size = 0;
+done:
+ return status;
+}
+
+void
+adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
+ int num_rb_contents)
+{
+ int i;
+ unsigned int *ringcmds;
+ unsigned int rcmd_gpu;
+
+ if (!num_rb_contents)
+ return;
+
+ if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
+ adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
+ rb->rptr = 0;
+ BUG_ON(num_rb_contents > rb->buffer_desc.size);
+ }
+ ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
+ rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
+ for (i = 0; i < num_rb_contents; i++)
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
+ rb->wptr += num_rb_contents;
+ adreno_ringbuffer_submit(rb);
+}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
new file mode 100644
index 0000000..7560848
--- /dev/null
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ADRENO_RINGBUFFER_H
+#define __ADRENO_RINGBUFFER_H
+
+
+#define KGSL_RB_SIZE (32 * 1024)
+#define KGSL_RB_BLKSIZE 16
+
+#define REG_CP_TIMESTAMP REG_SCRATCH_REG0
+
+
+struct kgsl_device;
+struct kgsl_device_private;
+struct adreno_recovery_data;
+
+#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
+struct kgsl_rbmemptrs {
+ int rptr;
+ int wptr_poll;
+};
+
+#define GSL_RB_MEMPTRS_RPTR_OFFSET \
+ (offsetof(struct kgsl_rbmemptrs, rptr))
+
+#define GSL_RB_MEMPTRS_WPTRPOLL_OFFSET \
+ (offsetof(struct kgsl_rbmemptrs, wptr_poll))
+
+struct adreno_ringbuffer {
+ struct kgsl_device *device;
+ uint32_t flags;
+
+ struct kgsl_memdesc buffer_desc;
+
+ struct kgsl_memdesc memptrs_desc;
+ struct kgsl_rbmemptrs *memptrs;
+
+
+ unsigned int sizedwords;
+
+ unsigned int wptr;
+ unsigned int rptr;
+
+ unsigned int timestamp[KGSL_MEMSTORE_MAX];
+};
+
+
+#define GSL_RB_WRITE(ring, gpuaddr, data) \
+ do { \
+ *ring = data; \
+ wmb(); \
+ kgsl_cffdump_setmem(gpuaddr, data, 4); \
+ ring++; \
+ gpuaddr += sizeof(uint); \
+ } while (0)
+
+#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x1
+
+#define GSL_RB_CNTL_NO_UPDATE 0x0
+#define GSL_RB_GET_READPTR(rb, data) \
+ do { \
+ *(data) = rb->memptrs->rptr; \
+ } while (0)
+
+#define GSL_RB_CNTL_POLL_EN 0x0
+
+#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2
+
+int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ struct kgsl_ibdesc *ibdesc,
+ unsigned int numibs,
+ uint32_t *timestamp,
+ unsigned int flags);
+
+int adreno_ringbuffer_init(struct kgsl_device *device);
+
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb,
+ unsigned int init_ram);
+
+void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
+
+void adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
+
+unsigned int adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+ struct adreno_context *drawctxt,
+ unsigned int flags,
+ unsigned int *cmdaddr,
+ int sizedwords);
+
+void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb);
+
+void kgsl_cp_intrcallback(struct kgsl_device *device);
+
+int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
+ struct adreno_recovery_data *rec_data);
+
+void
+adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
+ int num_rb_contents);
+
+unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+ unsigned int numcmds);
+
+int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device);
+
+int adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device);
+
+static inline int adreno_ringbuffer_count(struct adreno_ringbuffer *rb,
+ unsigned int rptr)
+{
+ if (rb->wptr >= rptr)
+ return rb->wptr - rptr;
+ return rb->wptr + rb->sizedwords - rptr;
+}
+
+static inline unsigned int adreno_ringbuffer_inc_wrapped(unsigned int val,
+ unsigned int size)
+{
+ return (val + sizeof(unsigned int)) % size;
+}
+
+static inline unsigned int adreno_ringbuffer_dec_wrapped(unsigned int val,
+ unsigned int size)
+{
+ return (val + size - sizeof(unsigned int)) % size;
+}
+
+#endif
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
new file mode 100644
index 0000000..a412c12
--- /dev/null
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -0,0 +1,696 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_snapshot.h"
+
+#include "adreno.h"
+#include "adreno_pm4types.h"
+#include "a2xx_reg.h"
+#include "a3xx_reg.h"
+
+#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100
+
+
+#define SNAPSHOT_OBJ_BUFSIZE 64
+
+#define SNAPSHOT_OBJ_TYPE_IB 0
+
+static int snapshot_frozen_objsize;
+
+static struct kgsl_snapshot_obj {
+ int type;
+ uint32_t gpuaddr;
+ uint32_t ptbase;
+ void *ptr;
+ int dwords;
+} objbuf[SNAPSHOT_OBJ_BUFSIZE];
+
+static int objbufptr;
+
+static void push_object(struct kgsl_device *device, int type, uint32_t ptbase,
+ uint32_t gpuaddr, int dwords)
+{
+ int index;
+ void *ptr;
+
+
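+ /* If the object is already in the list, just update its size */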
+ for (index = 0; index < objbufptr; index++) {
+ if (objbuf[index].gpuaddr == gpuaddr &&
+ objbuf[index].ptbase == ptbase) {
+ objbuf[index].dwords = dwords;
+ return;
+ }
+ }
+
+ if (objbufptr == SNAPSHOT_OBJ_BUFSIZE) {
+ KGSL_DRV_ERR(device, "snapshot: too many snapshot objects\n");
+ return;
+ }
+
+ ptr = adreno_convertaddr(device, ptbase, gpuaddr, dwords << 2);
+
+ if (ptr == NULL) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Can't find GPU address for %x\n", gpuaddr);
+ return;
+ }
+
+
+ objbuf[objbufptr].type = type;
+ objbuf[objbufptr].gpuaddr = gpuaddr;
+ objbuf[objbufptr].ptbase = ptbase;
+ objbuf[objbufptr].dwords = dwords;
+ objbuf[objbufptr++].ptr = ptr;
+}
+
+
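+/*
+ * Return 1 if an object with a matching type, GPU address and pagetable
+ * base is already being tracked
+ */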
+static int find_object(int type, unsigned int gpuaddr, unsigned int ptbase)
+{
+ int index;
+
+ for (index = 0; index < objbufptr; index++) {
+ if (objbuf[index].gpuaddr == gpuaddr &&
+ objbuf[index].ptbase == ptbase &&
+ objbuf[index].type == type)
+ return 1;
+ }
+
+ return 0;
+}
+
+
+static struct {
+ unsigned int base;
+ unsigned int size;
+} vsc_pipe[8];
+
+
+static unsigned int vsc_size_address;
+
+
+static struct {
+ unsigned int base;
+ unsigned int stride;
+} vbo[16];
+
+
+static unsigned int vfd_index_max;
+
+
+static unsigned int vfd_control_0;
+
+
+static unsigned int sp_vs_pvt_mem_addr;
+static unsigned int sp_fs_pvt_mem_addr;
+
+
+static int load_state_unit_sizes[7][2] = {
+ { 2, 4 },
+ { 0, 1 },
+ { 2, 4 },
+ { 0, 1 },
+ { 8, 2 },
+ { 8, 2 },
+ { 8, 2 },
+};
+
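+/*
+ * Parse a CP_LOAD_STATE packet; when the state block is sourced from GPU
+ * memory (source == 4) freeze the referenced shader memory so it appears
+ * in the snapshot.
+ */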
+static void ib_parse_load_state(struct kgsl_device *device, unsigned int *pkt,
+ unsigned int ptbase)
+{
+ unsigned int block, source, type;
+
+
+ if (type3_pkt_size(pkt[0]) < 2)
+ return;
+
+
+ block = (pkt[1] >> 19) & 0x07;
+ source = (pkt[1] >> 16) & 0x07;
+ type = pkt[2] & 0x03;
+
+ if (source == 4) {
+ int unitsize, ret;
+
+ if (type == 0)
+ unitsize = load_state_unit_sizes[block][0];
+ else
+ unitsize = load_state_unit_sizes[block][1];
+
+
+
+ ret = kgsl_snapshot_get_object(device, ptbase,
+ pkt[2] & 0xFFFFFFFC,
+ (((pkt[1] >> 22) & 0x03FF) * unitsize) << 2,
+ SNAPSHOT_GPU_OBJECT_SHADER);
+ snapshot_frozen_objsize += ret;
+ }
+}
+
+
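+/*
+ * Parse a CP_SET_BIN_DATA packet and freeze both buffers that it
+ * references
+ */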
+static void ib_parse_set_bin_data(struct kgsl_device *device, unsigned int *pkt,
+ unsigned int ptbase)
+{
+ int ret;
+
+ if (type3_pkt_size(pkt[0]) < 2)
+ return;
+
+
+ ret = kgsl_snapshot_get_object(device, ptbase, pkt[1], 0,
+ SNAPSHOT_GPU_OBJECT_GENERIC);
+ snapshot_frozen_objsize += ret;
+
+
+ ret = kgsl_snapshot_get_object(device, ptbase, pkt[2], 32,
+ SNAPSHOT_GPU_OBJECT_GENERIC);
+ snapshot_frozen_objsize += ret;
+}
+
+/*
+ * This opcode writes to GPU memory - any buffer it writes to is likely to
+ * be valuable to capture in the snapshot, so mark every buffer that is
+ * written to as frozen
+ */
+
+static void ib_parse_mem_write(struct kgsl_device *device, unsigned int *pkt,
+ unsigned int ptbase)
+{
+ int ret;
+
+ if (type3_pkt_size(pkt[0]) < 1)
+ return;
+
+ /*
+ * The address is where the data in the rest of this packet is written
+ * to, but since that might be an offset into the larger buffer we need
+ * to get the whole thing. Pass a size of 0 to kgsl_snapshot_get_object
+ * to capture the entire buffer.
+ */
+
+ ret = kgsl_snapshot_get_object(device, ptbase, pkt[1] & 0xFFFFFFFC, 0,
+ SNAPSHOT_GPU_OBJECT_GENERIC);
+
+ snapshot_frozen_objsize += ret;
+}
+
+
+static void ib_parse_draw_indx(struct kgsl_device *device, unsigned int *pkt,
+ unsigned int ptbase)
+{
+ int ret, i;
+
+ if (type3_pkt_size(pkt[0]) < 3)
+ return;
+
+
+
+ if (type3_pkt_size(pkt[0]) > 3) {
+ ret = kgsl_snapshot_get_object(device, ptbase, pkt[4], pkt[5],
+ SNAPSHOT_GPU_OBJECT_GENERIC);
+ snapshot_frozen_objsize += ret;
+ }
+
+
+
+
+ for (i = 0; i < ARRAY_SIZE(vsc_pipe); i++) {
+ if (vsc_pipe[i].base != 0 && vsc_pipe[i].size != 0) {
+ ret = kgsl_snapshot_get_object(device, ptbase,
+ vsc_pipe[i].base, vsc_pipe[i].size,
+ SNAPSHOT_GPU_OBJECT_GENERIC);
+ snapshot_frozen_objsize += ret;
+ }
+ }
+
+
+
+ if (vsc_size_address) {
+ ret = kgsl_snapshot_get_object(device, ptbase,
+ vsc_size_address, 32,
+ SNAPSHOT_GPU_OBJECT_GENERIC);
+ snapshot_frozen_objsize += ret;
+ }
+
+
+ if (sp_vs_pvt_mem_addr) {
+ ret = kgsl_snapshot_get_object(device, ptbase,
+ sp_vs_pvt_mem_addr, 8192,
+ SNAPSHOT_GPU_OBJECT_GENERIC);
+ snapshot_frozen_objsize += ret;
+ sp_vs_pvt_mem_addr = 0;
+ }
+
+ if (sp_fs_pvt_mem_addr) {
+ ret = kgsl_snapshot_get_object(device, ptbase,
+ sp_fs_pvt_mem_addr, 8192,
+ SNAPSHOT_GPU_OBJECT_GENERIC);
+ snapshot_frozen_objsize += ret;
+ sp_fs_pvt_mem_addr = 0;
+ }
+
+
+
+
+ for (i = 0; i < (vfd_control_0) >> 27; i++) {
+ int size;
+
+
+ if (vbo[i].base != 0) {
+ size = vbo[i].stride * vfd_index_max;
+
+ ret = kgsl_snapshot_get_object(device, ptbase,
+ vbo[i].base,
+ 0, SNAPSHOT_GPU_OBJECT_GENERIC);
+ snapshot_frozen_objsize += ret;
+ }
+
+ vbo[i].base = 0;
+ vbo[i].stride = 0;
+ }
+
+ vfd_control_0 = 0;
+ vfd_index_max = 0;
+}
+
+
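+/* Dispatch a type3 packet to the parser for its opcode */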
+static void ib_parse_type3(struct kgsl_device *device, unsigned int *ptr,
+ unsigned int ptbase)
+{
+ switch (cp_type3_opcode(*ptr)) {
+ case CP_LOAD_STATE:
+ ib_parse_load_state(device, ptr, ptbase);
+ break;
+ case CP_SET_BIN_DATA:
+ ib_parse_set_bin_data(device, ptr, ptbase);
+ break;
+ case CP_MEM_WRITE:
+ ib_parse_mem_write(device, ptr, ptbase);
+ break;
+ case CP_DRAW_INDX:
+ ib_parse_draw_indx(device, ptr, ptbase);
+ break;
+ }
+}
+
+/*
+ * Parse type0 packets found in the stream. Some of the registers that are
+ * written are clues to GPU buffers that we need to freeze. Register writes
+ * are considered valid when a draw initiator is issued, so just cache the
+ * values here and freeze them when a CP_DRAW_INDX is seen. This protects
+ * against needlessly caching buffers that won't be used during a draw call.
+ */
+
+static void ib_parse_type0(struct kgsl_device *device, unsigned int *ptr,
+ unsigned int ptbase)
+{
+ int size = type0_pkt_size(*ptr);
+ int offset = type0_pkt_offset(*ptr);
+ int i;
+
+ for (i = 0; i < size; i++, offset++) {
+
+
+
+ if (offset >= A3XX_VSC_PIPE_DATA_ADDRESS_0 &&
+ offset <= A3XX_VSC_PIPE_DATA_LENGTH_7) {
+ int index = offset - A3XX_VSC_PIPE_DATA_ADDRESS_0;
+
+
+ if ((index % 3) == 0)
+ vsc_pipe[index / 3].base = ptr[i + 1];
+ else if ((index % 3) == 1)
+ vsc_pipe[index / 3].size = ptr[i + 1];
+ } else if ((offset >= A3XX_VFD_FETCH_INSTR_0_0) &&
+ (offset <= A3XX_VFD_FETCH_INSTR_1_F)) {
+ int index = offset - A3XX_VFD_FETCH_INSTR_0_0;
+
+
+ if ((index % 2) == 0)
+ vbo[index >> 1].stride =
+ (ptr[i + 1] >> 7) & 0x1FF;
+ else
+ vbo[index >> 1].base = ptr[i + 1];
+ } else {
+
+ switch (offset) {
+ case A3XX_VFD_CONTROL_0:
+ vfd_control_0 = ptr[i + 1];
+ break;
+ case A3XX_VFD_INDEX_MAX:
+ vfd_index_max = ptr[i + 1];
+ break;
+ case A3XX_VSC_SIZE_ADDRESS:
+ vsc_size_address = ptr[i + 1];
+ break;
+ case A3XX_SP_VS_PVT_MEM_ADDR_REG:
+ sp_vs_pvt_mem_addr = ptr[i + 1];
+ break;
+ case A3XX_SP_FS_PVT_MEM_ADDR_REG:
+ sp_fs_pvt_mem_addr = ptr[i + 1];
+ break;
+ }
+ }
+ }
+}
+
+
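+/*
+ * Snapshot an indirect buffer: walk its packets, recursing into nested IBs
+ * and parsing type0/type3 packets for buffers worth freezing, then freeze
+ * the IB itself.
+ */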
+static void ib_add_gpu_object(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int dwords)
+{
+ int i, ret, rem = dwords;
+ unsigned int *src;
+
+
+ if (kgsl_snapshot_have_object(device, ptbase, gpuaddr, dwords << 2))
+ return;
+
+ src = (unsigned int *) adreno_convertaddr(device, ptbase, gpuaddr,
+ dwords << 2);
+
+ if (src == NULL)
+ return;
+
+ for (i = 0; rem > 0; rem--, i++) {
+ int pktsize;
+
+
+ if (!pkt_is_type0(src[i]) && !pkt_is_type3(src[i]))
+ break;
+
+ pktsize = type3_pkt_size(src[i]);
+
+ if (!pktsize || (pktsize + 1) > rem)
+ break;
+
+ if (pkt_is_type3(src[i])) {
+ if (adreno_cmd_is_ib(src[i]))
+ ib_add_gpu_object(device, ptbase,
+ src[i + 1], src[i + 2]);
+ else
+ ib_parse_type3(device, &src[i], ptbase);
+ } else if (pkt_is_type0(src[i])) {
+ ib_parse_type0(device, &src[i], ptbase);
+ }
+
+ i += pktsize;
+ rem -= pktsize;
+ }
+
+ ret = kgsl_snapshot_get_object(device, ptbase, gpuaddr, dwords << 2,
+ SNAPSHOT_GPU_OBJECT_IB);
+
+ snapshot_frozen_objsize += ret;
+}
+
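+/* Dump the GPU instruction store by reading it back through registers */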
+static int snapshot_istore(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_istore *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ int count, i;
+
+ count = adreno_dev->istore_size * adreno_dev->instruction_size;
+
+ if (remain < (count * 4) + sizeof(*header)) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Not enough memory for the istore section");
+ return 0;
+ }
+
+ header->count = adreno_dev->istore_size;
+
+ for (i = 0; i < count; i++)
+ kgsl_regread(device, ADRENO_ISTORE_START + i, &data[i]);
+
+ return (count * 4) + sizeof(*header);
+}
+
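+/*
+ * Dump the entire ringbuffer into the snapshot. Also walk backwards from
+ * the read pointer to find the start of the currently executing submission
+ * (the last context switch marker) and freeze the IBs referenced in that
+ * active region.
+ */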
+static int snapshot_rb(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_rb *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
+ unsigned int ptbase, rptr, *rbptr, ibbase;
+ int index, size, i;
+ int parse_ibs = 0, ib_parse_start;
+
+
+ ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
+
+
+ kgsl_regread(device, REG_CP_RB_RPTR, &rptr);
+
+
+ kgsl_regread(device, REG_CP_IB1_BASE, &ibbase);
+
+
+ index = rptr;
+ rbptr = rb->buffer_desc.hostptr;
+
+ do {
+ index--;
+
+ if (index < 0) {
+ index = rb->sizedwords - 3;
+
+
+ if (index < rb->wptr) {
+ index = rb->wptr;
+ break;
+ }
+ }
+
+ if (adreno_cmd_is_ib(rbptr[index]) &&
+ rbptr[index + 1] == ibbase)
+ break;
+ } while (index != rb->wptr);
+
+
+ while (index != rb->wptr) {
+ index--;
+
+ if (index < 0) {
+ index = rb->sizedwords - 2;
+
+
+ if (index < rb->wptr) {
+ index = rb->wptr;
+ break;
+ }
+ }
+
+
+ if ((rbptr[index] == cp_nop_packet(1)) &&
+ (rbptr[index + 1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER))
+ break;
+ }
+
+
+ ib_parse_start = index;
+
+
+ size = (rb->sizedwords << 2);
+
+ if (remain < size + sizeof(*header)) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Not enough memory for the rb section");
+ return 0;
+ }
+
+
+ header->start = rb->wptr;
+ header->end = rb->wptr;
+ header->wptr = rb->wptr;
+ header->rbsize = rb->sizedwords;
+ header->count = rb->sizedwords;
+
+
+ index = rb->wptr;
+ for (i = 0; i < rb->sizedwords; i++) {
+ *data = rbptr[index];
+
+
+ if (parse_ibs == 0 && index == ib_parse_start)
+ parse_ibs = 1;
+ else if (index == rptr || adreno_rb_ctxtswitch(&rbptr[index]))
+ parse_ibs = 0;
+
+ if (parse_ibs && adreno_cmd_is_ib(rbptr[index])) {
+ unsigned int ibaddr = rbptr[index + 1];
+ unsigned int ibsize = rbptr[index + 2];
+
+
+ struct kgsl_memdesc *memdesc =
+ adreno_find_ctxtmem(device, ptbase, ibaddr,
+ ibsize);
+
+
+ if (NULL == memdesc)
+ if (kgsl_gpuaddr_in_memdesc(
+ &device->mmu.setstate_memory,
+ ibaddr, ibsize))
+ memdesc = &device->mmu.setstate_memory;
+
+ if (ibaddr == ibbase || memdesc != NULL)
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB,
+ ptbase, ibaddr, ibsize);
+ else
+ ib_add_gpu_object(device, ptbase, ibaddr,
+ ibsize);
+ }
+
+ index = index + 1;
+
+ if (index == rb->sizedwords)
+ index = 0;
+
+ data++;
+ }
+
+
+ return size + sizeof(*header);
+}
+
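+/*
+ * Dump a single tracked IB into the snapshot, queuing any nested IBs that
+ * it references for dumping as well
+ */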
+static int snapshot_ib(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_ib *header = snapshot;
+ struct kgsl_snapshot_obj *obj = priv;
+ unsigned int *src = obj->ptr;
+ unsigned int *dst = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < (obj->dwords << 2) + sizeof(*header)) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Not enough memory for the ib section");
+ return 0;
+ }
+
+
+ header->gpuaddr = obj->gpuaddr;
+ header->ptbase = obj->ptbase;
+ header->size = obj->dwords;
+
+
+ for (i = 0; i < obj->dwords; i++, src++, dst++) {
+ *dst = *src;
+
+ if (pkt_is_type3(*src)) {
+ if ((obj->dwords - i) < type3_pkt_size(*src) + 1)
+ continue;
+
+ if (adreno_cmd_is_ib(*src))
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB,
+ obj->ptbase, src[1], src[2]);
+ else
+ ib_parse_type3(device, src, obj->ptbase);
+ }
+ }
+
+ return (obj->dwords << 2) + sizeof(*header);
+}
+
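+/* Emit the snapshot section for a tracked object (currently only IBs) */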
+static void *dump_object(struct kgsl_device *device, int obj, void *snapshot,
+ int *remain)
+{
+ switch (objbuf[obj].type) {
+ case SNAPSHOT_OBJ_TYPE_IB:
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_IB, snapshot, remain,
+ snapshot_ib, &objbuf[obj]);
+ break;
+ default:
+ KGSL_DRV_ERR(device,
+ "snapshot: Invalid snapshot object type: %d\n",
+ objbuf[obj].type);
+ break;
+ }
+
+ return snapshot;
+}
+
+
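+/*
+ * Top level Adreno snapshot: dump the ringbuffer, the active IB1/IB2 and
+ * the buffers they reference, then append any GPU specific sections. The
+ * istore is only dumped when snapshotting after a hang.
+ */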
+void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
+ int hang)
+{
+ int i;
+ uint32_t ptbase, ibbase, ibsize;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+
+ objbufptr = 0;
+
+ snapshot_frozen_objsize = 0;
+
+
+
+ vfd_control_0 = 0;
+ vfd_index_max = 0;
+ vsc_size_address = 0;
+
+ memset(vsc_pipe, 0, sizeof(vsc_pipe));
+ memset(vbo, 0, sizeof(vbo));
+
+
+ ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
+
+
+ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_RB,
+ snapshot, remain, snapshot_rb, NULL);
+
+
+ kgsl_regread(device, REG_CP_IB1_BASE, &ibbase);
+ kgsl_regread(device, REG_CP_IB1_BUFSZ, &ibsize);
+
+
+ if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) {
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
+ ibbase, ibsize);
+ KGSL_DRV_ERR(device, "CP_IB1_BASE not found in the ringbuffer. "
+ "Dumping %x dwords of the buffer.\n", ibsize);
+ }
+
+ kgsl_regread(device, REG_CP_IB2_BASE, &ibbase);
+ kgsl_regread(device, REG_CP_IB2_BUFSZ, &ibsize);
+
+
+ if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) {
+ push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
+ ibbase, ibsize);
+ }
+
+ for (i = 0; i < objbufptr; i++)
+ snapshot = dump_object(device, i, snapshot, remain);
+
+
+ if (hang) {
+ snapshot = kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_ISTORE, snapshot, remain,
+ snapshot_istore, NULL);
+ }
+
+
+ if (adreno_dev->gpudev->snapshot)
+ snapshot = adreno_dev->gpudev->snapshot(adreno_dev, snapshot,
+ remain, hang);
+
+ if (snapshot_frozen_objsize)
+ KGSL_DRV_ERR(device, "GPU snapshot froze %dKb of GPU buffers\n",
+ snapshot_frozen_objsize / 1024);
+
+ return snapshot;
+}
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
new file mode 100644
index 0000000..55692a6
--- /dev/null
+++ b/drivers/gpu/msm/kgsl.c
@@ -0,0 +1,2654 @@
+/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/fb.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/android_pmem.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+#include <linux/genlock.h>
+#include <linux/rbtree.h>
+#include <linux/ashmem.h>
+#include <linux/major.h>
+#include <linux/ion.h>
+#include <linux/io.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_debugfs.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_log.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_device.h"
+#include "kgsl_trace.h"
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "kgsl."
+
+static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
+static char *ksgl_mmu_type;
+module_param_named(ptcount, kgsl_pagetable_count, int, 0);
+MODULE_PARM_DESC(kgsl_pagetable_count,
+"Minimum number of pagetables for KGSL to allocate at initialization time");
+module_param_named(mmutype, ksgl_mmu_type, charp, 0);
+MODULE_PARM_DESC(ksgl_mmu_type,
+"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");
+
+static struct ion_client *kgsl_ion_client;
+
+
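+/*
+ * Register a callback to run when a timestamp is retired on the given
+ * context (or on the global timestamp for KGSL_MEMSTORE_GLOBAL). If the
+ * timestamp has already been retired the callback runs immediately;
+ * otherwise the event is inserted in timestamp order among the events for
+ * the same context.
+ */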
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+ void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
+ void *owner)
+{
+ struct kgsl_event *event;
+ struct list_head *n;
+ unsigned int cur_ts;
+ struct kgsl_context *context = NULL;
+
+ if (cb == NULL)
+ return -EINVAL;
+
+ if (id != KGSL_MEMSTORE_GLOBAL) {
+ context = idr_find(&device->context_idr, id);
+ if (context == NULL)
+ return -EINVAL;
+ }
+ cur_ts = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
+
+
+
+ if (timestamp_cmp(cur_ts, ts) >= 0) {
+ cb(device, priv, id, cur_ts);
+ return 0;
+ }
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event == NULL)
+ return -ENOMEM;
+
+ event->context = context;
+ event->timestamp = ts;
+ event->priv = priv;
+ event->func = cb;
+ event->owner = owner;
+
+
+ for (n = device->events.next ; n != &device->events; n = n->next) {
+ struct kgsl_event *e =
+ list_entry(n, struct kgsl_event, list);
+
+ if (e->context != context)
+ continue;
+
+ if (timestamp_cmp(e->timestamp, ts) > 0) {
+ list_add(&event->list, n->prev);
+ break;
+ }
+ }
+
+ if (n == &device->events)
+ list_add_tail(&event->list, &device->events);
+
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_add_event);
+
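+/* Fire and remove all pending events that belong to the given context */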
+static void kgsl_cancel_events_ctxt(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ struct kgsl_event *event, *event_tmp;
+ unsigned int id, cur;
+
+ cur = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);
+ id = context->id;
+
+ list_for_each_entry_safe(event, event_tmp, &device->events, list) {
+ if (event->context != context)
+ continue;
+
+ if (event->func)
+ event->func(device, event->priv, id, cur);
+
+ list_del(&event->list);
+ kfree(event);
+ }
+}
+
+void kgsl_cancel_events(struct kgsl_device *device,
+ void *owner)
+{
+ struct kgsl_event *event, *event_tmp;
+ unsigned int id, cur;
+
+ list_for_each_entry_safe(event, event_tmp, &device->events, list) {
+ if (event->owner != owner)
+ continue;
+
+ cur = kgsl_readtimestamp(device, event->context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
+ if (event->func)
+ event->func(device, event->priv, id, cur);
+
+ list_del(&event->list);
+ kfree(event);
+ }
+}
+EXPORT_SYMBOL(kgsl_cancel_events);
+
+
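+/*
+ * Find the memory entry containing the given GPU address by checking every
+ * process whose pagetable matches ptbase
+ */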
+struct kgsl_mem_entry *kgsl_get_mem_entry(unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size)
+{
+ struct kgsl_process_private *priv;
+ struct kgsl_mem_entry *entry;
+
+ mutex_lock(&kgsl_driver.process_mutex);
+
+ list_for_each_entry(priv, &kgsl_driver.process_list, list) {
+ if (!kgsl_mmu_pt_equal(priv->pagetable, ptbase))
+ continue;
+ spin_lock(&priv->mem_lock);
+ entry = kgsl_sharedmem_find_region(priv, gpuaddr, size);
+
+ if (entry) {
+ spin_unlock(&priv->mem_lock);
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return entry;
+ }
+ spin_unlock(&priv->mem_lock);
+ }
+ mutex_unlock(&kgsl_driver.process_mutex);
+
+ return NULL;
+}
+EXPORT_SYMBOL(kgsl_get_mem_entry);
+
+static inline struct kgsl_mem_entry *
+kgsl_mem_entry_create(void)
+{
+ struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*entry));
+ else {
+ kref_init(&entry->refcount);
+ entry->memdesc.handle = NULL;
+ }
+
+ return entry;
+}
+
+unsigned int kgsl_get_alloc_size(int detailed)
+{
+ unsigned int ret = 0;
+ struct kgsl_process_private *private;
+ int i = 0;
+
+ ret = kgsl_driver.stats.page_alloc;
+ printk("kgsl: kgsl_driver.stats.page_alloc = %u\n", kgsl_driver.stats.page_alloc);
+ printk("kgsl: kgsl_driver.stats.page_alloc_kernel = %u\n", kgsl_driver.stats.page_alloc_kernel);
+ printk("kgsl: kgsl_driver.stats.pre_alloc = %u\n", kgsl_driver.stats.pre_alloc);
+ printk("kgsl: kgsl_driver.stats.pre_alloc_kernel = %u\n", kgsl_driver.stats.pre_alloc_kernel);
+
+ if (!detailed)
+ return ret;
+
+ mutex_lock(&kgsl_driver.process_mutex);
+
+ list_for_each_entry(private, &kgsl_driver.process_list, list) {
+ printk("kgsl: below is going to list all memory info of pid:%d \n", private->pid);
+ for (i = 0; i < KGSL_MEM_ENTRY_MAX; i++) {
+ switch (i) {
+ case KGSL_MEM_ENTRY_PAGE_ALLOC:
+ if (private->stats[KGSL_MEM_ENTRY_PAGE_ALLOC].cur != 0)
+ printk("kgsl: page alloc %d\n", private->stats[KGSL_MEM_ENTRY_PAGE_ALLOC].cur);
+ break;
+ case KGSL_MEM_ENTRY_PRE_ALLOC:
+ if (private->stats[KGSL_MEM_ENTRY_PRE_ALLOC].cur != 0)
+ printk("kgsl: pre alloc %d\n", private->stats[KGSL_MEM_ENTRY_PRE_ALLOC].cur);
+ break;
+ }
+ }
+ }
+ mutex_unlock(&kgsl_driver.process_mutex);
+
+ return ret;
+}
+
+void
+kgsl_mem_entry_destroy(struct kref *kref)
+{
+ struct kgsl_mem_entry *entry = container_of(kref,
+ struct kgsl_mem_entry,
+ refcount);
+
+ if (entry->memtype != KGSL_MEM_ENTRY_KERNEL)
+ kgsl_driver.stats.mapped -= entry->memdesc.size;
+
+
+ if (entry->memtype == KGSL_MEM_ENTRY_ION) {
+ entry->memdesc.sg = NULL;
+ }
+
+ kgsl_sharedmem_free(&entry->memdesc);
+
+ switch (entry->memtype) {
+ case KGSL_MEM_ENTRY_PMEM:
+ case KGSL_MEM_ENTRY_ASHMEM:
+ if (entry->priv_data)
+ fput(entry->priv_data);
+ break;
+ case KGSL_MEM_ENTRY_ION:
+ ion_free(kgsl_ion_client, entry->priv_data);
+ break;
+ }
+
+ kfree(entry);
+}
+EXPORT_SYMBOL(kgsl_mem_entry_destroy);
+
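+/* Insert a memory entry into the process rbtree, ordered by GPU address */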
+static
+void kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
+ struct kgsl_process_private *process)
+{
+ struct rb_node **node;
+ struct rb_node *parent = NULL;
+
+ spin_lock(&process->mem_lock);
+
+ node = &process->mem_rb.rb_node;
+
+ while (*node) {
+ struct kgsl_mem_entry *cur;
+
+ parent = *node;
+ cur = rb_entry(parent, struct kgsl_mem_entry, node);
+
+ if (entry->memdesc.gpuaddr < cur->memdesc.gpuaddr)
+ node = &parent->rb_left;
+ else
+ node = &parent->rb_right;
+ }
+
+ rb_link_node(&entry->node, parent, node);
+ rb_insert_color(&entry->node, &process->mem_rb);
+
+ spin_unlock(&process->mem_lock);
+
+ entry->priv = process;
+ entry->memdesc.private = process;
+}
+
+
+static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
+{
+ if (entry == NULL)
+ return;
+
+ entry->priv->stats[entry->memtype].cur -= entry->memdesc.size;
+ entry->priv = NULL;
+
+ kgsl_mmu_unmap(entry->memdesc.pagetable, &entry->memdesc);
+
+ kgsl_mem_entry_put(entry);
+}
+
+
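+/*
+ * Allocate a context and assign it an id from the device idr. Ids are
+ * limited to KGSL_MEMSTORE_MAX because every context owns a slot in the
+ * memstore.
+ */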
+static struct kgsl_context *
+kgsl_create_context(struct kgsl_device_private *dev_priv)
+{
+ struct kgsl_context *context;
+ int ret, id;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+
+ if (context == NULL)
+ return NULL;
+
+ while (1) {
+ if (idr_pre_get(&dev_priv->device->context_idr,
+ GFP_KERNEL) == 0) {
+ kfree(context);
+ return NULL;
+ }
+
+ ret = idr_get_new_above(&dev_priv->device->context_idr,
+ context, 1, &id);
+
+ if (ret != -EAGAIN)
+ break;
+ }
+
+ if (ret) {
+ kfree(context);
+ return NULL;
+ }
+
+
+ if (id >= KGSL_MEMSTORE_MAX) {
+ KGSL_DRV_ERR(dev_priv->device, "cannot have more than %d "
+ "ctxts due to memstore limitation\n",
+ KGSL_MEMSTORE_MAX);
+ idr_remove(&dev_priv->device->context_idr, id);
+ kfree(context);
+ return NULL;
+ }
+
+ kref_init(&context->refcount);
+ context->id = id;
+ context->dev_priv = dev_priv;
+
+ return context;
+}
+
+void
+kgsl_context_detach(struct kgsl_context *context)
+{
+ int id;
+ struct kgsl_device *device;
+ if (context == NULL)
+ return;
+ device = context->dev_priv->device;
+ trace_kgsl_context_detach(device, context);
+ id = context->id;
+
+ if (device->ftbl->drawctxt_destroy)
+ device->ftbl->drawctxt_destroy(device, context);
+
+ BUG_ON(context->devctxt);
+ kgsl_cancel_events_ctxt(device, context);
+ idr_remove(&device->context_idr, id);
+ context->id = KGSL_CONTEXT_INVALID;
+ kgsl_context_put(context);
+}
+
+void
+kgsl_context_destroy(struct kref *kref)
+{
+ struct kgsl_context *context = container_of(kref, struct kgsl_context,
+ refcount);
+ kfree(context);
+}
+
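+/* Work handler that fires every event whose timestamp has been retired */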
+void kgsl_timestamp_expired(struct work_struct *work)
+{
+ struct kgsl_device *device = container_of(work, struct kgsl_device,
+ ts_expired_ws);
+ struct kgsl_event *event, *event_tmp;
+ uint32_t ts_processed;
+ unsigned int id;
+
+ mutex_lock(&device->mutex);
+
+
+ list_for_each_entry_safe(event, event_tmp, &device->events, list) {
+ ts_processed = kgsl_readtimestamp(device, event->context,
+ KGSL_TIMESTAMP_RETIRED);
+ if (timestamp_cmp(ts_processed, event->timestamp) < 0)
+ continue;
+
+ id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
+
+ if (event->func)
+ event->func(device, event->priv, id, ts_processed);
+
+ list_del(&event->list);
+ kfree(event);
+ }
+
+ mutex_unlock(&device->mutex);
+}
+EXPORT_SYMBOL(kgsl_timestamp_expired);
+
+static void kgsl_check_idle_locked(struct kgsl_device *device)
+{
+ if (device->pwrctrl.nap_allowed == true &&
+ device->state == KGSL_STATE_ACTIVE &&
+ device->requested_state == KGSL_STATE_NONE) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ if (kgsl_pwrctrl_sleep(device) != 0)
+ mod_timer(&device->idle_timer,
+ jiffies +
+ device->pwrctrl.interval_timeout);
+ }
+}
+
+static void kgsl_check_idle(struct kgsl_device *device)
+{
+ mutex_lock(&device->mutex);
+ kgsl_check_idle_locked(device);
+ mutex_unlock(&device->mutex);
+}
+
+struct kgsl_device *kgsl_get_device(int dev_idx)
+{
+ int i;
+ struct kgsl_device *ret = NULL;
+
+ mutex_lock(&kgsl_driver.devlock);
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->id == dev_idx) {
+ ret = kgsl_driver.devp[i];
+ break;
+ }
+ }
+
+ mutex_unlock(&kgsl_driver.devlock);
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_get_device);
+
+static struct kgsl_device *kgsl_get_minor(int minor)
+{
+ struct kgsl_device *ret = NULL;
+
+ if (minor < 0 || minor >= KGSL_DEVICE_MAX)
+ return NULL;
+
+ mutex_lock(&kgsl_driver.devlock);
+ ret = kgsl_driver.devp[minor];
+ mutex_unlock(&kgsl_driver.devlock);
+
+ return ret;
+}
+
+int kgsl_register_ts_notifier(struct kgsl_device *device,
+ struct notifier_block *nb)
+{
+ BUG_ON(device == NULL);
+ return atomic_notifier_chain_register(&device->ts_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(kgsl_register_ts_notifier);
+
+int kgsl_unregister_ts_notifier(struct kgsl_device *device,
+ struct notifier_block *nb)
+{
+ BUG_ON(device == NULL);
+ return atomic_notifier_chain_unregister(&device->ts_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL(kgsl_unregister_ts_notifier);
+
+int kgsl_check_timestamp(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp)
+{
+ unsigned int ts_processed;
+
+ ts_processed = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ return (timestamp_cmp(ts_processed, timestamp) >= 0);
+}
+EXPORT_SYMBOL(kgsl_check_timestamp);
+
+static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
+{
+ int status = -EINVAL;
+ unsigned int nap_allowed_saved;
+ struct kgsl_pwrscale_policy *policy_saved;
+
+ if (!device)
+ return -EINVAL;
+
+ KGSL_PWR_WARN(device, "suspend start\n");
+
+ mutex_lock(&device->mutex);
+ nap_allowed_saved = device->pwrctrl.nap_allowed;
+ device->pwrctrl.nap_allowed = false;
+ policy_saved = device->pwrscale.policy;
+ device->pwrscale.policy = NULL;
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SUSPEND);
+ if (device->active_cnt != 0) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->suspend_gate);
+ mutex_lock(&device->mutex);
+ }
+
+ del_timer_sync(&device->idle_timer);
+ switch (device->state) {
+ case KGSL_STATE_INIT:
+ break;
+ case KGSL_STATE_ACTIVE:
+
+ device->ftbl->idle(device);
+ case KGSL_STATE_NAP:
+ case KGSL_STATE_SLEEP:
+
+ INIT_COMPLETION(device->hwaccess_gate);
+ device->ftbl->suspend_context(device);
+ device->ftbl->stop(device);
+ pm_qos_update_request(&device->pm_qos_req_dma,
+ PM_QOS_DEFAULT_VALUE);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
+ break;
+ case KGSL_STATE_SLUMBER:
+ INIT_COMPLETION(device->hwaccess_gate);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SUSPEND);
+ break;
+ default:
+ KGSL_PWR_ERR(device, "suspend fail, device %d\n",
+ device->id);
+ goto end;
+ }
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ device->pwrctrl.nap_allowed = nap_allowed_saved;
+ device->pwrscale.policy = policy_saved;
+ status = 0;
+
+end:
+ mutex_unlock(&device->mutex);
+ KGSL_PWR_WARN(device, "suspend end\n");
+ return status;
+}
+
+static int kgsl_resume_device(struct kgsl_device *device)
+{
+ int status = -EINVAL;
+
+ if (!device) {
+ printk("kgsl_resume_device: device is null!\n");
+ return -EINVAL;
+ }
+ KGSL_PWR_WARN(device, "resume start\n");
+ mutex_lock(&device->mutex);
+ if (device->state == KGSL_STATE_SUSPEND) {
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
+ status = 0;
+ complete_all(&device->hwaccess_gate);
+ } else
+ printk("kgsl_resume_device: state=%d\n", device->state);
+
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+
+ mutex_unlock(&device->mutex);
+ KGSL_PWR_WARN(device, "resume end\n");
+ return status;
+}
+
+static int kgsl_suspend(struct device *dev)
+{
+
+ pm_message_t arg = {0};
+ struct kgsl_device *device = dev_get_drvdata(dev);
+ return kgsl_suspend_device(device, arg);
+}
+
+static int kgsl_resume(struct device *dev)
+{
+ struct kgsl_device *device = dev_get_drvdata(dev);
+ return kgsl_resume_device(device);
+}
+
+static int kgsl_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int kgsl_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+
+const struct dev_pm_ops kgsl_pm_ops = {
+ .suspend = kgsl_suspend,
+ .resume = kgsl_resume,
+ .runtime_suspend = kgsl_runtime_suspend,
+ .runtime_resume = kgsl_runtime_resume,
+};
+EXPORT_SYMBOL(kgsl_pm_ops);
+
+void kgsl_early_suspend_driver(struct early_suspend *h)
+{
+ struct kgsl_device *device = container_of(h,
+ struct kgsl_device, display_off);
+ KGSL_PWR_WARN(device, "early suspend start\n");
+ mutex_lock(&device->mutex);
+ device->pwrctrl.restore_slumber = true;
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
+ kgsl_pwrctrl_sleep(device);
+ mutex_unlock(&device->mutex);
+ KGSL_PWR_WARN(device, "early suspend end\n");
+}
+EXPORT_SYMBOL(kgsl_early_suspend_driver);
+
+int kgsl_suspend_driver(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
+ return kgsl_suspend_device(device, state);
+}
+EXPORT_SYMBOL(kgsl_suspend_driver);
+
+int kgsl_resume_driver(struct platform_device *pdev)
+{
+ struct kgsl_device *device = dev_get_drvdata(&pdev->dev);
+ return kgsl_resume_device(device);
+}
+EXPORT_SYMBOL(kgsl_resume_driver);
+
+void kgsl_late_resume_driver(struct early_suspend *h)
+{
+ struct kgsl_device *device = container_of(h,
+ struct kgsl_device, display_off);
+ KGSL_PWR_WARN(device, "late resume start\n");
+ mutex_lock(&device->mutex);
+ device->pwrctrl.restore_slumber = false;
+ if (device->pwrscale.policy == NULL)
+ kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
+ kgsl_pwrctrl_wake(device);
+ mutex_unlock(&device->mutex);
+ kgsl_check_idle(device);
+ KGSL_PWR_WARN(device, "late resume end\n");
+}
+EXPORT_SYMBOL(kgsl_late_resume_driver);
+
+static struct kgsl_process_private *
+kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
+{
+ struct kgsl_process_private *private;
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ int i;
+#endif
+
+ mutex_lock(&kgsl_driver.process_mutex);
+ list_for_each_entry(private, &kgsl_driver.process_list, list) {
+ if (private->pid == task_tgid_nr(current)) {
+ private->refcnt++;
+ goto out;
+ }
+ }
+
+
+ private = kzalloc(sizeof(struct kgsl_process_private), GFP_KERNEL);
+ if (private == NULL) {
+ KGSL_DRV_ERR(cur_dev_priv->device, "kzalloc(%zu) failed\n",
+ sizeof(struct kgsl_process_private));
+ goto out;
+ }
+
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ private->gputime.total = 0;
+ private->gputime.busy = 0;
+ for (i = 0; i < KGSL_MAX_PWRLEVELS; i++) {
+ private->gputime_in_state[i].total = 0;
+ private->gputime_in_state[i].busy = 0;
+ }
+#endif
+
+ spin_lock_init(&private->mem_lock);
+ private->refcnt = 1;
+ private->pid = task_tgid_nr(current);
+ private->mem_rb = RB_ROOT;
+
+ if (kgsl_mmu_enabled()) {
+ unsigned long pt_name;
+
+ pt_name = task_tgid_nr(current);
+ private->pagetable = kgsl_mmu_getpagetable(pt_name);
+ if (private->pagetable == NULL) {
+ kfree(private);
+ private = NULL;
+ goto out;
+ }
+ }
+
+ list_add(&private->list, &kgsl_driver.process_list);
+
+ kgsl_process_init_sysfs(private);
+
+out:
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return private;
+}
+
+static void
+kgsl_put_process_private(struct kgsl_device *device,
+ struct kgsl_process_private *private)
+{
+ struct kgsl_mem_entry *entry = NULL;
+ struct rb_node *node;
+
+ if (!private)
+ return;
+
+ mutex_lock(&kgsl_driver.process_mutex);
+
+ if (--private->refcnt)
+ goto unlock;
+
+ kgsl_process_uninit_sysfs(private);
+
+ list_del(&private->list);
+
+ for (node = rb_first(&private->mem_rb); node; ) {
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+ node = rb_next(&entry->node);
+
+ rb_erase(&entry->node, &private->mem_rb);
+ kgsl_mem_entry_detach_process(entry);
+ }
+ kgsl_mmu_putpagetable(private->pagetable);
+ kfree(private);
+unlock:
+ mutex_unlock(&kgsl_driver.process_mutex);
+}
+
+static int kgsl_release(struct inode *inodep, struct file *filep)
+{
+ int result = 0;
+ struct kgsl_device_private *dev_priv = filep->private_data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_context *context;
+ int next = 0;
+
+ filep->private_data = NULL;
+
+ mutex_lock(&device->mutex);
+ kgsl_check_suspended(device);
+
+ while (1) {
+ context = idr_get_next(&device->context_idr, &next);
+ if (context == NULL)
+ break;
+
+ if (context->dev_priv == dev_priv)
+ kgsl_context_detach(context);
+
+ next = next + 1;
+ }
+ kgsl_cancel_events(device, dev_priv);
+
+ device->open_count--;
+ if (device->open_count == 0) {
+ result = device->ftbl->stop(device);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ }
+
+ mutex_unlock(&device->mutex);
+ kfree(dev_priv);
+
+ kgsl_put_process_private(device, private);
+
+ pm_runtime_put(device->parentdev);
+ return result;
+}
+
+static int kgsl_open(struct inode *inodep, struct file *filep)
+{
+ int result;
+ struct kgsl_device_private *dev_priv;
+ struct kgsl_device *device;
+ unsigned int minor = iminor(inodep);
+
+ device = kgsl_get_minor(minor);
+ BUG_ON(device == NULL);
+
+ if (filep->f_flags & O_EXCL) {
+ KGSL_DRV_ERR(device, "O_EXCL not allowed\n");
+ return -EBUSY;
+ }
+
+ result = pm_runtime_get_sync(device->parentdev);
+ if (result < 0) {
+ KGSL_DRV_ERR(device,
+ "Runtime PM: Unable to wake up the device, rc = %d\n",
+ result);
+ return result;
+ }
+ result = 0;
+
+ dev_priv = kzalloc(sizeof(struct kgsl_device_private), GFP_KERNEL);
+ if (dev_priv == NULL) {
+ KGSL_DRV_ERR(device, "kzalloc failed(%d)\n",
+ sizeof(struct kgsl_device_private));
+ result = -ENOMEM;
+ goto err_pmruntime;
+ }
+
+ dev_priv->device = device;
+ filep->private_data = dev_priv;
+
+
+ dev_priv->process_priv = kgsl_get_process_private(dev_priv);
+ if (dev_priv->process_priv == NULL) {
+ result = -ENOMEM;
+ goto err_freedevpriv;
+ }
+
+ mutex_lock(&device->mutex);
+ kgsl_check_suspended(device);
+
+ if (device->open_count == 0) {
+ kgsl_sharedmem_set(&device->memstore, 0, 0,
+ device->memstore.size);
+
+ result = device->ftbl->start(device, true);
+
+ if (result) {
+ mutex_unlock(&device->mutex);
+ goto err_putprocess;
+ }
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ }
+ device->open_count++;
+ mutex_unlock(&device->mutex);
+
+ KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
+ device->name, kgsl_mmu_enabled() ? "on" : "off",
+ kgsl_pagetable_count);
+
+ return result;
+
+err_putprocess:
+ kgsl_put_process_private(device, dev_priv->process_priv);
+err_freedevpriv:
+ filep->private_data = NULL;
+ kfree(dev_priv);
+err_pmruntime:
+ pm_runtime_put(device->parentdev);
+ return result;
+}
+
+struct kgsl_mem_entry *
+kgsl_sharedmem_find_region(struct kgsl_process_private *private,
+ unsigned int gpuaddr, size_t size)
+{
+ struct rb_node *node = private->mem_rb.rb_node;
+
+ if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
+ return NULL;
+
+ while (node != NULL) {
+ struct kgsl_mem_entry *entry;
+
+ entry = rb_entry(node, struct kgsl_mem_entry, node);
+
+
+ if (kgsl_gpuaddr_in_memdesc(&entry->memdesc, gpuaddr, size))
+ return entry;
+
+ if (gpuaddr < entry->memdesc.gpuaddr)
+ node = node->rb_left;
+ else if (gpuaddr >=
+ (entry->memdesc.gpuaddr + entry->memdesc.size))
+ node = node->rb_right;
+ else {
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_find_region);
+
+static inline struct kgsl_mem_entry *
+kgsl_sharedmem_find(struct kgsl_process_private *private, unsigned int gpuaddr)
+{
+ return kgsl_sharedmem_find_region(private, gpuaddr, 1);
+}
+
+static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_device_getproperty *param = data;
+
+ switch (param->type) {
+ case KGSL_PROP_VERSION:
+ {
+ struct kgsl_version version;
+ if (param->sizebytes != sizeof(version)) {
+ result = -EINVAL;
+ break;
+ }
+
+ version.drv_major = KGSL_VERSION_MAJOR;
+ version.drv_minor = KGSL_VERSION_MINOR;
+ version.dev_major = dev_priv->device->ver_major;
+ version.dev_minor = dev_priv->device->ver_minor;
+
+ if (copy_to_user(param->value, &version, sizeof(version)))
+ result = -EFAULT;
+
+ break;
+ }
+ case KGSL_PROP_GPU_RESET_STAT:
+ {
+
+ uint32_t id;
+ struct kgsl_context *context;
+
+ if (param->sizebytes != sizeof(unsigned int)) {
+ result = -EINVAL;
+ break;
+ }
+
+ if (copy_from_user(&id, param->value,
+ sizeof(unsigned int))) {
+ result = -EFAULT;
+ break;
+ }
+ context = kgsl_find_context(dev_priv, id);
+ if (!context) {
+ result = -EINVAL;
+ break;
+ }
+ if (copy_to_user(param->value, &(context->reset_status),
+ sizeof(unsigned int))) {
+ result = -EFAULT;
+ break;
+ }
+
+ context->reset_status = KGSL_CTX_STAT_NO_ERROR;
+ break;
+ }
+ default:
+ result = dev_priv->device->ftbl->getproperty(
+ dev_priv->device, param->type,
+ param->value, param->sizebytes);
+ }
+
+
+ return result;
+}
+
+static long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+
+ struct kgsl_device_getproperty *param = data;
+
+ if (dev_priv->device->ftbl->setproperty)
+ result = dev_priv->device->ftbl->setproperty(
+ dev_priv->device, param->type,
+ param->value, param->sizebytes);
+
+ return result;
+}
+
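+/*
+ * Wait for a timestamp to be retired. active_cnt is raised across the wait
+ * so that the suspend path knows a waiter is still holding the device.
+ */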
+static long _device_waittimestamp(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int timeout)
+{
+ int result = 0;
+ struct kgsl_device *device = dev_priv->device;
+ unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
+
+
+
+ device->active_cnt++;
+
+ trace_kgsl_waittimestamp_entry(device, context_id,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ timestamp, timeout);
+
+ result = device->ftbl->waittimestamp(dev_priv->device,
+ context, timestamp, timeout);
+
+ trace_kgsl_waittimestamp_exit(device,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ result);
+
+
+
+ INIT_COMPLETION(dev_priv->device->suspend_gate);
+ dev_priv->device->active_cnt--;
+ complete(&dev_priv->device->suspend_gate);
+
+ return result;
+}
+
+static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_device_waittimestamp *param = data;
+
+ return _device_waittimestamp(dev_priv, NULL,
+ param->timestamp, param->timeout);
+}
+
+static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_device_waittimestamp_ctxtid *param = data;
+ struct kgsl_context *context;
+ int result;
+
+ context = kgsl_find_context(dev_priv, param->context_id);
+ if (context == NULL) {
+ KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
+ param->context_id);
+ return -EINVAL;
+ }
+ kgsl_context_get(context);
+ result = _device_waittimestamp(dev_priv, context,
+ param->timestamp, param->timeout);
+ kgsl_context_put(context);
+ return result;
+}
+
+static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_ringbuffer_issueibcmds *param = data;
+ struct kgsl_ibdesc *ibdesc;
+ struct kgsl_context *context;
+
+ context = kgsl_find_context(dev_priv, param->drawctxt_id);
+ if (context == NULL) {
+ result = -EINVAL;
+ KGSL_DRV_ERR(dev_priv->device,
+ "invalid context_id %d\n",
+ param->drawctxt_id);
+ goto done;
+ }
+
+ if (param->flags & KGSL_CONTEXT_SUBMIT_IB_LIST) {
+ KGSL_DRV_INFO(dev_priv->device,
+ "Using IB list mode for ib submission, numibs: %d\n",
+ param->numibs);
+ if (!param->numibs) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Invalid numibs as parameter: %d\n",
+ param->numibs);
+ result = -EINVAL;
+ goto done;
+ }
+
+ ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
+ GFP_KERNEL);
+ if (!ibdesc) {
+ KGSL_MEM_ERR(dev_priv->device,
+ "kzalloc(%d) failed\n",
+ sizeof(struct kgsl_ibdesc) * param->numibs);
+ result = -ENOMEM;
+ goto done;
+ }
+
+ if (copy_from_user(ibdesc, (void *)param->ibdesc_addr,
+ sizeof(struct kgsl_ibdesc) * param->numibs)) {
+ result = -EFAULT;
+ KGSL_DRV_ERR(dev_priv->device,
+ "copy_from_user failed\n");
+ goto free_ibdesc;
+ }
+ } else {
+ KGSL_DRV_INFO(dev_priv->device,
+ "Using single IB submission mode for ib submission\n");
+ ibdesc = kzalloc(sizeof(struct kgsl_ibdesc), GFP_KERNEL);
+ if (!ibdesc) {
+ KGSL_MEM_ERR(dev_priv->device,
+ "kzalloc(%d) failed\n",
+ sizeof(struct kgsl_ibdesc));
+ result = -ENOMEM;
+ goto done;
+ }
+ ibdesc[0].gpuaddr = param->ibdesc_addr;
+ ibdesc[0].sizedwords = param->numibs;
+ param->numibs = 1;
+ }
+
+ result = dev_priv->device->ftbl->issueibcmds(dev_priv,
+ context,
+ ibdesc,
+ param->numibs,
+ &param->timestamp,
+ param->flags);
+
+ trace_kgsl_issueibcmds(dev_priv->device, param, ibdesc, result);
+
+free_ibdesc:
+ kfree(ibdesc);
+done:
+
+ return result;
+}
+
+static long _cmdstream_readtimestamp(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, unsigned int type,
+ unsigned int *timestamp)
+{
+ *timestamp = kgsl_readtimestamp(dev_priv->device, context, type);
+
+ trace_kgsl_readtimestamp(dev_priv->device,
+ context ? context->id : KGSL_MEMSTORE_GLOBAL,
+ type, *timestamp);
+
+ return 0;
+}
+
+static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_cmdstream_readtimestamp *param = data;
+
+ return _cmdstream_readtimestamp(dev_priv, NULL,
+ param->type, &param->timestamp);
+}
+
+static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
+ struct kgsl_context *context;
+
+ context = kgsl_find_context(dev_priv, param->context_id);
+ if (context == NULL) {
+ KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
+ param->context_id);
+ return -EINVAL;
+ }
+
+ return _cmdstream_readtimestamp(dev_priv, context,
+ param->type, &param->timestamp);
+}
+
+static void kgsl_freemem_event_cb(struct kgsl_device *device,
+ void *priv, u32 id, u32 timestamp)
+{
+ struct kgsl_mem_entry *entry = priv;
+ spin_lock(&entry->priv->mem_lock);
+ rb_erase(&entry->node, &entry->priv->mem_rb);
+ spin_unlock(&entry->priv->mem_lock);
+ trace_kgsl_mem_timestamp_free(device, entry, id, timestamp, 0);
+ kgsl_mem_entry_detach_process(entry);
+}
+
+static long _cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv,
+ unsigned int gpuaddr, struct kgsl_context *context,
+ unsigned int timestamp, unsigned int type)
+{
+ int result = 0;
+ struct kgsl_mem_entry *entry = NULL;
+ struct kgsl_device *device = dev_priv->device;
+ unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
+
+ spin_lock(&dev_priv->process_priv->mem_lock);
+ entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr);
+ spin_unlock(&dev_priv->process_priv->mem_lock);
+
+ if (!entry) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "invalid gpuaddr %08x\n", gpuaddr);
+ result = -EINVAL;
+ goto done;
+ }
+ trace_kgsl_mem_timestamp_queue(device, entry, context_id,
+ kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED),
+ timestamp);
+ result = kgsl_add_event(dev_priv->device, context_id, timestamp,
+ kgsl_freemem_event_cb, entry, dev_priv);
+done:
+ return result;
+}
+
+static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_cmdstream_freememontimestamp *param = data;
+
+ return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
+ NULL, param->timestamp, param->type);
+}
+
+static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
+ struct kgsl_device_private
+ *dev_priv, unsigned int cmd,
+ void *data)
+{
+ struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
+ struct kgsl_context *context;
+
+ context = kgsl_find_context(dev_priv, param->context_id);
+ if (context == NULL) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "invalid drawctxt context_id %d\n", param->context_id);
+ return -EINVAL;
+ }
+
+ return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
+ context, param->timestamp, param->type);
+}
+
+static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_drawctxt_create *param = data;
+ struct kgsl_context *context = NULL;
+
+ context = kgsl_create_context(dev_priv);
+
+ if (context == NULL) {
+ result = -ENOMEM;
+ goto done;
+ }
+
+ if (dev_priv->device->ftbl->drawctxt_create) {
+ result = dev_priv->device->ftbl->drawctxt_create(
+ dev_priv->device, dev_priv->process_priv->pagetable,
+ context, param->flags);
+ if (result)
+ goto done;
+ }
+ trace_kgsl_context_create(dev_priv->device, context, param->flags);
+ param->drawctxt_id = context->id;
+done:
+ if (result && context)
+ kgsl_context_detach(context);
+
+ return result;
+}
+
+static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_drawctxt_destroy *param = data;
+ struct kgsl_context *context;
+
+ context = kgsl_find_context(dev_priv, param->drawctxt_id);
+
+ if (context == NULL) {
+ result = -EINVAL;
+ goto done;
+ }
+
+ kgsl_context_detach(context);
+done:
+ return result;
+}
+
+static long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_sharedmem_free *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+
+ spin_lock(&private->mem_lock);
+ entry = kgsl_sharedmem_find(private, param->gpuaddr);
+ if (entry)
+ rb_erase(&entry->node, &private->mem_rb);
+
+ spin_unlock(&private->mem_lock);
+
+ if (entry) {
+ trace_kgsl_mem_free(entry);
+ kgsl_mem_entry_detach_process(entry);
+ } else {
+ KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
+ result = -EINVAL;
+ }
+
+ return result;
+}
+
+static struct vm_area_struct *kgsl_get_vma_from_start_addr(unsigned int addr)
+{
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, addr);
+ up_read(&current->mm->mmap_sem);
+ if (!vma)
+ KGSL_CORE_ERR("find_vma(%x) failed\n", addr);
+
+ return vma;
+}
+
+static long
+kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0, len = 0;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_sharedmem_from_vmalloc *param = data;
+ struct kgsl_mem_entry *entry = NULL;
+ struct vm_area_struct *vma;
+
+ KGSL_DEV_ERR_ONCE(dev_priv->device, "IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC"
+ " is deprecated\n");
+ if (!kgsl_mmu_enabled())
+ return -ENODEV;
+
+ if (!param->hostptr) {
+ KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr);
+ result = -EINVAL;
+ goto error;
+ }
+
+ vma = kgsl_get_vma_from_start_addr(param->hostptr);
+ if (!vma) {
+ result = -EINVAL;
+ goto error;
+ }
+
+ if (param->gpuaddr != 0) {
+ len = param->gpuaddr;
+ } else {
+ if (vma->vm_pgoff || (param->hostptr != vma->vm_start)) {
+ KGSL_CORE_ERR("VMA region does not match hostaddr\n");
+ result = -EINVAL;
+ goto error;
+ }
+
+ len = vma->vm_end - vma->vm_start;
+ }
+
+
+ if (len == 0 || param->hostptr + len > vma->vm_end) {
+ KGSL_CORE_ERR("Invalid memory allocation length %d\n", len);
+ result = -EINVAL;
+ goto error;
+ }
+
+ entry = kgsl_mem_entry_create();
+ if (entry == NULL) {
+ result = -ENOMEM;
+ goto error;
+ }
+
+ result = kgsl_sharedmem_page_alloc_user(&entry->memdesc,
+ private,
+ private->pagetable, len,
+ param->flags);
+ if (result != 0)
+ goto error_free_entry;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ result = kgsl_sharedmem_map_vma(vma, &entry->memdesc);
+ if (result) {
+ KGSL_CORE_ERR("kgsl_sharedmem_map_vma failed: %d\n", result);
+ goto error_free_alloc;
+ }
+
+ param->gpuaddr = entry->memdesc.gpuaddr;
+
+ entry->memtype = KGSL_MEM_ENTRY_KERNEL;
+
+ kgsl_mem_entry_attach_process(entry, private);
+
+ trace_kgsl_mem_alloc(entry);
+
+ kgsl_process_add_stats(private, entry->memtype, len);
+
+ kgsl_check_idle(dev_priv->device);
+ return 0;
+
+error_free_alloc:
+ kgsl_sharedmem_free(&entry->memdesc);
+
+error_free_entry:
+ kfree(entry);
+
+error:
+ kgsl_check_idle(dev_priv->device);
+ return result;
+}
+
+static inline int _check_region(unsigned long start, unsigned long size,
+ uint64_t len)
+{
+ uint64_t end = ((uint64_t) start) + size;
+ return (end > len);
+}
+
+static int kgsl_get_phys_file(int fd, unsigned long *start, unsigned long *len,
+ unsigned long *vstart, struct file **filep)
+{
+ struct file *fbfile;
+ int ret = 0;
+ dev_t rdev;
+ struct fb_info *info;
+
+ *filep = NULL;
+#ifdef CONFIG_ANDROID_PMEM
+ if (!get_pmem_file(fd, start, vstart, len, filep))
+ return 0;
+#endif
+
+ fbfile = fget(fd);
+ if (fbfile == NULL) {
+ KGSL_CORE_ERR("fget_light failed\n");
+ return -1;
+ }
+
+ rdev = fbfile->f_dentry->d_inode->i_rdev;
+ info = MAJOR(rdev) == FB_MAJOR ? registered_fb[MINOR(rdev)] : NULL;
+ if (info) {
+ *start = info->fix.smem_start;
+ *len = info->fix.smem_len;
+ *vstart = (unsigned long)__va(info->fix.smem_start);
+ ret = 0;
+ } else {
+ KGSL_CORE_ERR("framebuffer minor %d not found\n",
+ MINOR(rdev));
+ ret = -1;
+ }
+
+ fput(fbfile);
+
+ return ret;
+}
+
+static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable,
+ unsigned int fd, unsigned int offset,
+ size_t size)
+{
+ int ret;
+ unsigned long phys, virt, len;
+ struct file *filep;
+
+ ret = kgsl_get_phys_file(fd, &phys, &len, &virt, &filep);
+ if (ret)
+ return ret;
+
+ ret = -ERANGE;
+
+ if (phys == 0)
+ goto err;
+
+ if ((len & ~PAGE_MASK) ||
+ (offset & ~PAGE_MASK) ||
+ (size & ~PAGE_MASK)) {
+ KGSL_CORE_ERR("length offset or size is not page aligned\n");
+ goto err;
+ }
+
+
+ if (offset >= len || size > len)
+ goto err;
+
+ if (size == 0)
+ size = len - offset;
+
+ else if (_check_region(offset, size, len))
+ goto err;
+
+ entry->priv_data = filep;
+
+ entry->memdesc.pagetable = pagetable;
+ entry->memdesc.size = size;
+ entry->memdesc.physaddr = phys + offset;
+ entry->memdesc.hostptr = (void *) (virt + offset);
+
+ ret = memdesc_sg_phys(&entry->memdesc, phys + offset, size);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+#ifdef CONFIG_ANDROID_PMEM
+ put_pmem_file(filep);
+#endif
+ return ret;
+}
+
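+/*
+ * Build a scatterlist for a user virtual address range by walking the
+ * process page tables one page at a time under page_table_lock
+ */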
+static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
+ void *addr, int size)
+{
+ int i;
+ int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
+ unsigned long paddr = (unsigned long) addr;
+
+ memdesc->sg = kgsl_sg_alloc(sglen);
+
+ if (memdesc->sg == NULL)
+ return -ENOMEM;
+
+ memdesc->sglen = sglen;
+ sg_init_table(memdesc->sg, sglen);
+
+ spin_lock(&current->mm->page_table_lock);
+
+ for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
+ struct page *page;
+ pmd_t *ppmd;
+ pte_t *ppte;
+ pgd_t *ppgd = pgd_offset(current->mm, paddr);
+
+ if (pgd_none(*ppgd) || pgd_bad(*ppgd))
+ goto err;
+
+ ppmd = pmd_offset(pud_offset(ppgd, paddr), paddr);
+ if (pmd_none(*ppmd) || pmd_bad(*ppmd))
+ goto err;
+
+ ppte = pte_offset_map(ppmd, paddr);
+ if (ppte == NULL)
+ goto err;
+
+ page = pfn_to_page(pte_pfn(*ppte));
+ if (!page)
+ goto err;
+
+ sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
+ pte_unmap(ppte);
+ }
+
+ spin_unlock(&current->mm->page_table_lock);
+
+ return 0;
+
+err:
+ spin_unlock(&current->mm->page_table_lock);
+ kgsl_sg_free(memdesc->sg, sglen);
+ memdesc->sg = NULL;
+
+ return -EINVAL;
+}
+
+static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable,
+ void *hostptr, unsigned int offset,
+ size_t size)
+{
+ struct vm_area_struct *vma;
+ unsigned int len;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, (unsigned int) hostptr);
+ up_read(&current->mm->mmap_sem);
+
+ if (!vma) {
+ KGSL_CORE_ERR("find_vma(%p) failed\n", hostptr);
+ return -EINVAL;
+ }
+
+
+ len = vma->vm_end - (unsigned long) hostptr;
+
+ if (offset >= len)
+ return -EINVAL;
+
+ if (!KGSL_IS_PAGE_ALIGNED((unsigned long) hostptr) ||
+ !KGSL_IS_PAGE_ALIGNED(len)) {
+ KGSL_CORE_ERR("user address len(%u)"
+ "and start(%p) must be page"
+ "aligned\n", len, hostptr);
+ return -EINVAL;
+ }
+
+ if (size == 0)
+ size = len;
+
+
+ size += offset & ~PAGE_MASK;
+
+ size = ALIGN(size, PAGE_SIZE);
+
+ if (_check_region(offset & PAGE_MASK, size, len)) {
+ KGSL_CORE_ERR("Offset (%ld) + size (%d) is larger"
+ "than region length %d\n",
+ offset & PAGE_MASK, size, len);
+ return -EINVAL;
+ }
+
+ entry->memdesc.pagetable = pagetable;
+ entry->memdesc.size = size;
+ entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
+
+ return memdesc_sg_virt(&entry->memdesc,
+ hostptr + (offset & PAGE_MASK), size);
+}
+
+#ifdef CONFIG_ASHMEM
+static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable,
+ int fd, void *hostptr, size_t size)
+{
+ int ret;
+ struct vm_area_struct *vma;
+ struct file *filep, *vmfile;
+ unsigned long len;
+ unsigned int hostaddr = (unsigned int) hostptr;
+
+ vma = kgsl_get_vma_from_start_addr(hostaddr);
+ if (vma == NULL)
+ return -EINVAL;
+
+ if (vma->vm_pgoff || vma->vm_start != hostaddr) {
+ KGSL_CORE_ERR("Invalid vma region\n");
+ return -EINVAL;
+ }
+
+ len = vma->vm_end - vma->vm_start;
+
+ if (size == 0)
+ size = len;
+
+ if (size != len) {
+ KGSL_CORE_ERR("Invalid size %d for vma region %p\n",
+ size, hostptr);
+ return -EINVAL;
+ }
+
+ ret = get_ashmem_file(fd, &filep, &vmfile, &len);
+
+ if (ret) {
+ KGSL_CORE_ERR("get_ashmem_file failed\n");
+ return ret;
+ }
+
+ if (vmfile != vma->vm_file) {
+ KGSL_CORE_ERR("ashmem shmem file does not match vma\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ entry->priv_data = filep;
+ entry->memdesc.pagetable = pagetable;
+ entry->memdesc.size = ALIGN(size, PAGE_SIZE);
+ entry->memdesc.hostptr = hostptr;
+
+ ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ put_ashmem_file(filep);
+ return ret;
+}
+#else
+static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable,
+ int fd, void *hostptr, size_t size)
+{
+ return -EINVAL;
+}
+#endif
+
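+/*
+ * Import an ion buffer from a dma-buf fd and adopt the sg table provided
+ * by ion, totalling the entry size from the individual sg lengths
+ */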
+static int kgsl_setup_ion(struct kgsl_mem_entry *entry,
+ struct kgsl_pagetable *pagetable, int fd)
+{
+ struct ion_handle *handle;
+ struct scatterlist *s;
+ struct sg_table *sg_table;
+
+ if (IS_ERR_OR_NULL(kgsl_ion_client))
+ return -ENODEV;
+
+ handle = ion_import_dma_buf(kgsl_ion_client, fd);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ else if (!handle)
+ return -EINVAL;
+
+ entry->memtype = KGSL_MEM_ENTRY_ION;
+ entry->priv_data = handle;
+ entry->memdesc.pagetable = pagetable;
+ entry->memdesc.size = 0;
+
+ sg_table = ion_sg_table(kgsl_ion_client, handle);
+
+ if (IS_ERR_OR_NULL(sg_table))
+ goto err;
+
+ entry->memdesc.sg = sg_table->sgl;
+
+
+
+ entry->memdesc.sglen = 0;
+
+ for (s = entry->memdesc.sg; s != NULL; s = sg_next(s)) {
+ entry->memdesc.size += s->length;
+ entry->memdesc.sglen++;
+ }
+
+ return 0;
+err:
+ ion_free(kgsl_ion_client, handle);
+ return -ENOMEM;
+}
+
+static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = -EINVAL;
+ struct kgsl_map_user_mem *param = data;
+ struct kgsl_mem_entry *entry = NULL;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ enum kgsl_user_mem_type memtype;
+
+ entry = kgsl_mem_entry_create();
+
+ if (entry == NULL)
+ return -ENOMEM;
+
+ if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
+ memtype = KGSL_USER_MEM_TYPE_PMEM;
+ else
+ memtype = param->memtype;
+
+ switch (memtype) {
+ case KGSL_USER_MEM_TYPE_PMEM:
+ if (param->fd == 0 || param->len == 0)
+ break;
+
+ result = kgsl_setup_phys_file(entry, private->pagetable,
+ param->fd, param->offset,
+ param->len);
+ entry->memtype = KGSL_MEM_ENTRY_PMEM;
+ break;
+
+ case KGSL_USER_MEM_TYPE_ADDR:
+ KGSL_DEV_ERR_ONCE(dev_priv->device, "User mem type "
+ "KGSL_USER_MEM_TYPE_ADDR is deprecated\n");
+ if (!kgsl_mmu_enabled()) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Cannot map paged memory with the "
+ "MMU disabled\n");
+ break;
+ }
+
+ if (param->hostptr == 0)
+ break;
+
+ result = kgsl_setup_hostptr(entry, private->pagetable,
+ (void *) param->hostptr,
+ param->offset, param->len);
+ entry->memtype = KGSL_MEM_ENTRY_USER;
+ break;
+
+ case KGSL_USER_MEM_TYPE_ASHMEM:
+ if (!kgsl_mmu_enabled()) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Cannot map paged memory with the "
+ "MMU disabled\n");
+ break;
+ }
+
+ if (param->hostptr == 0)
+ break;
+
+ result = kgsl_setup_ashmem(entry, private->pagetable,
+ param->fd, (void *) param->hostptr,
+ param->len);
+
+ entry->memtype = KGSL_MEM_ENTRY_ASHMEM;
+ break;
+ case KGSL_USER_MEM_TYPE_ION:
+ result = kgsl_setup_ion(entry, private->pagetable,
+ param->fd);
+ break;
+ default:
+ KGSL_CORE_ERR("Invalid memory type: %x\n", memtype);
+ break;
+ }
+
+ if (result)
+ goto error;
+
+ result = kgsl_mmu_map(private->pagetable,
+ &entry->memdesc,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+ if (result)
+ goto error_put_file_ptr;
+
+
+ param->gpuaddr = entry->memdesc.gpuaddr + (param->offset & ~PAGE_MASK);
+
+ KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
+ kgsl_driver.stats.mapped_max);
+
+ kgsl_process_add_stats(private, entry->memtype, param->len);
+
+ kgsl_mem_entry_attach_process(entry, private);
+ trace_kgsl_mem_map(entry, param->fd);
+
+ kgsl_check_idle(dev_priv->device);
+ return result;
+
+error_put_file_ptr:
+ switch (entry->memtype) {
+ case KGSL_MEM_ENTRY_PMEM:
+ case KGSL_MEM_ENTRY_ASHMEM:
+ if (entry->priv_data)
+ fput(entry->priv_data);
+ break;
+ case KGSL_MEM_ENTRY_ION:
+ ion_free(kgsl_ion_client, entry->priv_data);
+ break;
+ default:
+ break;
+ }
+error:
+ kfree(entry);
+ kgsl_check_idle(dev_priv->device);
+ return result;
+}
+
+static long
+kgsl_ioctl_sharedmem_flush_cache(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_mem_entry *entry;
+ struct kgsl_sharedmem_free *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+
+ spin_lock(&private->mem_lock);
+ entry = kgsl_sharedmem_find(private, param->gpuaddr);
+ if (!entry) {
+ KGSL_CORE_ERR("invalid gpuaddr %08x\n", param->gpuaddr);
+ result = -EINVAL;
+ goto done;
+ }
+	if (!entry->memdesc.hostptr) {
+		KGSL_CORE_ERR("invalid hostptr with gpuaddr %08x\n",
+			param->gpuaddr);
+		result = -EINVAL;
+		goto done;
+	}
+
+ kgsl_cache_range_op(&entry->memdesc, KGSL_CACHE_OP_CLEAN);
+done:
+ spin_unlock(&private->mem_lock);
+ return result;
+}
+
+static long
+kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_gpumem_alloc *param = data;
+ struct kgsl_mem_entry *entry;
+ int result;
+
+ entry = kgsl_mem_entry_create();
+ if (entry == NULL)
+ return -ENOMEM;
+
+ result = kgsl_allocate_user(&entry->memdesc, private, private->pagetable,
+ param->size, param->flags);
+
+ if (result == 0) {
+ entry->memtype = KGSL_MEM_ENTRY_KERNEL;
+ kgsl_mem_entry_attach_process(entry, private);
+ param->gpuaddr = entry->memdesc.gpuaddr;
+
+ kgsl_process_add_stats(private, entry->memtype, param->size);
+ trace_kgsl_mem_alloc(entry);
+ } else
+ kfree(entry);
+
+ kgsl_check_idle(dev_priv->device);
+ return result;
+}
+static long kgsl_ioctl_cff_syncmem(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_cff_syncmem *param = data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+
+ spin_lock(&private->mem_lock);
+ entry = kgsl_sharedmem_find_region(private, param->gpuaddr, param->len);
+ if (entry)
+ kgsl_cffdump_syncmem(dev_priv, &entry->memdesc, param->gpuaddr,
+ param->len, true);
+ else
+ result = -EINVAL;
+ spin_unlock(&private->mem_lock);
+ return result;
+}
+
+static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ int result = 0;
+ struct kgsl_cff_user_event *param = data;
+
+ kgsl_cffdump_user_event(param->cff_opcode, param->op1, param->op2,
+ param->op3, param->op4, param->op5);
+
+ return result;
+}
+
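+/*
+ * Genlock timestamp events: the callback fires once the GPU retires
+ * the requested timestamp, releases the genlock so a waiting consumer
+ * can proceed, then drops the handle and frees the event.
+ */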
+#ifdef CONFIG_GENLOCK
+struct kgsl_genlock_event_priv {
+ struct genlock_handle *handle;
+ struct genlock *lock;
+};
+
+
+static void kgsl_genlock_event_cb(struct kgsl_device *device,
+ void *priv, u32 context_id, u32 timestamp)
+{
+ struct kgsl_genlock_event_priv *ev = priv;
+ int ret;
+
+ ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
+ if (ret)
+ KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);
+
+ genlock_put_handle(ev->handle);
+
+ kfree(ev);
+}
+
+
+static int kgsl_add_genlock_event(struct kgsl_device *device,
+ u32 context_id, u32 timestamp, void __user *data, int len,
+ struct kgsl_device_private *owner)
+{
+ struct kgsl_genlock_event_priv *event;
+ struct kgsl_timestamp_event_genlock priv;
+ int ret;
+
+ if (len != sizeof(priv))
+ return -EINVAL;
+
+ if (copy_from_user(&priv, data, sizeof(priv)))
+ return -EFAULT;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+
+ if (event == NULL)
+ return -ENOMEM;
+
+ event->handle = genlock_get_handle_fd(priv.handle);
+
+ if (IS_ERR(event->handle)) {
+ int ret = PTR_ERR(event->handle);
+ kfree(event);
+ return ret;
+ }
+
+ ret = kgsl_add_event(device, context_id, timestamp,
+ kgsl_genlock_event_cb, event, owner);
+ if (ret)
+ kfree(event);
+
+ return ret;
+}
+#else
+static int kgsl_add_genlock_event(struct kgsl_device *device,
+ u32 context_id, u32 timestamp, void __user *data, int len,
+ struct kgsl_device_private *owner)
+{
+ return -EINVAL;
+}
+#endif
+
+
+static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data)
+{
+ struct kgsl_timestamp_event *param = data;
+ int ret;
+
+ switch (param->type) {
+ case KGSL_TIMESTAMP_EVENT_GENLOCK:
+ ret = kgsl_add_genlock_event(dev_priv->device,
+ param->context_id, param->timestamp, param->priv,
+ param->len, dev_priv);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
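+/*
+ * Table-driven ioctl dispatch: handlers are indexed by _IOC_NR() and
+ * each entry records whether it must run under the device mutex
+ * (lock = 1), e.g.:
+ *
+ *	KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
+ *			kgsl_ioctl_gpumem_alloc, 0)
+ *
+ * Unlocked handlers serialize on finer-grained locks such as
+ * private->mem_lock instead.
+ */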
+typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
+ unsigned int, void *);
+
+#define KGSL_IOCTL_FUNC(_cmd, _func, _lock) \
+ [_IOC_NR(_cmd)] = { .cmd = _cmd, .func = _func, .lock = _lock }
+
+static const struct {
+ unsigned int cmd;
+ kgsl_ioctl_func_t func;
+ int lock;
+} kgsl_ioctl_funcs[] = {
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_GETPROPERTY,
+ kgsl_ioctl_device_getproperty, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
+ kgsl_ioctl_device_waittimestamp, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
+ kgsl_ioctl_device_waittimestamp_ctxtid, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
+ kgsl_ioctl_rb_issueibcmds, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
+ kgsl_ioctl_cmdstream_readtimestamp, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
+ kgsl_ioctl_cmdstream_readtimestamp_ctxtid, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
+ kgsl_ioctl_cmdstream_freememontimestamp, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
+ kgsl_ioctl_cmdstream_freememontimestamp_ctxtid, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
+ kgsl_ioctl_drawctxt_create, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
+ kgsl_ioctl_drawctxt_destroy, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_MAP_USER_MEM,
+ kgsl_ioctl_map_user_mem, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_PMEM,
+ kgsl_ioctl_map_user_mem, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FREE,
+ kgsl_ioctl_sharedmem_free, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC,
+ kgsl_ioctl_sharedmem_from_vmalloc, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE,
+ kgsl_ioctl_sharedmem_flush_cache, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_GPUMEM_ALLOC,
+ kgsl_ioctl_gpumem_alloc, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_SYNCMEM,
+ kgsl_ioctl_cff_syncmem, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
+ kgsl_ioctl_cff_user_event, 0),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
+ kgsl_ioctl_timestamp_event, 1),
+ KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
+ kgsl_ioctl_device_setproperty, 1),
+};
+
+static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+ struct kgsl_device_private *dev_priv = filep->private_data;
+ unsigned int nr;
+ kgsl_ioctl_func_t func;
+ int lock, ret;
+ char ustack[64];
+ void *uptr = NULL;
+
+ BUG_ON(dev_priv == NULL);
+
+
+ if (cmd == IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD)
+ cmd = IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP;
+ else if (cmd == IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD)
+ cmd = IOCTL_KGSL_CMDSTREAM_READTIMESTAMP;
+
+ nr = _IOC_NR(cmd);
+
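+	/*
+	 * Marshal the user argument into a kernel buffer: payloads that
+	 * fit reuse the 64-byte on-stack ustack, larger ones fall back
+	 * to kzalloc. The same size test decides whether to kfree at
+	 * "done".
+	 */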
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (_IOC_SIZE(cmd) < sizeof(ustack))
+ uptr = ustack;
+ else {
+ uptr = kzalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+ if (uptr == NULL) {
+ KGSL_MEM_ERR(dev_priv->device,
+ "kzalloc(%d) failed\n", _IOC_SIZE(cmd));
+ ret = -ENOMEM;
+ goto done;
+ }
+ }
+
+ if (cmd & IOC_IN) {
+ if (copy_from_user(uptr, (void __user *) arg,
+ _IOC_SIZE(cmd))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ } else
+ memset(uptr, 0, _IOC_SIZE(cmd));
+ }
+
+ if (nr < ARRAY_SIZE(kgsl_ioctl_funcs) &&
+ kgsl_ioctl_funcs[nr].func != NULL) {
+
+
+ if (kgsl_ioctl_funcs[nr].cmd != cmd) {
+ KGSL_DRV_ERR(dev_priv->device,
+ "Malformed ioctl code %08x\n", cmd);
+ ret = -ENOIOCTLCMD;
+ goto done;
+ }
+
+ func = kgsl_ioctl_funcs[nr].func;
+ lock = kgsl_ioctl_funcs[nr].lock;
+ } else {
+ func = dev_priv->device->ftbl->ioctl;
+ if (!func) {
+ KGSL_DRV_INFO(dev_priv->device,
+ "invalid ioctl code %08x\n", cmd);
+ ret = -ENOIOCTLCMD;
+ goto done;
+ }
+ lock = 1;
+ }
+
+ if (lock) {
+ mutex_lock(&dev_priv->device->mutex);
+ kgsl_check_suspended(dev_priv->device);
+ }
+
+ ret = func(dev_priv, cmd, uptr);
+
+ if (lock) {
+ kgsl_check_idle_locked(dev_priv->device);
+ mutex_unlock(&dev_priv->device->mutex);
+ }
+
+ if (ret == 0 && (cmd & IOC_OUT)) {
+ if (copy_to_user((void __user *) arg, uptr, _IOC_SIZE(cmd)))
+ ret = -EFAULT;
+ }
+
+done:
+ if (_IOC_SIZE(cmd) >= sizeof(ustack))
+ kfree(uptr);
+
+ return ret;
+}
+
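+/*
+ * The memstore page is shared read-only with userspace (e.g. so
+ * clients can poll retired timestamps without an ioctl), hence the
+ * VM_WRITE check and the uncached, size-exact mapping below.
+ */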
+static int
+kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
+{
+ struct kgsl_memdesc *memdesc = &device->memstore;
+ int result;
+ unsigned int vma_size = vma->vm_end - vma->vm_start;
+
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ if (memdesc->size != vma_size) {
+ KGSL_MEM_ERR(device, "memstore bad size: %d should be %d\n",
+ vma_size, memdesc->size);
+ return -EINVAL;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ result = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma_size, vma->vm_page_prot);
+ if (result != 0)
+ KGSL_MEM_ERR(device, "remap_pfn_range failed: %d\n",
+ result);
+
+ return result;
+}
+
+
+static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
+{
+ struct kgsl_mem_entry *entry = vma->vm_private_data;
+ kgsl_mem_entry_get(entry);
+}
+
+static int
+kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct kgsl_mem_entry *entry = vma->vm_private_data;
+
+ if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
+ return VM_FAULT_SIGBUS;
+
+ return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
+}
+
+static void
+kgsl_gpumem_vm_close(struct vm_area_struct *vma)
+{
+ struct kgsl_mem_entry *entry = vma->vm_private_data;
+ kgsl_mem_entry_put(entry);
+}
+
+static struct vm_operations_struct kgsl_gpumem_vm_ops = {
+ .open = kgsl_gpumem_vm_open,
+ .fault = kgsl_gpumem_vm_fault,
+ .close = kgsl_gpumem_vm_close,
+};
+
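+/*
+ * mmap() on the kgsl node: the page offset encodes either the memstore
+ * physical address or the gpuaddr of an existing mem entry, which is
+ * looked up under mem_lock and pinned with a reference for the
+ * lifetime of the vma.
+ */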
+static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
+ struct kgsl_device_private *dev_priv = file->private_data;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+ struct kgsl_mem_entry *entry = NULL;
+ struct kgsl_device *device = dev_priv->device;
+ int i;
+
+ if (vma_offset == device->memstore.physaddr)
+ return kgsl_mmap_memstore(device, vma);
+
+ spin_lock(&private->mem_lock);
+ entry = kgsl_sharedmem_find(private, vma_offset);
+
+ if (entry)
+ kgsl_mem_entry_get(entry);
+
+ spin_unlock(&private->mem_lock);
+
+ if (entry == NULL)
+ return -EINVAL;
+
+ if (!entry->memdesc.ops ||
+ !entry->memdesc.ops->vmflags ||
+ !entry->memdesc.ops->vmfault)
+ return -EINVAL;
+
+ vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
+
+ vma->vm_private_data = entry;
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_ops = &kgsl_gpumem_vm_ops;
+ vma->vm_file = file;
+
+
+	if (entry->memdesc.handle == NULL) {
+		for (i = 0; (i * PAGE_SIZE) < (vma->vm_end - vma->vm_start); i++)
+			vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+				       sg_page(&entry->memdesc.sg[i]));
+	} else {
+		remap_pfn_range(vma, vma->vm_start,
+				__phys_to_pfn(entry->memdesc.sg[0].dma_address),
+				entry->memdesc.size, vma->vm_page_prot);
+	}
+
+
+ return 0;
+}
+
+static irqreturn_t kgsl_irq_handler(int irq, void *data)
+{
+ struct kgsl_device *device = data;
+
+ return device->ftbl->irq_handler(device);
+
+}
+
+static const struct file_operations kgsl_fops = {
+ .owner = THIS_MODULE,
+ .release = kgsl_release,
+ .open = kgsl_open,
+ .mmap = kgsl_mmap,
+ .unlocked_ioctl = kgsl_ioctl,
+};
+
+struct kgsl_driver kgsl_driver = {
+ .process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
+ .ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
+ .devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
+};
+EXPORT_SYMBOL(kgsl_driver);
+
+static void _unregister_device(struct kgsl_device *device)
+{
+ int minor;
+
+ mutex_lock(&kgsl_driver.devlock);
+ for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
+ if (device == kgsl_driver.devp[minor])
+ break;
+ }
+ if (minor != KGSL_DEVICE_MAX) {
+ device_destroy(kgsl_driver.class,
+ MKDEV(MAJOR(kgsl_driver.major), minor));
+ kgsl_driver.devp[minor] = NULL;
+ }
+ mutex_unlock(&kgsl_driver.devlock);
+}
+
+static int _register_device(struct kgsl_device *device)
+{
+ int minor, ret;
+ dev_t dev;
+
+
+
+ mutex_lock(&kgsl_driver.devlock);
+ for (minor = 0; minor < KGSL_DEVICE_MAX; minor++) {
+ if (kgsl_driver.devp[minor] == NULL) {
+ kgsl_driver.devp[minor] = device;
+ break;
+ }
+ }
+ mutex_unlock(&kgsl_driver.devlock);
+
+ if (minor == KGSL_DEVICE_MAX) {
+ KGSL_CORE_ERR("minor devices exhausted\n");
+ return -ENODEV;
+ }
+
+
+ dev = MKDEV(MAJOR(kgsl_driver.major), minor);
+ device->dev = device_create(kgsl_driver.class,
+ device->parentdev,
+ dev, device,
+ device->name);
+
+ if (IS_ERR(device->dev)) {
+ mutex_lock(&kgsl_driver.devlock);
+ kgsl_driver.devp[minor] = NULL;
+ mutex_unlock(&kgsl_driver.devlock);
+ ret = PTR_ERR(device->dev);
+ KGSL_CORE_ERR("device_create(%s): %d\n", device->name, ret);
+ return ret;
+ }
+
+ dev_set_drvdata(device->parentdev, device);
+ return 0;
+}
+
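+/*
+ * Common probe path for all KGSL devices: register the device node,
+ * map the register window, hook up the IRQ (left disabled until
+ * pwrctrl enables it), then bring up the MMU, the memstore and the
+ * snapshot/sysfs plumbing. The error labels unwind in reverse order.
+ */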
+int kgsl_device_platform_probe(struct kgsl_device *device)
+{
+ int result;
+ int i;
+ int status = -EINVAL;
+ struct resource *res;
+ struct platform_device *pdev =
+ container_of(device->parentdev, struct platform_device, dev);
+
+ status = _register_device(device);
+ if (status)
+ return status;
+
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ device->current_process_priv = NULL;
+#endif
+
+
+ device->gputime.total = 0;
+ device->gputime.busy = 0;
+	for (i = 0; i < KGSL_MAX_PWRLEVELS; i++) {
+		device->gputime_in_state[i].total = 0;
+		device->gputime_in_state[i].busy = 0;
+	}
+
+ kgsl_device_debugfs_init(device);
+
+ status = kgsl_pwrctrl_init(device);
+ if (status)
+ goto error;
+
+ kgsl_ion_client = msm_ion_client_create(UINT_MAX, KGSL_NAME);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ device->iomemname);
+ if (res == NULL) {
+ KGSL_DRV_ERR(device, "platform_get_resource_byname failed\n");
+ status = -EINVAL;
+ goto error_pwrctrl_close;
+ }
+ if (res->start == 0 || resource_size(res) == 0) {
+ KGSL_DRV_ERR(device, "dev %d invalid register region\n",
+ device->id);
+ status = -EINVAL;
+ goto error_pwrctrl_close;
+ }
+
+ device->reg_phys = res->start;
+ device->reg_len = resource_size(res);
+
+ if (!devm_request_mem_region(device->dev, device->reg_phys,
+ device->reg_len, device->name)) {
+ KGSL_DRV_ERR(device, "request_mem_region failed\n");
+ status = -ENODEV;
+ goto error_pwrctrl_close;
+ }
+
+ device->reg_virt = devm_ioremap(device->dev, device->reg_phys,
+ device->reg_len);
+
+ if (device->reg_virt == NULL) {
+ KGSL_DRV_ERR(device, "ioremap failed\n");
+ status = -ENODEV;
+ goto error_pwrctrl_close;
+ }
+
+ device->pwrctrl.interrupt_num =
+ platform_get_irq_byname(pdev, device->pwrctrl.irq_name);
+
+ if (device->pwrctrl.interrupt_num <= 0) {
+ KGSL_DRV_ERR(device, "platform_get_irq_byname failed: %d\n",
+ device->pwrctrl.interrupt_num);
+ status = -EINVAL;
+ goto error_pwrctrl_close;
+ }
+
+ status = devm_request_irq(device->dev, device->pwrctrl.interrupt_num,
+ kgsl_irq_handler, IRQF_TRIGGER_HIGH,
+ device->name, device);
+ if (status) {
+ KGSL_DRV_ERR(device, "request_irq(%d) failed: %d\n",
+ device->pwrctrl.interrupt_num, status);
+ goto error_pwrctrl_close;
+ }
+ disable_irq(device->pwrctrl.interrupt_num);
+
+ KGSL_DRV_INFO(device,
+ "dev_id %d regs phys 0x%08lx size 0x%08x virt %p\n",
+ device->id, device->reg_phys, device->reg_len,
+ device->reg_virt);
+
+ result = kgsl_drm_init(pdev);
+ if (result)
+ goto error_pwrctrl_close;
+
+ kgsl_cffdump_open(device->id);
+
+ setup_timer(&device->idle_timer, kgsl_timer, (unsigned long) device);
+ status = kgsl_create_device_workqueue(device);
+ if (status)
+ goto error_pwrctrl_close;
+
+ status = kgsl_mmu_init(device);
+ if (status != 0) {
+ KGSL_DRV_ERR(device, "kgsl_mmu_init failed %d\n", status);
+ goto error_dest_work_q;
+ }
+
+ status = kgsl_allocate_contiguous(&device->memstore,
+ KGSL_MEMSTORE_SIZE);
+
+ if (status != 0) {
+ KGSL_DRV_ERR(device, "kgsl_allocate_contiguous failed %d\n",
+ status);
+ goto error_close_mmu;
+ }
+
+ pm_qos_add_request(&device->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+
+ kgsl_device_snapshot_init(device);
+
+
+ kgsl_pwrctrl_init_sysfs(device);
+
+ return 0;
+
+error_close_mmu:
+ kgsl_mmu_close(device);
+error_dest_work_q:
+ destroy_workqueue(device->work_queue);
+ device->work_queue = NULL;
+error_pwrctrl_close:
+ kgsl_pwrctrl_close(device);
+error:
+ _unregister_device(device);
+ return status;
+}
+EXPORT_SYMBOL(kgsl_device_platform_probe);
+
+void kgsl_device_platform_remove(struct kgsl_device *device)
+{
+ kgsl_device_snapshot_close(device);
+
+ kgsl_cffdump_close(device->id);
+ kgsl_pwrctrl_uninit_sysfs(device);
+
+ pm_qos_remove_request(&device->pm_qos_req_dma);
+
+ idr_destroy(&device->context_idr);
+
+ kgsl_sharedmem_free(&device->memstore);
+
+ kgsl_mmu_close(device);
+
+ if (device->work_queue) {
+ destroy_workqueue(device->work_queue);
+ device->work_queue = NULL;
+ }
+ kgsl_pwrctrl_close(device);
+
+ _unregister_device(device);
+}
+EXPORT_SYMBOL(kgsl_device_platform_remove);
+
+static int __devinit
+kgsl_ptdata_init(void)
+{
+ kgsl_driver.ptpool = kgsl_mmu_ptpool_init(kgsl_pagetable_count);
+
+ if (!kgsl_driver.ptpool)
+ return -ENOMEM;
+ return 0;
+}
+
+static void kgsl_core_exit(void)
+{
+ kgsl_mmu_ptpool_destroy(kgsl_driver.ptpool);
+ kgsl_driver.ptpool = NULL;
+
+ kgsl_drm_exit();
+ kgsl_cffdump_destroy();
+ kgsl_core_debugfs_close();
+
+ if (kgsl_driver.virtdev.class) {
+ kgsl_sharedmem_uninit_sysfs();
+ device_unregister(&kgsl_driver.virtdev);
+ }
+
+ if (kgsl_driver.class) {
+ class_destroy(kgsl_driver.class);
+ kgsl_driver.class = NULL;
+ }
+
+ unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
+}
+
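+/*
+ * Module init: carve out the char device region, create the class and
+ * the virtual parent device, then the sysfs/debugfs scaffolding and,
+ * when the GPUMMU is in use, the static pagetable pool. The error
+ * path reuses kgsl_core_exit(), which tolerates partially initialized
+ * state.
+ */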
+static int __init kgsl_core_init(void)
+{
+ int result = 0;
+
+ result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
+ KGSL_NAME);
+ if (result < 0) {
+ KGSL_CORE_ERR("alloc_chrdev_region failed err = %d\n", result);
+ goto err;
+ }
+
+ cdev_init(&kgsl_driver.cdev, &kgsl_fops);
+ kgsl_driver.cdev.owner = THIS_MODULE;
+ kgsl_driver.cdev.ops = &kgsl_fops;
+ result = cdev_add(&kgsl_driver.cdev, MKDEV(MAJOR(kgsl_driver.major), 0),
+ KGSL_DEVICE_MAX);
+
+ if (result) {
+ KGSL_CORE_ERR("kgsl: cdev_add() failed, dev_num= %d,"
+ " result= %d\n", kgsl_driver.major, result);
+ goto err;
+ }
+
+ kgsl_driver.class = class_create(THIS_MODULE, KGSL_NAME);
+
+ if (IS_ERR(kgsl_driver.class)) {
+ result = PTR_ERR(kgsl_driver.class);
+ KGSL_CORE_ERR("failed to create class %s", KGSL_NAME);
+ goto err;
+ }
+
+ kgsl_driver.virtdev.class = kgsl_driver.class;
+ dev_set_name(&kgsl_driver.virtdev, "kgsl");
+ result = device_register(&kgsl_driver.virtdev);
+ if (result) {
+ KGSL_CORE_ERR("driver_register failed\n");
+ goto err;
+ }
+
+ kgsl_driver.ptkobj =
+ kobject_create_and_add("pagetables",
+ &kgsl_driver.virtdev.kobj);
+
+ kgsl_driver.prockobj =
+ kobject_create_and_add("proc",
+ &kgsl_driver.virtdev.kobj);
+
+ kgsl_core_debugfs_init();
+
+ kgsl_sharedmem_init_sysfs();
+ kgsl_cffdump_init();
+
+ INIT_LIST_HEAD(&kgsl_driver.process_list);
+
+ INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
+
+ kgsl_mmu_set_mmutype(ksgl_mmu_type);
+
+	if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_GPU) {
+ result = kgsl_ptdata_init();
+ if (result)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ kgsl_core_exit();
+ return result;
+}
+
+module_init(kgsl_core_init);
+module_exit(kgsl_core_exit);
+
+MODULE_AUTHOR("Qualcomm Innovation Center, Inc.");
+MODULE_DESCRIPTION("MSM GPU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
new file mode 100644
index 0000000..7e62475
--- /dev/null
+++ b/drivers/gpu/msm/kgsl.h
@@ -0,0 +1,273 @@
+/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_H
+#define __KGSL_H
+
+#include <linux/types.h>
+#include <linux/msm_kgsl.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/cdev.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mm.h>
+#include <linux/ion.h>
+
+#define KGSL_NAME "kgsl"
+
+#define KGSL_MEMSTORE_SIZE ((int)(PAGE_SIZE * 2))
+#define KGSL_MEMSTORE_GLOBAL (0)
+#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \
+ sizeof(struct kgsl_devmemstore) - 1)
+
+#define KGSL_TIMESTAMP_WINDOW 0x80000000
+
+#define DRM_KGSL_GEM_CACHE_OP_TO_DEV 0x0001
+#define DRM_KGSL_GEM_CACHE_OP_FROM_DEV 0x0002
+
+#define KGSL_PAGETABLE_ENTRY_SIZE 4
+
+#define KGSL_PAGETABLE_BASE 0x10000000
+
+#define KGSL_PT_EXTRA_ENTRIES 16
+
+#define KGSL_PAGETABLE_ENTRIES(_sz) (((_sz) >> PAGE_SHIFT) + \
+ KGSL_PT_EXTRA_ENTRIES)
+
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+#define KGSL_PAGETABLE_COUNT (CONFIG_MSM_KGSL_PAGE_TABLE_COUNT)
+#else
+#define KGSL_PAGETABLE_COUNT 1
+#endif
+
+#define KGSL_CONTAINER_OF(ptr, type, member) \
+ container_of(ptr, type, member)
+
+
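+/*
+ * Accumulate a size into a running statistic and track its high-water
+ * mark, e.g. KGSL_STATS_ADD(param->len, kgsl_driver.stats.mapped,
+ * kgsl_driver.stats.mapped_max).
+ */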
+#define KGSL_STATS_ADD(_size, _stat, _max) \
+ do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
+
+struct kgsl_device;
+
+struct kgsl_driver {
+ struct cdev cdev;
+ dev_t major;
+ struct class *class;
+
+ struct device virtdev;
+
+ struct kobject *ptkobj;
+ struct kobject *prockobj;
+ struct kgsl_device *devp[KGSL_DEVICE_MAX];
+
+
+ struct list_head process_list;
+
+ struct list_head pagetable_list;
+
+ spinlock_t ptlock;
+
+ struct mutex process_mutex;
+
+
+ struct mutex devlock;
+
+ void *ptpool;
+
+ struct {
+ unsigned int vmalloc;
+ unsigned int vmalloc_max;
+ unsigned int page_alloc;
+ unsigned int page_alloc_max;
+ unsigned int page_alloc_kernel;
+ unsigned int coherent;
+ unsigned int coherent_max;
+ unsigned int mapped;
+ unsigned int mapped_max;
+ unsigned int pre_alloc;
+ unsigned int pre_alloc_max;
+ unsigned int pre_alloc_kernel;
+ unsigned int histogram[16];
+ } stats;
+};
+
+extern struct kgsl_driver kgsl_driver;
+
+struct kgsl_pagetable;
+struct kgsl_memdesc;
+
+struct kgsl_memdesc_ops {
+ int (*vmflags)(struct kgsl_memdesc *);
+ int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
+ struct vm_fault *);
+ void (*free)(struct kgsl_memdesc *memdesc);
+ int (*map_kernel_mem)(struct kgsl_memdesc *);
+};
+
+#define KGSL_MEMDESC_GUARD_PAGE BIT(0)
+
+struct kgsl_memdesc {
+ struct kgsl_pagetable *pagetable;
+ void *hostptr;
+ unsigned int gpuaddr;
+ unsigned int physaddr;
+ unsigned int size;
+ unsigned int priv;
+ struct scatterlist *sg;
+ unsigned int sglen;
+ struct kgsl_memdesc_ops *ops;
+ int flags;
+ struct ion_handle* handle;
+ struct kgsl_process_private *private;
+};
+
+enum {
+	KGSL_MEM_ENTRY_KERNEL = 0,
+	KGSL_MEM_ENTRY_PMEM,
+	KGSL_MEM_ENTRY_ASHMEM,
+	KGSL_MEM_ENTRY_USER,
+	KGSL_MEM_ENTRY_ION,
+	KGSL_MEM_ENTRY_PAGE_ALLOC,
+	KGSL_MEM_ENTRY_PRE_ALLOC,
+	KGSL_MEM_ENTRY_MAX,
+};
+
+
+#define KGSL_MEM_ENTRY_FROZEN (1 << 0)
+
+struct kgsl_mem_entry {
+ struct kref refcount;
+ struct kgsl_memdesc memdesc;
+ int memtype;
+ int flags;
+ void *priv_data;
+ struct rb_node node;
+ unsigned int context_id;
+ struct kgsl_process_private *priv;
+};
+
+#ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT
+#define MMU_CONFIG 2
+#else
+#define MMU_CONFIG 1
+#endif
+
+void kgsl_mem_entry_destroy(struct kref *kref);
+
+struct kgsl_mem_entry *kgsl_get_mem_entry(unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size);
+
+struct kgsl_mem_entry *kgsl_sharedmem_find_region(
+ struct kgsl_process_private *private, unsigned int gpuaddr,
+ size_t size);
+
+int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
+ void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
+ void *owner);
+
+void kgsl_cancel_events(struct kgsl_device *device,
+ void *owner);
+
+extern const struct dev_pm_ops kgsl_pm_ops;
+
+struct early_suspend;
+int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
+int kgsl_resume_driver(struct platform_device *pdev);
+void kgsl_early_suspend_driver(struct early_suspend *h);
+void kgsl_late_resume_driver(struct early_suspend *h);
+
+#ifdef CONFIG_MSM_KGSL_DRM
+extern int kgsl_drm_init(struct platform_device *dev);
+extern void kgsl_drm_exit(void);
+#else
+static inline int kgsl_drm_init(struct platform_device *dev)
+{
+ return 0;
+}
+
+static inline void kgsl_drm_exit(void)
+{
+}
+#endif
+
+static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
+ unsigned int gpuaddr, unsigned int size)
+{
+ if (gpuaddr >= memdesc->gpuaddr &&
+ ((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size))) {
+ return 1;
+ }
+ return 0;
+}
+
+static inline void *kgsl_memdesc_map(struct kgsl_memdesc *memdesc)
+{
+ if (memdesc->hostptr == NULL && memdesc->ops &&
+ memdesc->ops->map_kernel_mem)
+ memdesc->ops->map_kernel_mem(memdesc);
+
+ return memdesc->hostptr;
+}
+
+static inline uint8_t *kgsl_gpuaddr_to_vaddr(struct kgsl_memdesc *memdesc,
+ unsigned int gpuaddr)
+{
+ void *hostptr = NULL;
+
+ if ((gpuaddr >= memdesc->gpuaddr) &&
+ (gpuaddr < (memdesc->gpuaddr + memdesc->size)))
+ hostptr = kgsl_memdesc_map(memdesc);
+
+ return hostptr != NULL ? hostptr + (gpuaddr - memdesc->gpuaddr) : NULL;
+}
+
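+/*
+ * Compare two hardware timestamps, allowing for wraparound: returns 0
+ * if they are equal, 1 if a is newer than b, and -1 otherwise. Values
+ * less than KGSL_TIMESTAMP_WINDOW (half the 32-bit range) apart are
+ * ordered directly; otherwise both are biased by the window so the
+ * comparison still works across the wrap. For example,
+ * timestamp_cmp(1, 0xfffffffe) returns 1: after biasing, 0x80000001 >
+ * 0x7ffffffe, so timestamp 1 is treated as coming after 0xfffffffe.
+ */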
+static inline int timestamp_cmp(unsigned int a, unsigned int b)
+{
+
+ if (a == b)
+ return 0;
+
+
+ if ((a > b) && (a - b < KGSL_TIMESTAMP_WINDOW))
+ return 1;
+
+ a += KGSL_TIMESTAMP_WINDOW;
+ b += KGSL_TIMESTAMP_WINDOW;
+ return ((a > b) && (a - b <= KGSL_TIMESTAMP_WINDOW)) ? 1 : -1;
+}
+
+static inline void
+kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
+{
+ kref_get(&entry->refcount);
+}
+
+static inline void
+kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
+{
+ kref_put(&entry->refcount, kgsl_mem_entry_destroy);
+}
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
new file mode 100644
index 0000000..cea8ea0
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_CFFDUMP_H
+#define __KGSL_CFFDUMP_H
+
+#ifdef CONFIG_MSM_KGSL_CFF_DUMP
+
+#include <linux/types.h>
+
+#include "kgsl_device.h"
+
+void kgsl_cffdump_init(void);
+void kgsl_cffdump_destroy(void);
+void kgsl_cffdump_open(enum kgsl_deviceid device_id);
+void kgsl_cffdump_close(enum kgsl_deviceid device_id);
+void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
+ const struct kgsl_memdesc *memdesc, uint physaddr, uint sizebytes,
+ bool clean_cache);
+void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes);
+void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
+ uint value);
+void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
+ uint value, uint mask);
+bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
+ const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
+ bool check_only);
+void kgsl_cffdump_user_event(unsigned int cff_opcode, unsigned int op1,
+ unsigned int op2, unsigned int op3,
+ unsigned int op4, unsigned int op5);
+static inline bool kgsl_cffdump_flags_no_memzero(void) { return true; }
+
+void kgsl_cffdump_memory_base(enum kgsl_deviceid device_id, unsigned int base,
+ unsigned int range, unsigned int gmemsize);
+
+void kgsl_cffdump_hang(enum kgsl_deviceid device_id);
+
+#else
+
+#define kgsl_cffdump_init() (void)0
+#define kgsl_cffdump_destroy() (void)0
+#define kgsl_cffdump_open(device_id) (void)0
+#define kgsl_cffdump_close(device_id) (void)0
+#define kgsl_cffdump_syncmem(dev_priv, memdesc, addr, sizebytes, clean_cache) \
+ (void) 0
+#define kgsl_cffdump_setmem(addr, value, sizebytes) (void)0
+#define kgsl_cffdump_regwrite(device_id, addr, value) (void)0
+#define kgsl_cffdump_regpoll(device_id, addr, value, mask) (void)0
+#define kgsl_cffdump_parse_ibs(dev_priv, memdesc, gpuaddr, \
+ sizedwords, check_only) true
+#define kgsl_cffdump_flags_no_memzero() true
+#define kgsl_cffdump_memory_base(device_id, base, range, gmemsize) (void)0
+#define kgsl_cffdump_hang(device_id) (void)0
+#define kgsl_cffdump_user_event(cff_opcode, op1, op2, op3, op4, op5) \
+	(void)0
+
+#endif
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
new file mode 100644
index 0000000..68fee6d
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -0,0 +1,87 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+
+#define KGSL_LOG_LEVEL_DEFAULT 3
+#define KGSL_LOG_LEVEL_MAX 7
+
+struct dentry *kgsl_debugfs_dir;
+
+static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
+{
+ *log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
+ return 0;
+}
+
+#define KGSL_DEBUGFS_LOG(__log) \
+static int __log ## _set(void *data, u64 val) \
+{ \
+ struct kgsl_device *device = data; \
+ return kgsl_log_set(&device->__log, data, val); \
+} \
+static int __log ## _get(void *data, u64 *val) \
+{ \
+ struct kgsl_device *device = data; \
+ *val = device->__log; \
+ return 0; \
+} \
+DEFINE_SIMPLE_ATTRIBUTE(__log ## _fops, \
+__log ## _get, __log ## _set, "%llu\n");
+
+KGSL_DEBUGFS_LOG(drv_log);
+KGSL_DEBUGFS_LOG(cmd_log);
+KGSL_DEBUGFS_LOG(ctxt_log);
+KGSL_DEBUGFS_LOG(mem_log);
+KGSL_DEBUGFS_LOG(pwr_log);
+
+void kgsl_device_debugfs_init(struct kgsl_device *device)
+{
+ if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
+ device->d_debugfs = debugfs_create_dir(device->name,
+ kgsl_debugfs_dir);
+
+ if (!device->d_debugfs || IS_ERR(device->d_debugfs))
+ return;
+
+ device->cmd_log = KGSL_LOG_LEVEL_DEFAULT;
+ device->ctxt_log = KGSL_LOG_LEVEL_DEFAULT;
+ device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
+ device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
+ device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
+
+ debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
+ &cmd_log_fops);
+ debugfs_create_file("log_level_ctxt", 0644, device->d_debugfs, device,
+ &ctxt_log_fops);
+ debugfs_create_file("log_level_drv", 0644, device->d_debugfs, device,
+ &drv_log_fops);
+ debugfs_create_file("log_level_mem", 0644, device->d_debugfs, device,
+ &mem_log_fops);
+ debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
+ &pwr_log_fops);
+}
+
+void kgsl_core_debugfs_init(void)
+{
+	kgsl_debugfs_dir = debugfs_create_dir("kgsl", NULL);
+}
+
+void kgsl_core_debugfs_close(void)
+{
+ debugfs_remove_recursive(kgsl_debugfs_dir);
+}
diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h
new file mode 100644
index 0000000..5e10988
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_debugfs.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _KGSL_DEBUGFS_H
+#define _KGSL_DEBUGFS_H
+
+struct kgsl_device;
+
+#ifdef CONFIG_DEBUG_FS
+void kgsl_core_debugfs_init(void);
+void kgsl_core_debugfs_close(void);
+
+void kgsl_device_debugfs_init(struct kgsl_device *device);
+
+extern struct dentry *kgsl_debugfs_dir;
+static inline struct dentry *kgsl_get_debugfs_dir(void)
+{
+ return kgsl_debugfs_dir;
+}
+
+#else
+static inline void kgsl_core_debugfs_init(void) { }
+static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { }
+static inline void kgsl_core_debugfs_close(void) { }
+static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; }
+
+#endif
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
new file mode 100644
index 0000000..df61717
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -0,0 +1,402 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_DEVICE_H
+#define __KGSL_DEVICE_H
+
+#include <linux/idr.h>
+#include <linux/pm_qos.h>
+#include <linux/earlysuspend.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_pwrctrl.h"
+#include "kgsl_log.h"
+#include "kgsl_pwrscale.h"
+
+#define KGSL_TIMEOUT_NONE 0
+#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
+#define KGSL_TIMEOUT_PART 2000
+
+#define FIRST_TIMEOUT (HZ / 2)
+
+
+
+#define KGSL_STATE_NONE 0x00000000
+#define KGSL_STATE_INIT 0x00000001
+#define KGSL_STATE_ACTIVE 0x00000002
+#define KGSL_STATE_NAP 0x00000004
+#define KGSL_STATE_SLEEP 0x00000008
+#define KGSL_STATE_SUSPEND 0x00000010
+#define KGSL_STATE_HUNG 0x00000020
+#define KGSL_STATE_DUMP_AND_RECOVER 0x00000040
+#define KGSL_STATE_SLUMBER 0x00000080
+
+#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000
+
+#define KGSL_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+
+struct kgsl_device;
+struct platform_device;
+struct kgsl_device_private;
+struct kgsl_context;
+struct kgsl_power_stats;
+
+struct kgsl_functable {
+ void (*regread) (struct kgsl_device *device,
+ unsigned int offsetwords, unsigned int *value);
+ void (*regwrite) (struct kgsl_device *device,
+ unsigned int offsetwords, unsigned int value);
+ int (*idle) (struct kgsl_device *device);
+ unsigned int (*isidle) (struct kgsl_device *device);
+ int (*suspend_context) (struct kgsl_device *device);
+ int (*start) (struct kgsl_device *device, unsigned int init_ram);
+ int (*stop) (struct kgsl_device *device);
+ int (*getproperty) (struct kgsl_device *device,
+ enum kgsl_property_type type, void *value,
+ unsigned int sizebytes);
+ int (*waittimestamp) (struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp,
+ unsigned int msecs);
+ unsigned int (*readtimestamp) (struct kgsl_device *device,
+ struct kgsl_context *context, enum kgsl_timestamp_type type);
+ int (*issueibcmds) (struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
+ unsigned int sizedwords, uint32_t *timestamp,
+ unsigned int flags);
+ int (*setup_pt)(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable);
+ void (*cleanup_pt)(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable);
+ void (*power_stats)(struct kgsl_device *device,
+ struct kgsl_power_stats *stats);
+ void (*irqctrl)(struct kgsl_device *device, int state);
+ unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
+ void * (*snapshot)(struct kgsl_device *device, void *snapshot,
+ int *remain, int hang);
+ irqreturn_t (*irq_handler)(struct kgsl_device *device);
+ void (*setstate) (struct kgsl_device *device, unsigned int context_id,
+ uint32_t flags);
+ int (*drawctxt_create) (struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable, struct kgsl_context *context,
+ uint32_t flags);
+ void (*drawctxt_destroy) (struct kgsl_device *device,
+ struct kgsl_context *context);
+ long (*ioctl) (struct kgsl_device_private *dev_priv,
+ unsigned int cmd, void *data);
+ int (*setproperty) (struct kgsl_device *device,
+ enum kgsl_property_type type, void *value,
+ unsigned int sizebytes);
+};
+
+struct kgsl_mh {
+ unsigned int mharb;
+ unsigned int mh_intf_cfg1;
+ unsigned int mh_intf_cfg2;
+ uint32_t mpu_base;
+ int mpu_range;
+};
+
+struct kgsl_event {
+ struct kgsl_context *context;
+ uint32_t timestamp;
+ void (*func)(struct kgsl_device *, void *, u32, u32);
+ void *priv;
+ struct list_head list;
+ void *owner;
+};
+
+struct kgsl_gpubusy {
+ s64 busy;
+ s64 total;
+};
+
+struct kgsl_device {
+ struct device *dev;
+ const char *name;
+ unsigned int ver_major;
+ unsigned int ver_minor;
+ uint32_t flags;
+ enum kgsl_deviceid id;
+ unsigned long reg_phys;
+ void *reg_virt;
+ unsigned int reg_len;
+ struct kgsl_memdesc memstore;
+ const char *iomemname;
+
+ struct kgsl_mh mh;
+ struct kgsl_mmu mmu;
+ struct completion hwaccess_gate;
+ const struct kgsl_functable *ftbl;
+ struct work_struct idle_check_ws;
+ struct timer_list idle_timer;
+ struct kgsl_pwrctrl pwrctrl;
+ int open_count;
+
+ struct atomic_notifier_head ts_notifier_list;
+ struct mutex mutex;
+ uint32_t state;
+ uint32_t requested_state;
+
+ unsigned int active_cnt;
+ struct completion suspend_gate;
+
+ wait_queue_head_t wait_queue;
+ struct workqueue_struct *work_queue;
+ struct device *parentdev;
+ struct completion recovery_gate;
+ struct dentry *d_debugfs;
+ struct idr context_idr;
+ struct early_suspend display_off;
+
+ void *snapshot;
+ int snapshot_maxsize;
+ int snapshot_size;
+ u32 snapshot_timestamp;
+ int snapshot_frozen;
+ int snapshot_no_panic;
+ struct kobject snapshot_kobj;
+
+ struct list_head snapshot_obj_list;
+
+
+ int cmd_log;
+ int ctxt_log;
+ int drv_log;
+ int mem_log;
+ int pwr_log;
+ struct kgsl_pwrscale pwrscale;
+ struct kobject pwrscale_kobj;
+ struct pm_qos_request pm_qos_req_dma;
+ struct work_struct ts_expired_ws;
+ struct list_head events;
+ s64 on_time;
+
+
+ struct kgsl_gpubusy gputime;
+ struct kgsl_gpubusy gputime_in_state[KGSL_MAX_PWRLEVELS];
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ struct kgsl_process_private *current_process_priv;
+#endif
+#if defined(CONFIG_MSM_KGSL_GPU_USAGE_SYSTRACE)
+ int prev_pid;
+#endif
+};
+
+void kgsl_timestamp_expired(struct work_struct *work);
+
+#define KGSL_DEVICE_COMMON_INIT(_dev) \
+ .hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
+ .suspend_gate = COMPLETION_INITIALIZER((_dev).suspend_gate),\
+ .recovery_gate = COMPLETION_INITIALIZER((_dev).recovery_gate),\
+ .ts_notifier_list = ATOMIC_NOTIFIER_INIT((_dev).ts_notifier_list),\
+ .idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
+ kgsl_idle_check),\
+ .ts_expired_ws = __WORK_INITIALIZER((_dev).ts_expired_ws,\
+ kgsl_timestamp_expired),\
+ .context_idr = IDR_INIT((_dev).context_idr),\
+ .events = LIST_HEAD_INIT((_dev).events),\
+ .wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
+ .mutex = __MUTEX_INITIALIZER((_dev).mutex),\
+ .state = KGSL_STATE_INIT,\
+ .ver_major = DRIVER_VERSION_MAJOR,\
+ .ver_minor = DRIVER_VERSION_MINOR
+
+struct kgsl_context {
+ struct kref refcount;
+ uint32_t id;
+
+
+ struct kgsl_device_private *dev_priv;
+
+
+ void *devctxt;
+ unsigned int reset_status;
+};
+
+struct kgsl_process_private {
+ unsigned int refcnt;
+ pid_t pid;
+ spinlock_t mem_lock;
+ struct rb_root mem_rb;
+ struct kgsl_pagetable *pagetable;
+ struct list_head list;
+ struct kobject kobj;
+
+ struct {
+ unsigned int cur;
+ unsigned int max;
+ } stats[KGSL_MEM_ENTRY_MAX];
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ struct kgsl_gpubusy gputime;
+ struct kgsl_gpubusy gputime_in_state[KGSL_MAX_PWRLEVELS];
+#endif
+};
+
+struct kgsl_device_private {
+ struct kgsl_device *device;
+ struct kgsl_process_private *process_priv;
+};
+
+struct kgsl_power_stats {
+ s64 total_time;
+ s64 busy_time;
+};
+
+struct kgsl_device *kgsl_get_device(int dev_idx);
+
+static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
+ unsigned int type, size_t size)
+{
+ priv->stats[type].cur += size;
+ if (priv->stats[type].max < priv->stats[type].cur)
+ priv->stats[type].max = priv->stats[type].cur;
+}
+
+static inline void kgsl_process_sub_stats(struct kgsl_process_private *priv,
+ unsigned int type, size_t size)
+{
+ priv->stats[type].cur -= size;
+}
+
+static inline void kgsl_regread(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value)
+{
+ device->ftbl->regread(device, offsetwords, value);
+}
+
+static inline void kgsl_regwrite(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value)
+{
+ device->ftbl->regwrite(device, offsetwords, value);
+}
+
+static inline int kgsl_idle(struct kgsl_device *device)
+{
+ return device->ftbl->idle(device);
+}
+
+static inline unsigned int kgsl_gpuid(struct kgsl_device *device,
+ unsigned int *chipid)
+{
+ return device->ftbl->gpuid(device, chipid);
+}
+
+static inline unsigned int kgsl_readtimestamp(struct kgsl_device *device,
+ struct kgsl_context *context,
+ enum kgsl_timestamp_type type)
+{
+ return device->ftbl->readtimestamp(device, context, type);
+}
+
+static inline int kgsl_create_device_sysfs_files(struct device *root,
+ const struct device_attribute **list)
+{
+ int ret = 0, i;
+ for (i = 0; list[i] != NULL; i++)
+ ret |= device_create_file(root, list[i]);
+ return ret;
+}
+
+static inline void kgsl_remove_device_sysfs_files(struct device *root,
+ const struct device_attribute **list)
+{
+ int i;
+ for (i = 0; list[i] != NULL; i++)
+ device_remove_file(root, list[i]);
+}
+
+static inline struct kgsl_mmu *
+kgsl_get_mmu(struct kgsl_device *device)
+{
+ return (struct kgsl_mmu *) (device ? &device->mmu : NULL);
+}
+
+static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
+ return kgsl_driver.devp[i];
+ }
+
+ return NULL;
+}
+
+static inline int kgsl_create_device_workqueue(struct kgsl_device *device)
+{
+ device->work_queue = create_singlethread_workqueue(device->name);
+ if (!device->work_queue) {
+ KGSL_DRV_ERR(device,
+ "create_singlethread_workqueue(%s) failed\n",
+ device->name);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline struct kgsl_context *
+kgsl_find_context(struct kgsl_device_private *dev_priv, uint32_t id)
+{
+ struct kgsl_context *ctxt =
+ idr_find(&dev_priv->device->context_idr, id);
+
+
+ return (ctxt && ctxt->dev_priv == dev_priv) ? ctxt : NULL;
+}
+
+int kgsl_check_timestamp(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int timestamp);
+
+int kgsl_register_ts_notifier(struct kgsl_device *device,
+ struct notifier_block *nb);
+
+int kgsl_unregister_ts_notifier(struct kgsl_device *device,
+ struct notifier_block *nb);
+
+int kgsl_device_platform_probe(struct kgsl_device *device);
+
+void kgsl_device_platform_remove(struct kgsl_device *device);
+
+const char *kgsl_pwrstate_to_str(unsigned int state);
+
+int kgsl_device_snapshot_init(struct kgsl_device *device);
+int kgsl_device_snapshot(struct kgsl_device *device, int hang);
+void kgsl_device_snapshot_close(struct kgsl_device *device);
+
+static inline struct kgsl_device_platform_data *
+kgsl_device_get_drvdata(struct kgsl_device *dev)
+{
+ struct platform_device *pdev =
+ container_of(dev->parentdev, struct platform_device, dev);
+
+ return pdev->dev.platform_data;
+}
+
+static inline void
+kgsl_context_get(struct kgsl_context *context)
+{
+ kref_get(&context->refcount);
+}
+
+void kgsl_context_destroy(struct kref *kref);
+
+static inline void
+kgsl_context_put(struct kgsl_context *context)
+{
+ kref_put(&context->refcount, kgsl_context_destroy);
+}
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
new file mode 100644
index 0000000..33f242b
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -0,0 +1,682 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_trace.h"
+
+#define KGSL_PAGETABLE_SIZE \
+ ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
+ KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
+
+static ssize_t
+sysfs_show_ptpool_entries(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
+ kgsl_driver.ptpool;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
+}
+
+static ssize_t
+sysfs_show_ptpool_min(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
+ kgsl_driver.ptpool;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pool->static_entries);
+}
+
+static ssize_t
+sysfs_show_ptpool_chunks(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
+ kgsl_driver.ptpool;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
+}
+
+static ssize_t
+sysfs_show_ptpool_ptsize(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
+ kgsl_driver.ptpool;
+ return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
+}
+
+static struct kobj_attribute attr_ptpool_entries = {
+ .attr = { .name = "ptpool_entries", .mode = 0444 },
+ .show = sysfs_show_ptpool_entries,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_min = {
+ .attr = { .name = "ptpool_min", .mode = 0444 },
+ .show = sysfs_show_ptpool_min,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_chunks = {
+ .attr = { .name = "ptpool_chunks", .mode = 0444 },
+ .show = sysfs_show_ptpool_chunks,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_ptpool_ptsize = {
+ .attr = { .name = "ptpool_ptsize", .mode = 0444 },
+ .show = sysfs_show_ptpool_ptsize,
+ .store = NULL,
+};
+
+static struct attribute *ptpool_attrs[] = {
+ &attr_ptpool_entries.attr,
+ &attr_ptpool_min.attr,
+ &attr_ptpool_chunks.attr,
+ &attr_ptpool_ptsize.attr,
+ NULL,
+};
+
+static struct attribute_group ptpool_attr_group = {
+ .attrs = ptpool_attrs,
+};
+
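+/*
+ * Grow the pagetable pool by one chunk: a dma_alloc_coherent() block
+ * carved into 'count' pagetable-sized slots, with a bitmap tracking
+ * which slots are in use. Chunks added dynamically are freed again
+ * once their bitmap empties (see kgsl_ptpool_free).
+ */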
+static int
+_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
+{
+ struct kgsl_ptpool_chunk *chunk;
+ size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);
+
+ BUG_ON(count == 0);
+
+ if (get_order(size) >= MAX_ORDER) {
+ KGSL_CORE_ERR("ptpool allocation is too big: %d\n", size);
+ return -EINVAL;
+ }
+
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+ if (chunk == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*chunk));
+ return -ENOMEM;
+ }
+
+ chunk->size = size;
+ chunk->count = count;
+ chunk->dynamic = dynamic;
+
+ chunk->data = dma_alloc_coherent(NULL, size,
+ &chunk->phys, GFP_KERNEL);
+
+ if (chunk->data == NULL) {
+ KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
+ goto err;
+ }
+
+	chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * sizeof(unsigned long),
+				GFP_KERNEL);
+
+ if (chunk->bitmap == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ BITS_TO_LONGS(count) * 4);
+ goto err_dma;
+ }
+
+ list_add_tail(&chunk->list, &pool->list);
+
+ pool->chunks++;
+ pool->entries += count;
+
+ if (!dynamic)
+ pool->static_entries += count;
+
+ return 0;
+
+err_dma:
+ dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
+err:
+ kfree(chunk);
+ return -ENOMEM;
+}
+
+static void *
+_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
+{
+ struct kgsl_ptpool_chunk *chunk;
+
+ list_for_each_entry(chunk, &pool->list, list) {
+ int bit = find_first_zero_bit(chunk->bitmap, chunk->count);
+
+ if (bit >= chunk->count)
+ continue;
+
+ set_bit(bit, chunk->bitmap);
+ *physaddr = chunk->phys + (bit * pool->ptsize);
+
+ return chunk->data + (bit * pool->ptsize);
+ }
+
+ return NULL;
+}
+
+
+static int
+kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
+{
+ int ret = 0;
+ BUG_ON(count == 0);
+
+ mutex_lock(&pool->lock);
+
+
+ while (count) {
+ int entries = ((count * pool->ptsize) > SZ_4M) ?
+ SZ_4M / pool->ptsize : count;
+
+
+ ret = _kgsl_ptpool_add_entries(pool, entries, 0);
+ if (ret)
+ break;
+
+ count -= entries;
+ }
+
+ mutex_unlock(&pool->lock);
+ return ret;
+}
+
+
+static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
+ unsigned int *physaddr)
+{
+ void *addr = NULL;
+ int ret;
+
+ mutex_lock(&pool->lock);
+ addr = _kgsl_ptpool_get_entry(pool, physaddr);
+ if (addr)
+ goto done;
+
+
+ ret = _kgsl_ptpool_add_entries(pool, 1, 1);
+
+ if (ret)
+ goto done;
+
+ addr = _kgsl_ptpool_get_entry(pool, physaddr);
+done:
+ mutex_unlock(&pool->lock);
+ return addr;
+}
+
+static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
+{
+ list_del(&chunk->list);
+
+ if (chunk->data)
+ dma_free_coherent(NULL, chunk->size, chunk->data,
+ chunk->phys);
+ kfree(chunk->bitmap);
+ kfree(chunk);
+}
+
+
+static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
+{
+ struct kgsl_ptpool_chunk *chunk, *tmp;
+
+ if (pool == NULL || addr == NULL)
+ return;
+
+ mutex_lock(&pool->lock);
+ list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
+ if (addr >= chunk->data &&
+ addr < chunk->data + chunk->size) {
+ int bit = ((unsigned long) (addr - chunk->data)) /
+ pool->ptsize;
+
+ clear_bit(bit, chunk->bitmap);
+ memset(addr, 0, pool->ptsize);
+
+ if (chunk->dynamic &&
+ bitmap_empty(chunk->bitmap, chunk->count))
+ _kgsl_ptpool_rm_chunk(chunk);
+
+ break;
+ }
+ }
+
+ mutex_unlock(&pool->lock);
+}
+
+void kgsl_gpummu_ptpool_destroy(void *ptpool)
+{
+ struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
+ struct kgsl_ptpool_chunk *chunk, *tmp;
+
+ if (pool == NULL)
+ return;
+
+ mutex_lock(&pool->lock);
+ list_for_each_entry_safe(chunk, tmp, &pool->list, list)
+ _kgsl_ptpool_rm_chunk(chunk);
+ mutex_unlock(&pool->lock);
+
+ kfree(pool);
+}
+
+void *kgsl_gpummu_ptpool_init(int entries)
+{
+ int ptsize = KGSL_PAGETABLE_SIZE;
+ struct kgsl_ptpool *pool;
+ int ret = 0;
+
+ pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
+ if (!pool) {
+ KGSL_CORE_ERR("Failed to allocate memory "
+ "for ptpool\n");
+ return NULL;
+ }
+
+ pool->ptsize = ptsize;
+ mutex_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->list);
+
+ if (entries) {
+ ret = kgsl_ptpool_add(pool, entries);
+ if (ret)
+ goto err_ptpool_remove;
+ }
+
+ ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
+ if (ret) {
+ KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
+ "statistics: %d\n", ret);
+ goto err_ptpool_remove;
+ }
+ return (void *)pool;
+
+err_ptpool_remove:
+ kgsl_gpummu_ptpool_destroy(pool);
+ return NULL;
+}
+
+int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
+ unsigned int pt_base)
+{
+ struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
+ return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
+}
+
+void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
+{
+ struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
+ mmu_specific_pt;
+ kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
+ gpummu_pt->base.hostptr);
+
+ kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;
+
+ kfree(gpummu_pt->tlbflushfilter.base);
+
+ kfree(gpummu_pt);
+}
+
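+/*
+ * Linear pagetable helpers: one 32-bit entry per page, indexed by
+ * (va - va_base) >> PAGE_SHIFT.
+ */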
+static inline uint32_t
+kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
+{
+ return (va - va_base) >> PAGE_SHIFT;
+}
+
+static inline void
+kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
+{
+ uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
+ BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
+ baseptr[pte] = val;
+}
+
+static inline uint32_t
+kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
+{
+ uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
+ BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
+ return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
+}
+
+static void kgsl_gpummu_pagefault(struct kgsl_mmu *mmu)
+{
+ unsigned int reg;
+ unsigned int ptbase;
+
+	kgsl_regread(mmu->device, MH_MMU_PAGE_FAULT, &reg);
+ kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
+
+ KGSL_MEM_CRIT(mmu->device,
+ "mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
+ reg & ~(PAGE_SIZE - 1),
+ kgsl_mmu_get_ptname_from_ptbase(ptbase),
+ reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
+ trace_kgsl_mmu_pagefault(mmu->device, reg & ~(PAGE_SIZE - 1),
+ kgsl_mmu_get_ptname_from_ptbase(ptbase),
+ reg & 0x02 ? "WRITE" : "READ");
+}
+
+static void *kgsl_gpummu_create_pagetable(void)
+{
+ struct kgsl_gpummu_pt *gpummu_pt;
+
+ gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
+ GFP_KERNEL);
+ if (!gpummu_pt)
+ return NULL;
+
+ gpummu_pt->last_superpte = 0;
+
+ gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
+ (PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
+ gpummu_pt->tlbflushfilter.base = (unsigned int *)
+ kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
+ if (!gpummu_pt->tlbflushfilter.base) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ gpummu_pt->tlbflushfilter.size);
+ goto err_free_gpummu;
+ }
+ GSL_TLBFLUSH_FILTER_RESET();
+
+ gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
+ kgsl_driver.ptpool,
+ &gpummu_pt->base.physaddr);
+
+ if (gpummu_pt->base.hostptr == NULL)
+ goto err_flushfilter;
+
+
+ KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
+ kgsl_driver.stats.coherent_max);
+
+ gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
+ gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;
+
+ return (void *)gpummu_pt;
+
+err_flushfilter:
+ kfree(gpummu_pt->tlbflushfilter.base);
+err_free_gpummu:
+ kfree(gpummu_pt);
+
+ return NULL;
+}
+
+static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
+ uint32_t flags)
+{
+ struct kgsl_gpummu_pt *gpummu_pt;
+ if (!kgsl_mmu_enabled())
+ return;
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ kgsl_idle(mmu->device);
+ gpummu_pt = mmu->hwpagetable->priv;
+ kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
+ gpummu_pt->base.gpuaddr);
+ }
+
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
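+ /* invalidate all TLB entries */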
+ kgsl_regwrite(mmu->device, MH_MMU_INVALIDATE, 0x00000003);
+ }
+}
+
+static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id)
+{
+ if (mmu->flags & KGSL_FLAGS_STARTED) {
+ if (mmu->hwpagetable != pagetable) {
+ mmu->hwpagetable = pagetable;
+ kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
+
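+ /* program the new pagetable into the hardware and flush the TLB */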
+ kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
+ KGSL_MMUFLAGS_PTUPDATE);
+ }
+ }
+}
+
+static int kgsl_gpummu_init(struct kgsl_mmu *mmu)
+{
+ int status = 0;
+
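+ /* sub-client MMU lookups require address translation */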
+ if ((mmu->config & ~0x1) > 0) {
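+ /* make sure the virtual address range is a multiple of 64KB */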
+ if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
+ KGSL_CORE_ERR("Invalid pagetable size requested "
+ "for GPUMMU: %x\n", CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
+ return -EINVAL;
+ }
+ }
+
+ dev_info(mmu->device->dev, "|%s| MMU type set for device is GPUMMU\n",
+ __func__);
+ return status;
+}
+
+static int kgsl_gpummu_start(struct kgsl_mmu *mmu)
+{
+ struct kgsl_device *device = mmu->device;
+ struct kgsl_gpummu_pt *gpummu_pt;
+
+ if (mmu->flags & KGSL_FLAGS_STARTED)
+ return 0;
+
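+ /* MMU not enabled */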
+ if ((mmu->config & 0x1) == 0)
+ return 0;
+
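+ /* setup MMU and sub-client behavior */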
+ kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
+
+ kgsl_idle(device);
+
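+ /* enable AXI error and MMU page fault interrupts */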
+ kgsl_regwrite(device, MH_INTERRUPT_MASK,
+ GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
+
+ kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
+ mmu->setstate_memory.size);
+
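+ /* give the MMU a 32 byte aligned scratch address to complete
+ * faulting transactions */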
+ kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
+ mmu->setstate_memory.physaddr + 32);
+
+ if (mmu->defaultpagetable == NULL)
+ mmu->defaultpagetable =
+ kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+
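+ /* fail if the default pagetable could not be created */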
+ if (mmu->defaultpagetable == NULL)
+ return -ENOMEM;
+
+ mmu->hwpagetable = mmu->defaultpagetable;
+ gpummu_pt = mmu->hwpagetable->priv;
+ kgsl_regwrite(mmu->device, MH_MMU_PT_BASE,
+ gpummu_pt->base.gpuaddr);
+ kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
+ (KGSL_PAGETABLE_BASE |
+ (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
+ kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
+ mmu->flags |= KGSL_FLAGS_STARTED;
+
+ return 0;
+}
+
+static int
+kgsl_gpummu_unmap(void *mmu_specific_pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int *tlb_flags)
+{
+ unsigned int numpages;
+ unsigned int pte, ptefirst, ptelast, superpte;
+ unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+ struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
+
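+ /* callers may add an offset to gpuaddr, so mask it back to the aligned base */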
+ unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
+
+ numpages = (range >> PAGE_SHIFT);
+ if (range & (PAGE_SIZE - 1))
+ numpages++;
+
+ ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
+ ptelast = ptefirst + numpages;
+
+ superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
+ GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
+ for (pte = ptefirst; pte < ptelast; pte++) {
+#ifdef VERBOSE_DEBUG
+ if (!kgsl_pt_map_get(gpummu_pt, pte))
+ KGSL_CORE_ERR("pt entry %x is already "
+ "unmapped for pagetable %p\n", pte, gpummu_pt);
+#endif
+ kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
+ superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
+ if (pte == superpte)
+ GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
+ GSL_PT_SUPER_PTE);
+ }
+
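+ /* post all writes to the pagetable */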
+ wmb();
+
+ return 0;
+}
+
+#define SUPERPTE_IS_DIRTY(_p) \
+(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
+GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
+
+static int
+kgsl_gpummu_map(void *mmu_specific_pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int protflags,
+ unsigned int *tlb_flags)
+{
+ unsigned int pte;
+ struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
+ struct scatterlist *s;
+ int flushtlb = 0;
+ int i;
+
+ pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
+
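+ /* flush the TLB if the mapping doesn't start on a superpte boundary */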
+ if (pte & (GSL_PT_SUPER_PTE - 1))
+ flushtlb = 1;
+
+ for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
+ unsigned int paddr = kgsl_get_sg_pa(s);
+ unsigned int j;
+
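+ /* a single sg entry can span several pages */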
+ for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
+ if (SUPERPTE_IS_DIRTY(pte))
+ flushtlb = 1;
+ kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
+ }
+ }
+
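+ /* flush the TLB if the mapping doesn't end on a superpte boundary */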
+ if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
+ flushtlb = 1;
+
+ wmb();
+
+ if (flushtlb) {
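+ /* mark every device as needing a TLB flush */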
+ *tlb_flags = UINT_MAX;
+ GSL_TLBFLUSH_FILTER_RESET();
+ }
+
+ return 0;
+}
+
+static void kgsl_gpummu_stop(struct kgsl_mmu *mmu)
+{
+ kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
+ mmu->flags &= ~KGSL_FLAGS_STARTED;
+}
+
+static int kgsl_gpummu_close(struct kgsl_mmu *mmu)
+{
+ if (mmu->setstate_memory.gpuaddr)
+ kgsl_sharedmem_free(&mmu->setstate_memory);
+
+ if (mmu->defaultpagetable)
+ kgsl_mmu_putpagetable(mmu->defaultpagetable);
+
+ return 0;
+}
+
+static unsigned int
+kgsl_gpummu_get_current_ptbase(struct kgsl_mmu *mmu)
+{
+ unsigned int ptbase;
+ kgsl_regread(mmu->device, MH_MMU_PT_BASE, &ptbase);
+ return ptbase;
+}
+
+static unsigned int
+kgsl_gpummu_pt_get_base_addr(struct kgsl_pagetable *pt)
+{
+ struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
+ return gpummu_pt->base.gpuaddr;
+}
+
+struct kgsl_mmu_ops gpummu_ops = {
+ .mmu_init = kgsl_gpummu_init,
+ .mmu_close = kgsl_gpummu_close,
+ .mmu_start = kgsl_gpummu_start,
+ .mmu_stop = kgsl_gpummu_stop,
+ .mmu_setstate = kgsl_gpummu_setstate,
+ .mmu_device_setstate = kgsl_gpummu_default_setstate,
+ .mmu_pagefault = kgsl_gpummu_pagefault,
+ .mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
+ .mmu_enable_clk = NULL,
+ .mmu_disable_clk_on_ts = NULL,
+ .mmu_get_pt_lsb = NULL,
+ .mmu_get_reg_map_desc = NULL,
+};
+
+struct kgsl_mmu_pt_ops gpummu_pt_ops = {
+ .mmu_map = kgsl_gpummu_map,
+ .mmu_unmap = kgsl_gpummu_unmap,
+ .mmu_create_pagetable = kgsl_gpummu_create_pagetable,
+ .mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
+ .mmu_pt_equal = kgsl_gpummu_pt_equal,
+ .mmu_pt_get_base_addr = kgsl_gpummu_pt_get_base_addr,
+};
diff --git a/drivers/gpu/msm/kgsl_gpummu.h b/drivers/gpu/msm/kgsl_gpummu.h
new file mode 100644
index 0000000..d49a430
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_gpummu.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_GPUMMU_H
+#define __KGSL_GPUMMU_H
+
+#define GSL_PT_PAGE_BITS_MASK 0x00000007
+#define GSL_PT_PAGE_ADDR_MASK PAGE_MASK
+
+#define GSL_MMU_INT_MASK \
+ (MH_INTERRUPT_MASK__AXI_READ_ERROR | \
+ MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
+
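+ /*
+ * The TLB flush filter keeps one dirty bit per group of GSL_PT_SUPER_PTE
+ * entries, so a full TLB invalidate is only forced when a previously
+ * dirtied region is mapped again.
+ */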
+#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
+#define GSL_TLBFLUSH_FILTER_GET(superpte) \
+ (*((unsigned char *) \
+ (((unsigned int)gpummu_pt->tlbflushfilter.base) \
+ + (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
+#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
+ (GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
+ (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
+#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
+ (GSL_TLBFLUSH_FILTER_GET((superpte)) & \
+ (1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
+#define GSL_TLBFLUSH_FILTER_RESET() memset(gpummu_pt->tlbflushfilter.base,\
+ 0, gpummu_pt->tlbflushfilter.size)
+
+extern struct kgsl_mmu_ops gpummu_ops;
+extern struct kgsl_mmu_pt_ops gpummu_pt_ops;
+
+struct kgsl_tlbflushfilter {
+ unsigned int *base;
+ unsigned int size;
+};
+
+struct kgsl_gpummu_pt {
+ struct kgsl_memdesc base;
+ unsigned int last_superpte;
+
+ struct kgsl_tlbflushfilter tlbflushfilter;
+};
+
+struct kgsl_ptpool_chunk {
+ size_t size;
+ unsigned int count;
+ int dynamic;
+
+ void *data;
+ unsigned int phys;
+
+ unsigned long *bitmap;
+ struct list_head list;
+};
+
+struct kgsl_ptpool {
+ size_t ptsize;
+ struct mutex lock;
+ struct list_head list;
+ int entries;
+ int static_entries;
+ int chunks;
+};
+
+void *kgsl_gpummu_ptpool_init(int entries);
+void kgsl_gpummu_ptpool_destroy(void *ptpool);
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
new file mode 100644
index 0000000..dc517ae
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -0,0 +1,1041 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/msm_kgsl.h>
+#include <mach/socinfo.h>
+#include <mach/msm_iomap.h>
+#include <mach/board.h>
+#include <stddef.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+#include "kgsl_mmu.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_iommu.h"
+#include "adreno_pm4types.h"
+#include "adreno.h"
+#include "kgsl_trace.h"
+#include "z180.h"
+
+
+struct remote_iommu_petersons_spinlock kgsl_iommu_sync_lock_vars;
+
+static struct kgsl_iommu_unit *get_iommu_unit(struct device *dev)
+{
+ int i, j, k;
+
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ struct kgsl_mmu *mmu;
+ struct kgsl_iommu *iommu;
+
+ if (kgsl_driver.devp[i] == NULL)
+ continue;
+
+ mmu = kgsl_get_mmu(kgsl_driver.devp[i]);
+ if (mmu == NULL || mmu->priv == NULL)
+ continue;
+
+ iommu = mmu->priv;
+
+ for (j = 0; j < iommu->unit_count; j++) {
+ struct kgsl_iommu_unit *iommu_unit =
+ &iommu->iommu_units[j];
+ for (k = 0; k < iommu_unit->dev_count; k++) {
+ if (iommu_unit->dev[k].dev == dev)
+ return iommu_unit;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static struct kgsl_iommu_device *get_iommu_device(struct kgsl_iommu_unit *unit,
+ struct device *dev)
+{
+ int k;
+
+ for (k = 0; unit && k < unit->dev_count; k++) {
+ if (unit->dev[k].dev == dev)
+ return &(unit->dev[k]);
+ }
+
+ return NULL;
+}
+
+static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long addr, int flags)
+{
+ struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
+ struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
+ unsigned int ptbase, fsr;
+ static unsigned long last_pagefault_jiffies;
+ static int last_pid;
+ int current_pid;
+ unsigned long wait_time_jiff = 0;
+
+ if (!iommu_dev) {
+ KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
+ return -ENOSYS;
+ }
+
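+ /* print at most one fault per 500ms unless the faulting process changes */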
+ wait_time_jiff = last_pagefault_jiffies + msecs_to_jiffies(500);
+ last_pagefault_jiffies = jiffies;
+
+ ptbase = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
+ iommu_dev->ctx_id, TTBR0);
+ current_pid = kgsl_mmu_get_ptname_from_ptbase(ptbase);
+
+ if ((last_pid != current_pid) ||
+ (time_after(jiffies, wait_time_jiff))
+ ) {
+ fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
+ iommu_dev->ctx_id, FSR);
+
+ KGSL_MEM_CRIT(iommu_dev->kgsldev,
+ "GPU PAGE FAULT: addr = %lX pid = %d\n",
+ addr, kgsl_mmu_get_ptname_from_ptbase(ptbase));
+ KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
+ iommu_dev->ctx_id, fsr);
+
+ last_pid = current_pid;
+ }
+
+ trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
+ kgsl_mmu_get_ptname_from_ptbase(ptbase), 0);
+
+ return 0;
+}
+
+static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ int i, j;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ if (!iommu_unit->dev[j].clk_enabled)
+ continue;
+ iommu_drvdata = dev_get_drvdata(
+ iommu_unit->dev[j].dev->parent);
+ if (iommu_drvdata->clk)
+ clk_disable_unprepare(iommu_drvdata->clk);
+ clk_disable_unprepare(iommu_drvdata->pclk);
+ iommu_unit->dev[j].clk_enabled = false;
+ }
+ }
+}
+
+static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
+ unsigned int id, unsigned int ts)
+{
+ struct kgsl_mmu *mmu = data;
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (!iommu->clk_event_queued) {
+ if (0 > timestamp_cmp(ts, iommu->iommu_last_cmd_ts))
+ KGSL_DRV_ERR(device,
+ "IOMMU disable clock event being cancelled, "
+ "iommu_last_cmd_ts: %x, retired ts: %x\n",
+ iommu->iommu_last_cmd_ts, ts);
+ return;
+ }
+
+ if (0 <= timestamp_cmp(ts, iommu->iommu_last_cmd_ts)) {
+ kgsl_iommu_disable_clk(mmu);
+ iommu->clk_event_queued = false;
+ } else {
+ if (kgsl_add_event(device, id, iommu->iommu_last_cmd_ts,
+ kgsl_iommu_clk_disable_event, mmu, mmu)) {
+ KGSL_DRV_ERR(device,
+ "Failed to add IOMMU disable clk event\n");
+ iommu->clk_event_queued = false;
+ }
+ }
+}
+
+static void
+kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu, unsigned int ts,
+ bool ts_valid)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (iommu->clk_event_queued) {
+ if (ts_valid && (0 <
+ timestamp_cmp(ts, iommu->iommu_last_cmd_ts)))
+ iommu->iommu_last_cmd_ts = ts;
+ } else {
+ if (ts_valid) {
+ iommu->iommu_last_cmd_ts = ts;
+ iommu->clk_event_queued = true;
+ if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
+ ts, kgsl_iommu_clk_disable_event, mmu, mmu)) {
+ KGSL_DRV_ERR(mmu->device,
+ "Failed to add IOMMU disable clk event\n");
+ iommu->clk_event_queued = false;
+ }
+ } else {
+ kgsl_iommu_disable_clk(mmu);
+ }
+ }
+}
+
+static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
+ int ctx_id)
+{
+ int ret = 0;
+ int i, j;
+ struct kgsl_iommu *iommu = mmu->priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ if (iommu_unit->dev[j].clk_enabled ||
+ ctx_id != iommu_unit->dev[j].ctx_id)
+ continue;
+ iommu_drvdata =
+ dev_get_drvdata(iommu_unit->dev[j].dev->parent);
+ ret = clk_prepare_enable(iommu_drvdata->pclk);
+ if (ret)
+ goto done;
+ if (iommu_drvdata->clk) {
+ ret = clk_prepare_enable(iommu_drvdata->clk);
+ if (ret) {
+ clk_disable_unprepare(
+ iommu_drvdata->pclk);
+ goto done;
+ }
+ }
+ iommu_unit->dev[j].clk_enabled = true;
+ }
+ }
+done:
+ if (ret)
+ kgsl_iommu_disable_clk(mmu);
+ return ret;
+}
+
+static int kgsl_iommu_pt_equal(struct kgsl_pagetable *pt,
+ unsigned int pt_base)
+{
+ struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
+ unsigned int domain_ptbase = iommu_pt ?
+ iommu_get_pt_base_addr(iommu_pt->domain) : 0;
+
+ domain_ptbase &= (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT);
+ pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT);
+ return domain_ptbase && pt_base &&
+ (domain_ptbase == pt_base);
+}
+
+static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
+{
+ struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+ if (iommu_pt->domain)
+ iommu_domain_free(iommu_pt->domain);
+ kfree(iommu_pt);
+}
+
+void *kgsl_iommu_create_pagetable(void)
+{
+ struct kgsl_iommu_pt *iommu_pt;
+
+ iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
+ if (!iommu_pt) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(struct kgsl_iommu_pt));
+ return NULL;
+ }
+ iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
+ MSM_IOMMU_DOMAIN_PT_CACHEABLE);
+ if (!iommu_pt->domain) {
+ KGSL_CORE_ERR("Failed to create iommu domain\n");
+ kfree(iommu_pt);
+ return NULL;
+ } else {
+ iommu_set_fault_handler(iommu_pt->domain,
+ kgsl_iommu_fault_handler);
+ }
+
+ return iommu_pt;
+}
+
+static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu_pt *iommu_pt;
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ iommu_pt = mmu->defaultpagetable->priv;
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ if (mmu->priv_bank_table &&
+ (KGSL_IOMMU_CONTEXT_PRIV == j))
+ iommu_pt = mmu->priv_bank_table->priv;
+ if (iommu_unit->dev[j].attached) {
+ iommu_detach_device(iommu_pt->domain,
+ iommu_unit->dev[j].dev);
+ iommu_unit->dev[j].attached = false;
+ KGSL_MEM_INFO(mmu->device, "iommu %p detached "
+ "from user dev of MMU: %p\n",
+ iommu_pt->domain, mmu);
+ }
+ }
+ }
+}
+
+static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu_pt *iommu_pt;
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j, ret = 0;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ iommu_pt = mmu->defaultpagetable->priv;
+ for (j = 0; j < iommu_unit->dev_count; j++) {
+ if (mmu->priv_bank_table &&
+ (KGSL_IOMMU_CONTEXT_PRIV == j))
+ iommu_pt = mmu->priv_bank_table->priv;
+ if (!iommu_unit->dev[j].attached) {
+ ret = iommu_attach_device(iommu_pt->domain,
+ iommu_unit->dev[j].dev);
+ if (ret) {
+ KGSL_MEM_ERR(mmu->device,
+ "Failed to attach device, err %d\n",
+ ret);
+ goto done;
+ }
+ iommu_unit->dev[j].attached = true;
+ KGSL_MEM_INFO(mmu->device,
+ "iommu pt %p attached to dev %p, ctx_id %d\n",
+ iommu_pt->domain, iommu_unit->dev[j].dev,
+ iommu_unit->dev[j].ctx_id);
+ }
+ }
+ }
+done:
+ return ret;
+}
+
+static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
+ struct kgsl_device_iommu_data *data, unsigned int unit_id)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
+ int i;
+
+ if (data->iommu_ctx_count > KGSL_IOMMU_MAX_DEVS_PER_UNIT) {
+ KGSL_CORE_ERR("Too many iommu devices defined for an "
+ "IOMMU unit\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < data->iommu_ctx_count; i++) {
+ if (!data->iommu_ctxs[i].iommu_ctx_name)
+ continue;
+
+ iommu_unit->dev[iommu_unit->dev_count].dev =
+ msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
+ if (iommu_unit->dev[iommu_unit->dev_count].dev == NULL) {
+ KGSL_CORE_ERR("Failed to get iommu dev handle for "
+ "device %s\n", data->iommu_ctxs[i].iommu_ctx_name);
+ return -EINVAL;
+ }
+ if (KGSL_IOMMU_CONTEXT_USER != data->iommu_ctxs[i].ctx_id &&
+ KGSL_IOMMU_CONTEXT_PRIV != data->iommu_ctxs[i].ctx_id) {
+ KGSL_CORE_ERR("Invalid context ID defined: %d\n",
+ data->iommu_ctxs[i].ctx_id);
+ return -EINVAL;
+ }
+ iommu_unit->dev[iommu_unit->dev_count].ctx_id =
+ data->iommu_ctxs[i].ctx_id;
+ iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;
+
+ KGSL_DRV_INFO(mmu->device,
+ "Obtained dev handle %p for iommu context %s\n",
+ iommu_unit->dev[iommu_unit->dev_count].dev,
+ data->iommu_ctxs[i].iommu_ctx_name);
+
+ iommu_unit->dev_count++;
+ }
+
+ return 0;
+}
+
+static int kgsl_iommu_init_sync_lock(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ int status = 0;
+ struct kgsl_pagetable *pagetable = NULL;
+ uint32_t lock_gpu_addr = 0;
+ uint32_t lock_phy_addr = 0;
+ uint32_t page_offset = 0;
+
+ iommu->sync_lock_initialized = 0;
+
+ if (!(mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC)) {
+ KGSL_DRV_ERR(mmu->device,
+ "The GPU microcode does not support IOMMUv1 sync opcodes\n");
+ return -ENXIO;
+ }
+
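+ /* get the physical address of the lock variables in shared memory */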
+ lock_phy_addr = (msm_iommu_lock_initialize()
+ - MSM_SHARED_RAM_BASE + msm_shared_ram_phys);
+
+ if (!lock_phy_addr) {
+ KGSL_DRV_ERR(mmu->device,
+ "GPU CPU sync lock is not supported by kernel\n");
+ return -ENXIO;
+ }
+
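+ /* align the lock address to a page boundary and keep the page offset */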
+ page_offset = (lock_phy_addr & (PAGE_SIZE - 1));
+ lock_phy_addr = (lock_phy_addr & ~(PAGE_SIZE - 1));
+ iommu->sync_lock_desc.physaddr = (unsigned int)lock_phy_addr;
+
+ iommu->sync_lock_desc.size =
+ PAGE_ALIGN(sizeof(kgsl_iommu_sync_lock_vars));
+ status = memdesc_sg_phys(&iommu->sync_lock_desc,
+ iommu->sync_lock_desc.physaddr,
+ iommu->sync_lock_desc.size);
+
+ if (status)
+ return status;
+
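+ /* mark the lock memory global so it is mapped in every pagetable */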
+ iommu->sync_lock_desc.priv |= KGSL_MEMFLAGS_GLOBAL;
+
+ pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
+ mmu->defaultpagetable;
+
+ status = kgsl_mmu_map(pagetable, &iommu->sync_lock_desc,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+ if (status) {
+ kgsl_mmu_unmap(pagetable, &iommu->sync_lock_desc);
+ iommu->sync_lock_desc.priv &= ~KGSL_MEMFLAGS_GLOBAL;
+ return status;
+ }
+
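+ /* compute the GPU virtual addresses of the lock variables */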
+ lock_gpu_addr = (iommu->sync_lock_desc.gpuaddr + page_offset);
+
+ kgsl_iommu_sync_lock_vars.flag[PROC_APPS] = (lock_gpu_addr +
+ (offsetof(struct remote_iommu_petersons_spinlock,
+ flag[PROC_APPS])));
+ kgsl_iommu_sync_lock_vars.flag[PROC_GPU] = (lock_gpu_addr +
+ (offsetof(struct remote_iommu_petersons_spinlock,
+ flag[PROC_GPU])));
+ kgsl_iommu_sync_lock_vars.turn = (lock_gpu_addr +
+ (offsetof(struct remote_iommu_petersons_spinlock, turn)));
+
+ iommu->sync_lock_vars = &kgsl_iommu_sync_lock_vars;
+
+ iommu->sync_lock_initialized = 1;
+
+ return status;
+}
+
+inline unsigned int kgsl_iommu_sync_lock(struct kgsl_mmu *mmu,
+ unsigned int *cmds)
+{
+ struct kgsl_device *device = mmu->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ struct remote_iommu_petersons_spinlock *lock_vars =
+ iommu->sync_lock_vars;
+ unsigned int *start = cmds;
+
+ if (!iommu->sync_lock_initialized)
+ return 0;
+
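+ /* GPU side of the Peterson's lock: raise our flag, give away the
+ * turn, then wait until the CPU side is no longer contending */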
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 1;
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
+
+ *cmds++ = 0x13;
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = lock_vars->turn;
+ *cmds++ = 0;
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
+
+ *cmds++ = 0x13;
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+
+ *cmds++ = cp_type3_packet(CP_TEST_TWO_MEMS, 3);
+ *cmds++ = lock_vars->flag[PROC_APPS];
+ *cmds++ = lock_vars->turn;
+ *cmds++ = 0;
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ return cmds - start;
+}
+
+inline unsigned int kgsl_iommu_sync_unlock(struct kgsl_mmu *mmu,
+ unsigned int *cmds)
+{
+ struct kgsl_device *device = mmu->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ struct remote_iommu_petersons_spinlock *lock_vars =
+ iommu->sync_lock_vars;
+ unsigned int *start = cmds;
+
+ if (!iommu->sync_lock_initialized)
+ return 0;
+
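+ /* leave the critical section by clearing the GPU flag */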
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 0;
+
+ *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
+
+ *cmds++ = 0x13;
+ *cmds++ = lock_vars->flag[PROC_GPU];
+ *cmds++ = 0x0;
+ *cmds++ = 0x1;
+ *cmds++ = 0x1;
+
+ cmds += adreno_add_idle_cmds(adreno_dev, cmds);
+
+ return cmds - start;
+}
+
+static int kgsl_get_iommu_ctxt(struct kgsl_mmu *mmu)
+{
+ struct platform_device *pdev =
+ container_of(mmu->device->parentdev, struct platform_device,
+ dev);
+ struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ int i, ret = 0;
+
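+ /* gather the iommu context devices for every iommu unit */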
+ if (KGSL_IOMMU_MAX_UNITS < pdata_dev->iommu_count) {
+ KGSL_CORE_ERR("Too many IOMMU units defined\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ for (i = 0; i < pdata_dev->iommu_count; i++) {
+ ret = _get_iommu_ctxs(mmu, &pdata_dev->iommu_data[i], i);
+ if (ret)
+ break;
+ }
+ iommu->unit_count = pdata_dev->iommu_count;
+done:
+ return ret;
+}
+
+static int kgsl_set_register_map(struct kgsl_mmu *mmu)
+{
+ struct platform_device *pdev =
+ container_of(mmu->device->parentdev, struct platform_device,
+ dev);
+ struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
+ struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+ struct kgsl_iommu_unit *iommu_unit;
+ int i = 0, ret = 0;
+
+ for (; i < pdata_dev->iommu_count; i++) {
+ struct kgsl_device_iommu_data data = pdata_dev->iommu_data[i];
+ iommu_unit = &iommu->iommu_units[i];
+
+ if (!data.physstart || !data.physend) {
+ KGSL_CORE_ERR("The register range for IOMMU unit not"
+ " specified\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ iommu_unit->reg_map.hostptr = ioremap(data.physstart,
+ data.physend - data.physstart + 1);
+ if (!iommu_unit->reg_map.hostptr) {
+ KGSL_CORE_ERR("Failed to map SMMU register address "
+ "space from %x to %x\n", data.physstart,
+ data.physend - data.physstart + 1);
+ ret = -ENOMEM;
+ i--;
+ goto err;
+ }
+ iommu_unit->reg_map.size = data.physend - data.physstart + 1;
+ iommu_unit->reg_map.physaddr = data.physstart;
+ memdesc_sg_phys(&iommu_unit->reg_map, data.physstart,
+ iommu_unit->reg_map.size);
+ }
+ iommu->unit_count = pdata_dev->iommu_count;
+ return ret;
+err:
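+ /* unwind the register maps created so far */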
+ for (; i >= 0; i--) {
+ iommu_unit = &iommu->iommu_units[i];
+ iounmap(iommu_unit->reg_map.hostptr);
+ iommu_unit->reg_map.size = 0;
+ iommu_unit->reg_map.physaddr = 0;
+ }
+ return ret;
+}
+
+static unsigned int kgsl_iommu_pt_get_base_addr(struct kgsl_pagetable *pt)
+{
+ struct kgsl_iommu_pt *iommu_pt = pt->priv;
+ return iommu_get_pt_base_addr(iommu_pt->domain);
+}
+
+static int kgsl_iommu_get_pt_lsb(struct kgsl_mmu *mmu,
+ unsigned int unit_id,
+ enum kgsl_iommu_context_id ctx_id)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j;
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++)
+ if (unit_id == i &&
+ ctx_id == iommu_unit->dev[j].ctx_id)
+ return iommu_unit->dev[j].pt_lsb;
+ }
+ return 0;
+}
+
+static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id)
+{
+ if (mmu->flags & KGSL_FLAGS_STARTED) {
+ if (mmu->hwpagetable != pagetable) {
+ unsigned int flags = 0;
+ mmu->hwpagetable = pagetable;
+ flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
+ mmu->device->id) |
+ KGSL_MMUFLAGS_TLBFLUSH;
+ kgsl_setstate(mmu, context_id,
+ KGSL_MMUFLAGS_PTUPDATE | flags);
+ }
+ }
+}
+
+static int kgsl_iommu_init(struct kgsl_mmu *mmu)
+{
+ int status = 0;
+ struct kgsl_iommu *iommu;
+
+ iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
+ if (!iommu) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(struct kgsl_iommu));
+ return -ENOMEM;
+ }
+
+ mmu->priv = iommu;
+ status = kgsl_get_iommu_ctxt(mmu);
+ if (status)
+ goto done;
+ status = kgsl_set_register_map(mmu);
+ if (status)
+ goto done;
+
+ kgsl_sharedmem_writel(&mmu->setstate_memory,
+ KGSL_IOMMU_SETSTATE_NOP_OFFSET,
+ cp_nop_packet(1));
+
+ dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
+ __func__);
+done:
+ if (status) {
+ kfree(iommu);
+ mmu->priv = NULL;
+ }
+ return status;
+}
+
+static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
+{
+ int status = 0;
+ int i = 0;
+ struct kgsl_iommu *iommu = mmu->priv;
+ struct kgsl_iommu_pt *iommu_pt;
+ struct kgsl_pagetable *pagetable = NULL;
+
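+ /* all targets except 8960 use a separate pagetable for the priv bank */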
+ if (!cpu_is_msm8960()) {
+ mmu->priv_bank_table =
+ kgsl_mmu_getpagetable(KGSL_MMU_PRIV_BANK_TABLE_NAME);
+ if (mmu->priv_bank_table == NULL) {
+ status = -ENOMEM;
+ goto err;
+ }
+ iommu_pt = mmu->priv_bank_table->priv;
+ }
+ mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+
+ if (mmu->defaultpagetable == NULL) {
+ status = -ENOMEM;
+ goto err;
+ }
+ pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
+ mmu->defaultpagetable;
+
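+ /* map each iommu unit's register space into the pagetable */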
+ for (i = 0; i < iommu->unit_count; i++) {
+ iommu->iommu_units[i].reg_map.priv |= KGSL_MEMFLAGS_GLOBAL;
+ status = kgsl_mmu_map(pagetable,
+ &(iommu->iommu_units[i].reg_map),
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ if (status) {
+ iommu->iommu_units[i].reg_map.priv &=
+ ~KGSL_MEMFLAGS_GLOBAL;
+ goto err;
+ }
+ }
+ return status;
+err:
+ for (i--; i >= 0; i--) {
+ kgsl_mmu_unmap(pagetable,
+ &(iommu->iommu_units[i].reg_map));
+ iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMFLAGS_GLOBAL;
+ }
+ if (mmu->priv_bank_table) {
+ kgsl_mmu_putpagetable(mmu->priv_bank_table);
+ mmu->priv_bank_table = NULL;
+ }
+ if (mmu->defaultpagetable) {
+ kgsl_mmu_putpagetable(mmu->defaultpagetable);
+ mmu->defaultpagetable = NULL;
+ }
+ return status;
+}
+
+static int kgsl_iommu_start(struct kgsl_mmu *mmu)
+{
+ struct kgsl_device *device = mmu->device;
+ int status;
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i, j;
+
+ if (mmu->flags & KGSL_FLAGS_STARTED)
+ return 0;
+
+ if (mmu->defaultpagetable == NULL) {
+ status = kgsl_iommu_setup_defaultpagetable(mmu);
+ if (status)
+ return -ENOMEM;
+
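+ /* set up the CPU-GPU sync lock on targets with iommu v1 */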
+ if (msm_soc_version_supports_iommu_v1() &&
+ (device->id == KGSL_DEVICE_3D0))
+ kgsl_iommu_init_sync_lock(mmu);
+ }
+
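+ /* 8960 keeps the legacy MH MMU enabled, with an MPU range that
+ * ends below the iommu register mapping */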
+ if (cpu_is_msm8960()) {
+ struct kgsl_mh *mh = &(mmu->device->mh);
+ kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
+ kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
+ mh->mpu_base +
+ iommu->iommu_units
+ [iommu->unit_count - 1].reg_map.gpuaddr -
+ PAGE_SIZE);
+ } else {
+ kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
+ }
+
+ mmu->hwpagetable = mmu->defaultpagetable;
+
+ status = kgsl_attach_pagetable_iommu_domain(mmu);
+ if (status) {
+ mmu->hwpagetable = NULL;
+ goto done;
+ }
+ status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ if (status) {
+ KGSL_CORE_ERR("clk enable failed\n");
+ goto done;
+ }
+ status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
+ if (status) {
+ KGSL_CORE_ERR("clk enable failed\n");
+ goto done;
+ }
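+
+ /* cache the non-address bits of each context's TTBR0 for later
+ * pagetable switches */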
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+ for (j = 0; j < iommu_unit->dev_count; j++)
+ iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(
+ KGSL_IOMMU_GET_IOMMU_REG(
+ iommu_unit->reg_map.hostptr,
+ iommu_unit->dev[j].ctx_id,
+ TTBR0));
+ }
+
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ mmu->flags |= KGSL_FLAGS_STARTED;
+
+done:
+ if (status) {
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ kgsl_detach_pagetable_iommu_domain(mmu);
+ }
+ return status;
+}
+
+static int
+kgsl_iommu_unmap(void *mmu_specific_pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int *tlb_flags)
+{
+ int ret;
+ unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+ struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+
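+ /* callers may add an offset to gpuaddr, so mask it back to the aligned base */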
+ unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
+
+ if (range == 0 || gpuaddr == 0)
+ return 0;
+
+ ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
+ if (ret)
+ KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
+ "with err: %d\n", iommu_pt->domain, gpuaddr,
+ range, ret);
+
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+ if (!ret)
+ *tlb_flags = UINT_MAX;
+#endif
+ return 0;
+}
+
+static int
+kgsl_iommu_map(void *mmu_specific_pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int protflags,
+ unsigned int *tlb_flags)
+{
+ int ret;
+ unsigned int iommu_virt_addr;
+ struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+ int size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+
+ BUG_ON(NULL == iommu_pt);
+
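+ /* the GPU address was already reserved from the pagetable pool */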
+ iommu_virt_addr = memdesc->gpuaddr;
+
+ ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg,
+ size, (IOMMU_READ | IOMMU_WRITE));
+ if (ret) {
+ KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
+ "failed with err: %d\n", iommu_pt->domain,
+ iommu_virt_addr, memdesc->sg, size,
+ (IOMMU_READ | IOMMU_WRITE), ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (mmu->flags & KGSL_FLAGS_STARTED) {
+ kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
+
+ kgsl_detach_pagetable_iommu_domain(mmu);
+ mmu->hwpagetable = NULL;
+
+ mmu->flags &= ~KGSL_FLAGS_STARTED;
+ }
+
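+ /* drop any queued clock events and switch the iommu clocks off */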
+ iommu->clk_event_queued = false;
+ kgsl_cancel_events(mmu->device, mmu);
+ kgsl_iommu_disable_clk(mmu);
+}
+
+static int kgsl_iommu_close(struct kgsl_mmu *mmu)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i;
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_pagetable *pagetable = (mmu->priv_bank_table ?
+ mmu->priv_bank_table : mmu->defaultpagetable);
+ if (iommu->iommu_units[i].reg_map.gpuaddr)
+ kgsl_mmu_unmap(pagetable,
+ &(iommu->iommu_units[i].reg_map));
+ if (iommu->iommu_units[i].reg_map.hostptr)
+ iounmap(iommu->iommu_units[i].reg_map.hostptr);
+ kgsl_sg_free(iommu->iommu_units[i].reg_map.sg,
+ iommu->iommu_units[i].reg_map.sglen);
+ }
+
+ if (mmu->priv_bank_table)
+ kgsl_mmu_putpagetable(mmu->priv_bank_table);
+ if (mmu->defaultpagetable)
+ kgsl_mmu_putpagetable(mmu->defaultpagetable);
+ kfree(iommu);
+
+ return 0;
+}
+
+static unsigned int
+kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
+{
+ unsigned int pt_base;
+ struct kgsl_iommu *iommu = mmu->priv;
+ if (in_interrupt())
+ return 0;
+
+ kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+ pt_base = readl_relaxed(iommu->iommu_units[0].reg_map.hostptr +
+ (KGSL_IOMMU_CONTEXT_USER << KGSL_IOMMU_CTX_SHIFT) +
+ KGSL_IOMMU_TTBR0);
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+ return pt_base & (KGSL_IOMMU_TTBR0_PA_MASK <<
+ KGSL_IOMMU_TTBR0_PA_SHIFT);
+}
+
+static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
+ uint32_t flags)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int temp;
+ int i;
+ unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
+ mmu->hwpagetable);
+ unsigned int pt_val;
+
+ if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
+ KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
+ return;
+ }
+
+ pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);
+
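+ /* iommu v1 requires an idle GPU before the pagetable switch */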
+ if (msm_soc_version_supports_iommu_v1())
+ kgsl_idle(mmu->device);
+
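+ /* acquire the CPU-GPU sync lock */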
+ msm_iommu_lock();
+
+ if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+ if (!msm_soc_version_supports_iommu_v1())
+ kgsl_idle(mmu->device);
+ for (i = 0; i < iommu->unit_count; i++) {
+ pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
+ KGSL_IOMMU_CONTEXT_USER);
+ pt_val += pt_base;
+
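+ /* program TTBR0 with the new pagetable base, keeping the context bits */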
+ KGSL_IOMMU_SET_IOMMU_REG(
+ iommu->iommu_units[i].reg_map.hostptr,
+ KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);
+
+ mb();
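+ /* read TTBR0 back to make sure the write has posted */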
+ temp = KGSL_IOMMU_GET_IOMMU_REG(
+ iommu->iommu_units[i].reg_map.hostptr,
+ KGSL_IOMMU_CONTEXT_USER, TTBR0);
+ }
+ }
+
+ if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+ for (i = 0; i < iommu->unit_count; i++) {
+ KGSL_IOMMU_SET_IOMMU_REG(
+ iommu->iommu_units[i].reg_map.hostptr,
+ KGSL_IOMMU_CONTEXT_USER, CTX_TLBIALL,
+ 1);
+ mb();
+ }
+ }
+
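+ /* release the CPU-GPU sync lock */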
+ msm_iommu_unlock();
+
+ kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
+}
+
+static int kgsl_iommu_get_reg_map_desc(struct kgsl_mmu *mmu,
+ void **reg_map_desc)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ void **reg_desc_ptr;
+ int i;
+
+ reg_desc_ptr = kmalloc(iommu->unit_count *
+ sizeof(struct kgsl_memdesc *), GFP_KERNEL);
+ if (!reg_desc_ptr) {
+ KGSL_CORE_ERR("Failed to kmalloc(%d)\n",
+ iommu->unit_count * sizeof(struct kgsl_memdesc *));
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < iommu->unit_count; i++)
+ reg_desc_ptr[i] = &(iommu->iommu_units[i].reg_map);
+
+ *reg_map_desc = reg_desc_ptr;
+ return i;
+}
+
+struct kgsl_mmu_ops iommu_ops = {
+ .mmu_init = kgsl_iommu_init,
+ .mmu_close = kgsl_iommu_close,
+ .mmu_start = kgsl_iommu_start,
+ .mmu_stop = kgsl_iommu_stop,
+ .mmu_setstate = kgsl_iommu_setstate,
+ .mmu_device_setstate = kgsl_iommu_default_setstate,
+ .mmu_pagefault = NULL,
+ .mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
+ .mmu_enable_clk = kgsl_iommu_enable_clk,
+ .mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
+ .mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
+ .mmu_get_reg_map_desc = kgsl_iommu_get_reg_map_desc,
+ .mmu_sync_lock = kgsl_iommu_sync_lock,
+ .mmu_sync_unlock = kgsl_iommu_sync_unlock,
+};
+
+struct kgsl_mmu_pt_ops iommu_pt_ops = {
+ .mmu_map = kgsl_iommu_map,
+ .mmu_unmap = kgsl_iommu_unmap,
+ .mmu_create_pagetable = kgsl_iommu_create_pagetable,
+ .mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
+ .mmu_pt_equal = kgsl_iommu_pt_equal,
+ .mmu_pt_get_base_addr = kgsl_iommu_pt_get_base_addr,
+};
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
new file mode 100644
index 0000000..3389f08
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_IOMMU_H
+#define __KGSL_IOMMU_H
+
+#include <mach/iommu.h>
+
+#define KGSL_IOMMU_TTBR0 0x10
+#define KGSL_IOMMU_TTBR1 0x14
+#define KGSL_IOMMU_FSR 0x20
+
+#define KGSL_IOMMU_TTBR0_PA_MASK 0x0003FFFF
+#define KGSL_IOMMU_TTBR0_PA_SHIFT 14
+#define KGSL_IOMMU_CTX_TLBIALL 0x800
+#define KGSL_IOMMU_CTX_SHIFT 12
+
+#define KGSL_IOMMU_MAX_UNITS 2
+
+#define KGSL_IOMMU_MAX_DEVS_PER_UNIT 2
+
+#define KGSL_IOMMU_SET_IOMMU_REG(base_addr, ctx, REG, val) \
+ writel_relaxed(val, base_addr + \
+ (ctx << KGSL_IOMMU_CTX_SHIFT) + \
+ KGSL_IOMMU_##REG)
+
+#define KGSL_IOMMU_GET_IOMMU_REG(base_addr, ctx, REG) \
+ readl_relaxed(base_addr + \
+ (ctx << KGSL_IOMMU_CTX_SHIFT) + \
+ KGSL_IOMMU_##REG)
+
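+ /* extract the low, non physical address bits of a TTBR0 value */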
+#define KGSL_IOMMMU_PT_LSB(pt_val) \
+ (pt_val & ~(KGSL_IOMMU_TTBR0_PA_MASK << \
+ KGSL_IOMMU_TTBR0_PA_SHIFT))
+
+#define KGSL_IOMMU_SETSTATE_NOP_OFFSET 1024
+
+struct kgsl_iommu_device {
+ struct device *dev;
+ bool attached;
+ unsigned int pt_lsb;
+ enum kgsl_iommu_context_id ctx_id;
+ bool clk_enabled;
+ struct kgsl_device *kgsldev;
+};
+
+struct kgsl_iommu_unit {
+ struct kgsl_iommu_device dev[KGSL_IOMMU_MAX_DEVS_PER_UNIT];
+ unsigned int dev_count;
+ struct kgsl_memdesc reg_map;
+};
+
+struct kgsl_iommu {
+ struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
+ unsigned int unit_count;
+ unsigned int iommu_last_cmd_ts;
+ bool clk_event_queued;
+ struct kgsl_device *device;
+ struct remote_iommu_petersons_spinlock *sync_lock_vars;
+ struct kgsl_memdesc sync_lock_desc;
+ bool sync_lock_initialized;
+};
+
+struct kgsl_iommu_pt {
+ struct iommu_domain *domain;
+ struct kgsl_iommu *iommu;
+};
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_log.h b/drivers/gpu/msm/kgsl_log.h
new file mode 100644
index 0000000..9c6e317
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_log.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_LOG_H
+#define __KGSL_LOG_H
+
+extern unsigned int kgsl_cff_dump_enable;
+
+#define KGSL_LOG_INFO(dev, lvl, fmt, args...) \
+ do { \
+ if ((lvl) >= 6) \
+ dev_info(dev, "|%s| " fmt, \
+ __func__, ##args);\
+ } while (0)
+
+#define KGSL_LOG_WARN(dev, lvl, fmt, args...) \
+ do { \
+ if ((lvl) >= 4) \
+ dev_warn(dev, "|%s| " fmt, \
+ __func__, ##args);\
+ } while (0)
+
+#define KGSL_LOG_ERR(dev, lvl, fmt, args...) \
+ do { \
+ if ((lvl) >= 3) \
+ dev_err(dev, "|%s| " fmt, \
+ __func__, ##args);\
+ } while (0)
+
+#define KGSL_LOG_CRIT(dev, lvl, fmt, args...) \
+ do { \
+ if ((lvl) >= 2) \
+ dev_crit(dev, "|%s| " fmt, \
+ __func__, ##args);\
+ } while (0)
+
+#define KGSL_LOG_POSTMORTEM_WRITE(_dev, fmt, args...) \
+ do { dev_crit(_dev->dev, fmt, ##args); } while (0)
+
+#define KGSL_LOG_DUMP(_dev, fmt, args...) dev_err(_dev->dev, fmt, ##args)
+
+#define KGSL_DEV_ERR_ONCE(_dev, fmt, args...) \
+({ \
+ static bool kgsl_dev_err_once; \
+ \
+ if (!kgsl_dev_err_once) { \
+ kgsl_dev_err_once = true; \
+ dev_crit(_dev->dev, "|%s| " fmt, __func__, ##args); \
+ } \
+})
+
+#define KGSL_DRV_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->drv_log, fmt, ##args)
+#define KGSL_DRV_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->drv_log, fmt, ##args)
+
+#define KGSL_CMD_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->cmd_log, fmt, ##args)
+#define KGSL_CMD_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->cmd_log, fmt, ##args)
+
+#define KGSL_CTXT_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->ctxt_log, fmt, ##args)
+#define KGSL_CTXT_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->ctxt_log, fmt, ##args)
+
+#define KGSL_MEM_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->mem_log, fmt, ##args)
+#define KGSL_MEM_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->mem_log, fmt, ##args)
+
+#define KGSL_PWR_INFO(_dev, fmt, args...) \
+KGSL_LOG_INFO(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_WARN(_dev, fmt, args...) \
+KGSL_LOG_WARN(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_ERR(_dev, fmt, args...) \
+KGSL_LOG_ERR(_dev->dev, _dev->pwr_log, fmt, ##args)
+#define KGSL_PWR_CRIT(_dev, fmt, args...) \
+KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
+
+
+#define KGSL_CORE_ERR(fmt, args...) \
+pr_err("kgsl: %s: " fmt, __func__, ##args)
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
new file mode 100644
index 0000000..df74c11
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -0,0 +1,813 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/iommu.h>
+#include <mach/socinfo.h>
+
+#include "kgsl.h"
+#include "kgsl_mmu.h"
+#include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
+#include "adreno_postmortem.h"
+
+#define KGSL_MMU_ALIGN_SHIFT 13
+#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
+
+static enum kgsl_mmutype kgsl_mmu_type;
+
+static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
+
+static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
+{
+ int i;
+
+ if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
+ (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
+ (KGSL_MMU_GLOBAL_PT != pt->name) &&
+ (KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
+ return 0;
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ struct kgsl_device *device = kgsl_driver.devp[i];
+ if (device)
+ device->ftbl->cleanup_pt(device, pt);
+ }
+ return 0;
+}
+
+
+static int kgsl_setup_pt(struct kgsl_pagetable *pt)
+{
+ int i = 0;
+ int status = 0;
+
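+ /* for IOMMU only the global and priv bank pagetables get device mappings */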
+ if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
+ (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
+ (KGSL_MMU_GLOBAL_PT != pt->name) &&
+ (KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
+ return 0;
+ for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+ struct kgsl_device *device = kgsl_driver.devp[i];
+ if (device) {
+ status = device->ftbl->setup_pt(device, pt);
+ if (status)
+ goto error_pt;
+ }
+ }
+ return status;
+error_pt:
+ while (i >= 0) {
+ struct kgsl_device *device = kgsl_driver.devp[i];
+ if (device)
+ device->ftbl->cleanup_pt(device, pt);
+ i--;
+ }
+ return status;
+}
+
+static void kgsl_destroy_pagetable(struct kref *kref)
+{
+ struct kgsl_pagetable *pagetable = container_of(kref,
+ struct kgsl_pagetable, refcount);
+ unsigned long flags;
+
+ spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+ list_del(&pagetable->list);
+ spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
+ pagetable_remove_sysfs_objects(pagetable);
+
+ kgsl_cleanup_pt(pagetable);
+
+ if (pagetable->kgsl_pool)
+ gen_pool_destroy(pagetable->kgsl_pool);
+ if (pagetable->pool)
+ gen_pool_destroy(pagetable->pool);
+
+ pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
+
+ kfree(pagetable);
+}
+
+static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
+{
+ if (pagetable)
+ kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
+}
+
+static struct kgsl_pagetable *
+kgsl_get_pagetable(unsigned long name)
+{
+ struct kgsl_pagetable *pt, *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+ list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+ if (pt->name == name) {
+ ret = pt;
+ kref_get(&ret->refcount);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+ return ret;
+}
+
+static struct kgsl_pagetable *
+_get_pt_from_kobj(struct kobject *kobj)
+{
+ unsigned long ptname;
+
+ if (!kobj)
+ return NULL;
+
+ if (sscanf(kobj->name, "%lu", &ptname) != 1)
+ return NULL;
+
+ return kgsl_get_pagetable(ptname);
+}
+
+static ssize_t
+sysfs_show_entries(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt)
+ ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static ssize_t
+sysfs_show_mapped(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt)
+ ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static ssize_t
+sysfs_show_va_range(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt) {
+ ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
+ kgsl_mmu_get_ptsize());
+ }
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static ssize_t
+sysfs_show_max_mapped(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt)
+ ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static ssize_t
+sysfs_show_max_entries(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ struct kgsl_pagetable *pt;
+ int ret = 0;
+
+ pt = _get_pt_from_kobj(kobj);
+
+ if (pt)
+ ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
+
+ kgsl_put_pagetable(pt);
+ return ret;
+}
+
+static struct kobj_attribute attr_entries = {
+ .attr = { .name = "entries", .mode = 0444 },
+ .show = sysfs_show_entries,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_mapped = {
+ .attr = { .name = "mapped", .mode = 0444 },
+ .show = sysfs_show_mapped,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_va_range = {
+ .attr = { .name = "va_range", .mode = 0444 },
+ .show = sysfs_show_va_range,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_max_mapped = {
+ .attr = { .name = "max_mapped", .mode = 0444 },
+ .show = sysfs_show_max_mapped,
+ .store = NULL,
+};
+
+static struct kobj_attribute attr_max_entries = {
+ .attr = { .name = "max_entries", .mode = 0444 },
+ .show = sysfs_show_max_entries,
+ .store = NULL,
+};
+
+static struct attribute *pagetable_attrs[] = {
+ &attr_entries.attr,
+ &attr_mapped.attr,
+ &attr_va_range.attr,
+ &attr_max_mapped.attr,
+ &attr_max_entries.attr,
+ NULL,
+};
+
+static struct attribute_group pagetable_attr_group = {
+ .attrs = pagetable_attrs,
+};
+
+static void
+pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
+{
+ if (pagetable->kobj)
+ sysfs_remove_group(pagetable->kobj,
+ &pagetable_attr_group);
+
+ kobject_put(pagetable->kobj);
+}
+
+static int
+pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
+{
+ char ptname[16];
+ int ret = -ENOMEM;
+
+ snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
+ pagetable->kobj = kobject_create_and_add(ptname,
+ kgsl_driver.ptkobj);
+ if (pagetable->kobj == NULL)
+ goto err;
+
+ ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
+
+err:
+ if (ret) {
+ if (pagetable->kobj)
+ kobject_put(pagetable->kobj);
+
+ pagetable->kobj = NULL;
+ }
+
+ return ret;
+}
+
+unsigned int kgsl_mmu_get_ptsize(void)
+{
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
+ return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
+ else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+ return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE_FOR_IOMMU;
+#else
+ return SZ_2G - KGSL_PAGETABLE_BASE;
+#endif
+
+ else
+ return 0;
+}
+
+int
+kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
+{
+ struct kgsl_pagetable *pt;
+ int ptid = -1;
+
+ spin_lock(&kgsl_driver.ptlock);
+ list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
+ if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
+ ptid = (int) pt->name;
+ break;
+ }
+ }
+ spin_unlock(&kgsl_driver.ptlock);
+
+ return ptid;
+}
+EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
+
+int kgsl_mmu_init(struct kgsl_device *device)
+{
+ int status = 0;
+ struct kgsl_mmu *mmu = &device->mmu;
+
+ mmu->device = device;
+ status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
+ if (status)
+ return status;
+ kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
+ mmu->setstate_memory.size);
+
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
+ dev_info(device->dev, "|%s| MMU type set for device is "
+ "NOMMU\n", __func__);
+ goto done;
+ } else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
+ mmu->mmu_ops = &gpummu_ops;
+ else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
+ mmu->mmu_ops = &iommu_ops;
+
+ status = mmu->mmu_ops->mmu_init(mmu);
+done:
+ if (status)
+ kgsl_sharedmem_free(&mmu->setstate_memory);
+ return status;
+}
+EXPORT_SYMBOL(kgsl_mmu_init);
+
+int kgsl_mmu_start(struct kgsl_device *device)
+{
+ struct kgsl_mmu *mmu = &device->mmu;
+
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
+ kgsl_regwrite(device, MH_MMU_CONFIG, 0);
+
+ if (!mmu->setstate_memory.gpuaddr)
+ kgsl_setup_pt(NULL);
+ return 0;
+ } else {
+ return mmu->mmu_ops->mmu_start(mmu);
+ }
+}
+EXPORT_SYMBOL(kgsl_mmu_start);
+
+static void mh_axi_error(struct kgsl_device *device, const char* type)
+{
+ unsigned int reg, gpu_err, phys_err, pt_base;
+
+ kgsl_regread(device, MH_AXI_ERROR, &reg);
+ pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
+ kgsl_regwrite(device, MH_DEBUG_CTRL, 44);
+ kgsl_regread(device, MH_DEBUG_DATA, &gpu_err);
+ kgsl_regwrite(device, MH_DEBUG_CTRL, 45);
+ kgsl_regread(device, MH_DEBUG_DATA, &phys_err);
+ KGSL_MEM_CRIT(device,
+ "axi %s error: %08x pt %08x gpu %08x phys %08x\n",
+ type, reg, pt_base, gpu_err, phys_err);
+}
+
+void kgsl_mh_intrcallback(struct kgsl_device *device)
+{
+ unsigned int status = 0;
+
+ kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
+
+ if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
+ mh_axi_error(device, "read");
+ if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
+ mh_axi_error(device, "write");
+ if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
+ device->mmu.mmu_ops->mmu_pagefault(&device->mmu);
+
+ status &= KGSL_MMU_INT_MASK;
+ kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
+}
+EXPORT_SYMBOL(kgsl_mh_intrcallback);
+
+static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
+ unsigned int name)
+{
+ int status = 0;
+ struct kgsl_pagetable *pagetable = NULL;
+ unsigned long flags;
+ unsigned int ptsize;
+
+ pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
+ if (pagetable == NULL) {
+ KGSL_CORE_ERR("kzalloc(%d) failed\n",
+ sizeof(struct kgsl_pagetable));
+ return NULL;
+ }
+
+ kref_init(&pagetable->refcount);
+
+ spin_lock_init(&pagetable->lock);
+
+ ptsize = kgsl_mmu_get_ptsize();
+
+ pagetable->name = name;
+ pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
+
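+ /* global iommu pagetables get an extra pool covering the kgsl
+ * global memory range */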
+ if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
+ ((KGSL_MMU_GLOBAL_PT == name) ||
+ (KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
+ pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (pagetable->kgsl_pool == NULL) {
+ KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
+ KGSL_MMU_ALIGN_SHIFT);
+ goto err_alloc;
+ }
+ if (gen_pool_add(pagetable->kgsl_pool,
+ KGSL_IOMMU_GLOBAL_MEM_BASE,
+ KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
+ KGSL_CORE_ERR("gen_pool_add failed\n");
+ goto err_kgsl_pool;
+ }
+ }
+
+ pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
+ if (pagetable->pool == NULL) {
+ KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
+ KGSL_MMU_ALIGN_SHIFT);
+ goto err_kgsl_pool;
+ }
+
+ if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
+ ptsize, -1)) {
+ KGSL_CORE_ERR("gen_pool_add failed\n");
+ goto err_pool;
+ }
+
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
+ pagetable->pt_ops = &gpummu_pt_ops;
+ else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
+ pagetable->pt_ops = &iommu_pt_ops;
+
+ pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
+ if (!pagetable->priv)
+ goto err_pool;
+
+ status = kgsl_setup_pt(pagetable);
+ if (status)
+ goto err_mmu_create;
+
+ spin_lock_irqsave(&kgsl_driver.ptlock, flags);
+ list_add(&pagetable->list, &kgsl_driver.pagetable_list);
+ spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
+
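+ /* create the sysfs entries */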
+ pagetable_add_sysfs_objects(pagetable);
+
+ return pagetable;
+
+err_mmu_create:
+ pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
+err_pool:
+ gen_pool_destroy(pagetable->pool);
+err_kgsl_pool:
+ if (pagetable->kgsl_pool)
+ gen_pool_destroy(pagetable->kgsl_pool);
+err_alloc:
+ kfree(pagetable);
+
+ return NULL;
+}
+
+struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
+{
+ struct kgsl_pagetable *pt;
+
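+ /* without an MMU, hand back a dummy non-NULL pagetable handle */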
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
+ return (void *)(-1);
+
+#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+ name = KGSL_MMU_GLOBAL_PT;
+#endif
+ pt = kgsl_get_pagetable(name);
+
+ if (pt == NULL)
+ pt = kgsl_mmu_createpagetableobject(name);
+
+ return pt;
+}
+
+void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
+{
+ kgsl_put_pagetable(pagetable);
+}
+EXPORT_SYMBOL(kgsl_mmu_putpagetable);
+
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+ uint32_t flags)
+{
+ struct kgsl_device *device = mmu->device;
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
+ return;
+ else if (device->ftbl->setstate)
+ device->ftbl->setstate(device, context_id, flags);
+ else if (mmu->mmu_ops->mmu_device_setstate)
+ mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+}
+EXPORT_SYMBOL(kgsl_setstate);
+
+void kgsl_mh_start(struct kgsl_device *device)
+{
+ struct kgsl_mh *mh = &device->mh;
+
+ kgsl_regwrite(device, MH_MMU_CONFIG, 0);
+ kgsl_idle(device);
+
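+ /* define the physical memory range accessible by the core */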
+ kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
+ kgsl_regwrite(device, MH_MMU_MPU_END,
+ mh->mpu_base + mh->mpu_range);
+ kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);
+
+ if (mh->mh_intf_cfg1 != 0)
+ kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
+ mh->mh_intf_cfg1);
+
+ if (mh->mh_intf_cfg2 != 0)
+ kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
+ mh->mh_intf_cfg2);
+}
+
+static inline struct gen_pool *
+_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
+{
+ if (pagetable->kgsl_pool &&
+ (KGSL_MEMFLAGS_GLOBAL & flags))
+ return pagetable->kgsl_pool;
+ return pagetable->pool;
+}
+
+int
+kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc,
+ unsigned int protflags)
+{
+ int ret;
+ struct gen_pool *pool;
+ int size;
+
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
+ if (memdesc->sglen == 1) {
+ memdesc->gpuaddr = sg_dma_address(memdesc->sg);
+ if (!memdesc->gpuaddr)
+ memdesc->gpuaddr = sg_phys(memdesc->sg);
+ if (!memdesc->gpuaddr) {
+ KGSL_CORE_ERR("Unable to get a valid physical "
+ "address for memdesc\n");
+ return -EINVAL;
+ }
+ return 0;
+ } else {
+ KGSL_CORE_ERR("Memory is not contigious "
+ "(sglen = %d)\n", memdesc->sglen);
+ return -EINVAL;
+ }
+ }
+
+ size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+
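+ /* global allocations come from the kgsl pool, the rest from the general pool */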
+ pool = _get_pool(pagetable, memdesc->priv);
+
+ memdesc->gpuaddr = gen_pool_alloc(pool, size);
+ if (memdesc->gpuaddr == 0) {
+ KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
+ size,
+ (pool == pagetable->kgsl_pool) ?
+ "kgsl_pool" : "general_pool");
+ KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
+ pagetable->name, pagetable->stats.mapped,
+ pagetable->stats.entries);
+ return -ENOMEM;
+ }
+
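+	/*
+	 * The IOMMU map call may sleep, so it runs without the pagetable
+	 * spinlock and the lock is only taken afterwards for the statistics
+	 * update; the GPUMMU path performs the whole map under the lock.
+	 */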
+ if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
+ spin_lock(&pagetable->lock);
+ ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags,
+ &pagetable->tlb_flags);
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ spin_lock(&pagetable->lock);
+
+ if (ret)
+ goto err_free_gpuaddr;
+
+
+
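+	/* Keep the allocation statistics for the sysfs files up to date */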
+ KGSL_STATS_ADD(1, pagetable->stats.entries,
+ pagetable->stats.max_entries);
+
+ KGSL_STATS_ADD(size, pagetable->stats.mapped,
+ pagetable->stats.max_mapped);
+
+ spin_unlock(&pagetable->lock);
+
+ return 0;
+
+err_free_gpuaddr:
+ spin_unlock(&pagetable->lock);
+ gen_pool_free(pool, memdesc->gpuaddr, size);
+ memdesc->gpuaddr = 0;
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_mmu_map);
+
+int
+kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc)
+{
+ struct gen_pool *pool;
+ int size;
+
+ if (memdesc->size == 0 || memdesc->gpuaddr == 0)
+ return 0;
+
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
+ memdesc->gpuaddr = 0;
+ return 0;
+ }
+
+ size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+
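+	/* As in kgsl_mmu_map(), the IOMMU unmap runs before the lock is taken */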
+ if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
+ spin_lock(&pagetable->lock);
+ pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
+ &pagetable->tlb_flags);
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+ spin_lock(&pagetable->lock);
+
+ pagetable->stats.entries--;
+ pagetable->stats.mapped -= size;
+
+ spin_unlock(&pagetable->lock);
+
+ pool = _get_pool(pagetable, memdesc->priv);
+ gen_pool_free(pool, memdesc->gpuaddr, size);
+
+ if (!(memdesc->priv & KGSL_MEMFLAGS_GLOBAL))
+ memdesc->gpuaddr = 0;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_unmap);
+
+int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc, unsigned int protflags)
+{
+ int result = -EINVAL;
+ unsigned int gpuaddr = 0;
+
+ if (memdesc == NULL) {
+ KGSL_CORE_ERR("invalid memdesc\n");
+ goto error;
+ }
+
+ if (!memdesc->size)
+ return 0;
+
+ gpuaddr = memdesc->gpuaddr;
+ memdesc->priv |= KGSL_MEMFLAGS_GLOBAL;
+
+ result = kgsl_mmu_map(pagetable, memdesc, protflags);
+ if (result)
+ goto error;
+
+
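+	/* A global mapping must resolve to the same GPU address in every pagetable */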
+ if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
+ KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x"
+ "gpu 0x%0x 0x%08x", pagetable, memdesc->physaddr,
+ gpuaddr, memdesc->gpuaddr);
+ goto error_unmap;
+ }
+ return result;
+error_unmap:
+ kgsl_mmu_unmap(pagetable, memdesc);
+error:
+ return result;
+}
+EXPORT_SYMBOL(kgsl_mmu_map_global);
+
+int kgsl_mmu_close(struct kgsl_device *device)
+{
+ struct kgsl_mmu *mmu = &device->mmu;
+
+ kgsl_sharedmem_free(&mmu->setstate_memory);
+ if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
+ return 0;
+ else
+ return mmu->mmu_ops->mmu_close(mmu);
+}
+EXPORT_SYMBOL(kgsl_mmu_close);
+
+int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
+ enum kgsl_deviceid id)
+{
+ unsigned int result = 0;
+
+ if (pt == NULL)
+ return 0;
+
+ spin_lock(&pt->lock);
+ if (pt->tlb_flags & (1<<id)) {
+ result = KGSL_MMUFLAGS_TLBFLUSH;
+ pt->tlb_flags &= ~(1<<id);
+ }
+ spin_unlock(&pt->lock);
+ return result;
+}
+EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
+
+void kgsl_mmu_ptpool_destroy(void *ptpool)
+{
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
+ kgsl_gpummu_ptpool_destroy(ptpool);
+}
+EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
+
+void *kgsl_mmu_ptpool_init(int entries)
+{
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
+ return kgsl_gpummu_ptpool_init(entries);
+ else
+ return (void *)(-1);
+}
+EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
+
+int kgsl_mmu_enabled(void)
+{
+ if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
+ return 1;
+ else
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_mmu_enabled);
+
+enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
+{
+ return kgsl_mmu_type;
+}
+EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
+
+void kgsl_mmu_set_mmutype(char *mmutype)
+{
+
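+	/* Default to the GPUMMU, except on APQ8064 where no MMU is used */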
+ kgsl_mmu_type =
+ cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;
+
+#ifndef CONFIG_MSM_KGSL_DEFAULT_GPUMMU
+
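+	/* Prefer the IOMMU whenever the platform bus reports one present */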
+ if (iommu_present(&platform_bus_type))
+ kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
+#endif
+
+ if (mmutype && !strncmp(mmutype, "gpummu", 6))
+ kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
+ if (iommu_present(&platform_bus_type) && mmutype &&
+ !strncmp(mmutype, "iommu", 5))
+ kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
+ if (mmutype && !strncmp(mmutype, "nommu", 5))
+ kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
+}
+EXPORT_SYMBOL(kgsl_mmu_set_mmutype);
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
new file mode 100644
index 0000000..d8713d3
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -0,0 +1,322 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_MMU_H
+#define __KGSL_MMU_H
+
+#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xC0000000
+#define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_4M
+#define KGSL_IOMMU_TTBR1_SPLIT 2
+
+#define KGSL_MMU_ALIGN_SHIFT 13
+#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
+
+
+#define KGSL_MMU_GLOBAL_PT 0
+#define KGSL_MMU_PRIV_BANK_TABLE_NAME 0xFFFFFFFF
+
+struct kgsl_device;
+
+#define GSL_PT_SUPER_PTE 8
+#define GSL_PT_PAGE_WV 0x00000001
+#define GSL_PT_PAGE_RV 0x00000002
+#define GSL_PT_PAGE_DIRTY 0x00000004
+
+
+#define MH_MMU_CONFIG 0x0040
+#define MH_MMU_VA_RANGE 0x0041
+#define MH_MMU_PT_BASE 0x0042
+#define MH_MMU_PAGE_FAULT 0x0043
+#define MH_MMU_TRAN_ERROR 0x0044
+#define MH_MMU_INVALIDATE 0x0045
+#define MH_MMU_MPU_BASE 0x0046
+#define MH_MMU_MPU_END 0x0047
+
+#define MH_INTERRUPT_MASK 0x0A42
+#define MH_INTERRUPT_STATUS 0x0A43
+#define MH_INTERRUPT_CLEAR 0x0A44
+#define MH_AXI_ERROR 0x0A45
+#define MH_ARBITER_CONFIG 0x0A40
+#define MH_DEBUG_CTRL 0x0A4E
+#define MH_DEBUG_DATA 0x0A4F
+#define MH_AXI_HALT_CONTROL 0x0A50
+#define MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
+#define MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
+
+
+#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
+#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
+#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
+#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
+#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
+#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
+#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
+#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
+#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
+#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
+#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
+
+#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
+#define KGSL_MMUFLAGS_PTUPDATE 0x20000000
+
+#define MH_INTERRUPT_MASK__AXI_READ_ERROR 0x00000001L
+#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L
+#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L
+
+#define KGSL_MMU_INT_MASK \
+ (MH_INTERRUPT_MASK__AXI_READ_ERROR | \
+ MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \
+ MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
+
+enum kgsl_mmutype {
+ KGSL_MMU_TYPE_GPU = 0,
+ KGSL_MMU_TYPE_IOMMU,
+ KGSL_MMU_TYPE_NONE
+};
+
+struct kgsl_pagetable {
+ spinlock_t lock;
+ struct kref refcount;
+ unsigned int max_entries;
+ struct gen_pool *pool;
+ struct gen_pool *kgsl_pool;
+ struct list_head list;
+ unsigned int name;
+ struct kobject *kobj;
+
+ struct {
+ unsigned int entries;
+ unsigned int mapped;
+ unsigned int max_mapped;
+ unsigned int max_entries;
+ } stats;
+ const struct kgsl_mmu_pt_ops *pt_ops;
+ unsigned int tlb_flags;
+ void *priv;
+};
+
+struct kgsl_mmu;
+
+struct kgsl_mmu_ops {
+ int (*mmu_init) (struct kgsl_mmu *mmu);
+ int (*mmu_close) (struct kgsl_mmu *mmu);
+ int (*mmu_start) (struct kgsl_mmu *mmu);
+ void (*mmu_stop) (struct kgsl_mmu *mmu);
+ void (*mmu_setstate) (struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id);
+ void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
+ uint32_t flags);
+ void (*mmu_pagefault) (struct kgsl_mmu *mmu);
+ unsigned int (*mmu_get_current_ptbase)
+ (struct kgsl_mmu *mmu);
+ void (*mmu_disable_clk_on_ts)
+ (struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
+ int (*mmu_enable_clk)
+ (struct kgsl_mmu *mmu, int ctx_id);
+ int (*mmu_get_pt_lsb)(struct kgsl_mmu *mmu,
+ unsigned int unit_id,
+ enum kgsl_iommu_context_id ctx_id);
+ int (*mmu_get_reg_map_desc)(struct kgsl_mmu *mmu,
+ void **reg_map_desc);
+ unsigned int (*mmu_sync_lock)
+ (struct kgsl_mmu *mmu,
+ unsigned int *cmds);
+ unsigned int (*mmu_sync_unlock)
+ (struct kgsl_mmu *mmu,
+ unsigned int *cmds);
+};
+
+struct kgsl_mmu_pt_ops {
+ int (*mmu_map) (void *mmu_pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int protflags,
+ unsigned int *tlb_flags);
+ int (*mmu_unmap) (void *mmu_pt,
+ struct kgsl_memdesc *memdesc,
+ unsigned int *tlb_flags);
+ void *(*mmu_create_pagetable) (void);
+ void (*mmu_destroy_pagetable) (void *pt);
+ int (*mmu_pt_equal) (struct kgsl_pagetable *pt,
+ unsigned int pt_base);
+ unsigned int (*mmu_pt_get_base_addr)
+ (struct kgsl_pagetable *pt);
+};
+
+#define KGSL_MMU_FLAGS_IOMMU_SYNC BIT(31)
+
+struct kgsl_mmu {
+ unsigned int refcnt;
+ uint32_t flags;
+ struct kgsl_device *device;
+ unsigned int config;
+ struct kgsl_memdesc setstate_memory;
+
+ struct kgsl_pagetable *defaultpagetable;
+
+ struct kgsl_pagetable *priv_bank_table;
+ struct kgsl_pagetable *hwpagetable;
+ const struct kgsl_mmu_ops *mmu_ops;
+ void *priv;
+};
+
+#include "kgsl_gpummu.h"
+
+extern struct kgsl_mmu_ops iommu_ops;
+extern struct kgsl_mmu_pt_ops iommu_pt_ops;
+
+struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);
+void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
+void kgsl_mh_start(struct kgsl_device *device);
+void kgsl_mh_intrcallback(struct kgsl_device *device);
+int kgsl_mmu_init(struct kgsl_device *device);
+int kgsl_mmu_start(struct kgsl_device *device);
+int kgsl_mmu_close(struct kgsl_device *device);
+int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc,
+ unsigned int protflags);
+int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc, unsigned int protflags);
+int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
+ struct kgsl_memdesc *memdesc);
+unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+ uint32_t flags);
+int kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base);
+int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
+ enum kgsl_deviceid id);
+void kgsl_mmu_ptpool_destroy(void *ptpool);
+void *kgsl_mmu_ptpool_init(int entries);
+int kgsl_mmu_enabled(void);
+void kgsl_mmu_set_mmutype(char *mmutype);
+enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
+unsigned int kgsl_mmu_get_ptsize(void);
+
+
+static inline unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_current_ptbase)
+ return mmu->mmu_ops->mmu_get_current_ptbase(mmu);
+ else
+ return 0;
+}
+
+static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pagetable,
+ unsigned int context_id)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
+ mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
+}
+
+static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,
+ uint32_t flags)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_device_setstate)
+ mmu->mmu_ops->mmu_device_setstate(mmu, flags);
+}
+
+static inline void kgsl_mmu_stop(struct kgsl_mmu *mmu)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_stop)
+ mmu->mmu_ops->mmu_stop(mmu);
+}
+
+static inline int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
+ unsigned int pt_base)
+{
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_get_mmutype())
+ return 1;
+ else
+ return pt->pt_ops->mmu_pt_equal(pt, pt_base);
+}
+
+static inline unsigned int kgsl_mmu_pt_get_base_addr(struct kgsl_pagetable *pt)
+{
+ if (KGSL_MMU_TYPE_NONE == kgsl_mmu_get_mmutype())
+ return 0;
+ else
+ return pt->pt_ops->mmu_pt_get_base_addr(pt);
+}
+
+static inline int kgsl_mmu_get_reg_map_desc(struct kgsl_mmu *mmu,
+ void **reg_map_desc)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_reg_map_desc)
+ return mmu->mmu_ops->mmu_get_reg_map_desc(mmu, reg_map_desc);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_get_pt_lsb(struct kgsl_mmu *mmu,
+ unsigned int unit_id,
+ enum kgsl_iommu_context_id ctx_id)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_pt_lsb)
+ return mmu->mmu_ops->mmu_get_pt_lsb(mmu, unit_id, ctx_id);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
+ int ctx_id)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_enable_clk)
+ return mmu->mmu_ops->mmu_enable_clk(mmu, ctx_id);
+ else
+ return 0;
+}
+
+static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
+ unsigned int ts, bool ts_valid)
+{
+ if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
+ mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
+}
+
+static inline unsigned int kgsl_mmu_get_int_mask(void)
+{
+
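+	/* The MMU page fault interrupt is only useful on the GPUMMU path */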
+ if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
+ return KGSL_MMU_INT_MASK;
+ else
+ return (MH_INTERRUPT_MASK__AXI_READ_ERROR |
+ MH_INTERRUPT_MASK__AXI_WRITE_ERROR);
+}
+
+static inline int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr)
+{
+ return ((gpuaddr >= KGSL_PAGETABLE_BASE) &&
+ (gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
+}
+
+static inline int kgsl_mmu_sync_lock(struct kgsl_mmu *mmu,
+ unsigned int *cmds)
+{
+ if ((mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC) &&
+ mmu->mmu_ops && mmu->mmu_ops->mmu_sync_lock)
+ return mmu->mmu_ops->mmu_sync_lock(mmu, cmds);
+ else
+ return 0;
+}
+
+static inline int kgsl_mmu_sync_unlock(struct kgsl_mmu *mmu,
+ unsigned int *cmds)
+{
+ if ((mmu->flags & KGSL_MMU_FLAGS_IOMMU_SYNC) &&
+ mmu->mmu_ops && mmu->mmu_ops->mmu_sync_unlock)
+ return mmu->mmu_ops->mmu_sync_unlock(mmu, cmds);
+ else
+ return 0;
+}
+
+#endif
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
new file mode 100644
index 0000000..3ed1ec8
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -0,0 +1,1134 @@
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <linux/pm_runtime.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_bus.h>
+#include <linux/fb.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+#include "kgsl_trace.h"
+
+#define KGSL_PWRFLAGS_POWER_ON 0
+#define KGSL_PWRFLAGS_CLK_ON 1
+#define KGSL_PWRFLAGS_AXI_ON 2
+#define KGSL_PWRFLAGS_IRQ_ON 3
+
+#define GPU_SWFI_LATENCY 3
+#define UPDATE_BUSY_VAL 1000000
+#define UPDATE_BUSY 50
+
+struct clk_pair {
+ const char *name;
+ uint map;
+};
+
+struct gpufreq_stats {
+ unsigned long long last_time;
+ unsigned int last_index;
+ unsigned int cur_index;
+};
+
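+/* Per-frequency residency accounting, guarded by gpufreq_stats_lock */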
+static spinlock_t gpufreq_stats_lock;
+static unsigned long long gputime_in_state[KGSL_MAX_PWRLEVELS] = {0};
+struct gpufreq_stats gpufreq_stat;
+
+struct clk_pair clks[KGSL_MAX_CLKS] = {
+ {
+ .name = "src_clk",
+ .map = KGSL_CLK_SRC,
+ },
+ {
+ .name = "core_clk",
+ .map = KGSL_CLK_CORE,
+ },
+ {
+ .name = "iface_clk",
+ .map = KGSL_CLK_IFACE,
+ },
+ {
+ .name = "mem_clk",
+ .map = KGSL_CLK_MEM,
+ },
+ {
+ .name = "mem_iface_clk",
+ .map = KGSL_CLK_MEM_IFACE,
+ },
+};
+
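+/*
+ * Accumulate the jiffies spent at the previous power level; when
+ * update_time_only is set, only the bookkeeping indices and the
+ * timestamp are refreshed.
+ */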
+static int gpufreq_stats_update(unsigned int update_time_only, unsigned int last_index, unsigned int cur_index)
+{
+ unsigned long long cur_time;
+ spin_lock(&gpufreq_stats_lock);
+ cur_time = get_jiffies_64();
+ if (update_time_only)
+ goto done;
+
+ if (last_index < KGSL_MAX_PWRLEVELS)
+ gputime_in_state[last_index] = gputime_in_state[last_index] + cur_time - gpufreq_stat.last_time;
+
+done:
+
+ gpufreq_stat.cur_index = cur_index;
+ gpufreq_stat.last_index = last_index;
+ gpufreq_stat.last_time = cur_time;
+
+ spin_unlock(&gpufreq_stats_lock);
+ return 0;
+}
+
+void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
+ unsigned int new_level)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ if (new_level < (pwr->num_pwrlevels - 1) &&
+ new_level >= pwr->thermal_pwrlevel &&
+ new_level != pwr->active_pwrlevel) {
+ struct kgsl_pwrlevel *pwrlevel = &pwr->pwrlevels[new_level];
+ int diff = new_level - pwr->active_pwrlevel;
+ int d = (diff > 0) ? 1 : -1;
+ int level = pwr->active_pwrlevel;
+ pwr->active_pwrlevel = new_level;
+ if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
+ (device->state == KGSL_STATE_NAP)) {
+ if (pwr->idle_needed == true)
+ device->ftbl->idle(device);
+ while (level != new_level) {
+ level += d;
+ clk_set_rate(pwr->grp_clks[0],
+ pwr->pwrlevels[level].gpu_freq);
+ }
+ }
+ if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
+ if (pwr->pcl) {
+ msm_bus_scale_client_update_request(pwr->pcl,
+ pwrlevel->bus_freq);
+ } else if (pwr->ebi1_clk)
+ clk_set_rate(pwr->ebi1_clk, pwrlevel->bus_freq);
+ }
+ trace_kgsl_pwrlevel(device, pwr->active_pwrlevel,
+ pwrlevel->gpu_freq);
+ }
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
+
+static int __gpuclk_store(int max, struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+	int ret, i, delta = 5000000;
+ unsigned long val;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+
+ ret = sscanf(buf, "%ld", &val);
+ if (ret != 1)
+ return count;
+
+ mutex_lock(&device->mutex);
+ for (i = 0; i < pwr->num_pwrlevels; i++) {
+ if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
+ if (max)
+ pwr->thermal_pwrlevel = i;
+ break;
+ }
+ }
+
+ if (i == pwr->num_pwrlevels)
+ goto done;
+
+
+ if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
+ pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+ else if (!max)
+ kgsl_pwrctrl_pwrlevel_change(device, i);
+
+done:
+ mutex_unlock(&device->mutex);
+ return count;
+}
+
+static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return __gpuclk_store(1, dev, attr, buf, count);
+}
+
+static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
+}
+
+static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return __gpuclk_store(0, dev, attr, buf, count);
+}
+
+static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+}
+
+static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char temp[20];
+ unsigned long val;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int rc;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+
+ snprintf(temp, sizeof(temp), "%.*s",
+ (int)min(count, sizeof(temp) - 1), buf);
+ rc = strict_strtoul(temp, 0, &val);
+ if (rc)
+ return rc;
+
+ mutex_lock(&device->mutex);
+
+ if (val == 1)
+ pwr->nap_allowed = true;
+ else if (val == 0)
+ pwr->nap_allowed = false;
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ if (device == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
+}
+
+
+static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char temp[20];
+ unsigned long val;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ const long div = 1000/HZ;
+ static unsigned int org_interval_timeout = 1;
+ int rc;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+
+ snprintf(temp, sizeof(temp), "%.*s",
+ (int)min(count, sizeof(temp) - 1), buf);
+ rc = strict_strtoul(temp, 0, &val);
+ if (rc)
+ return rc;
+
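+	/* Remember the platform default so the timeout can never drop below it */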
+ if (org_interval_timeout == 1)
+ org_interval_timeout = pwr->interval_timeout;
+
+ mutex_lock(&device->mutex);
+
+
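+	/* Convert the value from milliseconds to jiffies */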
+ val /= div;
+ if (val >= org_interval_timeout)
+ pwr->interval_timeout = val;
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ if (device == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ device->pwrctrl.interval_timeout);
+}
+
+static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_busy *b = &device->pwrctrl.busy;
+ ret = snprintf(buf, 17, "%7d %7d\n",
+ b->on_time_old, b->time_old);
+ if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
+ b->on_time_old = 0;
+ b->time_old = 0;
+ }
+ return ret;
+}
+
+static int kgsl_pwrctrl_gpu_available_frequencies_show(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int index, num_chars = 0;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ for (index = 0; index < pwr->num_pwrlevels - 1; index++)
+ num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ",
+ pwr->pwrlevels[index].gpu_freq);
+ buf[num_chars++] = '\n';
+ return num_chars;
+}
+
+
+static int kgsl_pwrctrl_gpubusy_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ s64 system_time, busy_time;
+
+	if (device == NULL)
+ return 0;
+
+ system_time = device->gputime.total;
+ do_div(system_time, 1000);
+ busy_time = device->gputime.busy;
+ do_div(busy_time, 1000);
+ ret = snprintf(buf, 63, "%lld %lld\n", system_time, busy_time);
+
+ return ret;
+}
+
+static int kgsl_pwrctrl_gpubusy_time_in_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i;
+	char *tmp = buf;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct platform_device *pdev = NULL;
+ struct kgsl_device_platform_data *pdata = NULL;
+ s64 system_time, busy_time;
+
+ if (device == NULL)
+ return 0;
+
+ pdev = container_of(device->parentdev, struct platform_device, dev);
+ if (pdev == NULL)
+ return 0;
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL)
+ return 0;
+
+ for(i=0;i<pdata->num_levels;i++) {
+ system_time = device->gputime_in_state[i].total;
+ do_div(system_time, 1000);
+ busy_time = device->gputime_in_state[i].busy;
+ do_div(busy_time, 1000);
+ tmp = (char*)( (int)tmp + snprintf(tmp, PAGE_SIZE - (int)(tmp-buf), "%d %lld %lld\n", pdata->pwrlevel[i].gpu_freq, system_time, busy_time));
+ }
+ return (ssize_t)(tmp - buf);
+}
+
+static int kgsl_pwrctrl_gputime_in_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+ int i;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+
+ if (device == NULL)
+ return 0;
+
+	if (test_bit(KGSL_PWRFLAGS_CLK_ON, &device->pwrctrl.power_flags) ||
+		(device->state == KGSL_STATE_NAP))
+		gpufreq_stats_update(0, device->pwrctrl.active_pwrlevel,
+			device->pwrctrl.active_pwrlevel);
+
+ for (i = 0; i < device->pwrctrl.num_pwrlevels; i++) {
+		len += sprintf(buf + len, "%u %llu\n",
+			device->pwrctrl.pwrlevels[i].gpu_freq,
+			(unsigned long long)
+			jiffies_to_clock_t(gputime_in_state[i]));
+ }
+
+ return len;
+}
+
+static int kgsl_pwrctrl_init_pwrlevel_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ char temp[20];
+ unsigned long val;
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int rc;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+
+ snprintf(temp, sizeof(temp), "%.*s",
+ (int)min(count, sizeof(temp) - 1), buf);
+ rc = strict_strtoul(temp, 0, &val);
+ if (rc)
+ return rc;
+
+ mutex_lock(&device->mutex);
+
+ if (val >=0 && val < pwr->num_pwrlevels - 1)
+ pwr->default_pwrlevel = val;
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static int kgsl_pwrctrl_init_pwrlevel_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ if (device == NULL)
+ return 0;
+ return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.default_pwrlevel);
+}
+
+DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
+DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show, kgsl_pwrctrl_max_gpuclk_store);
+DEVICE_ATTR(pwrnap, 0664, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
+DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show, kgsl_pwrctrl_idle_timer_store);
+DEVICE_ATTR(gputime_in_state, 0444, kgsl_pwrctrl_gputime_in_state_show, NULL);
+DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show, NULL);
+DEVICE_ATTR(gpu_available_frequencies, 0444, kgsl_pwrctrl_gpu_available_frequencies_show, NULL);
+DEVICE_ATTR(gpubusy_time, 0644, kgsl_pwrctrl_gpubusy_time_show, NULL);
+DEVICE_ATTR(gpubusy_time_in_state, 0644, kgsl_pwrctrl_gpubusy_time_in_state_show, NULL);
+DEVICE_ATTR(init_pwrlevel, 0644, kgsl_pwrctrl_init_pwrlevel_show, kgsl_pwrctrl_init_pwrlevel_store);
+
+static const struct device_attribute *pwrctrl_attr_list[] = {
+ &dev_attr_gpuclk,
+ &dev_attr_max_gpuclk,
+ &dev_attr_pwrnap,
+ &dev_attr_idle_timer,
+ &dev_attr_gpubusy,
+ &dev_attr_gpu_available_frequencies,
+ &dev_attr_gpubusy_time,
+ &dev_attr_gpubusy_time_in_state,
+ &dev_attr_gputime_in_state,
+ &dev_attr_init_pwrlevel,
+ NULL
+};
+
+int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
+{
+ return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
+}
+
+void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
+{
+ kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
+}
+
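+/*
+ * Track how long the GPU has been on and busy; the counters are
+ * rolled into the *_old fields roughly every UPDATE_BUSY_VAL
+ * microseconds, or as soon as the AXI clock goes down.
+ */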
+static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
+{
+ struct kgsl_busy *b = &device->pwrctrl.busy;
+ int elapsed;
+ if (b->start.tv_sec == 0)
+ do_gettimeofday(&(b->start));
+ do_gettimeofday(&(b->stop));
+ elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
+ elapsed += b->stop.tv_usec - b->start.tv_usec;
+ b->time += elapsed;
+ if (on_time)
+ b->on_time += elapsed;
+
+ if ((b->time > UPDATE_BUSY_VAL) ||
+ !test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
+ b->on_time_old = b->on_time;
+ b->time_old = b->time;
+ b->on_time = 0;
+ b->time = 0;
+ }
+ do_gettimeofday(&(b->start));
+}
+
+void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
+ int requested_state)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int i = 0;
+ if (state == KGSL_PWRFLAGS_OFF) {
+ if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_clk(device, state);
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE_SYSTRACE
+ if(device->id == 0) {
+ trace_kgsl_usage(device, state, task_tgid_nr(current), device->gputime.total, device->gputime.busy,
+ pwr->active_pwrlevel, pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+ device->prev_pid= -1;
+ }
+#endif
+ for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+ if (pwr->grp_clks[i]) {
+ clk_disable(pwr->grp_clks[i]);
+ if (i == 0)
+ gpufreq_stats_update(0, pwr->active_pwrlevel, (pwr->num_pwrlevels - 1));
+ }
+
+ if ((pwr->pwrlevels[0].gpu_freq > 0) &&
+ (requested_state != KGSL_STATE_NAP)) {
+ clk_set_rate(pwr->grp_clks[0],
+ pwr->pwrlevels[pwr->num_pwrlevels - 1].
+ gpu_freq);
+ for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+ if (pwr->grp_clks[i])
+ clk_unprepare(pwr->grp_clks[i]);
+ }
+ kgsl_pwrctrl_busy_time(device, true);
+ }
+ } else if (state == KGSL_PWRFLAGS_ON) {
+ if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
+ &pwr->power_flags)) {
+
+ trace_kgsl_clk(device, state);
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE_SYSTRACE
+ if(device->id == 0) {
+ trace_kgsl_usage(device, state, task_tgid_nr(current), device->gputime.total, device->gputime.busy,
+ pwr->active_pwrlevel, pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
+ device->prev_pid = task_tgid_nr(current);
+ }
+#endif
+
+ if (device->state != KGSL_STATE_NAP) {
+ for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+ if (pwr->grp_clks[i])
+ clk_prepare(pwr->grp_clks[i]);
+
+ if (pwr->pwrlevels[0].gpu_freq > 0)
+ clk_set_rate(pwr->grp_clks[0],
+ pwr->pwrlevels
+ [pwr->active_pwrlevel].
+ gpu_freq);
+ }
+ for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
+ if (pwr->grp_clks[i]) {
+ clk_enable(pwr->grp_clks[i]);
+ if (i == 0)
+ gpufreq_stats_update(1, KGSL_MAX_PWRLEVELS, pwr->active_pwrlevel);
+ }
+ kgsl_pwrctrl_busy_time(device, false);
+ }
+ }
+}
+
+void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+ if (state == KGSL_PWRFLAGS_OFF) {
+ if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_bus(device, state);
+ if (pwr->ebi1_clk) {
+ clk_set_rate(pwr->ebi1_clk, 0);
+ clk_disable_unprepare(pwr->ebi1_clk);
+ }
+ if (pwr->pcl)
+ msm_bus_scale_client_update_request(pwr->pcl,
+ 0);
+ }
+ } else if (state == KGSL_PWRFLAGS_ON) {
+ if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_bus(device, state);
+ if (pwr->ebi1_clk) {
+ clk_prepare_enable(pwr->ebi1_clk);
+ clk_set_rate(pwr->ebi1_clk,
+ pwr->pwrlevels[pwr->active_pwrlevel].
+ bus_freq);
+ }
+ if (pwr->pcl)
+ msm_bus_scale_client_update_request(pwr->pcl,
+ pwr->pwrlevels[pwr->active_pwrlevel].
+ bus_freq);
+ }
+ }
+}
+
+void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+ if (state == KGSL_PWRFLAGS_OFF) {
+ if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_rail(device, state);
+ if (pwr->gpu_dig)
+ regulator_disable(pwr->gpu_dig);
+ if (pwr->gpu_reg)
+ regulator_disable(pwr->gpu_reg);
+ }
+ } else if (state == KGSL_PWRFLAGS_ON) {
+ if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_rail(device, state);
+ if (pwr->gpu_reg) {
+ int status = regulator_enable(pwr->gpu_reg);
+ if (status)
+ KGSL_DRV_ERR(device,
+ "core regulator_enable "
+ "failed: %d\n",
+ status);
+ }
+ if (pwr->gpu_dig) {
+ int status = regulator_enable(pwr->gpu_dig);
+ if (status)
+ KGSL_DRV_ERR(device,
+ "cx regulator_enable "
+ "failed: %d\n",
+ status);
+ }
+ }
+ }
+}
+
+void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+ if (state == KGSL_PWRFLAGS_ON) {
+ if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_irq(device, state);
+ enable_irq(pwr->interrupt_num);
+ }
+ } else if (state == KGSL_PWRFLAGS_OFF) {
+ if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
+ &pwr->power_flags)) {
+ trace_kgsl_irq(device, state);
+ if (in_interrupt())
+ disable_irq_nosync(pwr->interrupt_num);
+ else
+ disable_irq(pwr->interrupt_num);
+ }
+ }
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_irq);
+
+int kgsl_pwrctrl_init(struct kgsl_device *device)
+{
+ int i, result = 0;
+ struct clk *clk;
+ struct platform_device *pdev =
+ container_of(device->parentdev, struct platform_device, dev);
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+
+ spin_lock_init(&gpufreq_stats_lock);
+
+
+ for (i = 0; i < KGSL_MAX_CLKS; i++) {
+ if (pdata->clk_map & clks[i].map) {
+ clk = clk_get(&pdev->dev, clks[i].name);
+ if (IS_ERR(clk))
+ goto clk_err;
+ pwr->grp_clks[i] = clk;
+ }
+ }
+
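+	/* Make sure we always have a source clock to set rates on */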
+ if (pwr->grp_clks[0] == NULL)
+ pwr->grp_clks[0] = pwr->grp_clks[1];
+
+
+ if (pdata->set_grp_async != NULL)
+ pdata->set_grp_async();
+
+ if (pdata->num_levels > KGSL_MAX_PWRLEVELS) {
+ KGSL_PWR_ERR(device, "invalid power level count: %d\n",
+ pdata->num_levels);
+ result = -EINVAL;
+ goto done;
+ }
+ pwr->num_pwrlevels = pdata->num_levels;
+ pwr->active_pwrlevel = pdata->init_level;
+ pwr->default_pwrlevel = pdata->init_level;
+ for (i = 0; i < pdata->num_levels; i++) {
+ pwr->pwrlevels[i].gpu_freq =
+ (pdata->pwrlevel[i].gpu_freq > 0) ?
+ clk_round_rate(pwr->grp_clks[0],
+ pdata->pwrlevel[i].
+ gpu_freq) : 0;
+ pwr->pwrlevels[i].bus_freq =
+ pdata->pwrlevel[i].bus_freq;
+ pwr->pwrlevels[i].io_fraction =
+ pdata->pwrlevel[i].io_fraction;
+ }
+
+ if (pwr->pwrlevels[0].gpu_freq > 0)
+ clk_set_rate(pwr->grp_clks[0], pwr->
+ pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
+
+ pwr->gpu_reg = regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(pwr->gpu_reg))
+ pwr->gpu_reg = NULL;
+
+ if (pwr->gpu_reg) {
+ pwr->gpu_dig = regulator_get(&pdev->dev, "vdd_dig");
+ if (IS_ERR(pwr->gpu_dig))
+ pwr->gpu_dig = NULL;
+ } else
+ pwr->gpu_dig = NULL;
+
+ pwr->power_flags = 0;
+
+ pwr->nap_allowed = pdata->nap_allowed;
+ pwr->idle_needed = pdata->idle_needed;
+ pwr->interval_timeout = pdata->idle_timeout;
+ pwr->strtstp_sleepwake = pdata->strtstp_sleepwake;
+ pwr->ebi1_clk = clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(pwr->ebi1_clk))
+ pwr->ebi1_clk = NULL;
+ else
+ clk_set_rate(pwr->ebi1_clk,
+ pwr->pwrlevels[pwr->active_pwrlevel].
+ bus_freq);
+ if (pdata->bus_scale_table != NULL) {
+ pwr->pcl = msm_bus_scale_register_client(pdata->
+ bus_scale_table);
+ if (!pwr->pcl) {
+ KGSL_PWR_ERR(device,
+ "msm_bus_scale_register_client failed: "
+ "id %d table %p", device->id,
+ pdata->bus_scale_table);
+ result = -EINVAL;
+ goto done;
+ }
+ }
+
+
+ pm_runtime_enable(device->parentdev);
+ register_early_suspend(&device->display_off);
+
+ gpufreq_stats_update(1, pwr->active_pwrlevel, KGSL_MAX_PWRLEVELS);
+
+ return result;
+
+clk_err:
+ result = PTR_ERR(clk);
+ KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
+ clks[i].name, result);
+
+done:
+ return result;
+}
+
+void kgsl_pwrctrl_close(struct kgsl_device *device)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int i;
+
+ KGSL_PWR_INFO(device, "close device %d\n", device->id);
+
+ pm_runtime_disable(device->parentdev);
+ unregister_early_suspend(&device->display_off);
+
+ clk_put(pwr->ebi1_clk);
+
+ if (pwr->pcl)
+ msm_bus_scale_unregister_client(pwr->pcl);
+
+ pwr->pcl = 0;
+
+ if (pwr->gpu_reg) {
+ regulator_put(pwr->gpu_reg);
+ pwr->gpu_reg = NULL;
+ }
+
+ if (pwr->gpu_dig) {
+ regulator_put(pwr->gpu_dig);
+ pwr->gpu_dig = NULL;
+ }
+
+ for (i = 1; i < KGSL_MAX_CLKS; i++)
+ if (pwr->grp_clks[i]) {
+ clk_put(pwr->grp_clks[i]);
+ pwr->grp_clks[i] = NULL;
+ }
+
+ pwr->grp_clks[0] = NULL;
+ pwr->power_flags = 0;
+}
+
+void kgsl_idle_check(struct work_struct *work)
+{
+ struct kgsl_device *device = container_of(work, struct kgsl_device,
+ idle_check_ws);
+ WARN_ON(device == NULL);
+ if (device == NULL)
+ return;
+
+ mutex_lock(&device->mutex);
+ if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
+ kgsl_pwrscale_idle(device);
+
+ if (kgsl_pwrctrl_sleep(device) != 0) {
+ mod_timer(&device->idle_timer,
+ jiffies +
+ device->pwrctrl.interval_timeout);
+ device->pwrctrl.busy.no_nap_cnt++;
+ if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
+ kgsl_pwrctrl_busy_time(device, true);
+ device->pwrctrl.busy.no_nap_cnt = 0;
+ }
+ }
+ } else if (device->state & (KGSL_STATE_HUNG |
+ KGSL_STATE_DUMP_AND_RECOVER)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ }
+
+ mutex_unlock(&device->mutex);
+}
+
+void kgsl_timer(unsigned long data)
+{
+ struct kgsl_device *device = (struct kgsl_device *) data;
+
+ KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
+ if (device->requested_state != KGSL_STATE_SUSPEND) {
+ if (device->pwrctrl.restore_slumber ||
+ device->pwrctrl.strtstp_sleepwake)
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLUMBER);
+ else
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_SLEEP);
+
+ queue_work(device->work_queue, &device->idle_check_ws);
+ }
+}
+
+void kgsl_pre_hwaccess(struct kgsl_device *device)
+{
+ BUG_ON(!mutex_is_locked(&device->mutex));
+ switch (device->state) {
+ case KGSL_STATE_ACTIVE:
+ return;
+ case KGSL_STATE_NAP:
+ case KGSL_STATE_SLEEP:
+ case KGSL_STATE_SLUMBER:
+ kgsl_pwrctrl_wake(device);
+ break;
+ case KGSL_STATE_SUSPEND:
+ kgsl_check_suspended(device);
+ break;
+ case KGSL_STATE_INIT:
+ case KGSL_STATE_HUNG:
+ case KGSL_STATE_DUMP_AND_RECOVER:
+ if (test_bit(KGSL_PWRFLAGS_CLK_ON,
+ &device->pwrctrl.power_flags))
+ break;
+ else
+ KGSL_PWR_ERR(device,
+ "hw access while clocks off from state %d\n",
+ device->state);
+ break;
+ default:
+ KGSL_PWR_ERR(device, "hw access while in unknown state %d\n",
+ device->state);
+ break;
+ }
+}
+EXPORT_SYMBOL(kgsl_pre_hwaccess);
+
+void kgsl_check_suspended(struct kgsl_device *device)
+{
+ if (device->requested_state == KGSL_STATE_SUSPEND ||
+ device->state == KGSL_STATE_SUSPEND) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->hwaccess_gate);
+ mutex_lock(&device->mutex);
+ } else if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
+ mutex_unlock(&device->mutex);
+ wait_for_completion(&device->recovery_gate);
+ mutex_lock(&device->mutex);
+ } else if (device->state == KGSL_STATE_SLUMBER)
+ kgsl_pwrctrl_wake(device);
+}
+
+static int
+_nap(struct kgsl_device *device)
+{
+ switch (device->state) {
+ case KGSL_STATE_ACTIVE:
+ if (!device->ftbl->isidle(device)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ return -EBUSY;
+ }
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_NAP);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_NAP);
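+		/* fall through */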
+ case KGSL_STATE_NAP:
+ case KGSL_STATE_SLEEP:
+ case KGSL_STATE_SLUMBER:
+ break;
+ default:
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ break;
+ }
+ return 0;
+}
+
+static void
+_sleep_accounting(struct kgsl_device *device)
+{
+ kgsl_pwrctrl_busy_time(device, false);
+ device->pwrctrl.busy.start.tv_sec = 0;
+ device->pwrctrl.time = 0;
+ kgsl_pwrscale_sleep(device);
+}
+
+static int
+_sleep(struct kgsl_device *device)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ switch (device->state) {
+ case KGSL_STATE_ACTIVE:
+ if (!device->ftbl->isidle(device)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ return -EBUSY;
+ }
+
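+		/* fall through */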
+ case KGSL_STATE_NAP:
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
+ if (pwr->pwrlevels[0].gpu_freq > 0)
+ clk_set_rate(pwr->grp_clks[0],
+ pwr->pwrlevels[pwr->num_pwrlevels - 1].
+ gpu_freq);
+ _sleep_accounting(device);
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SLEEP);
+ pm_qos_update_request(&device->pm_qos_req_dma,
+ PM_QOS_DEFAULT_VALUE);
+ break;
+ case KGSL_STATE_SLEEP:
+ case KGSL_STATE_SLUMBER:
+ break;
+ default:
+ KGSL_PWR_WARN(device, "unhandled state %s\n",
+ kgsl_pwrstate_to_str(device->state));
+ break;
+ }
+ return 0;
+}
+
+static int
+_slumber(struct kgsl_device *device)
+{
+ switch (device->state) {
+ case KGSL_STATE_ACTIVE:
+ if (!device->ftbl->isidle(device)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ return -EBUSY;
+ }
+
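+		/* fall through */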
+ case KGSL_STATE_NAP:
+ case KGSL_STATE_SLEEP:
+ del_timer_sync(&device->idle_timer);
+ device->ftbl->suspend_context(device);
+ device->ftbl->stop(device);
+ _sleep_accounting(device);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_SLUMBER);
+ pm_qos_update_request(&device->pm_qos_req_dma,
+ PM_QOS_DEFAULT_VALUE);
+ break;
+ case KGSL_STATE_SLUMBER:
+ break;
+ default:
+ KGSL_PWR_WARN(device, "unhandled state %s\n",
+ kgsl_pwrstate_to_str(device->state));
+ break;
+ }
+ return 0;
+}
+
+int kgsl_pwrctrl_sleep(struct kgsl_device *device)
+{
+ int status = 0;
+ KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
+
+
+ switch (device->requested_state) {
+ case KGSL_STATE_NAP:
+ status = _nap(device);
+ break;
+ case KGSL_STATE_SLEEP:
+ status = _sleep(device);
+ break;
+ case KGSL_STATE_SLUMBER:
+ status = _slumber(device);
+ break;
+ default:
+ KGSL_PWR_INFO(device, "bad state request 0x%x\n",
+ device->requested_state);
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ status = -EINVAL;
+ break;
+ }
+ return status;
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
+
+void kgsl_pwrctrl_wake(struct kgsl_device *device)
+{
+ int status;
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_ACTIVE);
+ switch (device->state) {
+ case KGSL_STATE_SLUMBER:
+ status = device->ftbl->start(device, 0);
+ if (status) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ KGSL_DRV_ERR(device, "start failed %d\n", status);
+ break;
+ }
+
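+		/* fall through */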
+ case KGSL_STATE_SLEEP:
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+ kgsl_pwrscale_wake(device);
+
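+		/* fall through */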
+ case KGSL_STATE_NAP:
+
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
+
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+
+ mod_timer(&device->idle_timer,
+ jiffies + device->pwrctrl.interval_timeout);
+ pm_qos_update_request(&device->pm_qos_req_dma,
+ GPU_SWFI_LATENCY);
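+		/* fall through */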
+ case KGSL_STATE_ACTIVE:
+ break;
+ default:
+ KGSL_PWR_WARN(device, "unhandled state %s\n",
+ kgsl_pwrstate_to_str(device->state));
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NONE);
+ break;
+ }
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_wake);
+
+void kgsl_pwrctrl_enable(struct kgsl_device *device)
+{
+
+ kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE);
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_enable);
+
+void kgsl_pwrctrl_disable(struct kgsl_device *device)
+{
+
+ kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
+ kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF, KGSL_STATE_SLEEP);
+ kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_disable);
+
+void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state)
+{
+ trace_kgsl_pwr_set_state(device, state);
+ device->state = state;
+ device->requested_state = KGSL_STATE_NONE;
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_set_state);
+
+void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state)
+{
+ if (state != KGSL_STATE_NONE && state != device->requested_state)
+ trace_kgsl_pwr_request_state(device, state);
+ device->requested_state = state;
+}
+EXPORT_SYMBOL(kgsl_pwrctrl_request_state);
+
+const char *kgsl_pwrstate_to_str(unsigned int state)
+{
+ switch (state) {
+ case KGSL_STATE_NONE:
+ return "NONE";
+ case KGSL_STATE_INIT:
+ return "INIT";
+ case KGSL_STATE_ACTIVE:
+ return "ACTIVE";
+ case KGSL_STATE_NAP:
+ return "NAP";
+ case KGSL_STATE_SLEEP:
+ return "SLEEP";
+ case KGSL_STATE_SUSPEND:
+ return "SUSPEND";
+ case KGSL_STATE_HUNG:
+ return "HUNG";
+ case KGSL_STATE_DUMP_AND_RECOVER:
+ return "DNR";
+ case KGSL_STATE_SLUMBER:
+ return "SLUMBER";
+ default:
+ break;
+ }
+ return "UNKNOWN";
+}
+EXPORT_SYMBOL(kgsl_pwrstate_to_str);
+
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
new file mode 100644
index 0000000..591582f
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -0,0 +1,82 @@
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_PWRCTRL_H
+#define __KGSL_PWRCTRL_H
+
+#define KGSL_PWRFLAGS_ON 1
+#define KGSL_PWRFLAGS_OFF 0
+
+#define KGSL_PWRLEVEL_TURBO 0
+#define KGSL_PWRLEVEL_NOMINAL 1
+#define KGSL_PWRLEVEL_LAST_OFFSET 2
+
+#define KGSL_MAX_CLKS 5
+
+struct platform_device;
+
+struct kgsl_busy {
+ struct timeval start;
+ struct timeval stop;
+ int on_time;
+ int time;
+ int on_time_old;
+ int time_old;
+ unsigned int no_nap_cnt;
+};
+
+struct kgsl_pwrctrl {
+ int interrupt_num;
+ struct clk *ebi1_clk;
+ struct clk *grp_clks[KGSL_MAX_CLKS];
+ unsigned long power_flags;
+ struct kgsl_pwrlevel pwrlevels[KGSL_MAX_PWRLEVELS];
+ unsigned int active_pwrlevel;
+ int thermal_pwrlevel;
+ unsigned int default_pwrlevel;
+ unsigned int num_pwrlevels;
+ unsigned int interval_timeout;
+ bool strtstp_sleepwake;
+ struct regulator *gpu_reg;
+ struct regulator *gpu_dig;
+ uint32_t pcl;
+ unsigned int nap_allowed;
+ unsigned int idle_needed;
+ const char *irq_name;
+ s64 time;
+ struct kgsl_busy busy;
+ unsigned int restore_slumber;
+};
+
+void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
+int kgsl_pwrctrl_init(struct kgsl_device *device);
+void kgsl_pwrctrl_close(struct kgsl_device *device);
+void kgsl_timer(unsigned long data);
+void kgsl_idle_check(struct work_struct *work);
+void kgsl_pre_hwaccess(struct kgsl_device *device);
+void kgsl_check_suspended(struct kgsl_device *device);
+int kgsl_pwrctrl_sleep(struct kgsl_device *device);
+void kgsl_pwrctrl_wake(struct kgsl_device *device);
+void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
+ unsigned int level);
+int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device);
+void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device);
+void kgsl_pwrctrl_enable(struct kgsl_device *device);
+void kgsl_pwrctrl_disable(struct kgsl_device *device);
+static inline unsigned long kgsl_get_clkrate(struct clk *clk)
+{
+ return (clk != NULL) ? clk_get_rate(clk) : 0;
+}
+
+void kgsl_pwrctrl_set_state(struct kgsl_device *device, unsigned int state);
+void kgsl_pwrctrl_request_state(struct kgsl_device *device, unsigned int state);
+#endif
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
new file mode 100644
index 0000000..12e1885
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -0,0 +1,371 @@
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include <asm/page.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+
+struct kgsl_pwrscale_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kgsl_device *device, char *buf);
+ ssize_t (*store)(struct kgsl_device *device, const char *buf,
+ size_t count);
+};
+
+#define to_pwrscale(k) container_of(k, struct kgsl_pwrscale, kobj)
+#define pwrscale_to_device(p) container_of(p, struct kgsl_device, pwrscale)
+#define to_device(k) container_of(k, struct kgsl_device, pwrscale_kobj)
+#define to_pwrscale_attr(a) \
+container_of(a, struct kgsl_pwrscale_attribute, attr)
+#define to_policy_attr(a) \
+container_of(a, struct kgsl_pwrscale_policy_attribute, attr)
+
+#define PWRSCALE_ATTR(_name, _mode, _show, _store) \
+struct kgsl_pwrscale_attribute pwrscale_attr_##_name = \
+__ATTR(_name, _mode, _show, _store)
+
+
+static struct kgsl_pwrscale_policy *kgsl_pwrscale_policies[] = {
+#ifdef CONFIG_MSM_SCM
+ &kgsl_pwrscale_policy_tz,
+#endif
+#ifdef CONFIG_MSM_SLEEP_STATS_DEVICE
+ &kgsl_pwrscale_policy_idlestats,
+#endif
+#ifdef CONFIG_MSM_DCVS
+ &kgsl_pwrscale_policy_msm,
+#endif
+ NULL
+};
+
+static ssize_t pwrscale_policy_store(struct kgsl_device *device,
+ const char *buf, size_t count)
+{
+ int i;
+ struct kgsl_pwrscale_policy *policy = NULL;
+
+ if (!strncmp("none", buf, 4)) {
+ kgsl_pwrscale_detach_policy(device);
+ return count;
+ }
+
+ for (i = 0; kgsl_pwrscale_policies[i]; i++) {
+ if (!strncmp(kgsl_pwrscale_policies[i]->name, buf,
+ strnlen(kgsl_pwrscale_policies[i]->name,
+ PAGE_SIZE))) {
+ policy = kgsl_pwrscale_policies[i];
+ break;
+ }
+ }
+
+ if (policy)
+ if (kgsl_pwrscale_attach_policy(device, policy))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t pwrscale_policy_show(struct kgsl_device *device, char *buf)
+{
+ int ret;
+
+ if (device->pwrscale.policy) {
+ ret = snprintf(buf, PAGE_SIZE, "%s",
+ device->pwrscale.policy->name);
+ if (device->pwrscale.enabled == 0)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ " (disabled)");
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ } else
+ ret = snprintf(buf, PAGE_SIZE, "none\n");
+
+ return ret;
+}
+
+PWRSCALE_ATTR(policy, 0664, pwrscale_policy_show, pwrscale_policy_store);
+
+static ssize_t pwrscale_avail_policies_show(struct kgsl_device *device,
+ char *buf)
+{
+ int i, ret = 0;
+
+ for (i = 0; kgsl_pwrscale_policies[i]; i++) {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s ",
+ kgsl_pwrscale_policies[i]->name);
+ }
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "none\n");
+ return ret;
+}
+PWRSCALE_ATTR(avail_policies, 0444, pwrscale_avail_policies_show, NULL);
+
+static struct attribute *pwrscale_attrs[] = {
+ &pwrscale_attr_policy.attr,
+ &pwrscale_attr_avail_policies.attr,
+ NULL
+};
+
+static ssize_t policy_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
+ struct kgsl_device *device = pwrscale_to_device(pwrscale);
+ struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
+ ssize_t ret;
+
+ if (pattr->show)
+ ret = pattr->show(device, pwrscale, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static ssize_t policy_sysfs_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
+ struct kgsl_device *device = pwrscale_to_device(pwrscale);
+ struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
+ ssize_t ret;
+
+ if (pattr->store)
+ ret = pattr->store(device, pwrscale, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static void policy_sysfs_release(struct kobject *kobj)
+{
+}
+
+static ssize_t pwrscale_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kgsl_device *device = to_device(kobj);
+ struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
+ ssize_t ret;
+
+ if (pattr->show)
+ ret = pattr->show(device, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static ssize_t pwrscale_sysfs_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = to_device(kobj);
+ struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
+ ssize_t ret;
+
+ if (pattr->store)
+ ret = pattr->store(device, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static void pwrscale_sysfs_release(struct kobject *kobj)
+{
+}
+
+static const struct sysfs_ops policy_sysfs_ops = {
+ .show = policy_sysfs_show,
+ .store = policy_sysfs_store
+};
+
+static const struct sysfs_ops pwrscale_sysfs_ops = {
+ .show = pwrscale_sysfs_show,
+ .store = pwrscale_sysfs_store
+};
+
+static struct kobj_type ktype_pwrscale_policy = {
+ .sysfs_ops = &policy_sysfs_ops,
+ .default_attrs = NULL,
+ .release = policy_sysfs_release
+};
+
+static struct kobj_type ktype_pwrscale = {
+ .sysfs_ops = &pwrscale_sysfs_ops,
+ .default_attrs = pwrscale_attrs,
+ .release = pwrscale_sysfs_release
+};
+
+#define PWRSCALE_ACTIVE(_d) \
+ ((_d)->pwrscale.policy && (_d)->pwrscale.enabled)
+
+void kgsl_pwrscale_sleep(struct kgsl_device *device)
+{
+ if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->sleep)
+ device->pwrscale.policy->sleep(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_sleep);
+
+void kgsl_pwrscale_wake(struct kgsl_device *device)
+{
+ if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->wake)
+ device->pwrscale.policy->wake(device, &device->pwrscale);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_wake);
+
+void kgsl_pwrscale_busy(struct kgsl_device *device)
+{
+ if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->busy)
+ if ((!device->pwrscale.gpu_busy) &&
+ (device->requested_state != KGSL_STATE_SLUMBER))
+ device->pwrscale.policy->busy(device,
+ &device->pwrscale);
+ device->pwrscale.gpu_busy = 1;
+}
+
+void kgsl_pwrscale_idle(struct kgsl_device *device)
+{
+ if (PWRSCALE_ACTIVE(device) && device->pwrscale.policy->idle)
+ if (device->requested_state != KGSL_STATE_SLUMBER &&
+ device->requested_state != KGSL_STATE_SLEEP)
+ device->pwrscale.policy->idle(device,
+ &device->pwrscale);
+ device->pwrscale.gpu_busy = 0;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_idle);
+
+void kgsl_pwrscale_disable(struct kgsl_device *device)
+{
+ device->pwrscale.enabled = 0;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_disable);
+
+void kgsl_pwrscale_enable(struct kgsl_device *device)
+{
+ device->pwrscale.enabled = 1;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_enable);
+
+int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ struct attribute_group *attr_group)
+{
+ int ret;
+
+ ret = kobject_add(&pwrscale->kobj, &device->pwrscale_kobj,
+ "%s", pwrscale->policy->name);
+
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&pwrscale->kobj, attr_group);
+
+ if (ret) {
+ kobject_del(&pwrscale->kobj);
+ kobject_put(&pwrscale->kobj);
+ }
+
+ return ret;
+}
+
+void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ struct attribute_group *attr_group)
+{
+ sysfs_remove_group(&pwrscale->kobj, attr_group);
+ kobject_del(&pwrscale->kobj);
+ kobject_put(&pwrscale->kobj);
+}
+
+static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device)
+{
+ if (device->pwrscale.policy != NULL) {
+ device->pwrscale.policy->close(device, &device->pwrscale);
+ kgsl_pwrctrl_pwrlevel_change(device,
+ device->pwrctrl.thermal_pwrlevel);
+ }
+ device->pwrscale.policy = NULL;
+}
+
+void kgsl_pwrscale_detach_policy(struct kgsl_device *device)
+{
+ mutex_lock(&device->mutex);
+ _kgsl_pwrscale_detach_policy(device);
+ mutex_unlock(&device->mutex);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_detach_policy);
+
+int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
+ struct kgsl_pwrscale_policy *policy)
+{
+ int ret = 0;
+
+ mutex_lock(&device->mutex);
+
+ if (device->pwrscale.policy == policy)
+ goto done;
+
+ if (device->pwrctrl.num_pwrlevels < 3) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (device->pwrscale.policy != NULL)
+ _kgsl_pwrscale_detach_policy(device);
+
+ device->pwrscale.policy = policy;
+
+
+ kgsl_pwrscale_enable(device);
+
+ if (policy) {
+ ret = device->pwrscale.policy->init(device, &device->pwrscale);
+ if (ret)
+ device->pwrscale.policy = NULL;
+ }
+
+done:
+ mutex_unlock(&device->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_attach_policy);
+
+int kgsl_pwrscale_init(struct kgsl_device *device)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale,
+ &device->dev->kobj, "pwrscale");
+
+ if (ret)
+ return ret;
+
+ kobject_init(&device->pwrscale.kobj, &ktype_pwrscale_policy);
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_pwrscale_init);
+
+void kgsl_pwrscale_close(struct kgsl_device *device)
+{
+ kobject_put(&device->pwrscale_kobj);
+}
+EXPORT_SYMBOL(kgsl_pwrscale_close);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
new file mode 100644
index 0000000..34698cd
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -0,0 +1,82 @@
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __KGSL_PWRSCALE_H
+#define __KGSL_PWRSCALE_H
+
+struct kgsl_pwrscale;
+
+struct kgsl_pwrscale_policy {
+ const char *name;
+ int (*init)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*close)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*idle)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*busy)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*sleep)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+ void (*wake)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale);
+};
+
+struct kgsl_pwrscale {
+ struct kgsl_pwrscale_policy *policy;
+ struct kobject kobj;
+ void *priv;
+ int gpu_busy;
+ int enabled;
+};
+
+struct kgsl_pwrscale_policy_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale, char *buf);
+ ssize_t (*store)(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale, const char *buf,
+ size_t count);
+};
+
+#define PWRSCALE_POLICY_ATTR(_name, _mode, _show, _store) \
+ struct kgsl_pwrscale_policy_attribute policy_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz;
+extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats;
+extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_msm;
+
+int kgsl_pwrscale_init(struct kgsl_device *device);
+void kgsl_pwrscale_close(struct kgsl_device *device);
+
+int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
+ struct kgsl_pwrscale_policy *policy);
+void kgsl_pwrscale_detach_policy(struct kgsl_device *device);
+
+void kgsl_pwrscale_idle(struct kgsl_device *device);
+void kgsl_pwrscale_busy(struct kgsl_device *device);
+void kgsl_pwrscale_sleep(struct kgsl_device *device);
+void kgsl_pwrscale_wake(struct kgsl_device *device);
+
+void kgsl_pwrscale_enable(struct kgsl_device *device);
+void kgsl_pwrscale_disable(struct kgsl_device *device);
+
+int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ struct attribute_group *attr_group);
+
+void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ struct attribute_group *attr_group);
+#endif
diff --git a/drivers/gpu/msm/kgsl_pwrscale_msm.c b/drivers/gpu/msm/kgsl_pwrscale_msm.c
new file mode 100644
index 0000000..f3948c3
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale_msm.c
@@ -0,0 +1,197 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <mach/msm_dcvs.h>
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+#include "a2xx_reg.h"
+
+struct msm_priv {
+ struct kgsl_device *device;
+ int enabled;
+ int handle;
+ unsigned int cur_freq;
+ struct msm_dcvs_idle idle_source;
+ struct msm_dcvs_freq freq_sink;
+ struct msm_dcvs_core_info *core_info;
+};
+
+static int msm_idle_enable(struct msm_dcvs_idle *self,
+ enum msm_core_control_event event)
+{
+ struct msm_priv *priv = container_of(self, struct msm_priv,
+ idle_source);
+
+ switch (event) {
+ case MSM_DCVS_ENABLE_IDLE_PULSE:
+ priv->enabled = true;
+ break;
+ case MSM_DCVS_DISABLE_IDLE_PULSE:
+ priv->enabled = false;
+ break;
+ case MSM_DCVS_ENABLE_HIGH_LATENCY_MODES:
+ case MSM_DCVS_DISABLE_HIGH_LATENCY_MODES:
+ break;
+ }
+ return 0;
+}
+
+static int msm_set_freq(struct msm_dcvs_freq *self,
+ unsigned int freq)
+{
+ int i, delta = 5000000;
+ struct msm_priv *priv = container_of(self, struct msm_priv,
+ freq_sink);
+ struct kgsl_device *device = priv->device;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+
+
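+ /* The DCVS layer works in KHz while the pwrlevel table is in Hz;
+ * convert, then pick the first pwrlevel within 5 MHz of the request. */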
+ freq *= 1000;
+ for (i = 0; i < pwr->num_pwrlevels; i++)
+ if (abs(pwr->pwrlevels[i].gpu_freq - freq) < delta)
+ break;
+ if (i == pwr->num_pwrlevels)
+ return 0;
+
+ mutex_lock(&device->mutex);
+ kgsl_pwrctrl_pwrlevel_change(device, i);
+ priv->cur_freq = pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq;
+ mutex_unlock(&device->mutex);
+
+
+ return priv->cur_freq / 1000;
+}
+
+static unsigned int msm_get_freq(struct msm_dcvs_freq *self)
+{
+ struct msm_priv *priv = container_of(self, struct msm_priv,
+ freq_sink);
+
+ return priv->cur_freq / 1000;
+}
+
+static void msm_busy(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct msm_priv *priv = pwrscale->priv;
+ if (priv->enabled)
+ msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_EXIT, 0);
+ return;
+}
+
+static void msm_idle(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct msm_priv *priv = pwrscale->priv;
+ unsigned int rb_rptr, rb_wptr;
+ kgsl_regread(device, REG_CP_RB_RPTR, &rb_rptr);
+ kgsl_regread(device, REG_CP_RB_WPTR, &rb_wptr);
+
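+ /* The ringbuffer is drained when the CP read pointer has caught up
+ * with the write pointer; only then report an idle pulse to DCVS. */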
+ if (priv->enabled && (rb_rptr == rb_wptr))
+ msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+
+ return;
+}
+
+static void msm_sleep(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+
+}
+
+static int msm_init(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct msm_priv *priv;
+ struct msm_dcvs_freq_entry *tbl;
+ int i, ret, low_level;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct platform_device *pdev =
+ container_of(device->parentdev, struct platform_device, dev);
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+
+ priv = pwrscale->priv = kzalloc(sizeof(struct msm_priv),
+ GFP_KERNEL);
+ if (pwrscale->priv == NULL)
+ return -ENOMEM;
+
+ priv->core_info = pdata->core_info;
+ tbl = priv->core_info->freq_tbl;
+
+ low_level = pwr->num_pwrlevels - KGSL_PWRLEVEL_LAST_OFFSET;
+ for (i = 0; i <= low_level; i++)
+ tbl[i].freq =
+ pwr->pwrlevels[low_level - i].gpu_freq / 1000;
+ ret = msm_dcvs_register_core(device->name, 0, priv->core_info);
+ if (ret) {
+ KGSL_PWR_ERR(device, "msm_dcvs_register_core failed");
+ goto err;
+ }
+
+ priv->device = device;
+ priv->idle_source.enable = msm_idle_enable;
+ priv->idle_source.core_name = device->name;
+ priv->handle = msm_dcvs_idle_source_register(&priv->idle_source);
+ if (priv->handle < 0) {
+ ret = priv->handle;
+ KGSL_PWR_ERR(device, "msm_dcvs_idle_source_register failed\n");
+ goto err;
+ }
+
+ priv->freq_sink.core_name = device->name;
+ priv->freq_sink.set_frequency = msm_set_freq;
+ priv->freq_sink.get_frequency = msm_get_freq;
+ ret = msm_dcvs_freq_sink_register(&priv->freq_sink);
+ if (ret >= 0) {
+ if (device->ftbl->isidle(device)) {
+ device->pwrscale.gpu_busy = 0;
+ msm_dcvs_idle(priv->handle, MSM_DCVS_IDLE_ENTER, 0);
+ } else {
+ device->pwrscale.gpu_busy = 1;
+ }
+ return 0;
+ }
+
+ KGSL_PWR_ERR(device, "msm_dcvs_freq_sink_register failed\n");
+ msm_dcvs_idle_source_unregister(&priv->idle_source);
+
+err:
+ kfree(pwrscale->priv);
+ pwrscale->priv = NULL;
+
+ return ret;
+}
+
+static void msm_close(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct msm_priv *priv = pwrscale->priv;
+
+ if (pwrscale->priv == NULL)
+ return;
+ msm_dcvs_idle_source_unregister(&priv->idle_source);
+ msm_dcvs_freq_sink_unregister(&priv->freq_sink);
+ kfree(pwrscale->priv);
+ pwrscale->priv = NULL;
+}
+
+struct kgsl_pwrscale_policy kgsl_pwrscale_policy_msm = {
+ .name = "msm",
+ .init = msm_init,
+ .idle = msm_idle,
+ .busy = msm_busy,
+ .sleep = msm_sleep,
+ .close = msm_close,
+};
diff --git a/drivers/gpu/msm/kgsl_pwrscale_trustzone.c b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
new file mode 100644
index 0000000..5ae3fe0
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_pwrscale_trustzone.c
@@ -0,0 +1,554 @@
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <mach/socinfo.h>
+#include <mach/scm.h>
+
+#include "kgsl.h"
+#include "kgsl_pwrscale.h"
+#include "kgsl_device.h"
+#include "kgsl_trace.h"
+
+#define TZ_GOVERNOR_PERFORMANCE 0
+#define TZ_GOVERNOR_ONDEMAND 1
+
+struct tz_priv {
+ int governor;
+ unsigned int no_switch_cnt;
+ unsigned int skip_cnt;
+};
+spinlock_t tz_lock;
+
+#define SWITCH_OFF 200
+#define SWITCH_OFF_RESET_TH 40
+#define SKIP_COUNTER 500
+#define TZ_RESET_ID 0x3
+#define TZ_UPDATE_ID 0x4
+#define TZ_CMD_ID 0x90
+
+
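+/* Parameter indices for writing DCVS tunables into the secure world */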
+#define PARAM_INDEX_WRITE_DOWNTHRESHOLD 100
+#define PARAM_INDEX_WRITE_UPTHRESHOLD 101
+#define PARAM_INDEX_WRITE_MINGAPCOUNT 102
+#define PARAM_INDEX_WRITE_NUMGAPS 103
+#define PARAM_INDEX_WRITE_INITIDLEVECTOR 104
+#define PARAM_INDEX_WRITE_DOWNTHRESHOLD_PERCENT 105
+#define PARAM_INDEX_WRITE_UPTHRESHOLD_PERCENT 106
+#define PARAM_INDEX_WRITE_DOWNTHRESHOLD_COUNT 107
+#define PARAM_INDEX_WRITE_UPTHRESHOLD_COUNT 108
+#define PARAM_INDEX_WRITE_ALGORITHM 109
+
+
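+/* Parameter indices for reading the DCVS tunables back */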
+#define PARAM_INDEX_READ_DOWNTHRESHOLD 200
+#define PARAM_INDEX_READ_UPTHRESHOLD 201
+#define PARAM_INDEX_READ_MINGAPCOUNT 202
+#define PARAM_INDEX_READ_NUMGAPS 203
+#define PARAM_INDEX_READ_INITIDLEVECTOR 204
+#define PARAM_INDEX_READ_DOWNTHRESHOLD_PERCENT 205
+#define PARAM_INDEX_READ_UPTHRESHOLD_PERCENT 206
+#define PARAM_INDEX_READ_DOWNTHRESHOLD_COUNT 207
+#define PARAM_INDEX_READ_UPTHRESHOLD_COUNT 208
+#define PARAM_INDEX_READ_ALGORITHM 209
+
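+/*
+ * Serialize SCM calls: scm_call_atomic2() traps into the secure world
+ * with two arguments and returns its result. Stubbed out to 0 when
+ * SCM support is not compiled in.
+ */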
+#ifdef CONFIG_MSM_SCM
+static int __secure_tz_entry(u32 cmd, u32 val, u32 id)
+{
+ int ret;
+ spin_lock(&tz_lock);
+ __iowmb();
+ ret = scm_call_atomic2(SCM_SVC_IO, cmd, val, id);
+ spin_unlock(&tz_lock);
+ return ret;
+}
+#else
+static int __secure_tz_entry(u32 cmd, u32 val, u32 id)
+{
+ return 0;
+}
+#endif
+
+static ssize_t tz_governor_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ struct tz_priv *priv = pwrscale->priv;
+ int ret;
+
+ if (priv->governor == TZ_GOVERNOR_ONDEMAND)
+ ret = snprintf(buf, 10, "ondemand\n");
+ else
+ ret = snprintf(buf, 13, "performance\n");
+
+ return ret;
+}
+
+static ssize_t tz_governor_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ char str[32];
+ struct tz_priv *priv = pwrscale->priv;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int ret;
+
+ /* "%20s" can write up to 21 bytes including the NUL terminator,
+ * so the buffer must be larger than 20 bytes. */
+ ret = sscanf(buf, "%20s", str);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&device->mutex);
+
+ if (!strncmp(str, "ondemand", 8))
+ priv->governor = TZ_GOVERNOR_ONDEMAND;
+ else if (!strncmp(str, "performance", 11))
+ priv->governor = TZ_GOVERNOR_PERFORMANCE;
+
+ if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
+ kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
+
+ mutex_unlock(&device->mutex);
+ return count;
+}
+
+static ssize_t dcvs_downthreshold_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_DOWNTHRESHOLD);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_downthreshold_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_DOWNTHRESHOLD);
+
+ return count;
+}
+
+static ssize_t dcvs_upthreshold_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_UPTHRESHOLD);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_upthreshold_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_UPTHRESHOLD);
+
+ return count;
+}
+
+static ssize_t dcvs_down_count_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_MINGAPCOUNT);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_down_count_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_MINGAPCOUNT);
+
+ return count;
+}
+
+static ssize_t dcvs_numgaps_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_NUMGAPS);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_numgaps_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_NUMGAPS);
+
+ return count;
+}
+
+static ssize_t dcvs_init_idle_vector_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_INITIDLEVECTOR);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_init_idle_vector_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_INITIDLEVECTOR);
+
+ return count;
+}
+
+static ssize_t dcvs_algorithm_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_ALGORITHM);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_algorithm_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_ALGORITHM);
+
+ return count;
+}
+
+static ssize_t dcvs_upthreshold_percent_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_UPTHRESHOLD_PERCENT);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_upthreshold_percent_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_UPTHRESHOLD_PERCENT);
+
+ return count;
+}
+
+static ssize_t dcvs_downthreshold_percent_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_DOWNTHRESHOLD_PERCENT);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_downthreshold_percent_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_DOWNTHRESHOLD_PERCENT);
+
+ return count;
+}
+
+static ssize_t dcvs_upthreshold_count_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_UPTHRESHOLD_COUNT);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_upthreshold_count_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_UPTHRESHOLD_COUNT);
+
+ return count;
+}
+
+static ssize_t dcvs_downthreshold_count_show(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ char *buf)
+{
+ int val, ret;
+ val = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_READ_DOWNTHRESHOLD_COUNT);
+
+ ret = sprintf(buf, "%d\n", val);
+
+ return ret;
+}
+
+static ssize_t dcvs_downthreshold_count_store(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale,
+ const char *buf, size_t count)
+{
+ int val, ret;
+
+ ret = sscanf(buf, "%d", &val);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ __secure_tz_entry(TZ_CMD_ID, val, PARAM_INDEX_WRITE_DOWNTHRESHOLD_COUNT);
+
+ return count;
+}
+
+PWRSCALE_POLICY_ATTR(governor, 0644, tz_governor_show, tz_governor_store);
+PWRSCALE_POLICY_ATTR(dcvs_downthreshold, 0644, dcvs_downthreshold_show, dcvs_downthreshold_store);
+PWRSCALE_POLICY_ATTR(dcvs_upthreshold, 0644, dcvs_upthreshold_show, dcvs_upthreshold_store);
+PWRSCALE_POLICY_ATTR(dcvs_down_count, 0644, dcvs_down_count_show, dcvs_down_count_store);
+PWRSCALE_POLICY_ATTR(dcvs_numgaps, 0644, dcvs_numgaps_show, dcvs_numgaps_store);
+PWRSCALE_POLICY_ATTR(dcvs_init_idle_vector, 0644, dcvs_init_idle_vector_show, dcvs_init_idle_vector_store);
+PWRSCALE_POLICY_ATTR(dcvs_algorithm, 0644, dcvs_algorithm_show, dcvs_algorithm_store);
+PWRSCALE_POLICY_ATTR(dcvs_upthreshold_percent, 0644, dcvs_upthreshold_percent_show, dcvs_upthreshold_percent_store);
+PWRSCALE_POLICY_ATTR(dcvs_downthreshold_percent, 0644, dcvs_downthreshold_percent_show, dcvs_downthreshold_percent_store);
+PWRSCALE_POLICY_ATTR(dcvs_upthreshold_count, 0644, dcvs_upthreshold_count_show, dcvs_upthreshold_count_store);
+PWRSCALE_POLICY_ATTR(dcvs_downthreshold_count, 0644, dcvs_downthreshold_count_show, dcvs_downthreshold_count_store);
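+
+/*
+ * These attributes are published under the policy's kobject by
+ * kgsl_pwrscale_policy_add_files(). Assuming the usual kgsl sysfs layout
+ * (path hypothetical), the governor can be switched at runtime with:
+ *   echo performance > /sys/class/kgsl/kgsl-3d0/pwrscale/trustzone/governor
+ */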
+
+static struct attribute *tz_attrs[] = {
+ &policy_attr_governor.attr,
+ &policy_attr_dcvs_downthreshold.attr,
+ &policy_attr_dcvs_upthreshold.attr,
+ &policy_attr_dcvs_down_count.attr,
+ &policy_attr_dcvs_numgaps.attr,
+ &policy_attr_dcvs_init_idle_vector.attr,
+ &policy_attr_dcvs_algorithm.attr,
+ &policy_attr_dcvs_upthreshold_percent.attr,
+ &policy_attr_dcvs_downthreshold_percent.attr,
+ &policy_attr_dcvs_upthreshold_count.attr,
+ &policy_attr_dcvs_downthreshold_count.attr,
+ NULL
+};
+
+static struct attribute_group tz_attr_group = {
+ .attrs = tz_attrs,
+};
+
+static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+ struct tz_priv *priv = pwrscale->priv;
+ if (device->state != KGSL_STATE_NAP &&
+ priv->governor == TZ_GOVERNOR_ONDEMAND) {
+ trace_kgsl_pwrlevel(device, device->pwrctrl.default_pwrlevel,
+ device->pwrctrl.pwrlevels[device->pwrctrl.default_pwrlevel].gpu_freq);
+ kgsl_pwrctrl_pwrlevel_change(device,
+ device->pwrctrl.default_pwrlevel);
+ }
+}
+
+static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct tz_priv *priv = pwrscale->priv;
+ struct kgsl_power_stats stats;
+ int val, idle, total_time;
+
+
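+ /* The performance governor pins the GPU at the top level;
+ * there is nothing to scale. */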
+ if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
+ return;
+
+ device->ftbl->power_stats(device, &stats);
+ if (stats.total_time == 0)
+ return;
+
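+ /* Once the GPU has stayed at the top pwrlevel for SWITCH_OFF idle
+ * callbacks, stop querying TZ to avoid the SCM-call overhead, and
+ * only decay the counter every SKIP_COUNTER skipped calls so TZ is
+ * consulted again occasionally. */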
+ if (pwr->active_pwrlevel == 0) {
+ if (priv->no_switch_cnt > SWITCH_OFF) {
+ priv->skip_cnt++;
+ if (priv->skip_cnt > SKIP_COUNTER) {
+ priv->no_switch_cnt -= SWITCH_OFF_RESET_TH;
+ priv->skip_cnt = 0;
+ }
+ return;
+ }
+ priv->no_switch_cnt++;
+ } else {
+ priv->no_switch_cnt = 0;
+ }
+
+ idle = stats.total_time - stats.busy_time;
+ idle = (idle > 0) ? idle : 0;
+
+
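+ /* Pack the active pwrlevel into the top 4 bits of total_time;
+ * TZ returns a relative pwrlevel adjustment. */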
+ total_time = stats.total_time & 0x0FFFFFFF;
+ total_time |= (pwr->active_pwrlevel) << 28;
+
+ val = __secure_tz_entry(TZ_UPDATE_ID, idle, total_time);
+
+ if (val)
+ kgsl_pwrctrl_pwrlevel_change(device,
+ pwr->active_pwrlevel + val);
+}
+
+static void tz_busy(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ device->on_time = ktime_to_us(ktime_get());
+}
+
+static void tz_sleep(struct kgsl_device *device,
+ struct kgsl_pwrscale *pwrscale)
+{
+ struct tz_priv *priv = pwrscale->priv;
+
+ trace_kgsl_pwrlevel(device, 0, 0);
+
+ __secure_tz_entry(TZ_RESET_ID, 0, device->id);
+ priv->no_switch_cnt = 0;
+}
+
+static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+ struct tz_priv *priv;
+ int ret;
+
+
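+ /* Only these SoCs provide the trustzone DCVS interface. */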
+ if (!(cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_apq8064() ||
+ cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8627()))
+ return -EINVAL;
+
+ priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
+ if (pwrscale->priv == NULL)
+ return -ENOMEM;
+
+ priv->governor = TZ_GOVERNOR_ONDEMAND;
+ spin_lock_init(&tz_lock);
+ kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);
+
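+ /* The return value of this TZ call selects which DCVS algorithm
+ * message to log. */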
+ ret = __secure_tz_entry(TZ_CMD_ID, 0, PARAM_INDEX_WRITE_ALGORITHM);
+
+ if (ret == 1)
+ pr_info("Using HTC GPU DCVS algorithm\n");
+ else
+ pr_info("Using QCT GPU DCVS algorithm\n");
+
+ return 0;
+}
+
+static void tz_close(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
+{
+ kgsl_pwrscale_policy_remove_files(device, pwrscale, &tz_attr_group);
+ kfree(pwrscale->priv);
+ pwrscale->priv = NULL;
+}
+
+struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz = {
+ .name = "trustzone",
+ .init = tz_init,
+ .busy = tz_busy,
+ .idle = tz_idle,
+ .sleep = tz_sleep,
+ .wake = tz_wake,
+ .close = tz_close
+};
+EXPORT_SYMBOL(kgsl_pwrscale_policy_tz);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
new file mode 100644
index 0000000..d4a8f92
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -0,0 +1,1061 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+#include <linux/memory_alloc.h>
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
+#include <linux/highmem.h>
+
+#include "kgsl.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_device.h"
+
+struct ion_client *kgsl_client = NULL;
+
+struct kgsl_mem_entry_attribute {
+ struct attribute attr;
+ int memtype;
+ ssize_t (*show)(struct kgsl_process_private *priv,
+ int type, char *buf);
+};
+
+#define to_mem_entry_attr(a) \
+container_of(a, struct kgsl_mem_entry_attribute, attr)
+
+#define __MEM_ENTRY_ATTR(_type, _name, _show) \
+{ \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .memtype = _type, \
+ .show = _show, \
+}
+
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+static ssize_t
+gpubusy_show(struct kgsl_process_private *priv, int type, char *buf)
+{
+ ssize_t len;
+ int i;
+
+ /* Emit total/busy GPU time, then one pair per power level. Use
+ * scnprintf with the remaining space so the output cannot run past
+ * the sysfs page. */
+ len = scnprintf(buf, PAGE_SIZE, "%lld %lld",
+ priv->gputime.total, priv->gputime.busy);
+ for (i = 0; i < KGSL_MAX_PWRLEVELS; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, " %lld %lld",
+ priv->gputime_in_state[i].total,
+ priv->gputime_in_state[i].busy);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+ return len;
+}
+
+static struct kgsl_mem_entry_attribute gpubusy = __MEM_ENTRY_ATTR(0, gpubusy, gpubusy_show);
+#endif
+
+
+struct mem_entry_stats {
+ int memtype;
+ struct kgsl_mem_entry_attribute attr;
+ struct kgsl_mem_entry_attribute max_attr;
+};
+
+
+#define MEM_ENTRY_STAT(_type, _name) \
+{ \
+ .memtype = _type, \
+ .attr = __MEM_ENTRY_ATTR(_type, _name, mem_entry_show), \
+ .max_attr = __MEM_ENTRY_ATTR(_type, _name##_max, \
+ mem_entry_max_show), \
+}
+
+
+
+static struct page *kgsl_guard_page;
+
+
+static struct kgsl_process_private *
+_get_priv_from_kobj(struct kobject *kobj)
+{
+ struct kgsl_process_private *private;
+ unsigned long name;
+
+ if (!kobj)
+ return NULL;
+
+ if (sscanf(kobj->name, "%lu", &name) != 1)
+ return NULL;
+
+ list_for_each_entry(private, &kgsl_driver.process_list, list) {
+ if (private->pid == name)
+ return private;
+ }
+
+ return NULL;
+}
+
+
+static ssize_t
+mem_entry_show(struct kgsl_process_private *priv, int type, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].cur);
+}
+
+
+static ssize_t
+mem_entry_max_show(struct kgsl_process_private *priv, int type, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", priv->stats[type].max);
+}
+
+
+static void mem_entry_sysfs_release(struct kobject *kobj)
+{
+}
+
+static ssize_t mem_entry_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kgsl_mem_entry_attribute *pattr = to_mem_entry_attr(attr);
+ struct kgsl_process_private *priv;
+ ssize_t ret;
+
+ mutex_lock(&kgsl_driver.process_mutex);
+ priv = _get_priv_from_kobj(kobj);
+
+ if (priv && pattr->show)
+ ret = pattr->show(priv, pattr->memtype, buf);
+ else
+ ret = -EIO;
+
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return ret;
+}
+
+static const struct sysfs_ops mem_entry_sysfs_ops = {
+ .show = mem_entry_sysfs_show,
+};
+
+static struct kobj_type ktype_mem_entry = {
+ .sysfs_ops = &mem_entry_sysfs_ops,
+ .default_attrs = NULL,
+ .release = mem_entry_sysfs_release
+};
+
+static struct mem_entry_stats mem_stats[] = {
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_KERNEL, kernel),
+#ifdef CONFIG_ANDROID_PMEM
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_PMEM, pmem),
+#endif
+#ifdef CONFIG_ASHMEM
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ASHMEM, ashmem),
+#endif
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
+#ifdef CONFIG_ION
+ MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
+#endif
+};
+
+void
+kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
+ sysfs_remove_file(&private->kobj, &mem_stats[i].attr.attr);
+ sysfs_remove_file(&private->kobj,
+ &mem_stats[i].max_attr.attr);
+ }
+
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ sysfs_remove_file(&private->kobj, &gpubusy.attr);
+#endif
+ kobject_put(&private->kobj);
+}
+
+void
+kgsl_process_init_sysfs(struct kgsl_process_private *private)
+{
+ char name[16];
+ int i, ret;
+
+ snprintf(name, sizeof(name), "%d", private->pid);
+
+ if (kobject_init_and_add(&private->kobj, &ktype_mem_entry,
+ kgsl_driver.prockobj, name))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mem_stats); i++) {
+
+ ret = sysfs_create_file(&private->kobj,
+ &mem_stats[i].attr.attr);
+ ret = sysfs_create_file(&private->kobj,
+ &mem_stats[i].max_attr.attr);
+ }
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE
+ ret = sysfs_create_file(&private->kobj, &gpubusy.attr);
+#endif
+}
+
+static ssize_t kgsl_drv_memstat_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned int val = 0;
+
+ /* Match the attribute name exactly: a prefix comparison would make
+ * "vmalloc" swallow "vmalloc_max" and so on. */
+ if (!strcmp(attr->attr.name, "vmalloc"))
+ val = kgsl_driver.stats.vmalloc;
+ else if (!strcmp(attr->attr.name, "vmalloc_max"))
+ val = kgsl_driver.stats.vmalloc_max;
+ else if (!strcmp(attr->attr.name, "page_alloc"))
+ val = kgsl_driver.stats.page_alloc;
+ else if (!strcmp(attr->attr.name, "page_alloc_max"))
+ val = kgsl_driver.stats.page_alloc_max;
+ else if (!strcmp(attr->attr.name, "coherent"))
+ val = kgsl_driver.stats.coherent;
+ else if (!strcmp(attr->attr.name, "coherent_max"))
+ val = kgsl_driver.stats.coherent_max;
+ else if (!strcmp(attr->attr.name, "mapped"))
+ val = kgsl_driver.stats.mapped;
+ else if (!strcmp(attr->attr.name, "mapped_max"))
+ val = kgsl_driver.stats.mapped_max;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t kgsl_drv_histogram_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int len = 0;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
+ kgsl_driver.stats.histogram[i]);
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ return len;
+}
+
+DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
+DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);
+
+static const struct device_attribute *drv_attr_list[] = {
+ &dev_attr_vmalloc,
+ &dev_attr_vmalloc_max,
+ &dev_attr_page_alloc,
+ &dev_attr_page_alloc_max,
+ &dev_attr_coherent,
+ &dev_attr_coherent_max,
+ &dev_attr_mapped,
+ &dev_attr_mapped_max,
+ &dev_attr_histogram,
+ NULL
+};
+
+void
+kgsl_sharedmem_uninit_sysfs(void)
+{
+ kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
+}
+
+int
+kgsl_sharedmem_init_sysfs(void)
+{
+ return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
+ drv_attr_list);
+}
+
+#ifdef CONFIG_OUTER_CACHE
+static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
+{
+ switch (op) {
+ case KGSL_CACHE_OP_FLUSH:
+ outer_flush_range(addr, addr + size);
+ break;
+ case KGSL_CACHE_OP_CLEAN:
+ outer_clean_range(addr, addr + size);
+ break;
+ case KGSL_CACHE_OP_INV:
+ outer_inv_range(addr, addr + size);
+ break;
+ }
+}
+
+static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, sglen, i) {
+ unsigned int paddr = kgsl_get_sg_pa(s);
+ _outer_cache_range_op(op, paddr, s->length);
+ }
+}
+
+#else
+static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
+{
+}
+#endif
+
+static int kgsl_ion_alloc_vmfault(struct kgsl_memdesc *memdesc,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ unsigned long offset, pfn;
+ int ret;
+
+ offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = (memdesc->sg[0].dma_address >> PAGE_SHIFT) + offset;
+ ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);
+
+ if (ret == -ENOMEM || ret == -EAGAIN)
+ return VM_FAULT_OOM;
+ else if (ret == -EFAULT)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static int kgsl_ion_alloc_vmflags(struct kgsl_memdesc *memdesc)
+{
+ return VM_RESERVED | VM_DONTEXPAND;
+}
+
+static void kgsl_ion_alloc_free(struct kgsl_memdesc *memdesc)
+{
+ kgsl_driver.stats.pre_alloc -= memdesc->size;
+ if (memdesc->handle)
+ ion_free(kgsl_client, memdesc->handle);
+
+ if (memdesc->hostptr) {
+ iounmap(memdesc->hostptr);
+ kgsl_driver.stats.vmalloc -= memdesc->size;
+ }
+
+ if (memdesc->private)
+ kgsl_process_sub_stats(memdesc->private, KGSL_MEM_ENTRY_PRE_ALLOC, memdesc->size);
+ else
+ kgsl_driver.stats.pre_alloc_kernel -= memdesc->size;
+}
+
+static int kgsl_ion_alloc_map_kernel(struct kgsl_memdesc *memdesc)
+{
+ if (!memdesc->hostptr) {
+ memdesc->hostptr = ioremap(memdesc->sg[0].dma_address, memdesc->sg[0].length);
+ if (IS_ERR_OR_NULL(memdesc->hostptr)) {
+ KGSL_CORE_ERR("kgsl: ion ioremap failed\n");
+ return -ENOMEM;
+ }
+ KGSL_STATS_ADD(memdesc->size, kgsl_driver.stats.vmalloc,
+ kgsl_driver.stats.vmalloc_max);
+ }
+
+ return 0;
+}
+
+static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ unsigned long offset;
+ struct page *page;
+ int i;
+
+ offset = (unsigned long) vmf->virtual_address - vma->vm_start;
+
+ i = offset >> PAGE_SHIFT;
+ page = sg_page(&memdesc->sg[i]);
+ if (page == NULL)
+ return VM_FAULT_SIGBUS;
+
+ get_page(page);
+
+ vmf->page = page;
+ return 0;
+}
+
+static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
+{
+ return VM_RESERVED | VM_DONTEXPAND;
+}
+
+static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
+{
+ int i = 0;
+ struct scatterlist *sg;
+ int sglen = memdesc->sglen;
+
+
+ if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
+ sglen--;
+
+ kgsl_driver.stats.page_alloc -= memdesc->size;
+
+ if (memdesc->hostptr) {
+ vunmap(memdesc->hostptr);
+ kgsl_driver.stats.vmalloc -= memdesc->size;
+ }
+ if (memdesc->sg)
+ for_each_sg(memdesc->sg, sg, sglen, i)
+ __free_page(sg_page(sg));
+
+ if (memdesc->private)
+ kgsl_process_sub_stats(memdesc->private, KGSL_MEM_ENTRY_PAGE_ALLOC, memdesc->size);
+ else
+ kgsl_driver.stats.page_alloc_kernel -= memdesc->size;
+}
+
+static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
+{
+ return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+}
+
+static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
+{
+ if (!memdesc->hostptr) {
+ pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
+ struct page **pages = NULL;
+ struct scatterlist *sg;
+ int sglen = memdesc->sglen;
+ int i;
+
+
+ if (memdesc->flags & KGSL_MEMDESC_GUARD_PAGE)
+ sglen--;
+
+
+ pages = vmalloc(sglen * sizeof(struct page *));
+ if (!pages) {
+ KGSL_CORE_ERR("vmalloc(%d) failed\n",
+ sglen * sizeof(struct page *));
+ return -ENOMEM;
+ }
+ for_each_sg(memdesc->sg, sg, sglen, i)
+ pages[i] = sg_page(sg);
+ memdesc->hostptr = vmap(pages, sglen,
+ VM_IOREMAP, page_prot);
+ KGSL_STATS_ADD(memdesc->size, kgsl_driver.stats.vmalloc,
+ kgsl_driver.stats.vmalloc_max);
+ vfree(pages);
+ }
+ if (!memdesc->hostptr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
+ struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ unsigned long offset, pfn;
+ int ret;
+
+ offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
+ PAGE_SHIFT;
+
+ pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
+ ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);
+
+ if (ret == -ENOMEM || ret == -EAGAIN)
+ return VM_FAULT_OOM;
+ else if (ret == -EFAULT)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
+{
+ kgsl_driver.stats.coherent -= memdesc->size;
+ if (memdesc->hostptr)
+ iounmap(memdesc->hostptr);
+
+ free_contiguous_memory_by_paddr(memdesc->physaddr);
+}
+
+static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
+{
+ kgsl_driver.stats.coherent -= memdesc->size;
+ dma_free_coherent(NULL, memdesc->size,
+ memdesc->hostptr, memdesc->physaddr);
+}
+
+struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
+ .free = kgsl_page_alloc_free,
+ .vmflags = kgsl_page_alloc_vmflags,
+ .vmfault = kgsl_page_alloc_vmfault,
+ .map_kernel_mem = kgsl_page_alloc_map_kernel,
+};
+EXPORT_SYMBOL(kgsl_page_alloc_ops);
+
+struct kgsl_memdesc_ops kgsl_ion_alloc_ops = {
+ .free = kgsl_ion_alloc_free,
+ .vmflags = kgsl_ion_alloc_vmflags,
+ .vmfault = kgsl_ion_alloc_vmfault,
+ .map_kernel_mem = kgsl_ion_alloc_map_kernel,
+};
+EXPORT_SYMBOL(kgsl_ion_alloc_ops);
+
+
+static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
+ .free = kgsl_ebimem_free,
+ .vmflags = kgsl_contiguous_vmflags,
+ .vmfault = kgsl_contiguous_vmfault,
+};
+
+static struct kgsl_memdesc_ops kgsl_coherent_ops = {
+ .free = kgsl_coherent_free,
+};
+
+void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
+{
+ void *addr = memdesc->hostptr;
+ int size = memdesc->size;
+
+ switch (op) {
+ case KGSL_CACHE_OP_FLUSH:
+ dmac_flush_range(addr, addr + size);
+ break;
+ case KGSL_CACHE_OP_CLEAN:
+ dmac_clean_range(addr, addr + size);
+ break;
+ case KGSL_CACHE_OP_INV:
+ dmac_inv_range(addr, addr + size);
+ break;
+ }
+
+ outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
+}
+EXPORT_SYMBOL(kgsl_cache_range_op);
+
+static int
+_kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size, unsigned int protflags)
+{
+ int i, order, ret = 0;
+ int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
+ struct page **pages = NULL;
+ pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
+ void *ptr;
+
+
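+ /* Reserve one extra scatterlist entry for the IOMMU guard page
+ * appended below. */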
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU)
+ sglen++;
+
+ memdesc->size = size;
+ memdesc->pagetable = pagetable;
+ memdesc->priv = KGSL_MEMFLAGS_CACHED;
+ memdesc->ops = &kgsl_page_alloc_ops;
+
+ memdesc->sg = kgsl_sg_alloc(sglen);
+
+ if (memdesc->sg == NULL) {
+ KGSL_CORE_ERR("vmalloc(%d) failed\n",
+ sglen * sizeof(struct scatterlist));
+ ret = -ENOMEM;
+ goto done;
+ }
+
+
+ pages = kmalloc(sglen * sizeof(struct page *), GFP_KERNEL);
+
+ if (pages == NULL) {
+ KGSL_CORE_ERR("kmalloc (%d) failed\n",
+ sglen * sizeof(struct page *));
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ kmemleak_not_leak(memdesc->sg);
+
+ memdesc->sglen = sglen;
+ sg_init_table(memdesc->sg, sglen);
+
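+ /* Allocate the backing pages one at a time so no physically
+ * contiguous block is required. */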
+ for (i = 0; i < PAGE_ALIGN(size) / PAGE_SIZE; i++) {
+
+
+ pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (pages[i] == NULL) {
+ ret = -ENOMEM;
+ memdesc->sglen = i;
+ goto done;
+ }
+
+ sg_set_page(&memdesc->sg[i], pages[i], PAGE_SIZE, 0);
+ }
+
+
+
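+ /* Point the reserved entry at a single shared guard page; if it
+ * cannot be allocated, just shrink the scatterlist. */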
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_IOMMU) {
+
+ if (kgsl_guard_page == NULL)
+ kgsl_guard_page = alloc_page(GFP_KERNEL | __GFP_ZERO |
+ __GFP_HIGHMEM);
+
+ if (kgsl_guard_page != NULL) {
+ sg_set_page(&memdesc->sg[sglen - 1], kgsl_guard_page,
+ PAGE_SIZE, 0);
+ memdesc->flags |= KGSL_MEMDESC_GUARD_PAGE;
+ } else
+ memdesc->sglen--;
+ }
+
+
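+ /* Zero and flush the new pages: map them all at once if vmap
+ * succeeds, otherwise fall back to per-page kmap_atomic. */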
+ ptr = vmap(pages, i, VM_IOREMAP, page_prot);
+
+ if (ptr != NULL) {
+ memset(ptr, 0, memdesc->size);
+ dmac_flush_range(ptr, ptr + memdesc->size);
+ vunmap(ptr);
+ } else {
+ int j;
+
+
+
+ for (j = 0; j < i; j++) {
+ ptr = kmap_atomic(pages[j]);
+ memset(ptr, 0, PAGE_SIZE);
+ dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ kunmap_atomic(ptr);
+ }
+ }
+
+ outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
+ KGSL_CACHE_OP_FLUSH);
+
+ ret = kgsl_mmu_map(pagetable, memdesc, protflags);
+
+ if (ret)
+ goto done;
+
+ order = get_order(size);
+
+ if (order < 16)
+ kgsl_driver.stats.histogram[order]++;
+
+done:
+ kfree(pages);
+
+ KGSL_STATS_ADD(size, kgsl_driver.stats.page_alloc,
+ kgsl_driver.stats.page_alloc_max);
+
+ if (ret)
+ kgsl_sharedmem_free(memdesc);
+
+ return ret;
+}
+
+int
+kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size)
+{
+ int ret = 0;
+ BUG_ON(size == 0);
+
+ size = ALIGN(size, PAGE_SIZE * 2);
+
+ kgsl_driver.stats.page_alloc_kernel += size;
+ ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ if (!ret)
+ ret = kgsl_page_alloc_map_kernel(memdesc);
+ if (ret) {
+ /* kgsl_sharedmem_free() rolls back the stats added above, so
+ * bumping page_alloc_kernel a second time here would leak the
+ * counter on the error path. */
+ kgsl_sharedmem_free(memdesc);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_page_alloc);
+
+int
+kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_process_private *private,
+ struct kgsl_pagetable *pagetable,
+ size_t size, int flags)
+{
+ unsigned int protflags;
+ int ret = 0;
+
+ if (size == 0)
+ return -EINVAL;
+
+ protflags = GSL_PT_PAGE_RV;
+ if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
+ protflags |= GSL_PT_PAGE_WV;
+
+ ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
+ protflags);
+
+ if (ret == 0 && private)
+ kgsl_process_add_stats(private, KGSL_MEM_ENTRY_PAGE_ALLOC, size);
+
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);
+
+static int
+_kgsl_sharedmem_ion_alloc(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size, unsigned int protflags)
+{
+ int order, ret = 0;
+ int sglen = 1;
+ void *ptr;
+ struct ion_handle *handle = NULL;
+ ion_phys_addr_t pa = 0;
+ size_t len = 0;
+
+
+
+
+
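+ /* ION hands back one physically contiguous buffer, so a single
+ * scatterlist entry is enough. */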
+ memdesc->size = size;
+ memdesc->pagetable = pagetable;
+ memdesc->priv = KGSL_MEMFLAGS_CACHED;
+ memdesc->ops = &kgsl_ion_alloc_ops;
+
+ memdesc->sg = kgsl_sg_alloc(sglen);
+
+ if (memdesc->sg == NULL) {
+ KGSL_CORE_ERR("kgsl_sg_alloc vmalloc(%d) failed\n",
+ sglen * sizeof(struct scatterlist));
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ kmemleak_not_leak(memdesc->sg);
+
+ memdesc->sglen = sglen;
+ sg_init_table(memdesc->sg, sglen);
+
+ if (kgsl_client == NULL)
+ kgsl_client = msm_ion_client_create(-1, "KGSL");
+
+ handle = ion_alloc(kgsl_client, size, SZ_4K, 0x1 << ION_SF_HEAP_ID);
+ if (IS_ERR_OR_NULL(handle)) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ if (ion_phys(kgsl_client, handle, &pa, &len)) {
+ KGSL_CORE_ERR("kgsl: ion_phys() failed\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memdesc->handle = handle;
+
+ memdesc->sg[0].length = memdesc->size;
+ memdesc->sg[0].offset = 0;
+ memdesc->sg[0].dma_address = pa;
+
+
+
+
+
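+ /* Zero the buffer through a temporary kernel mapping and flush it
+ * out of the caches. */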
+ ptr = ioremap(pa, memdesc->size);
+
+ if (ptr != NULL) {
+ memset(ptr, 0, memdesc->size);
+ dmac_flush_range(ptr, ptr + memdesc->size);
+ iounmap(ptr);
+ }
+
+ outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, KGSL_CACHE_OP_FLUSH);
+
+ ret = kgsl_mmu_map(pagetable, memdesc, protflags);
+
+ if (ret) {
+ KGSL_CORE_ERR("kgsl: kgsl_mmu_map failed\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ order = get_order(size);
+
+ if (order < 16)
+ kgsl_driver.stats.histogram[order]++;
+
+done:
+ KGSL_STATS_ADD(size, kgsl_driver.stats.pre_alloc, kgsl_driver.stats.pre_alloc_max);
+
+ if (ret)
+ kgsl_sharedmem_free(memdesc);
+
+ return ret;
+}
+
+int
+kgsl_sharedmem_ion_alloc(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size)
+{
+ int ret;
+
+ BUG_ON(size == 0);
+ size = PAGE_ALIGN(size);
+
+ kgsl_driver.stats.pre_alloc_kernel += size;
+ ret = _kgsl_sharedmem_ion_alloc(memdesc, pagetable, size,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+ if (!ret)
+ ret = kgsl_ion_alloc_map_kernel(memdesc);
+
+ if (ret) {
+ /* kgsl_sharedmem_free() rolls back the stats added above, so
+ * don't bump pre_alloc_kernel a second time here. */
+ kgsl_sharedmem_free(memdesc);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ion_alloc);
+
+int
+kgsl_sharedmem_ion_alloc_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_process_private *private,
+ struct kgsl_pagetable *pagetable,
+ size_t size, int flags)
+{
+ unsigned int protflags;
+ int ret = 0;
+
+ BUG_ON(size == 0);
+
+ size = PAGE_ALIGN(size);
+
+ protflags = GSL_PT_PAGE_RV;
+ if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
+ protflags |= GSL_PT_PAGE_WV;
+
+ ret = _kgsl_sharedmem_ion_alloc(memdesc, pagetable, size,
+ protflags);
+
+ if (ret == 0 && private)
+ kgsl_process_add_stats(private, KGSL_MEM_ENTRY_PRE_ALLOC, size);
+
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ion_alloc_user);
+
+int
+kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
+{
+ int result = 0;
+
+ size = ALIGN(size, PAGE_SIZE);
+
+ memdesc->size = size;
+ memdesc->ops = &kgsl_coherent_ops;
+
+ memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
+ GFP_KERNEL);
+ if (memdesc->hostptr == NULL) {
+ KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
+ result = -ENOMEM;
+ goto err;
+ }
+
+ result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
+ if (result)
+ goto err;
+
+
+
+ KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
+ kgsl_driver.stats.coherent_max);
+
+err:
+ if (result)
+ kgsl_sharedmem_free(memdesc);
+
+ return result;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);
+
+void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
+{
+ if (memdesc == NULL || memdesc->size == 0)
+ return;
+
+ if (memdesc->gpuaddr)
+ kgsl_mmu_unmap(memdesc->pagetable, memdesc);
+
+ if (memdesc->ops && memdesc->ops->free)
+ memdesc->ops->free(memdesc);
+
+ kgsl_sg_free(memdesc->sg, memdesc->sglen);
+
+ memset(memdesc, 0, sizeof(*memdesc));
+}
+EXPORT_SYMBOL(kgsl_sharedmem_free);
+
+static int
+_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size)
+{
+ int result = 0;
+
+ memdesc->size = size;
+ memdesc->pagetable = pagetable;
+ memdesc->ops = &kgsl_ebimem_ops;
+ memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);
+
+ if (memdesc->physaddr == 0) {
+ KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
+ size);
+ return -ENOMEM;
+ }
+
+ result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
+
+ if (result)
+ goto err;
+
+ result = kgsl_mmu_map(pagetable, memdesc,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+ if (result)
+ goto err;
+
+ KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
+ kgsl_driver.stats.coherent_max);
+
+err:
+ if (result)
+ kgsl_sharedmem_free(memdesc);
+
+ return result;
+}
+
+int
+kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size, int flags)
+{
+ size = ALIGN(size, PAGE_SIZE);
+ return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);
+
+int
+kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size)
+{
+ int result;
+ size = ALIGN(size, 8192);
+ result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+
+ if (result)
+ return result;
+
+ memdesc->hostptr = ioremap(memdesc->physaddr, size);
+
+ if (memdesc->hostptr == NULL) {
+ KGSL_CORE_ERR("ioremap failed\n");
+ kgsl_sharedmem_free(memdesc);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_ebimem);
+
+int
+kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
+ uint32_t *dst,
+ unsigned int offsetbytes)
+{
+ uint32_t *src;
+ BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
+ WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
+ if (offsetbytes % sizeof(uint32_t) != 0)
+ return -EINVAL;
+
+ WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
+ if (offsetbytes + sizeof(uint32_t) > memdesc->size)
+ return -ERANGE;
+ src = (uint32_t *)(memdesc->hostptr + offsetbytes);
+ *dst = *src;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_readl);
+
+int
+kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
+ unsigned int offsetbytes,
+ uint32_t src)
+{
+ uint32_t *dst;
+ BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
+ WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
+ if (offsetbytes % sizeof(uint32_t) != 0)
+ return -EINVAL;
+
+ WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
+ if (offsetbytes + sizeof(uint32_t) > memdesc->size)
+ return -ERANGE;
+ kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes,
+ src, sizeof(uint32_t));
+ dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
+ *dst = src;
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_writel);
+
+int
+kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
+ unsigned int value, unsigned int sizebytes)
+{
+ BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
+ BUG_ON(offsetbytes + sizebytes > memdesc->size);
+
+ kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes, value,
+ sizebytes);
+ memset(memdesc->hostptr + offsetbytes, value, sizebytes);
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_set);
+
+int
+kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
+ const struct kgsl_memdesc *memdesc)
+{
+ unsigned long addr = vma->vm_start;
+ unsigned long size = vma->vm_end - vma->vm_start;
+ int ret, i = 0;
+
+ if (!memdesc->sg || (size != memdesc->size) ||
+ (memdesc->sglen != (size / PAGE_SIZE)))
+ return -EINVAL;
+
+ for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) {
+ ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i]));
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_sharedmem_map_vma);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
new file mode 100644
index 0000000..9c7eb70
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -0,0 +1,191 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __KGSL_SHAREDMEM_H
+#define __KGSL_SHAREDMEM_H
+
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include "kgsl_mmu.h"
+#include <linux/kmemleak.h>
+#include <linux/sched.h>
+
+struct kgsl_device;
+struct kgsl_process_private;
+
+#define KGSL_CACHE_OP_INV 0x01
+#define KGSL_CACHE_OP_FLUSH 0x02
+#define KGSL_CACHE_OP_CLEAN 0x03
+
+#define KGSL_MEMFLAGS_CACHED 0x00000001
+#define KGSL_MEMFLAGS_GLOBAL 0x00000002
+
+extern struct kgsl_memdesc_ops kgsl_page_alloc_ops;
+
+int kgsl_sharedmem_ion_alloc(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size);
+
+int kgsl_sharedmem_ion_alloc_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_process_private *private,
+ struct kgsl_pagetable *pagetable,
+ size_t size, int flags);
+
+
+int kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size);
+
+int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_process_private *private,
+ struct kgsl_pagetable *pagetable,
+ size_t size, int flags);
+
+int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);
+
+int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size, int flags);
+
+int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ size_t size);
+
+void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
+
+int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
+ uint32_t *dst,
+ unsigned int offsetbytes);
+
+int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
+ unsigned int offsetbytes,
+ uint32_t src);
+
+int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
+ unsigned int offsetbytes, unsigned int value,
+ unsigned int sizebytes);
+
+void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);
+
+void kgsl_process_init_sysfs(struct kgsl_process_private *private);
+void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
+
+int kgsl_sharedmem_init_sysfs(void);
+void kgsl_sharedmem_uninit_sysfs(void);
+
+static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
+{
+ unsigned int pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+int
+kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
+ const struct kgsl_memdesc *memdesc);
+
+
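+/* Scatterlist tables smaller than a page come from the slab allocator;
+ * larger ones fall back to vmalloc. kgsl_sg_free() mirrors this choice. */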
+static inline void *kgsl_sg_alloc(unsigned int sglen)
+{
+ if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
+ return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
+ else
+ return vmalloc(sglen * sizeof(struct scatterlist));
+}
+
+static inline void kgsl_sg_free(void *ptr, unsigned int sglen)
+{
+ if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
+ kfree(ptr);
+ else
+ vfree(ptr);
+}
+
+static inline int
+memdesc_sg_phys(struct kgsl_memdesc *memdesc,
+ unsigned int physaddr, unsigned int size)
+{
+ memdesc->sg = kgsl_sg_alloc(1);
+ if (memdesc->sg == NULL)
+ return -ENOMEM;
+
+ kmemleak_not_leak(memdesc->sg);
+
+ memdesc->sglen = 1;
+ sg_init_table(memdesc->sg, 1);
+ memdesc->sg[0].length = size;
+ memdesc->sg[0].offset = 0;
+ memdesc->sg[0].dma_address = physaddr;
+ return 0;
+}
+
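+/* Kernel allocations of 4 MB or more try the contiguous ION path first
+ * and fall back to page-by-page allocation if that fails. */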
+static inline int
+kgsl_allocate(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable, size_t size)
+{
+ int ret = 1;
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
+ return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
+
+ if (size >= SZ_4M)
+ ret = kgsl_sharedmem_ion_alloc(memdesc, pagetable, size);
+
+ if (ret)
+ return kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
+ return ret;
+}
+
+static inline int
+kgsl_allocate_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_process_private *private,
+ struct kgsl_pagetable *pagetable,
+ size_t size, unsigned int flags)
+{
+ int ret = 1;
+ char task_comm[TASK_COMM_LEN];
+
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
+ return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size,
+ flags);
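+ /* Buffers of 4 MB or more (or 1 MB or more for the HTC launcher) try
+ * ION first, then fall back to page allocation. "om.htc.launcher" is
+ * the truncated task comm of com.htc.launcher. */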
+ if (size >= SZ_4M)
+ ret = kgsl_sharedmem_ion_alloc_user(memdesc, private, pagetable, size, flags);
+ else if (size >= SZ_1M && strcmp("om.htc.launcher", get_task_comm(task_comm, current->group_leader)) == 0)
+ ret = kgsl_sharedmem_ion_alloc_user(memdesc, private, pagetable, size, flags);
+
+ if (ret)
+ return kgsl_sharedmem_page_alloc_user(memdesc, private, pagetable, size, flags);
+
+ return ret;
+}
+
+static inline int
+kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
+{
+ int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);
+ if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
+ memdesc->gpuaddr = memdesc->physaddr;
+ return ret;
+}
+
+static inline int kgsl_sg_size(struct scatterlist *sg, int sglen)
+{
+ int i, size = 0;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, sglen, i) {
+ size += s->length;
+ }
+
+ return size;
+}
+#endif
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
new file mode 100644
index 0000000..9704e2b
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -0,0 +1,738 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/time.h>
+#include <linux/sysfs.h>
+#include <linux/utsname.h>
+#include <linux/sched.h>
+#include <linux/idr.h>
+
+#include "kgsl.h"
+#include "kgsl_log.h"
+#include "kgsl_device.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_snapshot.h"
+
+
+struct kgsl_snapshot_object {
+ unsigned int gpuaddr;
+ unsigned int ptbase;
+ unsigned int size;
+ unsigned int offset;
+ int type;
+ struct kgsl_mem_entry *entry;
+ struct list_head node;
+};
+
+struct snapshot_obj_itr {
+ void *buf;
+ int pos;
+ loff_t offset;
+ size_t remain;
+ size_t write; /* Bytes written so far */
+};
+
+static void obj_itr_init(struct snapshot_obj_itr *itr, void *buf,
+ loff_t offset, size_t remain)
+{
+ itr->buf = buf;
+ itr->offset = offset;
+ itr->remain = remain;
+ itr->pos = 0;
+ itr->write = 0;
+}
+
+static int obj_itr_out(struct snapshot_obj_itr *itr, void *src, int size)
+{
+ if (itr->remain == 0)
+ return 0;
+
+ if ((itr->pos + size) <= itr->offset)
+ goto done;
+
+
+
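+ /* Honor the caller's file offset: skip output until itr->pos reaches
+ * it, then copy at most 'remain' bytes. */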
+ if (itr->offset > itr->pos) {
+ src += (itr->offset - itr->pos);
+ size -= (itr->offset - itr->pos);
+
+
+ itr->pos = itr->offset;
+ }
+
+ if (size > itr->remain)
+ size = itr->remain;
+
+ memcpy(itr->buf, src, size);
+
+ itr->buf += size;
+ itr->write += size;
+ itr->remain -= size;
+
+done:
+ itr->pos += size;
+ return size;
+}
+
+
+static int snapshot_context_count(int id, void *ptr, void *data)
+{
+ int *count = data;
+ *count = *count + 1;
+
+ return 0;
+}
+
+
+static void *_ctxtptr;
+
+static int snapshot_context_info(int id, void *ptr, void *data)
+{
+ struct kgsl_snapshot_linux_context *header = _ctxtptr;
+ struct kgsl_context *context = ptr;
+ struct kgsl_device *device = context->dev_priv->device;
+
+ header->id = id;
+
+
+ header->timestamp_queued = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_QUEUED);
+ header->timestamp_retired = kgsl_readtimestamp(device, context,
+ KGSL_TIMESTAMP_RETIRED);
+
+ _ctxtptr += sizeof(struct kgsl_snapshot_linux_context);
+
+ return 0;
+}
+
+static int snapshot_os(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct kgsl_snapshot_linux *header = snapshot;
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct task_struct *task;
+ pid_t pid;
+ int hang = (int) priv;
+ int ctxtcount = 0;
+ int size = sizeof(*header);
+
+
+ idr_for_each(&device->context_idr, snapshot_context_count, &ctxtcount);
+
+ size += ctxtcount * sizeof(struct kgsl_snapshot_linux_context);
+
+
+ if (remain < size) {
+ SNAPSHOT_ERR_NOMEM(device, "OS");
+ return 0;
+ }
+
+ memset(header, 0, sizeof(*header));
+
+ header->osid = KGSL_SNAPSHOT_OS_LINUX;
+
+ header->state = hang ? SNAPSHOT_STATE_HUNG : SNAPSHOT_STATE_RUNNING;
+
+
+ strlcpy(header->release, utsname()->release, sizeof(header->release));
+ strlcpy(header->version, utsname()->version, sizeof(header->version));
+
+
+ header->seconds = get_seconds();
+
+
+ header->power_flags = pwr->power_flags;
+ header->power_level = pwr->active_pwrlevel;
+ header->power_interval_timeout = pwr->interval_timeout;
+ header->grpclk = kgsl_get_clkrate(pwr->grp_clks[0]);
+ header->busclk = kgsl_get_clkrate(pwr->ebi1_clk);
+
+
+ header->current_context = -1;
+
+
+ header->ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
+
+ pid = header->pid = kgsl_mmu_get_ptname_from_ptbase(header->ptbase);
+
+ task = find_task_by_vpid(pid);
+
+ if (task)
+ get_task_comm(header->comm, task);
+
+ header->ctxtcount = ctxtcount;
+
+
+ _ctxtptr = snapshot + sizeof(*header);
+ idr_for_each(&device->context_idr, snapshot_context_info, NULL);
+
+
+ return size;
+}
+
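+/*
+ * Dump an indexed register block: program the index register, then
+ * read 'count' consecutive values back through the data register.
+ */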
+static int kgsl_snapshot_dump_indexed_regs(struct kgsl_device *device,
+ void *snapshot, int remain, void *priv)
+{
+ struct kgsl_snapshot_indexed_registers *iregs = priv;
+ struct kgsl_snapshot_indexed_regs *header = snapshot;
+ unsigned int *data = snapshot + sizeof(*header);
+ int i;
+
+ if (remain < (iregs->count * 4) + sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "INDEXED REGS");
+ return 0;
+ }
+
+ header->index_reg = iregs->index;
+ header->data_reg = iregs->data;
+ header->count = iregs->count;
+ header->start = iregs->start;
+
+ for (i = 0; i < iregs->count; i++) {
+ kgsl_regwrite(device, iregs->index, iregs->start + i);
+ kgsl_regread(device, iregs->data, &data[i]);
+ }
+
+ return (iregs->count * 4) + sizeof(*header);
+}
+
+#define GPU_OBJ_HEADER_SZ \
+ (sizeof(struct kgsl_snapshot_section_header) + \
+ sizeof(struct kgsl_snapshot_gpu_object))
+
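+/*
+ * Stream one frozen GPU object out through the iterator: section
+ * header, object header, then the (dword aligned) buffer contents.
+ */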
+static int kgsl_snapshot_dump_object(struct kgsl_device *device,
+ struct kgsl_snapshot_object *obj, struct snapshot_obj_itr *itr)
+{
+ struct kgsl_snapshot_section_header sect;
+ struct kgsl_snapshot_gpu_object header;
+ int ret;
+
+ sect.magic = SNAPSHOT_SECTION_MAGIC;
+ sect.id = KGSL_SNAPSHOT_SECTION_GPU_OBJECT;
+
+
+ sect.size = GPU_OBJ_HEADER_SZ + ALIGN(obj->size, 4);
+
+	ret = obj_itr_out(itr, &sect, sizeof(sect));
+ if (ret == 0)
+ return 0;
+
+ header.size = ALIGN(obj->size, 4) >> 2;
+ header.gpuaddr = obj->gpuaddr;
+ header.ptbase = obj->ptbase;
+ header.type = obj->type;
+
+ ret = obj_itr_out(itr, &header, sizeof(header));
+ if (ret == 0)
+ return 0;
+
+ ret = obj_itr_out(itr, obj->entry->memdesc.hostptr + obj->offset,
+ obj->size);
+ if (ret == 0)
+ return 0;
+
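+	/* Pad the object data out to a dword (4 byte) boundary */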
+ if (obj->size % 4) {
+ unsigned int dummy = 0;
+ ret = obj_itr_out(itr, &dummy, obj->size % 4);
+ }
+
+ return ret;
+}
+
+static void kgsl_snapshot_put_object(struct kgsl_device *device,
+ struct kgsl_snapshot_object *obj)
+{
+ list_del(&obj->node);
+
+ obj->entry->flags &= ~KGSL_MEM_ENTRY_FROZEN;
+ kgsl_mem_entry_put(obj->entry);
+
+ kfree(obj);
+}
+
+int kgsl_snapshot_have_object(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size)
+{
+ struct kgsl_snapshot_object *obj;
+
+ list_for_each_entry(obj, &device->snapshot_obj_list, node) {
+ if (obj->ptbase != ptbase)
+ continue;
+
+ if ((gpuaddr >= obj->gpuaddr) &&
+ ((gpuaddr + size) <= (obj->gpuaddr + obj->size)))
+ return 1;
+ }
+
+ return 0;
+}
+
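+/*
+ * Freeze a GPU buffer so it is included in the snapshot dump. Returns
+ * the number of bytes newly frozen, or 0 if the buffer could not be
+ * (or was already) frozen.
+ */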
+int kgsl_snapshot_get_object(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size, unsigned int type)
+{
+ struct kgsl_mem_entry *entry;
+ struct kgsl_snapshot_object *obj;
+ int offset;
+
+ entry = kgsl_get_mem_entry(ptbase, gpuaddr, size);
+
+ if (entry == NULL) {
+ KGSL_DRV_ERR(device, "Unable to find GPU buffer %8.8X\n",
+ gpuaddr);
+ return 0;
+ }
+
+
+ if (entry->memtype != KGSL_MEM_ENTRY_KERNEL) {
+ KGSL_DRV_ERR(device,
+ "Only internal GPU buffers can be frozen\n");
+ return 0;
+ }
+
+
+ if (size == 0) {
+ size = entry->memdesc.size;
+ offset = 0;
+
+
+ gpuaddr = entry->memdesc.gpuaddr;
+ } else {
+ offset = gpuaddr - entry->memdesc.gpuaddr;
+ }
+
+ if (size + offset > entry->memdesc.size) {
+ KGSL_DRV_ERR(device, "Invalid size for GPU buffer %8.8X\n",
+ gpuaddr);
+ return 0;
+ }
+
+
+ list_for_each_entry(obj, &device->snapshot_obj_list, node) {
+ if (obj->gpuaddr == gpuaddr && obj->ptbase == ptbase) {
+
+ if (obj->size != size)
+ obj->size = size;
+
+ return 0;
+ }
+ }
+
+ if (kgsl_memdesc_map(&entry->memdesc) == NULL) {
+ KGSL_DRV_ERR(device, "Unable to map GPU buffer %X\n",
+ gpuaddr);
+ return 0;
+ }
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+
+ if (obj == NULL) {
+ KGSL_DRV_ERR(device, "Unable to allocate memory\n");
+ return 0;
+ }
+
+
+ kgsl_mem_entry_get(entry);
+
+ obj->type = type;
+ obj->entry = entry;
+ obj->gpuaddr = gpuaddr;
+ obj->ptbase = ptbase;
+ obj->size = size;
+ obj->offset = offset;
+
+ list_add(&obj->node, &device->snapshot_obj_list);
+
+
+ if (entry->flags & KGSL_MEM_ENTRY_FROZEN)
+ return 0;
+
+ entry->flags |= KGSL_MEM_ENTRY_FROZEN;
+
+ return entry->memdesc.size;
+}
+EXPORT_SYMBOL(kgsl_snapshot_get_object);
+
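+/*
+ * Dump register ranges. 'priv' points at pairs of (start, end) offsets
+ * and each register is emitted as an (offset, value) pair, 8 bytes per
+ * register.
+ */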
+int kgsl_snapshot_dump_regs(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv)
+{
+ struct kgsl_snapshot_regs *header = snapshot;
+ struct kgsl_snapshot_registers *regs = priv;
+ unsigned int *data = snapshot + sizeof(*header);
+ int count = 0, i, j;
+
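+	/* Count how many register values the given ranges will produce */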
+ for (i = 0; i < regs->count; i++) {
+ int start = regs->regs[i * 2];
+ int end = regs->regs[i * 2 + 1];
+
+ count += (end - start + 1);
+ }
+
+ if (remain < (count * 8) + sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+ return 0;
+ }
+
+ for (i = 0; i < regs->count; i++) {
+ unsigned int start = regs->regs[i * 2];
+ unsigned int end = regs->regs[i * 2 + 1];
+
+ for (j = start; j <= end; j++) {
+ unsigned int val;
+
+ kgsl_regread(device, j, &val);
+ *data++ = j;
+ *data++ = val;
+ }
+ }
+
+ header->count = count;
+
+
+ return (count * 8) + sizeof(*header);
+}
+EXPORT_SYMBOL(kgsl_snapshot_dump_regs);
+
+void *kgsl_snapshot_indexed_registers(struct kgsl_device *device,
+ void *snapshot, int *remain,
+ unsigned int index, unsigned int data, unsigned int start,
+ unsigned int count)
+{
+ struct kgsl_snapshot_indexed_registers iregs;
+ iregs.index = index;
+ iregs.data = data;
+ iregs.start = start;
+ iregs.count = count;
+
+ return kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_INDEXED_REGS, snapshot,
+ remain, kgsl_snapshot_dump_indexed_regs, &iregs);
+}
+EXPORT_SYMBOL(kgsl_snapshot_indexed_registers);
+
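+/*
+ * Capture the device state into the preallocated snapshot buffer; a
+ * 'hang' snapshot stays frozen until userspace reads it via sysfs.
+ */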
+int kgsl_device_snapshot(struct kgsl_device *device, int hang)
+{
+ struct kgsl_snapshot_header *header = device->snapshot;
+ int remain = device->snapshot_maxsize - sizeof(*header);
+ void *snapshot;
+ struct platform_device *pdev =
+ container_of(device->parentdev, struct platform_device, dev);
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+
+
+ if (hang && device->snapshot_frozen == 1)
+ return 0;
+
+ if (device->snapshot == NULL) {
+ KGSL_DRV_ERR(device,
+ "snapshot: No snapshot memory available\n");
+ return -ENOMEM;
+ }
+
+ if (remain < sizeof(*header)) {
+ KGSL_DRV_ERR(device,
+ "snapshot: Not enough memory for the header\n");
+ return -ENOMEM;
+ }
+
+ header->magic = SNAPSHOT_MAGIC;
+
+ header->gpuid = kgsl_gpuid(device, &header->chipid);
+
+
+ snapshot = ((void *) device->snapshot) + sizeof(*header);
+
+
+ snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_OS,
+ snapshot, &remain, snapshot_os, (void *) hang);
+
+
+ if (device->ftbl->snapshot)
+ snapshot = device->ftbl->snapshot(device, snapshot, &remain,
+ hang);
+
+ device->snapshot_timestamp = get_seconds();
+ device->snapshot_size = (int) (snapshot - device->snapshot);
+
+
+ device->snapshot_frozen = (hang) ? 1 : 0;
+
+
+ KGSL_DRV_ERR(device,"snapshot created at va %p pa %x size %d\n",
+ device->snapshot, pdata->snapshot_address,
+ device->snapshot_size);
+ if (hang)
+ sysfs_notify(&device->snapshot_kobj, NULL, "timestamp");
+ return 0;
+}
+EXPORT_SYMBOL(kgsl_device_snapshot);
+
+struct kgsl_snapshot_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kgsl_device *device, char *buf);
+ ssize_t (*store)(struct kgsl_device *device, const char *buf,
+ size_t count);
+};
+
+#define to_snapshot_attr(a) \
+container_of(a, struct kgsl_snapshot_attribute, attr)
+
+#define kobj_to_device(a) \
+container_of(a, struct kgsl_device, snapshot_kobj)
+
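+/*
+ * Read handler for the snapshot "dump" file. Frozen objects are only
+ * released after a pass that writes no bytes, i.e. once userspace has
+ * read all the way to the end of the dump.
+ */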
+static ssize_t snapshot_show(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf, loff_t off,
+ size_t count)
+{
+ struct kgsl_device *device = kobj_to_device(kobj);
+ struct kgsl_snapshot_object *obj, *tmp;
+ struct kgsl_snapshot_section_header head;
+ struct snapshot_obj_itr itr;
+ int ret;
+
+ if (device == NULL)
+ return 0;
+
+
+ if (device->snapshot_timestamp == 0)
+ return 0;
+
+
+ mutex_lock(&device->mutex);
+
+ obj_itr_init(&itr, buf, off, count);
+
+ ret = obj_itr_out(&itr, device->snapshot, device->snapshot_size);
+
+ if (ret == 0)
+ goto done;
+
+ list_for_each_entry(obj, &device->snapshot_obj_list, node)
+ kgsl_snapshot_dump_object(device, obj, &itr);
+
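+	/* Terminate the dump with an end-of-snapshot section */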
+	head.magic = SNAPSHOT_SECTION_MAGIC;
+	head.id = KGSL_SNAPSHOT_SECTION_END;
+	head.size = sizeof(head);
+
+	obj_itr_out(&itr, &head, sizeof(head));
+
+ /*
+ * Make sure everything has been written out before destroying things.
+ * The best way to confirm this is to go all the way through without
+ * writing any bytes - so only release if we get this far and
+ * itr->write is 0
+ */
+
+ if (itr.write == 0) {
+ list_for_each_entry_safe(obj, tmp, &device->snapshot_obj_list,
+ node)
+ kgsl_snapshot_put_object(device, obj);
+
+ if (device->snapshot_frozen)
+ KGSL_DRV_ERR(device, "Snapshot objects released\n");
+
+ device->snapshot_frozen = 0;
+ }
+
+done:
+ mutex_unlock(&device->mutex);
+
+ return itr.write;
+}
+
+static ssize_t timestamp_show(struct kgsl_device *device, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%x\n", device->snapshot_timestamp);
+}
+
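+/*
+ * Any write to the "trigger" file (for example, echoing 1 into it from
+ * the snapshot sysfs directory) captures a snapshot on demand.
+ */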
+static ssize_t trigger_store(struct kgsl_device *device, const char *buf,
+ size_t count)
+{
+ if (device && count > 0) {
+ mutex_lock(&device->mutex);
+ kgsl_device_snapshot(device, 0);
+ mutex_unlock(&device->mutex);
+ }
+
+ return count;
+}
+
+static ssize_t no_panic_show(struct kgsl_device *device, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%x\n", device->snapshot_no_panic);
+}
+
+static ssize_t no_panic_store(struct kgsl_device *device, const char *buf,
+ size_t count)
+{
+ if (device && count > 0) {
+ mutex_lock(&device->mutex);
+ device->snapshot_no_panic = simple_strtol(buf, NULL, 10);
+ mutex_unlock(&device->mutex);
+ }
+
+ return count;
+}
+
+static struct bin_attribute snapshot_attr = {
+ .attr.name = "dump",
+ .attr.mode = 0444,
+ .size = 0,
+ .read = snapshot_show
+};
+
+#define SNAPSHOT_ATTR(_name, _mode, _show, _store) \
+struct kgsl_snapshot_attribute attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+}
+
+SNAPSHOT_ATTR(trigger, 0600, NULL, trigger_store);
+SNAPSHOT_ATTR(timestamp, 0444, timestamp_show, NULL);
+SNAPSHOT_ATTR(no_panic, 0644, no_panic_show, no_panic_store);
+
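+/* The kobject is embedded in struct kgsl_device; nothing to free here */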
+static void snapshot_sysfs_release(struct kobject *kobj)
+{
+}
+
+static ssize_t snapshot_sysfs_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
+ struct kgsl_device *device = kobj_to_device(kobj);
+ ssize_t ret;
+
+ if (device && pattr->show)
+ ret = pattr->show(device, buf);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static ssize_t snapshot_sysfs_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ struct kgsl_snapshot_attribute *pattr = to_snapshot_attr(attr);
+ struct kgsl_device *device = kobj_to_device(kobj);
+ ssize_t ret;
+
+ if (device && pattr->store)
+ ret = pattr->store(device, buf, count);
+ else
+ ret = -EIO;
+
+ return ret;
+}
+
+static const struct sysfs_ops snapshot_sysfs_ops = {
+ .show = snapshot_sysfs_show,
+ .store = snapshot_sysfs_store,
+};
+
+static struct kobj_type ktype_snapshot = {
+ .sysfs_ops = &snapshot_sysfs_ops,
+ .default_attrs = NULL,
+ .release = snapshot_sysfs_release,
+};
+
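+/* Allocate the snapshot memory and create the snapshot sysfs files */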
+int kgsl_device_snapshot_init(struct kgsl_device *device)
+{
+ int ret;
+ struct platform_device *pdev =
+ container_of(device->parentdev, struct platform_device, dev);
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+
+ if (device->snapshot == NULL) {
+ if(pdata->snapshot_address) {
+ device->snapshot = ioremap(pdata->snapshot_address, KGSL_SNAPSHOT_MEMSIZE);
+ KGSL_DRV_INFO(device, "snapshot created at va %p pa %x\n", device->snapshot, pdata->snapshot_address);
+ } else
+ device->snapshot = kzalloc(KGSL_SNAPSHOT_MEMSIZE, GFP_KERNEL);
+ }
+
+ if (device->snapshot == NULL)
+ return -ENOMEM;
+
+ device->snapshot_maxsize = KGSL_SNAPSHOT_MEMSIZE;
+ device->snapshot_timestamp = 0;
+
+ INIT_LIST_HEAD(&device->snapshot_obj_list);
+
+ ret = kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
+ &device->dev->kobj, "snapshot");
+ if (ret)
+ goto done;
+
+ ret = sysfs_create_bin_file(&device->snapshot_kobj, &snapshot_attr);
+ if (ret)
+ goto done;
+
+ ret = sysfs_create_file(&device->snapshot_kobj, &attr_trigger.attr);
+ if (ret)
+ goto done;
+
+ ret = sysfs_create_file(&device->snapshot_kobj, &attr_timestamp.attr);
+ if (ret)
+ goto done;
+
+ ret = sysfs_create_file(&device->snapshot_kobj, &attr_no_panic.attr);
+
+done:
+ return ret;
+}
+EXPORT_SYMBOL(kgsl_device_snapshot_init);
+
+void kgsl_device_snapshot_close(struct kgsl_device *device)
+{
+	struct platform_device *pdev =
+		container_of(device->parentdev, struct platform_device, dev);
+	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+
+	sysfs_remove_bin_file(&device->snapshot_kobj, &snapshot_attr);
+	sysfs_remove_file(&device->snapshot_kobj, &attr_trigger.attr);
+	sysfs_remove_file(&device->snapshot_kobj, &attr_timestamp.attr);
+	sysfs_remove_file(&device->snapshot_kobj, &attr_no_panic.attr);
+
+	kobject_put(&device->snapshot_kobj);
+
+	/* Memory that was ioremap()ed must be released with iounmap() */
+	if (pdata->snapshot_address)
+		iounmap(device->snapshot);
+	else
+		kfree(device->snapshot);
+
+	device->snapshot = NULL;
+	device->snapshot_maxsize = 0;
+	device->snapshot_timestamp = 0;
+}
+EXPORT_SYMBOL(kgsl_device_snapshot_close);
diff --git a/drivers/gpu/msm/kgsl_snapshot.h b/drivers/gpu/msm/kgsl_snapshot.h
new file mode 100644
index 0000000..baee17d
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_snapshot.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _KGSL_SNAPSHOT_H_
+#define _KGSL_SNAPSHOT_H_
+
+#include <linux/types.h>
+
+
+#define SNAPSHOT_MAGIC 0x504D0002
+
+
+struct kgsl_snapshot_header {
+ __u32 magic;
+ __u32 gpuid;
+
+ __u32 chipid;
+} __packed;
+
+#define SNAPSHOT_SECTION_MAGIC 0xABCD
+
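+/* Every section starts with this header; 'size' includes the header itself */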
+struct kgsl_snapshot_section_header {
+ __u16 magic;
+ __u16 id;
+ __u32 size;
+} __packed;
+
+#define KGSL_SNAPSHOT_SECTION_OS 0x0101
+#define KGSL_SNAPSHOT_SECTION_REGS 0x0201
+#define KGSL_SNAPSHOT_SECTION_RB 0x0301
+#define KGSL_SNAPSHOT_SECTION_IB 0x0401
+#define KGSL_SNAPSHOT_SECTION_INDEXED_REGS 0x0501
+#define KGSL_SNAPSHOT_SECTION_ISTORE 0x0801
+#define KGSL_SNAPSHOT_SECTION_DEBUG 0x0901
+#define KGSL_SNAPSHOT_SECTION_DEBUGBUS 0x0A01
+#define KGSL_SNAPSHOT_SECTION_GPU_OBJECT 0x0B01
+
+#define KGSL_SNAPSHOT_SECTION_END 0xFFFF
+
+#define KGSL_SNAPSHOT_OS_LINUX 0x0001
+
+
+#define SNAPSHOT_STATE_HUNG 0
+#define SNAPSHOT_STATE_RUNNING 1
+
+struct kgsl_snapshot_linux {
+ int osid;
+ int state;
+ __u32 seconds;
+ __u32 power_flags;
+ __u32 power_level;
+ __u32 power_interval_timeout;
+ __u32 grpclk;
+ __u32 busclk;
+ __u32 ptbase;
+ __u32 pid;
+ __u32 current_context;
+ __u32 ctxtcount;
+ unsigned char release[32];
+ unsigned char version[32];
+ unsigned char comm[16];
+} __packed;
+
+
+struct kgsl_snapshot_linux_context {
+ __u32 id;
+ __u32 timestamp_queued;
+ __u32 timestamp_retired;
+} __packed;
+
+struct kgsl_snapshot_rb {
+ int start;
+ int end;
+ int rbsize;
+ int wptr;
+ int rptr;
+ int count;
+} __packed;
+
+struct kgsl_snapshot_ib {
+ __u32 gpuaddr;
+ __u32 ptbase;
+ int size;
+} __packed;
+
+struct kgsl_snapshot_regs {
+ __u32 count;
+} __packed;
+
+struct kgsl_snapshot_indexed_regs {
+ __u32 index_reg;
+ __u32 data_reg;
+ int start;
+ int count;
+} __packed;
+
+struct kgsl_snapshot_istore {
+ int count;
+} __packed;
+
+
+#define SNAPSHOT_DEBUG_SX 1
+#define SNAPSHOT_DEBUG_CP 2
+#define SNAPSHOT_DEBUG_SQ 3
+#define SNAPSHOT_DEBUG_SQTHREAD 4
+#define SNAPSHOT_DEBUG_MIU 5
+
+#define SNAPSHOT_DEBUG_VPC_MEMORY 6
+#define SNAPSHOT_DEBUG_CP_MEQ 7
+#define SNAPSHOT_DEBUG_CP_PM4_RAM 8
+#define SNAPSHOT_DEBUG_CP_PFP_RAM 9
+#define SNAPSHOT_DEBUG_CP_ROQ 10
+#define SNAPSHOT_DEBUG_SHADER_MEMORY 11
+
+struct kgsl_snapshot_debug {
+ int type;
+ int size;
+} __packed;
+
+struct kgsl_snapshot_debugbus {
+ int id;
+ int count;
+} __packed;
+
+#define SNAPSHOT_GPU_OBJECT_SHADER 1
+#define SNAPSHOT_GPU_OBJECT_IB 2
+#define SNAPSHOT_GPU_OBJECT_GENERIC 3
+
+struct kgsl_snapshot_gpu_object {
+ int type;
+ __u32 gpuaddr;
+ __u32 ptbase;
+ int size;
+} __packed;
+
+#ifdef __KERNEL__
+
+#define KGSL_SNAPSHOT_MEMSIZE (512 * 1024)
+
+struct kgsl_device;
+
+#define SNAPSHOT_ERR_NOMEM(_d, _s) \
+ KGSL_DRV_ERR((_d), \
+ "snapshot: not enough snapshot memory for section %s\n", (_s))
+
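+/*
+ * Reserve room for a section header, let 'func' fill in the payload,
+ * then finalize the header and return a pointer just past the section.
+ */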
+static inline void *kgsl_snapshot_add_section(struct kgsl_device *device,
+ u16 id, void *snapshot, int *remain,
+ int (*func)(struct kgsl_device *, void *, int, void *), void *priv)
+{
+ struct kgsl_snapshot_section_header *header = snapshot;
+ void *data = snapshot + sizeof(*header);
+ int ret = 0;
+
+
+ if (*remain < sizeof(*header))
+ return snapshot;
+
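+	/* Run the callback; a return of 0 means the section is omitted */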
+ if (func) {
+ ret = func(device, data, *remain, priv);
+
+
+ if (ret == 0)
+ return snapshot;
+ }
+
+ header->magic = SNAPSHOT_SECTION_MAGIC;
+ header->id = id;
+ header->size = ret + sizeof(*header);
+
+
+ *remain -= header->size;
+
+ return snapshot + header->size;
+}
+
+
+struct kgsl_snapshot_registers {
+ unsigned int *regs;
+ int count;
+};
+
+int kgsl_snapshot_dump_regs(struct kgsl_device *device, void *snapshot,
+ int remain, void *priv);
+
+
+struct kgsl_snapshot_indexed_registers {
+ unsigned int index;
+ unsigned int data;
+ unsigned int start;
+ unsigned int count;
+};
+
+
+void *kgsl_snapshot_indexed_registers(struct kgsl_device *device,
+ void *snapshot, int *remain, unsigned int index,
+ unsigned int data, unsigned int start, unsigned int count);
+
+int kgsl_snapshot_get_object(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size, unsigned int type);
+
+int kgsl_snapshot_have_object(struct kgsl_device *device, unsigned int ptbase,
+ unsigned int gpuaddr, unsigned int size);
+
+#endif
+#endif
diff --git a/drivers/gpu/msm/kgsl_trace.c b/drivers/gpu/msm/kgsl_trace.c
new file mode 100644
index 0000000..8bdf996
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_trace.c
@@ -0,0 +1,18 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+
+#define CREATE_TRACE_POINTS
+#include "kgsl_trace.h"
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
new file mode 100644
index 0000000..0df8409
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -0,0 +1,524 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_KGSL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _KGSL_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE kgsl_trace
+
+#include <linux/tracepoint.h>
+#include "kgsl_device.h"
+
+struct kgsl_device;
+struct kgsl_ringbuffer_issueibcmds;
+struct kgsl_device_waittimestamp;
+
+TRACE_EVENT(kgsl_issueibcmds,
+
+ TP_PROTO(struct kgsl_device *device,
+ struct kgsl_ringbuffer_issueibcmds *cmd,
+ struct kgsl_ibdesc *ibdesc,
+ int result),
+
+ TP_ARGS(device, cmd, ibdesc, result),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, drawctxt_id)
+ __field(unsigned int, ibdesc_addr)
+ __field(unsigned int, numibs)
+ __field(unsigned int, timestamp)
+ __field(unsigned int, flags)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->drawctxt_id = cmd->drawctxt_id;
+ __entry->ibdesc_addr = ibdesc[0].gpuaddr;
+ __entry->numibs = cmd->numibs;
+ __entry->timestamp = cmd->timestamp;
+ __entry->flags = cmd->flags;
+ __entry->result = result;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u ib=0x%u numibs=%u timestamp=0x%x "
+ "flags=0x%x(%s) result=%d",
+ __get_str(device_name),
+ __entry->drawctxt_id,
+ __entry->ibdesc_addr,
+ __entry->numibs,
+ __entry->timestamp,
+ __entry->flags,
+ __entry->flags ? __print_flags(__entry->flags, "|",
+ { KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" },
+ { KGSL_CONTEXT_SUBMIT_IB_LIST, "IB_LIST" },
+ { KGSL_CONTEXT_CTX_SWITCH, "CTX_SWITCH" })
+ : "None",
+ __entry->result
+ )
+);
+
+TRACE_EVENT(kgsl_readtimestamp,
+
+ TP_PROTO(struct kgsl_device *device,
+ unsigned int context_id,
+ unsigned int type,
+ unsigned int timestamp),
+
+ TP_ARGS(device, context_id, type, timestamp),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, context_id)
+ __field(unsigned int, type)
+ __field(unsigned int, timestamp)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->context_id = context_id;
+ __entry->type = type;
+ __entry->timestamp = timestamp;
+ ),
+
+ TP_printk(
+ "d_name=%s context_id=%u type=%u timestamp=0x%x",
+ __get_str(device_name),
+ __entry->context_id,
+ __entry->type,
+ __entry->timestamp
+ )
+);
+
+TRACE_EVENT(kgsl_waittimestamp_entry,
+
+ TP_PROTO(struct kgsl_device *device,
+ unsigned int context_id,
+ unsigned int curr_ts,
+ unsigned int wait_ts,
+ unsigned int timeout),
+
+ TP_ARGS(device, context_id, curr_ts, wait_ts, timeout),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, context_id)
+ __field(unsigned int, curr_ts)
+ __field(unsigned int, wait_ts)
+ __field(unsigned int, timeout)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->context_id = context_id;
+ __entry->curr_ts = curr_ts;
+ __entry->wait_ts = wait_ts;
+ __entry->timeout = timeout;
+ ),
+
+ TP_printk(
+ "d_name=%s context_id=%u curr_ts=0x%x timestamp=0x%x timeout=%u",
+ __get_str(device_name),
+ __entry->context_id,
+ __entry->curr_ts,
+ __entry->wait_ts,
+ __entry->timeout
+ )
+);
+
+TRACE_EVENT(kgsl_waittimestamp_exit,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int curr_ts,
+ int result),
+
+ TP_ARGS(device, curr_ts, result),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, curr_ts)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->curr_ts = curr_ts;
+ __entry->result = result;
+ ),
+
+ TP_printk(
+ "d_name=%s curr_ts=0x%x result=%d",
+ __get_str(device_name),
+ __entry->curr_ts,
+ __entry->result
+ )
+);
+
+DECLARE_EVENT_CLASS(kgsl_pwr_template,
+ TP_PROTO(struct kgsl_device *device, int on),
+
+ TP_ARGS(device, on),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(int, on)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->on = on;
+ ),
+
+ TP_printk(
+ "d_name=%s %s",
+ __get_str(device_name),
+ __entry->on ? "on" : "off"
+ )
+);
+
+DEFINE_EVENT(kgsl_pwr_template, kgsl_clk,
+ TP_PROTO(struct kgsl_device *device, int on),
+ TP_ARGS(device, on)
+);
+
+DEFINE_EVENT(kgsl_pwr_template, kgsl_irq,
+ TP_PROTO(struct kgsl_device *device, int on),
+ TP_ARGS(device, on)
+);
+
+DEFINE_EVENT(kgsl_pwr_template, kgsl_bus,
+ TP_PROTO(struct kgsl_device *device, int on),
+ TP_ARGS(device, on)
+);
+
+DEFINE_EVENT(kgsl_pwr_template, kgsl_rail,
+ TP_PROTO(struct kgsl_device *device, int on),
+ TP_ARGS(device, on)
+);
+
+#ifdef CONFIG_MSM_KGSL_GPU_USAGE_SYSTRACE
+TRACE_EVENT(kgsl_usage,
+
+ TP_PROTO(struct kgsl_device *device, int on, int pid, s64 total_time, s64 busy_time,
+ unsigned int pwrlevel, unsigned int freq),
+
+ TP_ARGS(device, on, pid, total_time, busy_time, pwrlevel, freq),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(int, on)
+ __field(int, pid)
+ __field(s64, total_time)
+ __field(s64, busy_time)
+ __field(unsigned int, pwrlevel)
+ __field(unsigned int, freq)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->on = on;
+ __entry->pid = pid;
+ __entry->total_time =total_time;
+ __entry->busy_time = busy_time;
+ __entry->pwrlevel = pwrlevel;
+ __entry->freq = freq;
+ ),
+
+ TP_printk(
+ "d_name=%s %s pid=%d total=%lld busy=%lld pwrlevel=%d freq=%d",
+ __get_str(device_name),
+ __entry->on ? "on" : "off",
+ __entry->pid,
+ __entry->total_time,
+ __entry->busy_time,
+ __entry->pwrlevel,
+ __entry->freq
+ )
+);
+#endif
+
+TRACE_EVENT(kgsl_pwrlevel,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int pwrlevel,
+ unsigned int freq),
+
+ TP_ARGS(device, pwrlevel, freq),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, pwrlevel)
+ __field(unsigned int, freq)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->pwrlevel = pwrlevel;
+ __entry->freq = freq;
+ ),
+
+ TP_printk(
+ "d_name=%s pwrlevel=%d freq=%d",
+ __get_str(device_name),
+ __entry->pwrlevel,
+ __entry->freq
+ )
+);
+
+DECLARE_EVENT_CLASS(kgsl_pwrstate_template,
+ TP_PROTO(struct kgsl_device *device, unsigned int state),
+
+ TP_ARGS(device, state),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->state = state;
+ ),
+
+ TP_printk(
+ "d_name=%s %s",
+ __get_str(device_name),
+ kgsl_pwrstate_to_str(__entry->state)
+ )
+);
+
+DEFINE_EVENT(kgsl_pwrstate_template, kgsl_pwr_set_state,
+ TP_PROTO(struct kgsl_device *device, unsigned int state),
+ TP_ARGS(device, state)
+);
+
+DEFINE_EVENT(kgsl_pwrstate_template, kgsl_pwr_request_state,
+ TP_PROTO(struct kgsl_device *device, unsigned int state),
+ TP_ARGS(device, state)
+);
+
+TRACE_EVENT(kgsl_mem_alloc,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry),
+
+ TP_ARGS(mem_entry),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ ),
+
+ TP_fast_assign(
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ ),
+
+ TP_printk(
+ "gpuaddr=0x%08x size=%d",
+ __entry->gpuaddr, __entry->size
+ )
+);
+
+TRACE_EVENT(kgsl_mem_map,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry, int fd),
+
+ TP_ARGS(mem_entry, fd),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ __field(int, fd)
+ __field(int, type)
+ ),
+
+ TP_fast_assign(
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ __entry->fd = fd;
+ __entry->type = mem_entry->memtype;
+ ),
+
+ TP_printk(
+ "gpuaddr=0x%08x size=%d type=%d fd=%d",
+ __entry->gpuaddr, __entry->size,
+ __entry->type, __entry->fd
+ )
+);
+
+TRACE_EVENT(kgsl_mem_free,
+
+ TP_PROTO(struct kgsl_mem_entry *mem_entry),
+
+ TP_ARGS(mem_entry),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, gpuaddr)
+		__field(unsigned int, size)
+		__field(int, type)
+	),
+
+ TP_fast_assign(
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ __entry->type = mem_entry->memtype;
+ ),
+
+ TP_printk(
+ "gpuaddr=0x%08x size=%d type=%d",
+ __entry->gpuaddr, __entry->size, __entry->type
+ )
+);
+
+DECLARE_EVENT_CLASS(kgsl_mem_timestamp_template,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, gpuaddr)
+ __field(unsigned int, size)
+ __field(int, type)
+ __field(unsigned int, drawctxt_id)
+ __field(unsigned int, curr_ts)
+ __field(unsigned int, free_ts)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->gpuaddr = mem_entry->memdesc.gpuaddr;
+ __entry->size = mem_entry->memdesc.size;
+ __entry->drawctxt_id = id;
+ __entry->type = mem_entry->memtype;
+ __entry->curr_ts = curr_ts;
+ __entry->free_ts = free_ts;
+ ),
+
+ TP_printk(
+ "d_name=%s gpuaddr=0x%08x size=%d type=%d ctx=%u"
+ " curr_ts=0x%x free_ts=0x%x",
+ __get_str(device_name),
+ __entry->gpuaddr,
+ __entry->size,
+ __entry->type,
+ __entry->drawctxt_id,
+ __entry->curr_ts,
+ __entry->free_ts
+ )
+);
+
+DEFINE_EVENT(kgsl_mem_timestamp_template, kgsl_mem_timestamp_queue,
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts)
+);
+
+DEFINE_EVENT(kgsl_mem_timestamp_template, kgsl_mem_timestamp_free,
+ TP_PROTO(struct kgsl_device *device, struct kgsl_mem_entry *mem_entry,
+ unsigned int id, unsigned int curr_ts, unsigned int free_ts),
+ TP_ARGS(device, mem_entry, id, curr_ts, free_ts)
+);
+
+TRACE_EVENT(kgsl_context_create,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_context *context,
+ unsigned int flags),
+
+ TP_ARGS(device, context, flags),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, id)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->id = context->id;
+ __entry->flags = flags;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u flags=0x%x %s",
+ __get_str(device_name), __entry->id, __entry->flags,
+ __entry->flags ? __print_flags(__entry->flags, "|",
+ { KGSL_CONTEXT_NO_GMEM_ALLOC , "NO_GMEM_ALLOC" },
+ { KGSL_CONTEXT_PREAMBLE, "PREAMBLE" },
+ { KGSL_CONTEXT_TRASH_STATE, "TRASH_STATE" },
+ { KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" })
+ : "None"
+ )
+);
+
+TRACE_EVENT(kgsl_context_detach,
+
+ TP_PROTO(struct kgsl_device *device, struct kgsl_context *context),
+
+ TP_ARGS(device, context),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->id = context->id;
+ ),
+
+ TP_printk(
+ "d_name=%s ctx=%u",
+ __get_str(device_name), __entry->id
+ )
+);
+
+TRACE_EVENT(kgsl_mmu_pagefault,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int page,
+ unsigned int pt, const char *op),
+
+ TP_ARGS(device, page, pt, op),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, page)
+ __field(unsigned int, pt)
+ __string(op, op)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->page = page;
+ __entry->pt = pt;
+ __assign_str(op, op);
+ ),
+
+ TP_printk(
+ "d_name=%s page=0x%08x pt=%d op=%s\n",
+ __get_str(device_name), __entry->page, __entry->pt,
+ __get_str(op)
+ )
+);
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
new file mode 100644
index 0000000..fd03d5e
--- /dev/null
+++ b/drivers/gpu/msm/z180.c
@@ -0,0 +1,943 @@
+/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+
+#include "kgsl.h"
+#include "kgsl_cffdump.h"
+#include "kgsl_sharedmem.h"
+
+#include "z180.h"
+#include "z180_reg.h"
+#include "z180_trace.h"
+
+#define DRIVER_VERSION_MAJOR 3
+#define DRIVER_VERSION_MINOR 1
+
+#define Z180_DEVICE(device) \
+ KGSL_CONTAINER_OF(device, struct z180_device, dev)
+
+#define GSL_VGC_INT_MASK \
+ (REG_VGC_IRQSTATUS__MH_MASK | \
+ REG_VGC_IRQSTATUS__G2D_MASK | \
+ REG_VGC_IRQSTATUS__FIFO_MASK)
+
+#define VGV3_NEXTCMD_JUMP 0x01
+
+#define VGV3_NEXTCMD_NEXTCMD_FSHIFT 12
+#define VGV3_NEXTCMD_NEXTCMD_FMASK 0x7
+
+#define VGV3_CONTROL_MARKADD_FSHIFT 0
+#define VGV3_CONTROL_MARKADD_FMASK 0xfff
+
+#define Z180_PACKET_SIZE 15
+#define Z180_MARKER_SIZE 10
+#define Z180_CALL_CMD 0x1000
+#define Z180_MARKER_CMD 0x8000
+#define Z180_STREAM_END_CMD 0x9000
+#define Z180_STREAM_PACKET 0x7C000176
+#define Z180_STREAM_PACKET_CALL 0x7C000275
+#define Z180_PACKET_COUNT 8
+#define Z180_RB_SIZE (Z180_PACKET_SIZE*Z180_PACKET_COUNT \
+ *sizeof(uint32_t))
+
+#define NUMTEXUNITS 4
+#define TEXUNITREGCOUNT 25
+#define VG_REGCOUNT 0x39
+
+#define PACKETSIZE_BEGIN 3
+#define PACKETSIZE_G2DCOLOR 2
+#define PACKETSIZE_TEXUNIT (TEXUNITREGCOUNT * 2)
+#define PACKETSIZE_REG (VG_REGCOUNT * 2)
+#define PACKETSIZE_STATE (PACKETSIZE_TEXUNIT * NUMTEXUNITS + \
+ PACKETSIZE_REG + PACKETSIZE_BEGIN + \
+ PACKETSIZE_G2DCOLOR)
+#define PACKETSIZE_STATESTREAM (ALIGN((PACKETSIZE_STATE * \
+ sizeof(unsigned int)), 32) / \
+ sizeof(unsigned int))
+
+#define Z180_INVALID_CONTEXT UINT_MAX
+
+#define Z180_CFG_MHARB \
+ (0x10 \
+ | (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
+ | (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
+ | (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
+ | (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
+ | (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))
+
+#define Z180_TIMESTAMP_EPSILON 20000
+#define Z180_IDLE_COUNT_MAX 1000000
+
+enum z180_cmdwindow_type {
+ Z180_CMDWINDOW_2D = 0x00000000,
+ Z180_CMDWINDOW_MMU = 0x00000002,
+};
+
+#define Z180_CMDWINDOW_TARGET_MASK 0x000000FF
+#define Z180_CMDWINDOW_ADDR_MASK 0x00FFFF00
+#define Z180_CMDWINDOW_TARGET_SHIFT 0
+#define Z180_CMDWINDOW_ADDR_SHIFT 8
+
+static int z180_start(struct kgsl_device *device, unsigned int init_ram);
+static int z180_stop(struct kgsl_device *device);
+static int z180_wait(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs);
+static void z180_regread(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value);
+static void z180_regwrite(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value);
+static void z180_cmdwindow_write(struct kgsl_device *device,
+ unsigned int addr,
+ unsigned int data);
+
+#define Z180_MMU_CONFIG \
+ (0x01 \
+ | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \
+ | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
+
+static const struct kgsl_functable z180_functable;
+
+static struct z180_device device_2d0 = {
+ .dev = {
+ KGSL_DEVICE_COMMON_INIT(device_2d0.dev),
+ .name = DEVICE_2D0_NAME,
+ .id = KGSL_DEVICE_2D0,
+ .mh = {
+ .mharb = Z180_CFG_MHARB,
+ .mh_intf_cfg1 = 0x00032f07,
+ .mh_intf_cfg2 = 0x004b274f,
+ .mpu_base = 0x00000000,
+ .mpu_range = 0xFFFFF000,
+ },
+ .mmu = {
+ .config = Z180_MMU_CONFIG,
+ },
+ .pwrctrl = {
+ .irq_name = KGSL_2D0_IRQ,
+ },
+ .iomemname = KGSL_2D0_REG_MEMORY,
+ .ftbl = &z180_functable,
+ },
+	.cmdwin_lock = __SPIN_LOCK_INITIALIZER(device_2d0.cmdwin_lock),
+};
+
+static struct z180_device device_2d1 = {
+ .dev = {
+ KGSL_DEVICE_COMMON_INIT(device_2d1.dev),
+ .name = DEVICE_2D1_NAME,
+ .id = KGSL_DEVICE_2D1,
+ .mh = {
+ .mharb = Z180_CFG_MHARB,
+ .mh_intf_cfg1 = 0x00032f07,
+ .mh_intf_cfg2 = 0x004b274f,
+ .mpu_base = 0x00000000,
+ .mpu_range = 0xFFFFF000,
+ },
+ .mmu = {
+ .config = Z180_MMU_CONFIG,
+ },
+ .pwrctrl = {
+ .irq_name = KGSL_2D1_IRQ,
+ },
+ .iomemname = KGSL_2D1_REG_MEMORY,
+ .ftbl = &z180_functable,
+ },
+ .cmdwin_lock = __SPIN_LOCK_INITIALIZER(device_2d1.cmdwin_lock),
+};
+
+static irqreturn_t z180_irq_handler(struct kgsl_device *device)
+{
+ irqreturn_t result = IRQ_NONE;
+ unsigned int status;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ z180_regread(device, ADDR_VGC_IRQSTATUS >> 2, &status);
+
+ trace_kgsl_z180_irq_status(device, status);
+
+ if (status & GSL_VGC_INT_MASK) {
+ z180_regwrite(device,
+ ADDR_VGC_IRQSTATUS >> 2, status & GSL_VGC_INT_MASK);
+
+ result = IRQ_HANDLED;
+
+ if (status & REG_VGC_IRQSTATUS__FIFO_MASK)
+ KGSL_DRV_ERR(device, "z180 fifo interrupt\n");
+ if (status & REG_VGC_IRQSTATUS__MH_MASK)
+ kgsl_mh_intrcallback(device);
+ if (status & REG_VGC_IRQSTATUS__G2D_MASK) {
+			unsigned int count;
+
+ z180_regread(device,
+ ADDR_VGC_IRQ_ACTIVE_CNT >> 2,
+ &count);
+
+ count >>= 8;
+ count &= 255;
+ z180_dev->timestamp += count;
+
+ queue_work(device->work_queue, &device->ts_expired_ws);
+ wake_up_interruptible(&device->wait_queue);
+
+ atomic_notifier_call_chain(
+ &(device->ts_notifier_list),
+ device->id, NULL);
+ }
+ }
+
+ if ((device->pwrctrl.nap_allowed == true) &&
+ (device->requested_state == KGSL_STATE_NONE)) {
+ kgsl_pwrctrl_request_state(device, KGSL_STATE_NAP);
+ queue_work(device->work_queue, &device->idle_check_ws);
+ }
+ mod_timer_pending(&device->idle_timer,
+ jiffies + device->pwrctrl.interval_timeout);
+
+ return result;
+}
+
+static void z180_cleanup_pt(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
+
+ kgsl_mmu_unmap(pagetable, &device->memstore);
+
+ kgsl_mmu_unmap(pagetable, &z180_dev->ringbuffer.cmdbufdesc);
+}
+
+static int z180_setup_pt(struct kgsl_device *device,
+ struct kgsl_pagetable *pagetable)
+{
+ int result = 0;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+
+ if (result)
+ goto error;
+
+ result = kgsl_mmu_map_global(pagetable, &device->memstore,
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ if (result)
+ goto error_unmap_dummy;
+
+ result = kgsl_mmu_map_global(pagetable,
+ &z180_dev->ringbuffer.cmdbufdesc,
+ GSL_PT_PAGE_RV);
+ if (result)
+ goto error_unmap_memstore;
+ return result;
+
+error_unmap_dummy:
+ kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
+
+error_unmap_memstore:
+ kgsl_mmu_unmap(pagetable, &device->memstore);
+
+error:
+ return result;
+}
+
+static inline unsigned int rb_offset(unsigned int timestamp)
+{
+	return (timestamp % Z180_PACKET_COUNT) *
+		sizeof(unsigned int) * Z180_PACKET_SIZE;
+}
+
+static inline unsigned int rb_gpuaddr(struct z180_device *z180_dev,
+ unsigned int timestamp)
+{
+ return z180_dev->ringbuffer.cmdbufdesc.gpuaddr + rb_offset(timestamp);
+}
+
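+/*
+ * Write a marker (two five-word dummy packets) into the ringbuffer
+ * slot that corresponds to 'timestamp'.
+ */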
+static void addmarker(struct z180_ringbuffer *rb, unsigned int timestamp)
+{
+ char *ptr = (char *)(rb->cmdbufdesc.hostptr);
+ unsigned int *p = (unsigned int *)(ptr + rb_offset(timestamp));
+
+ *p++ = Z180_STREAM_PACKET;
+ *p++ = (Z180_MARKER_CMD | 5);
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = Z180_STREAM_PACKET;
+ *p++ = 5;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+}
+
+static void addcmd(struct z180_ringbuffer *rb, unsigned int timestamp,
+ unsigned int cmd, unsigned int nextcnt)
+{
+	char *ptr = (char *)(rb->cmdbufdesc.hostptr);
+ unsigned int *p = (unsigned int *)(ptr + (rb_offset(timestamp)
+ + (Z180_MARKER_SIZE * sizeof(unsigned int))));
+
+ *p++ = Z180_STREAM_PACKET_CALL;
+ *p++ = cmd;
+ *p++ = Z180_CALL_CMD | nextcnt;
+ *p++ = ADDR_VGV3_LAST << 24;
+ *p++ = ADDR_VGV3_LAST << 24;
+}
+
+static void z180_cmdstream_start(struct kgsl_device *device, int init_ram)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ unsigned int cmd = VGV3_NEXTCMD_JUMP << VGV3_NEXTCMD_NEXTCMD_FSHIFT;
+
+ if (init_ram) {
+ z180_dev->timestamp = 0;
+ z180_dev->current_timestamp = 0;
+ }
+
+ addmarker(&z180_dev->ringbuffer, 0);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_MODE, 4);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_NEXTADDR,
+ rb_gpuaddr(z180_dev, z180_dev->current_timestamp));
+
+ z180_cmdwindow_write(device, ADDR_VGV3_NEXTCMD, cmd | 5);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_WRITEADDR,
+ device->memstore.gpuaddr);
+
+ cmd = (int)(((1) & VGV3_CONTROL_MARKADD_FMASK)
+ << VGV3_CONTROL_MARKADD_FSHIFT);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
+}
+
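+/* True while fewer than Z180_PACKET_COUNT commands are outstanding */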
+static int room_in_rb(struct z180_device *device)
+{
+ int ts_diff;
+
+ ts_diff = device->current_timestamp - device->timestamp;
+
+ return ts_diff < Z180_PACKET_COUNT;
+}
+
+static int z180_idle(struct kgsl_device *device)
+{
+ int status = 0;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ if (timestamp_cmp(z180_dev->current_timestamp,
+ z180_dev->timestamp) > 0)
+ status = z180_wait(device, NULL,
+ z180_dev->current_timestamp,
+ Z180_IDLE_TIMEOUT);
+
+ if (status)
+ KGSL_DRV_ERR(device, "z180_waittimestamp() timed out\n");
+
+ return status;
+}
+
+int
+z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context,
+ struct kgsl_ibdesc *ibdesc,
+ unsigned int numibs,
+ uint32_t *timestamp,
+ unsigned int ctrl)
+{
+ long result = 0;
+ unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
+ unsigned int cnt = 5;
+ unsigned int old_timestamp = 0;
+ unsigned int nextcnt = Z180_STREAM_END_CMD | 5;
+ struct kgsl_mem_entry *entry = NULL;
+ unsigned int cmd;
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_pagetable *pagetable = dev_priv->process_priv->pagetable;
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ unsigned int sizedwords;
+
+ if (device->state & KGSL_STATE_HUNG) {
+ result = -EINVAL;
+ goto error;
+ }
+ if (numibs != 1) {
+ KGSL_DRV_ERR(device, "Invalid number of ibs: %d\n", numibs);
+ result = -EINVAL;
+ goto error;
+ }
+ cmd = ibdesc[0].gpuaddr;
+ sizedwords = ibdesc[0].sizedwords;
+ entry = kgsl_sharedmem_find_region(dev_priv->process_priv, cmd,
+ sizedwords);
+ if (entry == NULL) {
+ KGSL_DRV_ERR(device, "Bad ibdesc: gpuaddr 0x%x size %d\n",
+ cmd, sizedwords);
+ result = -EINVAL;
+ goto error;
+ }
+ if (kgsl_gpuaddr_to_vaddr(&entry->memdesc, cmd) == NULL) {
+ KGSL_DRV_ERR(device,
+ "Cannot make kernel mapping for gpuaddr 0x%x\n",
+ cmd);
+ result = -EINVAL;
+ goto error;
+ }
+
+ KGSL_CMD_INFO(device, "ctxt %d ibaddr 0x%08x sizedwords %d\n",
+ context->id, cmd, sizedwords);
+
+ if ((context->id != (int)z180_dev->ringbuffer.prevctx) ||
+ (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
+ KGSL_CMD_INFO(device, "context switch %d -> %d\n",
+ context->id, z180_dev->ringbuffer.prevctx);
+ kgsl_mmu_setstate(&device->mmu, pagetable,
+ KGSL_MEMSTORE_GLOBAL);
+ cnt = PACKETSIZE_STATESTREAM;
+ ofs = 0;
+ }
+ kgsl_setstate(&device->mmu,
+ KGSL_MEMSTORE_GLOBAL,
+ kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
+ device->id));
+
+ result = wait_event_interruptible_timeout(device->wait_queue,
+ room_in_rb(z180_dev),
+ msecs_to_jiffies(KGSL_TIMEOUT_DEFAULT));
+ if (result < 0) {
+ KGSL_CMD_ERR(device, "wait_event_interruptible_timeout "
+ "failed: %ld\n", result);
+ goto error;
+ }
+ result = 0;
+
+ old_timestamp = z180_dev->current_timestamp;
+ z180_dev->current_timestamp++;
+ *timestamp = z180_dev->current_timestamp;
+
+ z180_dev->ringbuffer.prevctx = context->id;
+
+ addcmd(&z180_dev->ringbuffer, old_timestamp, cmd + ofs, cnt);
+ kgsl_pwrscale_busy(device);
+
+
+ addmarker(&z180_dev->ringbuffer, z180_dev->current_timestamp);
+
+
+ kgsl_sharedmem_writel(&entry->memdesc,
+ ((sizedwords + 1) * sizeof(unsigned int)),
+ rb_gpuaddr(z180_dev, z180_dev->current_timestamp));
+ kgsl_sharedmem_writel(&entry->memdesc,
+ ((sizedwords + 2) * sizeof(unsigned int)),
+ nextcnt);
+
+
+ mb();
+
+ cmd = (int)(((2) & VGV3_CONTROL_MARKADD_FMASK)
+ << VGV3_CONTROL_MARKADD_FSHIFT);
+
+ z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
+ z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
+error:
+ return (int)result;
+}
+
+static int z180_ringbuffer_init(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
+ z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+ return kgsl_allocate_contiguous(&z180_dev->ringbuffer.cmdbufdesc,
+ Z180_RB_SIZE);
+}
+
+static void z180_ringbuffer_close(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ kgsl_sharedmem_free(&z180_dev->ringbuffer.cmdbufdesc);
+ memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
+}
+
+static int __devinit z180_probe(struct platform_device *pdev)
+{
+ int status = -EINVAL;
+ struct kgsl_device *device = NULL;
+ struct z180_device *z180_dev;
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+ device->parentdev = &pdev->dev;
+
+ z180_dev = Z180_DEVICE(device);
+
+ status = z180_ringbuffer_init(device);
+ if (status != 0)
+ goto error;
+
+ status = kgsl_device_platform_probe(device);
+ if (status)
+ goto error_close_ringbuffer;
+
+ kgsl_pwrscale_init(device);
+ kgsl_pwrscale_attach_policy(device, Z180_DEFAULT_PWRSCALE_POLICY);
+
+ return status;
+
+error_close_ringbuffer:
+ z180_ringbuffer_close(device);
+error:
+ device->parentdev = NULL;
+ return status;
+}
+
+static int __devexit z180_remove(struct platform_device *pdev)
+{
+ struct kgsl_device *device = NULL;
+
+ device = (struct kgsl_device *)pdev->id_entry->driver_data;
+
+ kgsl_pwrscale_close(device);
+ kgsl_device_platform_remove(device);
+
+ z180_ringbuffer_close(device);
+
+ return 0;
+}
+
+static int z180_start(struct kgsl_device *device, unsigned int init_ram)
+{
+ int status = 0;
+
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+
+ kgsl_pwrctrl_enable(device);
+
+
+ z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0x0);
+
+ kgsl_mh_start(device);
+
+ status = kgsl_mmu_start(device);
+ if (status)
+ goto error_clk_off;
+
+ z180_cmdstream_start(device, init_ram);
+
+ mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
+ device->ftbl->irqctrl(device, 1);
+ return 0;
+
+error_clk_off:
+ z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
+ kgsl_pwrctrl_disable(device);
+ return status;
+}
+
+static int z180_stop(struct kgsl_device *device)
+{
+ device->ftbl->irqctrl(device, 0);
+ z180_idle(device);
+
+ del_timer_sync(&device->idle_timer);
+
+ kgsl_mmu_stop(&device->mmu);
+
+
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
+
+ kgsl_pwrctrl_disable(device);
+
+ return 0;
+}
+
+static int z180_getproperty(struct kgsl_device *device,
+ enum kgsl_property_type type,
+ void *value,
+ unsigned int sizebytes)
+{
+ int status = -EINVAL;
+
+ switch (type) {
+ case KGSL_PROP_DEVICE_INFO:
+ {
+ struct kgsl_devinfo devinfo;
+
+ if (sizebytes != sizeof(devinfo)) {
+ status = -EINVAL;
+ break;
+ }
+
+ memset(&devinfo, 0, sizeof(devinfo));
+ devinfo.device_id = device->id+1;
+ devinfo.chip_id = 0;
+ devinfo.mmu_enabled = kgsl_mmu_enabled();
+
+ if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
+ 0) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+ case KGSL_PROP_MMU_ENABLE:
+ {
+ int mmu_prop = kgsl_mmu_enabled();
+ if (sizebytes != sizeof(int)) {
+ status = -EINVAL;
+ break;
+ }
+ if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
+ status = -EFAULT;
+ break;
+ }
+ status = 0;
+ }
+ break;
+
+ default:
+ KGSL_DRV_ERR(device, "invalid property: %d\n", type);
+ status = -EINVAL;
+ }
+ return status;
+}
+
+static unsigned int z180_isidle(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ return (timestamp_cmp(z180_dev->timestamp,
+ z180_dev->current_timestamp) == 0) ? true : false;
+}
+
+static int z180_suspend_context(struct kgsl_device *device)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+
+ return 0;
+}
+
+static void _z180_regread_simple(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value)
+{
+ unsigned int *reg;
+
+ BUG_ON(offsetwords * sizeof(uint32_t) >= device->reg_len);
+
+ reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
+
+ *value = __raw_readl(reg);
+ rmb();
+
+}
+
+static void _z180_regwrite_simple(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value)
+{
+ unsigned int *reg;
+
+ BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
+
+ reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));
+ kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
+ wmb();
+ __raw_writel(value, reg);
+}
+
+
+static void _z180_regread_mmu(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&z180_dev->cmdwin_lock, flags);
+ _z180_regwrite_simple(device, (ADDR_VGC_MH_READ_ADDR >> 2),
+ offsetwords);
+ _z180_regread_simple(device, (ADDR_VGC_MH_DATA_ADDR >> 2), value);
+ spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags);
+}
+
+
+static void _z180_regwrite_mmu(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ unsigned int cmdwinaddr;
+ unsigned long flags;
+
+ cmdwinaddr = ((Z180_CMDWINDOW_MMU << Z180_CMDWINDOW_TARGET_SHIFT) &
+ Z180_CMDWINDOW_TARGET_MASK);
+ cmdwinaddr |= ((offsetwords << Z180_CMDWINDOW_ADDR_SHIFT) &
+ Z180_CMDWINDOW_ADDR_MASK);
+
+ spin_lock_irqsave(&z180_dev->cmdwin_lock, flags);
+ _z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2,
+ cmdwinaddr);
+ _z180_regwrite_simple(device, ADDR_VGC_MMUCOMMANDSTREAM >> 2, value);
+ spin_unlock_irqrestore(&z180_dev->cmdwin_lock, flags);
+}
+
+static void z180_regread(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int *value)
+{
+ if (!in_interrupt())
+ kgsl_pre_hwaccess(device);
+
+ if ((offsetwords >= MH_ARBITER_CONFIG &&
+ offsetwords <= MH_AXI_HALT_CONTROL) ||
+ (offsetwords >= MH_MMU_CONFIG &&
+ offsetwords <= MH_MMU_MPU_END)) {
+ _z180_regread_mmu(device, offsetwords, value);
+ } else {
+ _z180_regread_simple(device, offsetwords, value);
+ }
+}
+
+static void z180_regwrite(struct kgsl_device *device,
+ unsigned int offsetwords,
+ unsigned int value)
+{
+ if (!in_interrupt())
+ kgsl_pre_hwaccess(device);
+
+ if ((offsetwords >= MH_ARBITER_CONFIG &&
+ offsetwords <= MH_CLNT_INTF_CTRL_CONFIG2) ||
+ (offsetwords >= MH_MMU_CONFIG &&
+ offsetwords <= MH_MMU_MPU_END)) {
+ _z180_regwrite_mmu(device, offsetwords, value);
+ } else {
+ _z180_regwrite_simple(device, offsetwords, value);
+ }
+}
+
+static void z180_cmdwindow_write(struct kgsl_device *device,
+ unsigned int addr, unsigned int data)
+{
+ unsigned int cmdwinaddr;
+
+ cmdwinaddr = ((Z180_CMDWINDOW_2D << Z180_CMDWINDOW_TARGET_SHIFT) &
+ Z180_CMDWINDOW_TARGET_MASK);
+ cmdwinaddr |= ((addr << Z180_CMDWINDOW_ADDR_SHIFT) &
+ Z180_CMDWINDOW_ADDR_MASK);
+
+ z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, cmdwinaddr);
+ z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, data);
+}
+
+static unsigned int z180_readtimestamp(struct kgsl_device *device,
+ struct kgsl_context *context, enum kgsl_timestamp_type type)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+ (void)context;
+
+ return z180_dev->timestamp;
+}
+
+static int z180_waittimestamp(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
+{
+ int status = -EINVAL;
+
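+	/* A timeout of -1 means wait indefinitely; bound it at 10 seconds */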
+ if (msecs == -1)
+ msecs = 10 * MSEC_PER_SEC;
+
+ mutex_unlock(&device->mutex);
+ status = z180_wait(device, context, timestamp, msecs);
+ mutex_lock(&device->mutex);
+
+ return status;
+}
+
+static int z180_wait(struct kgsl_device *device,
+ struct kgsl_context *context,
+ unsigned int timestamp,
+ unsigned int msecs)
+{
+ int status = -EINVAL;
+ long timeout = 0;
+
+ timeout = wait_io_event_interruptible_timeout(
+ device->wait_queue,
+ kgsl_check_timestamp(device, context, timestamp),
+ msecs_to_jiffies(msecs));
+
+ if (timeout > 0)
+ status = 0;
+ else if (timeout == 0) {
+ status = -ETIMEDOUT;
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_HUNG);
+ } else
+ status = timeout;
+
+ return status;
+}
+
+static void
+z180_drawctxt_destroy(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ struct z180_device *z180_dev = Z180_DEVICE(device);
+
+ z180_idle(device);
+
+ if (z180_dev->ringbuffer.prevctx == context->id) {
+ z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
+ device->mmu.hwpagetable = device->mmu.defaultpagetable;
+ kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
+ KGSL_MMUFLAGS_PTUPDATE);
+ }
+}
+
+static void z180_power_stats(struct kgsl_device *device,
+ struct kgsl_power_stats *stats)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ s64 tmp = ktime_to_us(ktime_get());
+
+ if (pwr->time == 0) {
+ pwr->time = tmp;
+ stats->total_time = 0;
+ stats->busy_time = 0;
+ } else {
+ stats->total_time = tmp - pwr->time;
+ pwr->time = tmp;
+ stats->busy_time = tmp - device->on_time;
+ device->on_time = tmp;
+ }
+}
+
+static void z180_irqctrl(struct kgsl_device *device, int state)
+{
+ if (state) {
+ z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 3);
+ z180_regwrite(device, MH_INTERRUPT_MASK,
+ kgsl_mmu_get_int_mask());
+ } else {
+ z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
+ z180_regwrite(device, MH_INTERRUPT_MASK, 0);
+ }
+}
+
+static unsigned int z180_gpuid(struct kgsl_device *device, unsigned int *chipid)
+{
+ if (chipid != NULL)
+ *chipid = 0;
+
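+	/* Core type 0x0002 in the top word, Z180 (180) in the bottom word */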
+ return (0x0002 << 16) | 180;
+}
+
+static const struct kgsl_functable z180_functable = {
+
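+	/* Mandatory device-specific functions */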
+ .regread = z180_regread,
+ .regwrite = z180_regwrite,
+ .idle = z180_idle,
+ .isidle = z180_isidle,
+ .suspend_context = z180_suspend_context,
+ .start = z180_start,
+ .stop = z180_stop,
+ .getproperty = z180_getproperty,
+ .waittimestamp = z180_waittimestamp,
+ .readtimestamp = z180_readtimestamp,
+ .issueibcmds = z180_cmdstream_issueibcmds,
+ .setup_pt = z180_setup_pt,
+ .cleanup_pt = z180_cleanup_pt,
+ .power_stats = z180_power_stats,
+ .irqctrl = z180_irqctrl,
+ .gpuid = z180_gpuid,
+ .irq_handler = z180_irq_handler,
+
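+	/* Optional: the 2D core has no drawctxt create hook or private ioctls */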
+ .drawctxt_create = NULL,
+ .drawctxt_destroy = z180_drawctxt_destroy,
+ .ioctl = NULL,
+};
+
+static struct platform_device_id z180_id_table[] = {
+ { DEVICE_2D0_NAME, (kernel_ulong_t)&device_2d0.dev, },
+ { DEVICE_2D1_NAME, (kernel_ulong_t)&device_2d1.dev, },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, z180_id_table);
+
+static struct platform_driver z180_platform_driver = {
+ .probe = z180_probe,
+ .remove = __devexit_p(z180_remove),
+ .suspend = kgsl_suspend_driver,
+ .resume = kgsl_resume_driver,
+ .id_table = z180_id_table,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVICE_2D_NAME,
+ .pm = &kgsl_pm_ops,
+ }
+};
+
+static int __init kgsl_2d_init(void)
+{
+ return platform_driver_register(&z180_platform_driver);
+}
+
+static void __exit kgsl_2d_exit(void)
+{
+ platform_driver_unregister(&z180_platform_driver);
+}
+
+module_init(kgsl_2d_init);
+module_exit(kgsl_2d_exit);
+
+MODULE_DESCRIPTION("2D Graphics driver");
+MODULE_VERSION("1.2");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kgsl_2d");
diff --git a/drivers/gpu/msm/z180.h b/drivers/gpu/msm/z180.h
new file mode 100644
index 0000000..7f4ab7f
--- /dev/null
+++ b/drivers/gpu/msm/z180.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __Z180_H
+#define __Z180_H
+
+#include "kgsl_device.h"
+
+#define DEVICE_2D_NAME "kgsl-2d"
+#define DEVICE_2D0_NAME "kgsl-2d0"
+#define DEVICE_2D1_NAME "kgsl-2d1"
+
+#define Z180_DEFAULT_PWRSCALE_POLICY NULL
+
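+/* Worst-case idle wait, in milliseconds */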
+#define Z180_IDLE_TIMEOUT (10 * 1000)
+
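+/* Ringbuffer bookkeeping: prevctx is the id of the last context to
+ * submit commands through the ringbuffer
+ */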
+struct z180_ringbuffer {
+ unsigned int prevctx;
+ struct kgsl_memdesc cmdbufdesc;
+};
+
+struct z180_device {
+ struct kgsl_device dev; /* Must be first field in this struct */
+ int current_timestamp;
+ int timestamp;
+ struct z180_ringbuffer ringbuffer;
+ spinlock_t cmdwin_lock;
+};
+
+#endif /* __Z180_H */
diff --git a/drivers/gpu/msm/z180_reg.h b/drivers/gpu/msm/z180_reg.h
new file mode 100644
index 0000000..382d0c5
--- /dev/null
+++ b/drivers/gpu/msm/z180_reg.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __Z180_REG_H
+#define __Z180_REG_H
+
+#define REG_VGC_IRQSTATUS__MH_MASK 0x00000001L
+#define REG_VGC_IRQSTATUS__G2D_MASK 0x00000002L
+#define REG_VGC_IRQSTATUS__FIFO_MASK 0x00000004L
+
+#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
+#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
+#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
+#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
+#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
+#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
+#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
+#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
+#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
+#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
+#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
+#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
+#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
+
+#define ADDR_VGC_MH_READ_ADDR 0x0510
+#define ADDR_VGC_MH_DATA_ADDR 0x0518
+#define ADDR_VGC_COMMANDSTREAM 0x0000
+#define ADDR_VGC_IRQENABLE 0x0438
+#define ADDR_VGC_IRQSTATUS 0x0418
+#define ADDR_VGC_IRQ_ACTIVE_CNT 0x04E0
+#define ADDR_VGC_MMUCOMMANDSTREAM 0x03FC
+#define ADDR_VGV3_CONTROL 0x0070
+#define ADDR_VGV3_LAST 0x007F
+#define ADDR_VGV3_MODE 0x0071
+#define ADDR_VGV3_NEXTADDR 0x0075
+#define ADDR_VGV3_NEXTCMD 0x0076
+#define ADDR_VGV3_WRITEADDR 0x0072
+
+#endif /* __Z180_REG_H */
diff --git a/drivers/gpu/msm/z180_trace.c b/drivers/gpu/msm/z180_trace.c
new file mode 100644
index 0000000..c5349db
--- /dev/null
+++ b/drivers/gpu/msm/z180_trace.c
@@ -0,0 +1,19 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "kgsl.h"
+#include "z180.h"
+#include "z180_reg.h"
+
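+/* Defining CREATE_TRACE_POINTS before including the trace header emits
+ * the tracepoint definitions in this translation unit
+ */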
+#define CREATE_TRACE_POINTS
+#include "z180_trace.h"
diff --git a/drivers/gpu/msm/z180_trace.h b/drivers/gpu/msm/z180_trace.h
new file mode 100644
index 0000000..3536655
--- /dev/null
+++ b/drivers/gpu/msm/z180_trace.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_Z180_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _Z180_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kgsl
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE z180_trace
+
+#include <linux/tracepoint.h>
+
+struct kgsl_device;
+
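+/* Logs the VGC IRQ status word, decoded into MH/G2D/FIFO flags */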
+TRACE_EVENT(kgsl_z180_irq_status,
+
+ TP_PROTO(struct kgsl_device *device, unsigned int status),
+
+ TP_ARGS(device, status),
+
+ TP_STRUCT__entry(
+ __string(device_name, device->name)
+ __field(unsigned int, status)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device_name, device->name);
+ __entry->status = status;
+ ),
+
+ TP_printk(
+ "d_name=%s status=%s",
+ __get_str(device_name),
+ __entry->status ? __print_flags(__entry->status, "|",
+ { REG_VGC_IRQSTATUS__MH_MASK, "MH" },
+ { REG_VGC_IRQSTATUS__G2D_MASK, "G2D" },
+ { REG_VGC_IRQSTATUS__FIFO_MASK, "FIFO" }) : "None"
+ )
+);
+
+#endif /* _Z180_TRACE_H */
+
+#include <trace/define_trace.h>