msm: kgsl: Rename PM4 to CP to better align with A3XX code
The A3XX code likes to use CP_ as a prefix for ringbuffer
commands rather than the legacy PM4 prefix. Since it is more
correct, switch the A2XX code over now to make it easier to
integrate A3XX into the mix.
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index eb8527a..6638d8c 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -243,23 +243,23 @@
if (adreno_dev->drawctxt_active) {
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
/* wait for graphics pipe to be idle */
- *cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0x00000000;
/* set page table base */
- *cmds++ = pm4_type0_packet(MH_MMU_PT_BASE, 1);
+ *cmds++ = cp_type0_packet(MH_MMU_PT_BASE, 1);
*cmds++ = device->mmu.hwpagetable->base.gpuaddr;
sizedwords += 4;
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) {
- *cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE,
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE,
1);
*cmds++ = 0x00000000;
sizedwords += 2;
}
- *cmds++ = pm4_type0_packet(MH_MMU_INVALIDATE, 1);
+ *cmds++ = cp_type0_packet(MH_MMU_INVALIDATE, 1);
*cmds++ = mh_mmu_invalidate;
sizedwords += 2;
}
@@ -278,20 +278,20 @@
* VGT DMA request fifo and prevent any further
* vertex/bin updates from occurring until the wait
* has finished. */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = (0x4 << 16) |
(REG_PA_SU_SC_MODE_CNTL - 0x2000);
*cmds++ = 0; /* disable faceness generation */
- *cmds++ = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
+ *cmds++ = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
*cmds++ = device->mmu.dummyspace.gpuaddr;
- *cmds++ = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
*cmds++ = 0; /* viz query info */
*cmds++ = 0x0003C004; /* draw indicator */
*cmds++ = 0; /* bin base */
*cmds++ = 3; /* bin size */
*cmds++ = device->mmu.dummyspace.gpuaddr; /* dma base */
*cmds++ = 6; /* dma size */
- *cmds++ = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
*cmds++ = 0; /* viz query info */
*cmds++ = 0x0003C004; /* draw indicator */
*cmds++ = 0; /* bin base */
@@ -299,13 +299,13 @@
/* dma base */
*cmds++ = device->mmu.dummyspace.gpuaddr;
*cmds++ = 6; /* dma size */
- *cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0x00000000;
sizedwords += 21;
}
if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) {
- *cmds++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x7fff; /* invalidate all base pointers */
sizedwords += 2;
}
@@ -1013,7 +1013,7 @@
/* submit a dummy packet so that even if all
* commands upto timestamp get executed we will still
* get an interrupt */
- cmds[0] = pm4_type3_packet(PM4_NOP, 1);
+ cmds[0] = cp_type3_packet(CP_NOP, 1);
cmds[1] = 0;
adreno_ringbuffer_issuecmds(device, 0, &cmds[0], 2);
}
diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c
index 3aa601c..eb923df 100644
--- a/drivers/gpu/msm/adreno_a2xx.c
+++ b/drivers/gpu/msm/adreno_a2xx.c
@@ -74,12 +74,8 @@
#define LOOP_CONSTANTS 56 /* DWORDS */
#define SHADER_INSTRUCT_LOG2 9U /* 2^n == SHADER_INSTRUCTIONS */
-#if defined(PM4_IM_STORE)
/* 96-bit instructions */
#define SHADER_INSTRUCT (1<<SHADER_INSTRUCT_LOG2)
-#else
-#define SHADER_INSTRUCT 0
-#endif
/* LOAD_CONSTANT_CONTEXT shadow size */
#define LCC_SHADOW_SIZE 0x2000 /* 8KB */
@@ -111,11 +107,9 @@
uint32_t bool_shadow; /* bool constants */
uint32_t loop_shadow; /* loop constants */
-#if defined(PM4_IM_STORE)
uint32_t shader_shared; /* shared shader instruction shadow */
uint32_t shader_vertex; /* vertex shader instruction shadow */
uint32_t shader_pixel; /* pixel shader instruction shadow */
-#endif
/* Addresses in command buffer where separately handled registers
* are saved
@@ -286,7 +280,7 @@
unsigned int *shader_pgm, int dwords)
{
/* load the patched vertex shader stream */
- *cmds++ = pm4_type3_packet(PM4_IM_LOAD_IMMEDIATE, 2 + dwords);
+ *cmds++ = cp_type3_packet(CP_IM_LOAD_IMMEDIATE, 2 + dwords);
/* 0=vertex shader, 1=fragment shader */
*cmds++ = vtxfrag;
/* instruction start & size (in 32-bit words) */
@@ -302,7 +296,7 @@
uint32_t src, int dwords)
{
while (dwords-- > 0) {
- *cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*cmds++ = src++;
*cmds++ = dst;
dst += 4;
@@ -320,7 +314,7 @@
unsigned int i = start;
for (i = start; i <= end; i++) {
- *(*cmd)++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *(*cmd)++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*(*cmd)++ = i;
*(*cmd)++ =
((drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000) +
@@ -337,10 +331,10 @@
unsigned int *start = tmp_ctx.cmd;
unsigned int *cmds = start;
- *cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0;
- *cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+ *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
tmp_ctx.chicken_restore = virt2gpu(cmds, &drawctxt->gpustate);
*cmds++ = 0x00000000;
@@ -403,13 +397,13 @@
unsigned int *start = tmp_ctx.cmd;
unsigned int *cmd = start;
- *cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmd++ = 0;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
/* Make sure the HW context has the correct register values
* before reading them. */
- *cmd++ = pm4_type3_packet(PM4_CONTEXT_UPDATE, 1);
+ *cmd++ = cp_type3_packet(CP_CONTEXT_UPDATE, 1);
*cmd++ = 0;
{
@@ -453,13 +447,13 @@
* register. There is logic in the HW that blocks reading of this
* register when the SQ block is not idle, which we believe is
* contributing to the hang.*/
- *cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmd++ = 0;
/* H/w registers are already shadowed; just need to disable shadowing
* to prevent corruption.
*/
- *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
*cmd++ = (drawctxt->gpustate.gpuaddr + REG_OFFSET) & 0xFFFFE000;
*cmd++ = 4 << 16; /* regs, start=0 */
*cmd++ = 0x0; /* count = 0 */
@@ -467,7 +461,7 @@
/* ALU constants are already shadowed; just need to disable shadowing
* to prevent corruption.
*/
- *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
*cmd++ = 0 << 16; /* ALU, start=0 */
*cmd++ = 0x0; /* count = 0 */
@@ -475,18 +469,18 @@
/* Tex constants are already shadowed; just need to disable shadowing
* to prevent corruption.
*/
- *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
*cmd++ = 1 << 16; /* Tex, start=0 */
*cmd++ = 0x0; /* count = 0 */
#endif
/* Need to handle some of the registers separately */
- *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*cmd++ = REG_SQ_GPR_MANAGEMENT;
*cmd++ = tmp_ctx.reg_values[0];
- *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*cmd++ = REG_TP0_CHICKEN;
*cmd++ = tmp_ctx.reg_values[1];
@@ -495,7 +489,7 @@
unsigned int j = 2;
for (i = REG_LEIA_VSC_BIN_SIZE; i <=
REG_LEIA_VSC_PIPE_DATA_LENGTH_7; i++) {
- *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*cmd++ = i;
*cmd++ = tmp_ctx.reg_values[j];
j++;
@@ -529,26 +523,26 @@
unsigned int offset = (addr - (addr & 0xfffff000)) / bytesperpixel;
/* Store TP0_CHICKEN register */
- *cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*cmds++ = REG_TP0_CHICKEN;
*cmds++ = tmp_ctx.chicken_restore;
- *cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0;
/* Set TP0_CHICKEN to zero */
- *cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+ *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
*cmds++ = 0x00000000;
/* Set PA_SC_AA_CONFIG to 0 */
- *cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+ *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1);
*cmds++ = 0x00000000;
/* program shader */
/* load shader vtx constants ... 5 dwords */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
*cmds++ = (0x1 << 16) | SHADER_CONST_ADDR;
*cmds++ = 0;
/* valid(?) vtx constant flag & addr */
@@ -557,32 +551,32 @@
*cmds++ = 0x00000030;
/* Invalidate L2 cache to make sure vertices are updated */
- *cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
+ *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1);
*cmds++ = 0x1;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
*cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX);
*cmds++ = 0x00ffffff; /* REG_VGT_MAX_VTX_INDX */
*cmds++ = 0x0; /* REG_VGT_MIN_VTX_INDX */
*cmds++ = 0x00000000; /* REG_VGT_INDX_OFFSET */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_SC_AA_MASK);
*cmds++ = 0x0000ffff; /* REG_PA_SC_AA_MASK */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_COLORCONTROL);
*cmds++ = 0x00000c20;
/* Repartition shaders */
- *cmds++ = pm4_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+ *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
*cmds++ = 0x180;
/* Invalidate Vertex & Pixel instruction code address and sizes */
- *cmds++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x00003F00;
- *cmds++ = pm4_type3_packet(PM4_SET_SHADER_BASES, 1);
+ *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
*cmds++ = (0x80000000) | 0x180;
/* load the patched vertex shader stream */
@@ -593,7 +587,7 @@
program_shader(cmds, 1, gmem2sys_frag_pgm, GMEM2SYS_FRAG_PGM_LEN);
/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL);
if (adreno_is_a22x(adreno_dev))
*cmds++ = 0x10018001;
@@ -604,13 +598,13 @@
/* resolve */
/* PA_CL_VTE_CNTL */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_CL_VTE_CNTL);
/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
*cmds++ = 0x00000b00;
/* program surface info */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_RB_SURFACE_INFO);
*cmds++ = shadow->gmem_pitch; /* pitch, MSAA = 1 */
@@ -624,7 +618,7 @@
format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base;
/* disable Z */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_DEPTHCONTROL);
if (adreno_is_a22x(adreno_dev))
*cmds++ = 0x08;
@@ -636,17 +630,17 @@
* Back_ptype = draw triangles
* Provoking vertex = last
*/
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL);
*cmds++ = 0x00080240;
/* Use maximum scissor values -- quad vertices already have the
* correct bounds */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
*cmds++ = (0 << 16) | 0;
*cmds++ = (0x1fff << 16) | (0x1fff);
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
*cmds++ = (0x1fff << 16) | (0x1fff);
@@ -654,20 +648,20 @@
/* load the viewport so that z scale = clear depth and
* z offset = 0.0f
*/
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE);
*cmds++ = 0xbf800000; /* -1.0f */
*cmds++ = 0x0;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_COLOR_MASK);
*cmds++ = 0x0000000f; /* R = G = B = 1:enabled */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK);
*cmds++ = 0xffffffff;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_SQ_WRAPPING_0);
*cmds++ = 0x00000000;
*cmds++ = 0x00000000;
@@ -677,7 +671,7 @@
*/
/* load the COPY state */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 6);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 6);
*cmds++ = CP_REG(REG_RB_COPY_CONTROL);
*cmds++ = 0; /* RB_COPY_CONTROL */
*cmds++ = addr & 0xfffff000; /* RB_COPY_DEST_BASE */
@@ -692,30 +686,30 @@
BUG_ON(offset & 0xfffff000);
*cmds++ = offset;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_MODECONTROL);
*cmds++ = 0x6; /* EDRAM copy */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL);
*cmds++ = 0x00010000;
if (adreno_is_a22x(adreno_dev)) {
- *cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
+ *cmds++ = cp_type3_packet(CP_SET_DRAW_INIT_FLAGS, 1);
*cmds++ = 0;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
*cmds++ = 0x0000000;
- *cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
*cmds++ = 0; /* viz query info. */
/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore*/
*cmds++ = 0x00004088;
*cmds++ = 3; /* NumIndices=3 */
} else {
/* queue the draw packet */
- *cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2);
*cmds++ = 0; /* viz query info. */
/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
*cmds++ = 0x00030088;
@@ -738,24 +732,24 @@
unsigned int *start = cmds;
/* Store TP0_CHICKEN register */
- *cmds++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmds++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*cmds++ = REG_TP0_CHICKEN;
*cmds++ = tmp_ctx.chicken_restore;
- *cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0;
/* Set TP0_CHICKEN to zero */
- *cmds++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+ *cmds++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
*cmds++ = 0x00000000;
/* Set PA_SC_AA_CONFIG to 0 */
- *cmds++ = pm4_type0_packet(REG_PA_SC_AA_CONFIG, 1);
+ *cmds++ = cp_type0_packet(REG_PA_SC_AA_CONFIG, 1);
*cmds++ = 0x00000000;
/* shader constants */
/* vertex buffer constants */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 7);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 7);
*cmds++ = (0x1 << 16) | (9 * 6);
/* valid(?) vtx constant flag & addr */
@@ -770,20 +764,20 @@
*cmds++ = 0;
/* Invalidate L2 cache to make sure vertices are updated */
- *cmds++ = pm4_type0_packet(REG_TC_CNTL_STATUS, 1);
+ *cmds++ = cp_type0_packet(REG_TC_CNTL_STATUS, 1);
*cmds++ = 0x1;
cmds = program_shader(cmds, 0, sys2gmem_vtx_pgm, SYS2GMEM_VTX_PGM_LEN);
/* Repartition shaders */
- *cmds++ = pm4_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+ *cmds++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
*cmds++ = 0x180;
/* Invalidate Vertex & Pixel instruction code address and sizes */
- *cmds++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
+ *cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x00000300; /* 0x100 = Vertex, 0x200 = Pixel */
- *cmds++ = pm4_type3_packet(PM4_SET_SHADER_BASES, 1);
+ *cmds++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
*cmds++ = (0x80000000) | 0x180;
/* Load the patched fragment shader stream */
@@ -791,43 +785,43 @@
program_shader(cmds, 1, sys2gmem_frag_pgm, SYS2GMEM_FRAG_PGM_LEN);
/* SQ_PROGRAM_CNTL / SQ_CONTEXT_MISC */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_SQ_PROGRAM_CNTL);
*cmds++ = 0x10030002;
*cmds++ = 0x00000008;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_SC_AA_MASK);
*cmds++ = 0x0000ffff; /* REG_PA_SC_AA_MASK */
if (!adreno_is_a22x(adreno_dev)) {
/* PA_SC_VIZ_QUERY */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_SC_VIZ_QUERY);
*cmds++ = 0x0; /*REG_PA_SC_VIZ_QUERY */
}
/* RB_COLORCONTROL */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_COLORCONTROL);
*cmds++ = 0x00000c20;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 4);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 4);
*cmds++ = CP_REG(REG_VGT_MAX_VTX_INDX);
*cmds++ = 0x00ffffff; /* mmVGT_MAX_VTX_INDX */
*cmds++ = 0x0; /* mmVGT_MIN_VTX_INDX */
*cmds++ = 0x00000000; /* mmVGT_INDX_OFFSET */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_VGT_VERTEX_REUSE_BLOCK_CNTL);
*cmds++ = 0x00000002; /* mmVGT_VERTEX_REUSE_BLOCK_CNTL */
*cmds++ = 0x00000002; /* mmVGT_OUT_DEALLOC_CNTL */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_SQ_INTERPOLATOR_CNTL);
*cmds++ = 0xffffffff; /* mmSQ_INTERPOLATOR_CNTL */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_SC_AA_CONFIG);
*cmds++ = 0x00000000; /* REG_PA_SC_AA_CONFIG */
@@ -836,13 +830,13 @@
* Back_ptype = draw triangles
* Provoking vertex = last
*/
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_SU_SC_MODE_CNTL);
*cmds++ = 0x00080240;
/* texture constants */
*cmds++ =
- pm4_type3_packet(PM4_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
+ cp_type3_packet(CP_SET_CONSTANT, (SYS2GMEM_TEX_CONST_LEN + 1));
*cmds++ = (0x1 << 16) | (0 * 6);
memcpy(cmds, sys2gmem_tex_const, SYS2GMEM_TEX_CONST_LEN << 2);
cmds[0] |= (shadow->pitch >> 5) << 22;
@@ -852,7 +846,7 @@
cmds += SYS2GMEM_TEX_CONST_LEN;
/* program surface info */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_RB_SURFACE_INFO);
*cmds++ = shadow->gmem_pitch; /* pitch, MSAA = 1 */
@@ -864,7 +858,7 @@
format << RB_COLOR_INFO__COLOR_FORMAT__SHIFT) | tmp_ctx.gmem_base;
/* RB_DEPTHCONTROL */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_DEPTHCONTROL);
if (adreno_is_a22x(adreno_dev))
@@ -874,35 +868,35 @@
/* Use maximum scissor values -- quad vertices already
* have the correct bounds */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_PA_SC_SCREEN_SCISSOR_TL);
*cmds++ = (0 << 16) | 0;
*cmds++ = ((0x1fff) << 16) | 0x1fff;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_PA_SC_WINDOW_SCISSOR_TL);
*cmds++ = (unsigned int)((1U << 31) | (0 << 16) | 0);
*cmds++ = ((0x1fff) << 16) | 0x1fff;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_CL_VTE_CNTL);
/* disable X/Y/Z transforms, X/Y/Z are premultiplied by W */
*cmds++ = 0x00000b00;
/*load the viewport so that z scale = clear depth and z offset = 0.0f */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_PA_CL_VPORT_ZSCALE);
*cmds++ = 0xbf800000;
*cmds++ = 0x0;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_COLOR_MASK);
*cmds++ = 0x0000000f; /* R = G = B = 1:enabled */
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_COLOR_DEST_MASK);
*cmds++ = 0xffffffff;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 3);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 3);
*cmds++ = CP_REG(REG_SQ_WRAPPING_0);
*cmds++ = 0x00000000;
*cmds++ = 0x00000000;
@@ -910,31 +904,31 @@
/* load the stencil ref value
* $AAM - do this later
*/
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_RB_MODECONTROL);
/* draw pixels with color and depth/stencil component */
*cmds++ = 0x4;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_PA_CL_CLIP_CNTL);
*cmds++ = 0x00010000;
if (adreno_is_a22x(adreno_dev)) {
- *cmds++ = pm4_type3_packet(PM4_SET_DRAW_INIT_FLAGS, 1);
+ *cmds++ = cp_type3_packet(CP_SET_DRAW_INIT_FLAGS, 1);
*cmds++ = 0;
- *cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
+ *cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = CP_REG(REG_LEIA_RB_LRZ_VSC_CONTROL);
*cmds++ = 0x0000000;
- *cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 3);
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 3);
*cmds++ = 0; /* viz query info. */
/* PrimType=RectList, SrcSel=AutoIndex, VisCullMode=Ignore*/
*cmds++ = 0x00004088;
*cmds++ = 3; /* NumIndices=3 */
} else {
/* queue the draw packet */
- *cmds++ = pm4_type3_packet(PM4_DRAW_INDX, 2);
+ *cmds++ = cp_type3_packet(CP_DRAW_INDX, 2);
*cmds++ = 0; /* viz query info. */
/* PrimType=RectList, NumIndices=3, SrcSel=AutoIndex */
*cmds++ = 0x00030088;
@@ -956,11 +950,11 @@
unsigned int reg_array_size = 0;
const unsigned int *ptr_register_ranges;
- *cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmd++ = 0;
/* H/W Registers */
- /* deferred pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, ???); */
+ /* deferred cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, ???); */
cmd++;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
/* Force mismatch */
@@ -988,7 +982,7 @@
* length
*/
start[2] =
- pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
+ cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, (cmd - start) - 3);
/* Enable shadowing for the entire register block. */
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
start[4] |= (0 << 24) | (4 << 16); /* Disable shadowing. */
@@ -997,13 +991,13 @@
#endif
/* Need to handle some of the registers separately */
- *cmd++ = pm4_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
+ *cmd++ = cp_type0_packet(REG_SQ_GPR_MANAGEMENT, 1);
tmp_ctx.reg_values[0] = virt2gpu(cmd, &drawctxt->gpustate);
*cmd++ = 0x00040400;
- *cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmd++ = 0;
- *cmd++ = pm4_type0_packet(REG_TP0_CHICKEN, 1);
+ *cmd++ = cp_type0_packet(REG_TP0_CHICKEN, 1);
tmp_ctx.reg_values[1] = virt2gpu(cmd, &drawctxt->gpustate);
*cmd++ = 0x00000000;
@@ -1012,7 +1006,7 @@
unsigned int j = 2;
for (i = REG_LEIA_VSC_BIN_SIZE; i <=
REG_LEIA_VSC_PIPE_DATA_LENGTH_7; i++) {
- *cmd++ = pm4_type0_packet(i, 1);
+ *cmd++ = cp_type0_packet(i, 1);
tmp_ctx.reg_values[j] = virt2gpu(cmd,
&drawctxt->gpustate);
*cmd++ = 0x00000000;
@@ -1021,7 +1015,7 @@
}
/* ALU Constants */
- *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
*cmd++ = drawctxt->gpustate.gpuaddr & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
*cmd++ = (0 << 24) | (0 << 16) | 0; /* Disable shadowing */
@@ -1031,7 +1025,7 @@
*cmd++ = ALU_CONSTANTS;
/* Texture Constants */
- *cmd++ = pm4_type3_packet(PM4_LOAD_CONSTANT_CONTEXT, 3);
+ *cmd++ = cp_type3_packet(CP_LOAD_CONSTANT_CONTEXT, 3);
*cmd++ = (drawctxt->gpustate.gpuaddr + TEX_OFFSET) & 0xFFFFE000;
#ifdef CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES
/* Disable shadowing */
@@ -1042,7 +1036,7 @@
*cmd++ = TEX_CONSTANTS;
/* Boolean Constants */
- *cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + BOOL_CONSTANTS);
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + BOOL_CONSTANTS);
*cmd++ = (2 << 16) | 0;
/* the next BOOL_CONSTANT dwords is the shadow area for
@@ -1052,7 +1046,7 @@
cmd += BOOL_CONSTANTS;
/* Loop Constants */
- *cmd++ = pm4_type3_packet(PM4_SET_CONSTANT, 1 + LOOP_CONSTANTS);
+ *cmd++ = cp_type3_packet(CP_SET_CONSTANT, 1 + LOOP_CONSTANTS);
*cmd++ = (3 << 16) | 0;
/* the next LOOP_CONSTANTS dwords is the shadow area for
@@ -1072,51 +1066,45 @@
{
unsigned int *cmd = tmp_ctx.cmd;
unsigned int *save, *restore, *fixup;
-#if defined(PM4_IM_STORE)
unsigned int *startSizeVtx, *startSizePix, *startSizeShared;
-#endif
unsigned int *partition1;
unsigned int *shaderBases, *partition2;
-#if defined(PM4_IM_STORE)
/* compute vertex, pixel and shared instruction shadow GPU addresses */
tmp_ctx.shader_vertex = drawctxt->gpustate.gpuaddr + SHADER_OFFSET;
tmp_ctx.shader_pixel = tmp_ctx.shader_vertex + SHADER_SHADOW_SIZE;
tmp_ctx.shader_shared = tmp_ctx.shader_pixel + SHADER_SHADOW_SIZE;
-#endif
/* restore shader partitioning and instructions */
restore = cmd; /* start address */
/* Invalidate Vertex & Pixel instruction code address and sizes */
- *cmd++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
+ *cmd++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmd++ = 0x00000300; /* 0x100 = Vertex, 0x200 = Pixel */
/* Restore previous shader vertex & pixel instruction bases. */
- *cmd++ = pm4_type3_packet(PM4_SET_SHADER_BASES, 1);
+ *cmd++ = cp_type3_packet(CP_SET_SHADER_BASES, 1);
shaderBases = cmd++; /* TBD #5: shader bases (from fixup) */
/* write the shader partition information to a scratch register */
- *cmd++ = pm4_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
+ *cmd++ = cp_type0_packet(REG_SQ_INST_STORE_MANAGMENT, 1);
partition1 = cmd++; /* TBD #4a: partition info (from save) */
-#if defined(PM4_IM_STORE)
/* load vertex shader instructions from the shadow. */
- *cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+ *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
*cmd++ = tmp_ctx.shader_vertex + 0x0; /* 0x0 = Vertex */
startSizeVtx = cmd++; /* TBD #1: start/size (from save) */
/* load pixel shader instructions from the shadow. */
- *cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+ *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
*cmd++ = tmp_ctx.shader_pixel + 0x1; /* 0x1 = Pixel */
startSizePix = cmd++; /* TBD #2: start/size (from save) */
/* load shared shader instructions from the shadow. */
- *cmd++ = pm4_type3_packet(PM4_IM_LOAD, 2);
+ *cmd++ = cp_type3_packet(CP_IM_LOAD, 2);
*cmd++ = tmp_ctx.shader_shared + 0x2; /* 0x2 = Shared */
startSizeShared = cmd++; /* TBD #3: start/size (from save) */
-#endif
/* create indirect buffer command for above command sequence */
create_ib1(drawctxt, drawctxt->shader_restore, restore, cmd);
@@ -1133,11 +1121,11 @@
fixup = cmd; /* start address */
/* write the shader partition information to a scratch register */
- *cmd++ = pm4_type0_packet(REG_SCRATCH_REG2, 1);
+ *cmd++ = cp_type0_packet(REG_SCRATCH_REG2, 1);
partition2 = cmd++; /* TBD #4b: partition info (from save) */
/* mask off unused bits, then OR with shader instruction memory size */
- *cmd++ = pm4_type3_packet(PM4_REG_RMW, 3);
+ *cmd++ = cp_type3_packet(CP_REG_RMW, 3);
*cmd++ = REG_SCRATCH_REG2;
/* AND off invalid bits. */
*cmd++ = 0x0FFF0FFF;
@@ -1145,7 +1133,7 @@
*cmd++ = (unsigned int)((SHADER_INSTRUCT_LOG2 - 5U) << 29);
/* write the computed value to the SET_SHADER_BASES data field */
- *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*cmd++ = REG_SCRATCH_REG2;
/* TBD #5: shader bases (to restore) */
*cmd++ = virt2gpu(shaderBases, &drawctxt->gpustate);
@@ -1157,46 +1145,44 @@
save = cmd; /* start address */
- *cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmd++ = 0;
/* fetch the SQ_INST_STORE_MANAGMENT register value,
* store the value in the data fields of the SET_CONSTANT commands
* above.
*/
- *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*cmd++ = REG_SQ_INST_STORE_MANAGMENT;
/* TBD #4a: partition info (to restore) */
*cmd++ = virt2gpu(partition1, &drawctxt->gpustate);
- *cmd++ = pm4_type3_packet(PM4_REG_TO_MEM, 2);
+ *cmd++ = cp_type3_packet(CP_REG_TO_MEM, 2);
*cmd++ = REG_SQ_INST_STORE_MANAGMENT;
/* TBD #4b: partition info (to fixup) */
*cmd++ = virt2gpu(partition2, &drawctxt->gpustate);
-#if defined(PM4_IM_STORE)
/* store the vertex shader instructions */
- *cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+ *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
*cmd++ = tmp_ctx.shader_vertex + 0x0; /* 0x0 = Vertex */
/* TBD #1: start/size (to restore) */
*cmd++ = virt2gpu(startSizeVtx, &drawctxt->gpustate);
/* store the pixel shader instructions */
- *cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+ *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
*cmd++ = tmp_ctx.shader_pixel + 0x1; /* 0x1 = Pixel */
/* TBD #2: start/size (to restore) */
*cmd++ = virt2gpu(startSizePix, &drawctxt->gpustate);
/* store the shared shader instructions if vertex base is nonzero */
- *cmd++ = pm4_type3_packet(PM4_IM_STORE, 2);
+ *cmd++ = cp_type3_packet(CP_IM_STORE, 2);
*cmd++ = tmp_ctx.shader_shared + 0x2; /* 0x2 = Shared */
/* TBD #3: start/size (to restore) */
*cmd++ = virt2gpu(startSizeShared, &drawctxt->gpustate);
-#endif
- *cmd++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
+ *cmd++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmd++ = 0;
/* create indirect buffer command for above command sequence */
@@ -1350,9 +1336,9 @@
KGSL_CTXT_INFO(device, "context flags %08x\n", context->flags);
- cmds[0] = pm4_nop_packet(1);
+ cmds[0] = cp_nop_packet(1);
cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
- cmds[2] = pm4_type3_packet(PM4_MEM_WRITE, 2);
+ cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
cmds[3] = device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
cmds[4] = (unsigned int) context;
@@ -1390,7 +1376,7 @@
}
if (adreno_is_a20x(adreno_dev)) {
- cmds[0] = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
+ cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
cmds[1] = context->bin_base_offset;
adreno_ringbuffer_issuecmds(device, 0, cmds, 2);
}
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index c62d6c7..a942f30 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -113,7 +113,7 @@
unsigned int *start,
unsigned int *end)
{
- cmd[0] = PM4_HDR_INDIRECT_BUFFER_PFD;
+ cmd[0] = CP_HDR_INDIRECT_BUFFER_PFD;
cmd[1] = virt2gpu(start, &drawctxt->gpustate);
cmd[2] = end - start;
}
diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h
index 772cd28..8aea58c 100644
--- a/drivers/gpu/msm/adreno_pm4types.h
+++ b/drivers/gpu/msm/adreno_pm4types.h
@@ -14,175 +14,175 @@
#define __ADRENO_PM4TYPES_H
-#define PM4_PKT_MASK 0xc0000000
+#define CP_PKT_MASK 0xc0000000
-#define PM4_TYPE0_PKT ((unsigned int)0 << 30)
-#define PM4_TYPE1_PKT ((unsigned int)1 << 30)
-#define PM4_TYPE2_PKT ((unsigned int)2 << 30)
-#define PM4_TYPE3_PKT ((unsigned int)3 << 30)
+#define CP_TYPE0_PKT ((unsigned int)0 << 30)
+#define CP_TYPE1_PKT ((unsigned int)1 << 30)
+#define CP_TYPE2_PKT ((unsigned int)2 << 30)
+#define CP_TYPE3_PKT ((unsigned int)3 << 30)
/* type3 packets */
/* initialize CP's micro-engine */
-#define PM4_ME_INIT 0x48
+#define CP_ME_INIT 0x48
/* skip N 32-bit words to get to the next packet */
-#define PM4_NOP 0x10
+#define CP_NOP 0x10
/* indirect buffer dispatch. prefetch parser uses this packet type to determine
* whether to pre-fetch the IB
*/
-#define PM4_INDIRECT_BUFFER 0x3f
+#define CP_INDIRECT_BUFFER 0x3f
/* indirect buffer dispatch. same as IB, but init is pipelined */
-#define PM4_INDIRECT_BUFFER_PFD 0x37
+#define CP_INDIRECT_BUFFER_PFD 0x37
/* wait for the IDLE state of the engine */
-#define PM4_WAIT_FOR_IDLE 0x26
+#define CP_WAIT_FOR_IDLE 0x26
/* wait until a register or memory location is a specific value */
-#define PM4_WAIT_REG_MEM 0x3c
+#define CP_WAIT_REG_MEM 0x3c
/* wait until a register location is equal to a specific value */
-#define PM4_WAIT_REG_EQ 0x52
+#define CP_WAIT_REG_EQ 0x52
/* wait until a register location is >= a specific value */
-#define PM4_WAT_REG_GTE 0x53
+#define CP_WAIT_REG_GTE	0x53
/* wait until a read completes */
-#define PM4_WAIT_UNTIL_READ 0x5c
+#define CP_WAIT_UNTIL_READ 0x5c
/* wait until all base/size writes from an IB_PFD packet have completed */
-#define PM4_WAIT_IB_PFD_COMPLETE 0x5d
+#define CP_WAIT_IB_PFD_COMPLETE 0x5d
/* register read/modify/write */
-#define PM4_REG_RMW 0x21
+#define CP_REG_RMW 0x21
/* reads register in chip and writes to memory */
-#define PM4_REG_TO_MEM 0x3e
+#define CP_REG_TO_MEM 0x3e
/* write N 32-bit words to memory */
-#define PM4_MEM_WRITE 0x3d
+#define CP_MEM_WRITE 0x3d
/* write CP_PROG_COUNTER value to memory */
-#define PM4_MEM_WRITE_CNTR 0x4f
+#define CP_MEM_WRITE_CNTR 0x4f
/* conditional execution of a sequence of packets */
-#define PM4_COND_EXEC 0x44
+#define CP_COND_EXEC 0x44
/* conditional write to memory or register */
-#define PM4_COND_WRITE 0x45
+#define CP_COND_WRITE 0x45
/* generate an event that creates a write to memory when completed */
-#define PM4_EVENT_WRITE 0x46
+#define CP_EVENT_WRITE 0x46
/* generate a VS|PS_done event */
-#define PM4_EVENT_WRITE_SHD 0x58
+#define CP_EVENT_WRITE_SHD 0x58
/* generate a cache flush done event */
-#define PM4_EVENT_WRITE_CFL 0x59
+#define CP_EVENT_WRITE_CFL 0x59
/* generate a z_pass done event */
-#define PM4_EVENT_WRITE_ZPD 0x5b
+#define CP_EVENT_WRITE_ZPD 0x5b
/* initiate fetch of index buffer and draw */
-#define PM4_DRAW_INDX 0x22
+#define CP_DRAW_INDX 0x22
/* draw using supplied indices in packet */
-#define PM4_DRAW_INDX_2 0x36
+#define CP_DRAW_INDX_2 0x36
/* initiate fetch of index buffer and binIDs and draw */
-#define PM4_DRAW_INDX_BIN 0x34
+#define CP_DRAW_INDX_BIN 0x34
/* initiate fetch of bin IDs and draw using supplied indices */
-#define PM4_DRAW_INDX_2_BIN 0x35
+#define CP_DRAW_INDX_2_BIN 0x35
/* begin/end initiator for viz query extent processing */
-#define PM4_VIZ_QUERY 0x23
+#define CP_VIZ_QUERY 0x23
/* fetch state sub-blocks and initiate shader code DMAs */
-#define PM4_SET_STATE 0x25
+#define CP_SET_STATE 0x25
/* load constant into chip and to memory */
-#define PM4_SET_CONSTANT 0x2d
+#define CP_SET_CONSTANT 0x2d
/* load sequencer instruction memory (pointer-based) */
-#define PM4_IM_LOAD 0x27
+#define CP_IM_LOAD 0x27
/* load sequencer instruction memory (code embedded in packet) */
-#define PM4_IM_LOAD_IMMEDIATE 0x2b
+#define CP_IM_LOAD_IMMEDIATE 0x2b
/* load constants from a location in memory */
-#define PM4_LOAD_CONSTANT_CONTEXT 0x2e
+#define CP_LOAD_CONSTANT_CONTEXT 0x2e
/* selective invalidation of state pointers */
-#define PM4_INVALIDATE_STATE 0x3b
+#define CP_INVALIDATE_STATE 0x3b
/* dynamically changes shader instruction memory partition */
-#define PM4_SET_SHADER_BASES 0x4A
+#define CP_SET_SHADER_BASES 0x4A
/* sets the 64-bit BIN_MASK register in the PFP */
-#define PM4_SET_BIN_MASK 0x50
+#define CP_SET_BIN_MASK 0x50
/* sets the 64-bit BIN_SELECT register in the PFP */
-#define PM4_SET_BIN_SELECT 0x51
+#define CP_SET_BIN_SELECT 0x51
/* updates the current context, if needed */
-#define PM4_CONTEXT_UPDATE 0x5e
+#define CP_CONTEXT_UPDATE 0x5e
/* generate interrupt from the command stream */
-#define PM4_INTERRUPT 0x40
+#define CP_INTERRUPT 0x40
/* copy sequencer instruction memory to system memory */
-#define PM4_IM_STORE 0x2c
+#define CP_IM_STORE 0x2c
/*
* for a20x
* program an offset that will added to the BIN_BASE value of
* the 3D_DRAW_INDX_BIN packet
*/
-#define PM4_SET_BIN_BASE_OFFSET 0x4B
+#define CP_SET_BIN_BASE_OFFSET 0x4B
/*
* for a22x
* sets draw initiator flags register in PFP, gets bitwise-ORed into
* every draw initiator
*/
-#define PM4_SET_DRAW_INIT_FLAGS 0x4B
+#define CP_SET_DRAW_INIT_FLAGS 0x4B
-#define PM4_SET_PROTECTED_MODE 0x5f /* sets the register protection mode */
+#define CP_SET_PROTECTED_MODE 0x5f /* sets the register protection mode */
/* packet header building macros */
-#define pm4_type0_packet(regindx, cnt) \
- (PM4_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
+#define cp_type0_packet(regindx, cnt) \
+ (CP_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
-#define pm4_type0_packet_for_sameregister(regindx, cnt) \
- ((PM4_TYPE0_PKT | (((cnt)-1) << 16) | ((1 << 15) | \
+#define cp_type0_packet_for_sameregister(regindx, cnt) \
+ ((CP_TYPE0_PKT | (((cnt)-1) << 16) | ((1 << 15) | \
((regindx) & 0x7FFF)))
-#define pm4_type1_packet(reg0, reg1) \
- (PM4_TYPE1_PKT | ((reg1) << 12) | (reg0))
+#define cp_type1_packet(reg0, reg1) \
+ (CP_TYPE1_PKT | ((reg1) << 12) | (reg0))
-#define pm4_type3_packet(opcode, cnt) \
- (PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
+#define cp_type3_packet(opcode, cnt) \
+ (CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
-#define pm4_predicated_type3_packet(opcode, cnt) \
- (PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
+#define cp_predicated_type3_packet(opcode, cnt) \
+ (CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
-#define pm4_nop_packet(cnt) \
- (PM4_TYPE3_PKT | (((cnt)-1) << 16) | (PM4_NOP << 8))
+#define cp_nop_packet(cnt) \
+ (CP_TYPE3_PKT | (((cnt)-1) << 16) | (CP_NOP << 8))
/* packet headers */
-#define PM4_HDR_ME_INIT pm4_type3_packet(PM4_ME_INIT, 18)
-#define PM4_HDR_INDIRECT_BUFFER_PFD pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)
-#define PM4_HDR_INDIRECT_BUFFER pm4_type3_packet(PM4_INDIRECT_BUFFER, 2)
+#define CP_HDR_ME_INIT cp_type3_packet(CP_ME_INIT, 18)
+#define CP_HDR_INDIRECT_BUFFER_PFD cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2)
+#define CP_HDR_INDIRECT_BUFFER cp_type3_packet(CP_INDIRECT_BUFFER, 2)
/* dword base address of the GFX decode space */
#define SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))
diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c
index fedac3d..4b25dcf 100644
--- a/drivers/gpu/msm/adreno_postmortem.c
+++ b/drivers/gpu/msm/adreno_postmortem.c
@@ -42,28 +42,28 @@
};
static const struct pm_id_name pm3_types[] = {
- {PM4_COND_EXEC, "CND_EXEC"},
- {PM4_CONTEXT_UPDATE, "CX__UPDT"},
- {PM4_DRAW_INDX, "DRW_NDX_"},
- {PM4_DRAW_INDX_BIN, "DRW_NDXB"},
- {PM4_EVENT_WRITE, "EVENT_WT"},
- {PM4_IM_LOAD, "IN__LOAD"},
- {PM4_IM_LOAD_IMMEDIATE, "IM_LOADI"},
- {PM4_IM_STORE, "IM_STORE"},
- {PM4_INDIRECT_BUFFER, "IND_BUF_"},
- {PM4_INDIRECT_BUFFER_PFD, "IND_BUFP"},
- {PM4_INTERRUPT, "PM4_INTR"},
- {PM4_INVALIDATE_STATE, "INV_STAT"},
- {PM4_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
- {PM4_ME_INIT, "ME__INIT"},
- {PM4_NOP, "PM4__NOP"},
- {PM4_REG_RMW, "REG__RMW"},
- {PM4_REG_TO_MEM, "REG2_MEM"},
- {PM4_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
- {PM4_SET_CONSTANT, "ST_CONST"},
- {PM4_SET_PROTECTED_MODE, "ST_PRT_M"},
- {PM4_SET_SHADER_BASES, "ST_SHD_B"},
- {PM4_WAIT_FOR_IDLE, "WAIT4IDL"},
+ {CP_COND_EXEC, "CND_EXEC"},
+ {CP_CONTEXT_UPDATE, "CX__UPDT"},
+ {CP_DRAW_INDX, "DRW_NDX_"},
+ {CP_DRAW_INDX_BIN, "DRW_NDXB"},
+ {CP_EVENT_WRITE, "EVENT_WT"},
+ {CP_IM_LOAD, "IN__LOAD"},
+ {CP_IM_LOAD_IMMEDIATE, "IM_LOADI"},
+ {CP_IM_STORE, "IM_STORE"},
+ {CP_INDIRECT_BUFFER, "IND_BUF_"},
+ {CP_INDIRECT_BUFFER_PFD, "IND_BUFP"},
+ {CP_INTERRUPT, "PM4_INTR"},
+ {CP_INVALIDATE_STATE, "INV_STAT"},
+ {CP_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
+ {CP_ME_INIT, "ME__INIT"},
+ {CP_NOP, "PM4__NOP"},
+ {CP_REG_RMW, "REG__RMW"},
+ {CP_REG_TO_MEM, "REG2_MEM"},
+ {CP_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
+ {CP_SET_CONSTANT, "ST_CONST"},
+ {CP_SET_PROTECTED_MODE, "ST_PRT_M"},
+ {CP_SET_SHADER_BASES, "ST_SHD_B"},
+ {CP_WAIT_FOR_IDLE, "WAIT4IDL"},
};
/* Offset address pairs: start, end of range to dump (inclusive) */
@@ -173,14 +173,14 @@
if (adreno_is_pm4_len(word) > 16)
return 0;
- if ((word & (3<<30)) == PM4_TYPE0_PKT) {
+ if ((word & (3<<30)) == CP_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return 1;
}
return 0;
}
- if ((word & (3<<30)) == PM4_TYPE3_PKT) {
+ if ((word & (3<<30)) == CP_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return 1;
@@ -197,14 +197,14 @@
if (word == INVALID_RB_CMD)
return "--------";
- if ((word & (3<<30)) == PM4_TYPE0_PKT) {
+ if ((word & (3<<30)) == CP_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return pm0_types[i].name;
}
return "????????";
}
- if ((word & (3<<30)) == PM4_TYPE3_PKT) {
+ if ((word & (3<<30)) == CP_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return pm3_types[i].name;
@@ -288,7 +288,7 @@
for (i = 0; i+3 < ib1_size; ) {
value = ib1_addr[i++];
- if (value == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
+ if (value == cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2)) {
uint32_t ib2_base = ib1_addr[i++];
uint32_t ib2_size = ib1_addr[i++];
@@ -701,7 +701,7 @@
i = 0;
for (read_idx = 0; read_idx < num_item; ) {
uint32_t this_cmd = rb_copy[read_idx++];
- if (this_cmd == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
+ if (this_cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2)) {
uint32_t ib_addr = rb_copy[read_idx++];
uint32_t ib_size = rb_copy[read_idx++];
dump_ib1(device, cur_pt_base, (read_idx-3)<<2, ib_addr,
@@ -711,7 +711,7 @@
ib_list.offsets[i],
ib_list.bases[i],
ib_list.sizes[i], 0);
- } else if (this_cmd == pm4_type0_packet(MH_MMU_PT_BASE, 1)) {
+ } else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1)) {
KGSL_LOG_DUMP(device, "Current pagetable: %x\t"
"pagetable base: %x\n",
@@ -743,8 +743,8 @@
if (adreno_ib_dump_enabled()) {
for (read_idx = 64; read_idx >= 0; --read_idx) {
uint32_t this_cmd = rb_copy[read_idx];
- if (this_cmd == pm4_type3_packet(
- PM4_INDIRECT_BUFFER_PFD, 2)) {
+ if (this_cmd == cp_type3_packet(
+ CP_INDIRECT_BUFFER_PFD, 2)) {
uint32_t ib_addr = rb_copy[read_idx+1];
uint32_t ib_size = rb_copy[read_idx+2];
if (cp_ib1_bufsz && cp_ib1_base == ib_addr) {
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 1d44d20..ee123fb 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -74,7 +74,7 @@
cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;
- GSL_RB_WRITE(cmds, cmds_gpu, pm4_nop_packet(nopcount));
+ GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));
/* Make sure that rptr is not 0 before submitting
* commands at the end of ringbuffer. We do not
@@ -352,7 +352,7 @@
cmds = adreno_ringbuffer_allocspace(rb, 19);
cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
- GSL_RB_WRITE(cmds, cmds_gpu, PM4_HDR_ME_INIT);
+ GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT);
/* All fields present (bits 9:0) */
GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
/* Disable/Enable Real-Time Stream processing (present but ignored) */
@@ -499,13 +499,13 @@
+ sizeof(uint)*(rb->wptr-total_sizedwords);
if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
- GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
}
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* disable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
- pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
}
@@ -517,7 +517,7 @@
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* re-enable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
- pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
}
@@ -525,9 +525,9 @@
timestamp = rb->timestamp;
/* start-of-pipeline and end-of-pipeline timestamps */
- GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
- GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
(rb->device->memstore.gpuaddr +
@@ -537,7 +537,7 @@
if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
/* Conditional execution based on memory values */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
- pm4_type3_packet(PM4_COND_EXEC, 4));
+ cp_type3_packet(CP_COND_EXEC, 4));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
@@ -546,7 +546,7 @@
/* # of conditional command DWORDs */
GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
- pm4_type3_packet(PM4_INTERRUPT, 1));
+ cp_type3_packet(CP_INTERRUPT, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
}
@@ -611,7 +611,7 @@
(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);
- *cmds++ = PM4_HDR_INDIRECT_BUFFER_PFD;
+ *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
*cmds++ = ibdesc[i].gpuaddr;
*cmds++ = ibdesc[i].sizedwords;
}
@@ -691,9 +691,9 @@
kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
/* match the pattern found at the end of a command */
if ((val1 == 2 &&
- val2 == pm4_type3_packet(PM4_INTERRUPT, 1)
+ val2 == cp_type3_packet(CP_INTERRUPT, 1)
&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
- (val1 == pm4_type3_packet(PM4_EVENT_WRITE, 3)
+ (val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
&& val2 == CACHE_FLUSH_TS &&
val3 == (rb->device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
@@ -735,7 +735,7 @@
kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size));
- if (val1 == pm4_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
+ if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
KGSL_DRV_ERR(device,
"GPU recovery from hang not possible because "
"of hang in kgsl command\n");
@@ -755,7 +755,7 @@
kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
- BUG_ON(value != pm4_type3_packet(PM4_MEM_WRITE, 2));
+ BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
@@ -778,14 +778,14 @@
* commands can be executed */
if (value != cur_context) {
copy_rb_contents = 1;
- temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
+ temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
temp_rb_buffer[temp_idx++] =
KGSL_CMD_IDENTIFIER;
- temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
+ temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
temp_rb_buffer[temp_idx++] =
KGSL_CONTEXT_TO_MEM_IDENTIFIER;
temp_rb_buffer[temp_idx++] =
- pm4_type3_packet(PM4_MEM_WRITE, 2);
+ cp_type3_packet(CP_MEM_WRITE, 2);
temp_rb_buffer[temp_idx++] = val1;
temp_rb_buffer[temp_idx++] = value;
} else {