gpu: msm2: Sync to upstream
* Sync to android-msm-3.4-flo-lollipop-release
Change-Id: Ia0df71c0a1f102de034860f4d212a05c806fef7d
diff --git a/drivers/gpu/msm2/adreno_ringbuffer.c b/drivers/gpu/msm2/adreno_ringbuffer.c
index 9e69a49..6b0eb73 100644
--- a/drivers/gpu/msm2/adreno_ringbuffer.c
+++ b/drivers/gpu/msm2/adreno_ringbuffer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
+#include "kgsl_trace.h"
#include "adreno.h"
#include "adreno_pm4types.h"
@@ -30,14 +31,6 @@
#define GSL_RB_NOP_SIZEDWORDS 2
-/*
- * CP DEBUG settings for all cores:
- * DYNAMIC_CLK_DISABLE [27] - turn off the dynamic clock control
- * PROG_END_PTR_ENABLE [25] - Allow 128 bit writes to the VBIF
- */
-
-#define CP_DEBUG_DEFAULT ((1 << 27) | (1 << 25))
-
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
@@ -215,28 +208,19 @@
* adreno_ringbuffer_load_pm4_ucode() - Load pm4 ucode
* @device: Pointer to a KGSL device
* @start: Starting index in pm4 ucode to load
+ * @end: Ending index of pm4 ucode to load
* @addr: Address to load the pm4 ucode
*
* Load the pm4 ucode from @start at @addr.
*/
-int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device,
- unsigned int start, unsigned int addr)
+inline int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device,
+ unsigned int start, unsigned int end, unsigned int addr)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i;
- if (adreno_dev->pm4_fw == NULL) {
- int ret = adreno_ringbuffer_read_pm4_ucode(device);
- if (ret)
- return ret;
- }
-
- KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
- adreno_dev->pm4_fw_version);
-
- adreno_writereg(adreno_dev, ADRENO_REG_CP_DEBUG, CP_DEBUG_DEFAULT);
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_RAM_WADDR, addr);
- for (i = 1; i < adreno_dev->pm4_fw_size; i++)
+ for (i = start; i < end; i++)
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_RAM_DATA,
adreno_dev->pm4_fw[i]);
@@ -278,27 +262,19 @@
* adreno_ringbuffer_load_pfp_ucode() - Load pfp ucode
* @device: Pointer to a KGSL device
* @start: Starting index in pfp ucode to load
+ * @end: Ending index of pfp ucode to load
* @addr: Address to load the pfp ucode
*
* Load the pfp ucode from @start at @addr.
*/
-int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device,
- unsigned int start, unsigned int addr)
+inline int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device,
+ unsigned int start, unsigned int end, unsigned int addr)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i;
- if (adreno_dev->pfp_fw == NULL) {
- int ret = adreno_ringbuffer_read_pfp_ucode(device);
- if (ret)
- return ret;
- }
-
- KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
- adreno_dev->pfp_fw_version);
-
adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_ADDR, addr);
- for (i = 1; i < adreno_dev->pfp_fw_size; i++)
+ for (i = start; i < end; i++)
adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_DATA,
adreno_dev->pfp_fw[i]);
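
Note: the ranged loaders now write ucode dwords in [start, end) over AHB; dword 0 of each image is a bookkeeping word rather than microcode, which is why loading has historically started at index 1. A minimal sketch of the two call patterns this patch uses later (all names taken from this diff):

    /* Warm start: reload only the jump tables over AHB */
    adreno_ringbuffer_load_pm4_ucode(device, adreno_dev->pm4_jt_idx,
            adreno_dev->pm4_fw_size, adreno_dev->pm4_jt_addr);
    adreno_ringbuffer_load_pfp_ucode(device, adreno_dev->pfp_jt_idx,
            adreno_dev->pfp_fw_size, adreno_dev->pfp_jt_addr);

    /* Cold start without bootstrap: load the full images */
    adreno_ringbuffer_load_pm4_ucode(device, 1, adreno_dev->pm4_fw_size, 0);
    adreno_ringbuffer_load_pfp_ucode(device, 1, adreno_dev->pfp_fw_size, 0);
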
@@ -306,72 +282,113 @@
}
/**
- * _ringbuffer_start_common() - Ringbuffer start
+ * _ringbuffer_bootstrap_ucode() - Bootstrap GPU Ucode
+ * @rb: Pointer to adreno ringbuffer
+ * @load_jt: If non-zero, load only the jump tables
+ *
+ * Bootstrap ucode for GPU
+ * load_jt == 0, bootstrap full microcode
+ * load_jt == 1, bootstrap jump tables of microcode
+ *
+ * For example, a bootstrap packet would look like the below:
+ * Setup a type3 bootstrap packet
+ * PFP size to bootstrap
+ * PFP addr to write the PFP data
+ * PM4 size to bootstrap
+ * PM4 addr to write the PM4 data
+ * PFP dwords from microcode to bootstrap
+ * PM4 dwords from microcode to bootstrap
+ */
+static int _ringbuffer_bootstrap_ucode(struct adreno_ringbuffer *rb,
+ unsigned int load_jt)
+{
+ unsigned int *cmds, cmds_gpu, bootstrap_size;
+ int i = 0;
+ struct kgsl_device *device = rb->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ unsigned int pm4_size, pm4_idx, pm4_addr, pfp_size, pfp_idx, pfp_addr;
+
+ /* Bootstrap only the jump tables of the ucode */
+ if (load_jt) {
+ pm4_idx = adreno_dev->pm4_jt_idx;
+ pm4_addr = adreno_dev->pm4_jt_addr;
+ pfp_idx = adreno_dev->pfp_jt_idx;
+ pfp_addr = adreno_dev->pfp_jt_addr;
+ } else {
+ /* Bootstrap full ucode */
+ pm4_idx = 1;
+ pm4_addr = 0;
+ pfp_idx = 1;
+ pfp_addr = 0;
+ }
+
+ pm4_size = (adreno_dev->pm4_fw_size - pm4_idx);
+ pfp_size = (adreno_dev->pfp_fw_size - pfp_idx);
+
+ /*
+ * The two register writes below tell the PFP that 0x6f is the
+ * opcode for bootstrapping
+ */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_ADDR, 0x200);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PFP_UCODE_DATA, 0x6f0005);
+
+ /* clear ME_HALT to start micro engine */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0);
+
+ bootstrap_size = (pm4_size + pfp_size + 5);
+
+ cmds = adreno_ringbuffer_allocspace(rb, NULL, bootstrap_size);
+ if (cmds == NULL)
+ return -ENOMEM;
+
+ cmds_gpu = rb->buffer_desc.gpuaddr +
+ sizeof(uint) * (rb->wptr - bootstrap_size);
+ /* Construct the packet that bootstraps the ucode */
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu,
+ cp_type3_packet(CP_BOOTSTRAP_UCODE,
+ (bootstrap_size - 1)));
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, pfp_size);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, pfp_addr);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, pm4_size);
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, pm4_addr);
+ for (i = pfp_idx; i < adreno_dev->pfp_fw_size; i++)
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, adreno_dev->pfp_fw[i]);
+ for (i = pm4_idx; i < adreno_dev->pm4_fw_size; i++)
+ GSL_RB_WRITE(rb->device, cmds, cmds_gpu, adreno_dev->pm4_fw[i]);
+
+ adreno_ringbuffer_submit(rb);
+ /* idle device to validate bootstrap */
+ return adreno_idle(device);
+}
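
Note: for illustration, the dword layout the GSL_RB_WRITE sequence above produces, with hypothetical sizes pfp_size = 1000 and pm4_size = 2000 (so bootstrap_size = 3005):

    /*
     * dword 0           cp_type3_packet(CP_BOOTSTRAP_UCODE, 3004)
     * dword 1           1000          (pfp_size)
     * dword 2           pfp_addr      (0 for a full bootstrap)
     * dword 3           2000          (pm4_size)
     * dword 4           pm4_addr      (0 for a full bootstrap)
     * dwords 5..1004    pfp_fw[pfp_idx..]
     * dwords 1005..3004 pm4_fw[pm4_idx..]
     */
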
+
+/**
+ * _ringbuffer_setup_common() - Ringbuffer setup
* @rb: Pointer to adreno ringbuffer
*
* Setup ringbuffer for GPU.
*/
-int _ringbuffer_start_common(struct adreno_ringbuffer *rb)
+void _ringbuffer_setup_common(struct adreno_ringbuffer *rb)
{
- int status;
- union reg_cp_rb_cntl cp_rb_cntl;
- unsigned int rb_cntl;
struct kgsl_device *device = rb->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- if (rb->flags & KGSL_FLAGS_STARTED)
- return 0;
-
- kgsl_sharedmem_set(rb->device, &rb->memptrs_desc, 0, 0,
- sizeof(struct kgsl_rbmemptrs));
-
kgsl_sharedmem_set(rb->device, &rb->buffer_desc, 0, 0xAA,
(rb->sizedwords << 2));
- if (adreno_is_a2xx(adreno_dev)) {
- kgsl_regwrite(device, REG_CP_RB_WPTR_BASE,
- (rb->memptrs_desc.gpuaddr
- + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
-
- /* setup WPTR delay */
- kgsl_regwrite(device, REG_CP_RB_WPTR_DELAY,
- 0 /*0x70000010 */);
- }
-
- /*setup REG_CP_RB_CNTL */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_CNTL, &rb_cntl);
- cp_rb_cntl.val = rb_cntl;
-
/*
* The size of the ringbuffer in the hardware is the log2
- * representation of the size in quadwords (sizedwords / 2)
+ * representation of the size in quadwords (sizedwords / 2).
+ * Also disable the host RPTR shadow register as it might be unreliable
+ * in certain circumstances.
*/
- cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
- /*
- * Specify the quadwords to read before updating mem RPTR.
- * Like above, pass the log2 representation of the blocksize
- * in quadwords.
- */
- cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
-
- if (adreno_is_a2xx(adreno_dev)) {
- /* WPTR polling */
- cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
- }
-
- /* mem RPTR writebacks */
- cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;
-
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL, cp_rb_cntl.val);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
+ (ilog2(rb->sizedwords >> 1) & 0x3F) |
+ (1 << 27));
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
rb->buffer_desc.gpuaddr);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR,
- rb->memptrs_desc.gpuaddr +
- GSL_RB_MEMPTRS_RPTR_OFFSET);
-
if (adreno_is_a2xx(adreno_dev)) {
/* explicitly clear all cp interrupts */
kgsl_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
@@ -394,6 +411,19 @@
kgsl_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x003E2008);
rb->wptr = 0;
+}
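
Note: a worked example of the CP_RB_CNTL value written above, for a hypothetical ring of 16384 dwords:

    /*
     * quadwords  = 16384 >> 1              = 8192
     * rb_bufsz   = ilog2(8192)             = 13
     * CP_RB_CNTL = (13 & 0x3F) | (1 << 27) = 0x0800000D
     *
     * Bit 27 is the "no update" bit that disables the host RPTR
     * shadow writeback mentioned in the comment above.
     */
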
+
+/**
+ * _ringbuffer_start_common() - Ringbuffer start
+ * @rb: Pointer to adreno ringbuffer
+ *
+ * Start ringbuffer for GPU.
+ */
+int _ringbuffer_start_common(struct adreno_ringbuffer *rb)
+{
+ int status;
+ struct kgsl_device *device = rb->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/* clear ME_HALT to start micro engine */
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, 0);
@@ -425,39 +455,99 @@
struct kgsl_device *device = rb->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- /* load the CP ucode */
- status = adreno_ringbuffer_load_pm4_ucode(device,
- adreno_dev->pm4_jt_idx, adreno_dev->pm4_jt_addr);
- if (status != 0)
- return status;
+ if (rb->flags & KGSL_FLAGS_STARTED)
+ return 0;
- /* load the prefetch parser ucode */
- status = adreno_ringbuffer_load_pfp_ucode(device,
- adreno_dev->pfp_jt_idx, adreno_dev->pfp_jt_addr);
- if (status != 0)
- return status;
+ _ringbuffer_setup_common(rb);
- return _ringbuffer_start_common(rb);
+ /* If bootstrapping is supported, use it to load the jump tables */
+ if (adreno_bootstrap_ucode(adreno_dev)) {
+ status = _ringbuffer_bootstrap_ucode(rb, 1);
+ if (status != 0)
+ return status;
+
+ } else {
+ /* load the CP jump tables using AHB writes */
+ status = adreno_ringbuffer_load_pm4_ucode(device,
+ adreno_dev->pm4_jt_idx, adreno_dev->pm4_fw_size,
+ adreno_dev->pm4_jt_addr);
+ if (status != 0)
+ return status;
+
+ /* load the prefetch parser jump tables using AHB writes */
+ status = adreno_ringbuffer_load_pfp_ucode(device,
+ adreno_dev->pfp_jt_idx, adreno_dev->pfp_fw_size,
+ adreno_dev->pfp_jt_addr);
+ if (status != 0)
+ return status;
+ }
+
+ status = _ringbuffer_start_common(rb);
+
+ return status;
}
-int adreno_ringbuffer_start(struct adreno_ringbuffer *rb)
+/**
+ * adreno_ringbuffer_cold_start() - Ringbuffer cold start
+ * @rb: Pointer to adreno ringbuffer
+ *
+ * Start the ringbuffer from power collapse.
+ */
+int adreno_ringbuffer_cold_start(struct adreno_ringbuffer *rb)
{
int status;
+ struct kgsl_device *device = rb->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
if (rb->flags & KGSL_FLAGS_STARTED)
return 0;
- /* load the CP ucode */
- status = adreno_ringbuffer_load_pm4_ucode(rb->device, 1, 0);
- if (status != 0)
- return status;
+ _ringbuffer_setup_common(rb);
- /* load the prefetch parser ucode */
- status = adreno_ringbuffer_load_pfp_ucode(rb->device, 1, 0);
- if (status != 0)
- return status;
+ /* If bootstrapping is supported, use it to load the ucode */
+ if (adreno_bootstrap_ucode(adreno_dev)) {
- return _ringbuffer_start_common(rb);
+ /*
+ * Load the first adreno_dev->pm4_bstrp_size +
+ * adreno_dev->pfp_bstrp_size microcode dwords using AHB writes.
+ * This small bootstrap microcode contains the dispatcher and
+ * booter; it is just enough for the CP to understand the
+ * CP_BOOTSTRAP_UCODE packet issued by _ringbuffer_bootstrap_ucode,
+ * which then loads the rest of the microcode.
+ */
+
+ status = adreno_ringbuffer_load_pm4_ucode(rb->device, 1,
+ adreno_dev->pm4_bstrp_size+1, 0);
+ if (status != 0)
+ return status;
+
+ status = adreno_ringbuffer_load_pfp_ucode(rb->device, 1,
+ adreno_dev->pfp_bstrp_size+1, 0);
+ if (status != 0)
+ return status;
+
+ /* Bootstrap the rest of the ucode */
+ status = _ringbuffer_bootstrap_ucode(rb, 0);
+ if (status != 0)
+ return status;
+
+ } else {
+ /* load the CP ucode using AHB writes */
+ status = adreno_ringbuffer_load_pm4_ucode(rb->device, 1,
+ adreno_dev->pm4_fw_size, 0);
+ if (status != 0)
+ return status;
+
+ /* load the prefetch parser ucode using AHB writes */
+ status = adreno_ringbuffer_load_pfp_ucode(rb->device, 1,
+ adreno_dev->pfp_fw_size, 0);
+ if (status != 0)
+ return status;
+ }
+
+ status = _ringbuffer_start_common(rb);
+
+ return status;
}
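
Note: the two start paths now share the same skeleton (the warm-start function's name falls outside the hunk context above):

    /*
     * warm start: _ringbuffer_setup_common()
     *             bootstrap? _ringbuffer_bootstrap_ucode(rb, 1)   jump tables
     *                      : AHB-load pm4/pfp jump tables
     *             _ringbuffer_start_common()
     *
     * cold start: _ringbuffer_setup_common()
     *             bootstrap? AHB-load dispatcher+booter stub, then
     *                        _ringbuffer_bootstrap_ucode(rb, 0)   full image
     *                      : AHB-load full pm4/pfp images
     *             _ringbuffer_start_common()
     */
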
void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
@@ -497,20 +587,6 @@
return status;
}
- /* allocate memory for polling and timestamps */
- /* This really can be at 4 byte alignment boundry but for using MMU
- * we need to make it at page boundary */
- status = kgsl_allocate_contiguous(&rb->memptrs_desc,
- sizeof(struct kgsl_rbmemptrs));
-
- if (status != 0) {
- adreno_ringbuffer_close(rb);
- return status;
- }
-
- /* overlay structure on memptrs memory */
- rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;
-
rb->global_ts = 0;
return 0;
@@ -521,7 +597,6 @@
struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
kgsl_sharedmem_free(&rb->buffer_desc);
- kgsl_sharedmem_free(&rb->memptrs_desc);
kfree(adreno_dev->pfp_fw);
kfree(adreno_dev->pm4_fw);
@@ -578,7 +653,8 @@
total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
/* Add two dwords for the CP_INTERRUPT */
- total_sizedwords += drawctxt ? 2 : 0;
+ total_sizedwords +=
+ (drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) ? 2 : 0;
/* context rollover */
if (adreno_is_a3xx(adreno_dev))
@@ -594,6 +670,9 @@
total_sizedwords += 3; /* sop timestamp */
total_sizedwords += 4; /* eop timestamp */
+ if (adreno_is_a20x(adreno_dev))
+ total_sizedwords += 2; /* CACHE_FLUSH */
+
if (drawctxt) {
total_sizedwords += 3; /* global timestamp without cache
* flush for non-zero context */
@@ -607,7 +686,7 @@
/* Add space for the power on shader fixup if we need it */
if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP)
- total_sizedwords += 5;
+ total_sizedwords += 9;
ringcmds = adreno_ringbuffer_allocspace(rb, drawctxt, total_sizedwords);
@@ -629,6 +708,11 @@
}
if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) {
+ /* Disable protected mode for the fixup */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 0);
+
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, cp_nop_packet(1));
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
KGSL_PWRON_FIXUP_IDENTIFIER);
@@ -638,6 +722,11 @@
adreno_dev->pwron_fixup.gpuaddr);
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
adreno_dev->pwron_fixup_dwords);
+
+ /* Re-enable protected mode */
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, 1);
}
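
Note: the fixup block now emits 9 dwords, matching the total_sizedwords bump from 5 to 9 earlier in this patch (the indirect-buffer packet header is only partly visible in the hunk context):

    /*
     * CP_SET_PROTECTED_MODE, 0               2 dwords  (disable)
     * CP_NOP + KGSL_PWRON_FIXUP_IDENTIFIER   2 dwords
     * indirect buffer (gpuaddr + dwords)     3 dwords
     * CP_SET_PROTECTED_MODE, 1               2 dwords  (re-enable)
     *                                        9 dwords total
     */
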
/* start-of-pipeline timestamp */
@@ -709,7 +798,8 @@
GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, (gpuaddr +
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
eoptimestamp)));
- GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu, rb->global_ts);
+ GSL_RB_WRITE(rb->device, ringcmds, rcmd_gpu,
+ rb->global_ts);
}
if (adreno_is_a20x(adreno_dev)) {
@@ -1014,7 +1104,7 @@
timestamp);
if (ret)
- KGSL_DRV_ERR(device, "adreno_dispatcher_queue_cmd returned %d\n",
+ KGSL_DRV_ERR(device, "adreno_context_queue_cmd returned %d\n",
ret);
else {
/*
@@ -1033,9 +1123,89 @@
}
}
+ /*
+ * Return -EPROTO if the device has faulted since the last time we
+ * checked - userspace uses this to perform post-fault activities
+ */
+ if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &drawctxt->priv))
+ ret = -EPROTO;
+
return ret;
}
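
Note: a hedged sketch of the userspace side of the new -EPROTO contract; the ioctl name and the recovery hook are illustrative, not taken from this patch:

    /*
     * Hypothetical client code: a failed submit with errno == EPROTO
     * means the GPU faulted since the last submission, so lost state
     * can be re-created before resubmitting.
     */
    if (ioctl(fd, IOCTL_KGSL_SUBMIT_COMMANDS, &cmds) == -1 &&
            errno == EPROTO)
        recreate_gpu_state();   /* hypothetical recovery hook */
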
+unsigned int adreno_ringbuffer_get_constraint(struct kgsl_device *device,
+ struct kgsl_context *context)
+{
+ unsigned int pwrlevel = device->pwrctrl.active_pwrlevel;
+
+ switch (context->pwr_constraint.type) {
+ case KGSL_CONSTRAINT_PWRLEVEL: {
+ switch (context->pwr_constraint.sub_type) {
+ case KGSL_CONSTRAINT_PWR_MAX:
+ pwrlevel = device->pwrctrl.max_pwrlevel;
+ break;
+ case KGSL_CONSTRAINT_PWR_MIN:
+ pwrlevel = device->pwrctrl.min_pwrlevel;
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+
+ }
+
+ return pwrlevel;
+}
+
+void adreno_ringbuffer_set_constraint(struct kgsl_device *device,
+ struct kgsl_cmdbatch *cmdbatch)
+{
+ unsigned int constraint;
+ struct kgsl_context *context = cmdbatch->context;
+ /*
+ * Check if the context has a constraint and constraint flags are
+ * set.
+ */
+ if (context->pwr_constraint.type &&
+ ((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) ||
+ (cmdbatch->flags & KGSL_CONTEXT_PWR_CONSTRAINT))) {
+
+ constraint = adreno_ringbuffer_get_constraint(device, context);
+
+ /*
+ * If a constraint is already set, set a new constraint only
+ * if it is faster. If the requested constraint is the same
+ * as the current one, update ownership and timestamp.
+ */
+ if ((device->pwrctrl.constraint.type ==
+ KGSL_CONSTRAINT_NONE) || (constraint <
+ device->pwrctrl.constraint.hint.pwrlevel.level)) {
+
+ kgsl_pwrctrl_pwrlevel_change(device, constraint);
+ device->pwrctrl.constraint.type =
+ context->pwr_constraint.type;
+ device->pwrctrl.constraint.hint.
+ pwrlevel.level = constraint;
+ device->pwrctrl.constraint.owner_id = context->id;
+ device->pwrctrl.constraint.expires = jiffies +
+ device->pwrctrl.interval_timeout;
+ /* Trace the constraint being set by the driver */
+ trace_kgsl_constraint(device,
+ device->pwrctrl.constraint.type,
+ constraint, 1);
+ } else if ((device->pwrctrl.constraint.type ==
+ context->pwr_constraint.type) &&
+ (device->pwrctrl.constraint.hint.pwrlevel.level ==
+ constraint)) {
+ device->pwrctrl.constraint.owner_id = context->id;
+ device->pwrctrl.constraint.expires = jiffies +
+ device->pwrctrl.interval_timeout;
+ }
+ }
+
+}
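
Note: a brief worked example of the constraint policy above, assuming hypothetical power levels where a lower number is faster (active = 3, max_pwrlevel = 1, min_pwrlevel = 5):

    /*
     * ctx A requests KGSL_CONSTRAINT_PWR_MAX -> constraint = 1
     *   no constraint is currently set, so switch to level 1 and
     *   record owner_id, expires, and a trace event
     * ctx B requests KGSL_CONSTRAINT_PWR_MIN -> constraint = 5
     *   5 < 1 is false, so the slower request is ignored
     * ctx A repeats the same request
     *   type and level match, so only owner_id/expires are refreshed
     */
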
+
/* adreno_rindbuffer_submitcmd - submit userspace IBs to the GPU */
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_cmdbatch *cmdbatch)
@@ -1062,7 +1232,7 @@
commands are stored in the first node of the IB chain. We can skip that
if a context switch hasn't occured */
- if ((drawctxt->flags & CTXT_FLAGS_PREAMBLE) &&
+ if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) &&
!test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
(adreno_dev->drawctxt_active == drawctxt))
start_index = 1;
@@ -1143,6 +1313,9 @@
test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv))
flags |= KGSL_CMD_FLAGS_PWRON_FIXUP;
+ /* Set the constraints before adding to ringbuffer */
+ adreno_ringbuffer_set_constraint(device, cmdbatch);
+
ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
drawctxt,
flags,
@@ -1164,7 +1337,7 @@
device->pwrctrl.irq_last = 0;
kgsl_trace_issueibcmds(device, context->id, cmdbatch,
cmdbatch->timestamp, cmdbatch->flags, ret,
- drawctxt ? drawctxt->type : 0);
+ drawctxt->type);
kfree(link);
return ret;