msm-3.4 (commit 35cca8ba3ee0e6a2085dbcac48fb2ccbaa72ba98) video/gpu/iommu .. and all the hacks that go with it
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 179027c..a4bb4fa 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -18,6 +18,7 @@
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
+#include "kgsl_trace.h"
#include "adreno.h"
#include "adreno_pm4types.h"
@@ -319,7 +320,7 @@
return 0;
}
-int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
+int adreno_ringbuffer_start(struct adreno_ringbuffer *rb)
{
int status;
/*cp_rb_cntl_u cp_rb_cntl; */
@@ -331,9 +332,6 @@
if (rb->flags & KGSL_FLAGS_STARTED)
return 0;
- if (init_ram)
- rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;
-
kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
sizeof(struct kgsl_rbmemptrs));
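With the init_ram parameter gone, adreno_ringbuffer_start() no longer zeroes the global timestamp itself; any caller that used to pass init_ram = 1 now has to reset it before starting the ring. A minimal sketch of that caller-side responsibility, reusing the fields seen in this hunk (the wrapper function itself is hypothetical):

	/* Hypothetical caller: reset the global timestamp on a cold
	 * start, now that adreno_ringbuffer_start() won't do it.
	 */
	static int sketch_cold_start(struct adreno_device *adreno_dev)
	{
		struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

		rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;
		return adreno_ringbuffer_start(rb);
	}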
@@ -433,8 +431,11 @@
return status;
/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
- if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
+ if (adreno_is_a305(adreno_dev) || adreno_is_a305c(adreno_dev) ||
+ adreno_is_a320(adreno_dev))
adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000E0602);
+ else if (adreno_is_a330(adreno_dev) || adreno_is_a305b(adreno_dev))
+ adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x003E2008);
rb->rptr = 0;
rb->wptr = 0;
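The new a305c/a305b/a330 branches rely on chip-detection predicates. These are conventionally thin chip-ID comparisons; a sketch of the pattern, where both the helper name and the packed ID value are placeholders, not taken from this patch:

	/* Sketch of an adreno_is_*() predicate: the chip_id encoding
	 * (core/major/minor/patchlevel packed into one word) and the
	 * example value below are assumptions.
	 */
	static inline int sketch_is_a305c(unsigned int chip_id)
	{
		return chip_id == 0x03000500;	/* placeholder packed ID */
	}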
@@ -443,7 +444,9 @@
adreno_regwrite(device, REG_CP_ME_CNTL, 0);
/* ME init is GPU specific, so jump into the sub-function */
- adreno_dev->gpudev->rb_init(adreno_dev, rb);
+ status = adreno_dev->gpudev->rb_init(adreno_dev, rb);
+ if (status)
+ return status;
/* idle device to validate ME INIT */
status = adreno_idle(device);
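Propagating the rb_init() return value only works if the per-GPU hook is fallible; this implies the adreno_gpudev table entry is (or becomes) an int-returning function pointer. A sketch, with the struct abbreviated to the one member this hunk relies on:

	/* Sketch: the per-GPU dispatch table with a fallible rb_init.
	 * Other members elided.
	 */
	struct adreno_gpudev {
		int (*rb_init)(struct adreno_device *adreno_dev,
				struct adreno_ringbuffer *rb);
		/* ... */
	};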
@@ -481,6 +484,7 @@
*/
rb->sizedwords = KGSL_RB_SIZE >> 2;
+ rb->buffer_desc.flags = KGSL_MEMFLAGS_GPUREADONLY;
/* allocate memory for ringbuffer */
status = kgsl_allocate_contiguous(&rb->buffer_desc,
(rb->sizedwords << 2));
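Setting KGSL_MEMFLAGS_GPUREADONLY before the allocation means the ringbuffer gets mapped into the GPU's address space without write permission: the CPU writes commands through its own mapping, the GPU only fetches them, and a stray GPU write can no longer corrupt the ring. A sketch of how such a flag commonly translates into IOMMU protection bits (the helper is hypothetical; IOMMU_READ/IOMMU_WRITE are the stock Linux IOMMU prot flags):

	#include <linux/iommu.h>

	/* Hypothetical helper: derive IOMMU prot bits from the
	 * memdesc flags when mapping a buffer for the GPU.
	 */
	static int sketch_memdesc_prot(unsigned int flags)
	{
		int prot = IOMMU_READ;

		if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
			prot |= IOMMU_WRITE;
		return prot;
	}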
@@ -564,6 +568,8 @@
total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
/* 2 dwords to store the start of command sequence */
total_sizedwords += 2;
+ /* internal ib command identifier for the ringbuffer */
+ total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
/* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
total_sizedwords += context ? 13 : 0;
@@ -571,17 +577,25 @@
if (adreno_is_a3xx(adreno_dev))
total_sizedwords += 7;
+ if (adreno_is_a2xx(adreno_dev))
+ total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
+
total_sizedwords += 2; /* scratchpad ts for fault tolerance */
+ total_sizedwords += 3; /* sop timestamp */
+ total_sizedwords += 4; /* eop timestamp */
+
if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS &&
!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
- total_sizedwords += 3; /* sop timestamp */
- total_sizedwords += 4; /* eop timestamp */
total_sizedwords += 3; /* global timestamp without cache
* flush for non-zero context */
- } else {
- total_sizedwords += 4; /* global timestamp for fault tolerance*/
}
+ if (adreno_is_a20x(adreno_dev))
+ total_sizedwords += 2; /* CACHE_FLUSH */
+
+ if (flags & KGSL_CMD_FLAGS_EOF)
+ total_sizedwords += 2;
+
ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
if (!ringcmds) {
/*
@@ -597,23 +611,9 @@
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
- if (flags & KGSL_CMD_FLAGS_PMODE) {
- /* disable protected mode error checking */
- GSL_RB_WRITE(ringcmds, rcmd_gpu,
- cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
- GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
- }
-
- for (i = 0; i < sizedwords; i++) {
- GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
- cmds++;
- }
-
- if (flags & KGSL_CMD_FLAGS_PMODE) {
- /* re-enable protected mode error checking */
- GSL_RB_WRITE(ringcmds, rcmd_gpu,
- cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
- GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
+ if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_INTERNAL_IDENTIFIER);
}
/* always increment the global timestamp. once. */
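Every ring entry now opens with a NOP-wrapped KGSL_CMD_IDENTIFIER, and driver-internal submissions add a second NOP carrying KGSL_CMD_INTERNAL_IDENTIFIER, which is why total_sizedwords reserved two extra dwords for KGSL_CMD_FLAGS_INTERNAL_ISSUE earlier: the dword budget and the writes must stay in lockstep or the ring allocation is wrong. The markers ride in PM4 type-3 NOP packets; the helpers conventionally look like the sketch below (bit layout quoted from memory from adreno_pm4types.h, so treat it as an assumption):

	/* Sketch of the PM4 packet helpers used above: type-3 header
	 * with (count - 1) in bits 16..29 and the opcode in bits 8..15.
	 */
	#define SKETCH_CP_TYPE3_PKT	((unsigned int)3 << 30)
	#define SKETCH_CP_NOP		0x10

	static inline unsigned int sketch_type3_packet(int opcode, int cnt)
	{
		return SKETCH_CP_TYPE3_PKT | ((cnt - 1) << 16) |
			((opcode & 0xff) << 8);
	}

	static inline unsigned int sketch_nop_packet(int cnt)
	{
		return sketch_type3_packet(SKETCH_CP_NOP, cnt);
	}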
@@ -635,10 +635,45 @@
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
+ /* start-of-pipeline timestamp */
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_MEM_WRITE, 2));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
+
+ if (flags & KGSL_CMD_FLAGS_PMODE) {
+ /* disable protected mode error checking */
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
+ }
+
+ for (i = 0; i < sizedwords; i++) {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
+ cmds++;
+ }
+
+ if (flags & KGSL_CMD_FLAGS_PMODE) {
+ /* re-enable protected mode error checking */
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
+ }
+
+ /* HW workaround for an MMU page fault caused by
+ * memory being freed before the GPU has finished
+ * accessing it.
+ */
+ if (adreno_is_a2xx(adreno_dev)) {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
+ }
+
if (adreno_is_a3xx(adreno_dev)) {
/*
- * FLush HLSQ lazy updates to make sure there are no
- * rsources pending for indirect loads after the timestamp
+ * Flush HLSQ lazy updates to make sure there are no
+ * resources pending for indirect loads after the timestamp
*/
GSL_RB_WRITE(ringcmds, rcmd_gpu,
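The now-unconditional sop write above lands in the per-context slot of the shared memstore page, addressed by a fixed stride per context ID. KGSL_MEMSTORE_OFFSET() conventionally expands along the lines of the sketch below (field list abbreviated; treat the exact struct as an assumption):

	#include <linux/stddef.h>	/* offsetof */

	/* Sketch: one devmemstore record per context id, so an
	 * offset is record_size * id + field offset in the record.
	 */
	struct sketch_devmemstore {
		volatile unsigned int soptimestamp;
		unsigned int sbz;
		volatile unsigned int eoptimestamp;
		/* ... */
	};

	#define SKETCH_MEMSTORE_OFFSET(ctxt_id, field) \
		((ctxt_id) * sizeof(struct sketch_devmemstore) + \
		 offsetof(struct sketch_devmemstore, field))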
@@ -649,22 +684,19 @@
GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
}
+ /*
+ * end-of-pipeline timestamp. If per-context timestamps are not
+ * enabled, then context_id will be KGSL_MEMSTORE_GLOBAL so all
+ * eop timestamps will work out.
+ */
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
+ KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
+
if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS
&& !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
- /* start-of-pipeline timestamp */
- GSL_RB_WRITE(ringcmds, rcmd_gpu,
- cp_type3_packet(CP_MEM_WRITE, 2));
- GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
- GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
-
- /* end-of-pipeline timestamp */
- GSL_RB_WRITE(ringcmds, rcmd_gpu,
- cp_type3_packet(CP_EVENT_WRITE, 3));
- GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
- GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
- GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
cp_type3_packet(CP_MEM_WRITE, 2));
@@ -673,16 +705,14 @@
eoptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu,
rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
- } else {
- GSL_RB_WRITE(ringcmds, rcmd_gpu,
- cp_type3_packet(CP_EVENT_WRITE, 3));
- GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
- GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- eoptimestamp)));
- GSL_RB_WRITE(ringcmds, rcmd_gpu,
- rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
}
+
+ if (adreno_is_a20x(adreno_dev)) {
+ GSL_RB_WRITE(ringcmds, rcmd_gpu,
+ cp_type3_packet(CP_EVENT_WRITE, 1));
+ GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH);
+ }
+
if (context) {
/* Conditional execution based on memory values */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
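Whoever consumes these sop/eop values (the CP_COND_EXEC block here, or CPU-side waiters) has to compare timestamps wraparound-safely, since the counters are free-running 32-bit values. The standard signed-difference idiom, valid while the two values stay within 2^31 of each other:

	/* Wraparound-safe comparison: true when a is at or past b. */
	static inline int sketch_timestamp_cmp(unsigned int a, unsigned int b)
	{
		return (int)(a - b) >= 0;
	}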
@@ -960,43 +990,31 @@
{
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- unsigned int *link;
+ unsigned int *link = 0;
unsigned int *cmds;
unsigned int i;
- struct adreno_context *drawctxt;
+ struct adreno_context *drawctxt = NULL;
unsigned int start_index = 0;
+ int ret;
- if (device->state & KGSL_STATE_HUNG)
- return -EBUSY;
+ if (device->state & KGSL_STATE_HUNG) {
+ ret = -EBUSY;
+ goto done;
+ }
+
if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
- context == NULL || ibdesc == 0 || numibs == 0)
- return -EINVAL;
-
+ context == NULL || ibdesc == 0 || numibs == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
drawctxt = context->devctxt;
if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
KGSL_CTXT_ERR(device, "proc %s failed fault tolerance"
" will not accept commands for context %d\n",
drawctxt->pid_name, drawctxt->id);
- return -EDEADLK;
- }
-
- if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
- KGSL_CTXT_ERR(device,
- "proc %s triggered fault tolerance"
- " skipping commands for context till EOF %d\n",
- drawctxt->pid_name, drawctxt->id);
- if (flags & KGSL_CMD_FLAGS_EOF)
- drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
- numibs = 0;
- }
-
- cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
- GFP_KERNEL);
- if (!link) {
- KGSL_CORE_ERR("kzalloc(%d) failed\n",
- sizeof(unsigned int) * (numibs * 3 + 4));
- return -ENOMEM;
+ ret = -EDEADLK;
+ goto done;
}
/*When preamble is enabled, the preamble buffer with state restoration
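The function's early returns are being folded into a single done: exit so that the tracepoint added at the end fires and link is freed on every path; note the kzalloc failure path below loses its KGSL_CORE_ERR/kfree pair for the same reason. This is the stock C goto-cleanup idiom, relying on kfree(NULL) being a no-op:

	#include <linux/slab.h>

	/* Sketch of the single-exit shape this function converges on. */
	static int sketch_single_exit(void)
	{
		unsigned int *buf = NULL;
		int ret;

		buf = kzalloc(64, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto done;
		}
		ret = 0;	/* ... real work ... */
	done:
		kfree(buf);	/* safe: kfree(NULL) does nothing */
		return ret;
	}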
@@ -1007,6 +1025,26 @@
adreno_dev->drawctxt_active == drawctxt)
start_index = 1;
+ if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
+ KGSL_CTXT_ERR(device,
+ "proc %s triggered fault tolerance"
+ " skipping commands for context till EOF %d\n",
+ drawctxt->pid_name, drawctxt->id);
+ if (flags & KGSL_CMD_FLAGS_EOF)
+ drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
+ if (start_index)
+ numibs = 1;
+ else
+ numibs = 0;
+ }
+
+ cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
+ GFP_KERNEL);
+ if (!link) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
if (!start_index) {
*cmds++ = cp_nop_packet(1);
*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
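The numibs * 3 + 4 sizing matches the layout assembled here: a two-dword start marker, three dwords per IB (packet header, GPU address, size), and a two-dword end marker; the SKIP_EOF change keeps numibs = 1 when a preamble IB leads the list, so state restoration still runs while the real workload is dropped. A sketch of the whole list builder, where KGSL_END_OF_IB_IDENTIFIER is named by analogy with the start marker and should be treated as an assumption:

	/* Sketch: size and fill the IB command list exactly as the
	 * surrounding hunks do (3 dwords per IB plus 4 marker dwords).
	 */
	static unsigned int *sketch_build_ib_list(struct kgsl_ibdesc *ibdesc,
					unsigned int numibs)
	{
		unsigned int *link, *cmds;
		unsigned int i;

		link = cmds = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
				GFP_KERNEL);
		if (!link)
			return NULL;

		*cmds++ = cp_nop_packet(1);
		*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
		for (i = 0; i < numibs; i++) {
			*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
			*cmds++ = ibdesc[i].gpuaddr;
			*cmds++ = ibdesc[i].sizedwords;
		}
		*cmds++ = cp_nop_packet(1);
		*cmds++ = KGSL_END_OF_IB_IDENTIFIER;	/* assumed name */
		return link;
	}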
@@ -1021,9 +1059,15 @@
if (unlikely(adreno_dev->ib_check_level >= 1 &&
!_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
ibdesc[i].sizedwords))) {
- kfree(link);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
+
+ if (ibdesc[i].sizedwords == 0) {
+ ret = -EINVAL;
+ goto done;
+ }
+
*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
*cmds++ = ibdesc[i].gpuaddr;
*cmds++ = ibdesc[i].sizedwords;
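The new sizedwords == 0 test keeps empty IBs out of the ring, presumably because a zero-length indirect-buffer packet is meaningless to the CP at best. One review note: it runs after the comparatively expensive _parse_ibs() pass, so hoisting the cheap check first would reject bad submissions earlier, e.g.:

	/* Sketch: cheap size check before the IB parse. */
	if (ibdesc[i].sizedwords == 0) {
		ret = -EINVAL;
		goto done;
	}
	if (unlikely(adreno_dev->ib_check_level >= 1 &&
			!_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
				ibdesc[i].sizedwords))) {
		ret = -EINVAL;
		goto done;
	}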
@@ -1043,11 +1087,6 @@
(flags & KGSL_CMD_FLAGS_EOF),
&link[0], (cmds - link), *timestamp);
- KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
- context->id, (unsigned int)ibdesc, numibs, *timestamp);
-
- kfree(link);
-
#ifdef CONFIG_MSM_KGSL_CFF_DUMP
/*
* insert wait for idle after every IB1
@@ -1063,9 +1102,16 @@
*/
if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
- return -EPROTO;
+ ret = -EPROTO;
} else
- return 0;
+ ret = 0;
+
+done:
+ trace_kgsl_issueibcmds(device, context->id, ibdesc, numibs,
+ *timestamp, flags, ret, drawctxt->type);
+
+ kfree(link);
+ return ret;
}
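One hazard in the new done: path worth flagging: drawctxt starts out NULL and the -EBUSY/-EINVAL exits jump to done before it is assigned, yet the tracepoint dereferences drawctxt->type; context and *timestamp are similarly unvalidated on those early exits. A guarded sketch (using 0 as an assumed "unknown" value for the trace arguments):

	done:
		/* Guard the trace arguments on early-error paths where
		 * drawctxt/context were never validated; 0 as the
		 * fallback value is an assumption.
		 */
		trace_kgsl_issueibcmds(device, context ? context->id : 0,
				ibdesc, numibs,
				timestamp ? *timestamp : 0, flags, ret,
				drawctxt ? drawctxt->type : 0);

		kfree(link);
		return ret;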
static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,