/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"

#include "a2xx_reg.h"

#define GSL_RB_NOP_SIZEDWORDS 2
/* Protected mode error checking below register address 0x800.
 * Note: if the CP_INTERRUPT packet is used then checking needs
 * to change to below register address 0x7C8.
 */
#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2

/* Firmware file names.
 * The legacy file names must remain, but the macro names have been
 * replaced to match the current kgsl model:
 * a200 is yamato
 * a220 is leia
 */
#define A200_PFP_FW "yamato_pfp.fw"
#define A200_PM4_FW "yamato_pm4.fw"
#define A220_PFP_470_FW "leia_pfp_470.fw"
#define A220_PM4_470_FW "leia_pm4_470.fw"
#define A225_PFP_FW "a225_pfp.fw"
#define A225_PM4_FW "a225_pm4.fw"

static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	/* Let the pwrscale policy know that new commands have
	 * been submitted. */
	kgsl_pwrscale_busy(rb->device);

	/* Synchronize memory before informing the hardware of the
	 * new commands.
	 */
	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

static int
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
			    int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of ringbuffer. We do not
		 * want the rptr and wptr to become equal when
		 * the ringbuffer is not empty */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		adreno_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	/* wait for space in ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

	} while ((freecmds != 0) && (freecmds <= numcmds));

	return 0;
}
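
/*
 * Worked example for the waitspace logic above (illustrative numbers,
 * not taken from any header): with sizedwords == 1024, wptr == 900 and
 * numcmds == 200, the request would cross the end of the buffer, so the
 * remaining 1024 - 900 - 1 == 123 dwords are covered by a single NOP
 * packet and wptr wraps to 0.  The wait loop then polls rptr until
 * freecmds == rptr - wptr is either 0 (ring empty) or strictly greater
 * than numcmds.
 */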


static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
						  unsigned int numcmds)
{
	unsigned int *ptr = NULL;
	int status = 0;

	BUG_ON(numcmds >= rb->sizedwords);

	GSL_RB_GET_READPTR(rb, &rb->rptr);
	/* check for available space */
	if (rb->wptr >= rb->rptr) {
		/* wptr ahead or equal to rptr */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	} else {
		/* wptr behind rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			status = adreno_ringbuffer_waitspace(rb, numcmds, 0);
		/* check for remaining space */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	}

	if (status == 0) {
		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		rb->wptr += numcmds;
	}

	return ptr;
}
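
/*
 * Typical usage of the two helpers above, as done throughout this file
 * (a sketch; the packet payload here is illustrative only):
 *
 *	unsigned int *cmds = adreno_ringbuffer_allocspace(rb, 2);
 *	unsigned int cmds_gpu = rb->buffer_desc.gpuaddr +
 *				sizeof(uint) * (rb->wptr - 2);
 *	GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(1));
 *	GSL_RB_WRITE(cmds, cmds_gpu, 0);
 *	adreno_ringbuffer_submit(rb);
 */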

static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			  void **data, int *len)
{
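	/* On success *data points at a kmalloc'd copy of the firmware
	 * image and the caller owns it: it must be kfree()'d, as the
	 * ucode loaders' error paths and adreno_ringbuffer_close() do. */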
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
			     fwfile, ret);
		return ret;
	}

	*data = kmalloc(fw->size, GFP_KERNEL);

	if (*data) {
		memcpy(*data, fw->data, fw->size);
		*len = fw->size;
	} else
		KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);

	release_firmware(fw);
	return (*data != NULL) ? 0 : -ENOMEM;
}

static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pm4_fwfile,
				     &ptr, &len);

		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
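		/* i.e. an image of N 3-dword instructions plus the
		 * leading version dword is (3 * N + 1) dwords, so a
		 * well-formed len leaves a sizeof(uint32_t) remainder. */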
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		      adreno_dev->pm4_fw[0]);

	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
				adreno_dev->pm4_fw[i]);
err:
	return ret;
}

static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pfp_fwfile,
				     &ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		      adreno_dev->pfp_fw[0]);

	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
				adreno_dev->pfp_fw[i]);
err:
	return ret;
}

int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	/*cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	uint cmds_gpu;

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			 + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
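	/* e.g. a 32 KB ring (an illustrative size; KGSL_RB_SIZE is
	 * defined elsewhere) is 8192 dwords == 4096 quadwords, giving
	 * rb_bufsz = ilog2(4096) = 12 */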

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	 */
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			rb->memptrs_desc.gpuaddr +
			GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR,
			device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */

	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = adreno_ringbuffer_allocspace(rb, 19);
	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
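	/* allocspace just advanced rb->wptr by 19, hence the wptr - 19
	 * above when recovering the GPU address of the reserved block */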

	GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	/* Vertex and Pixel Shader Start Addresses in instructions
	 * (3 DWORDS per instruction) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
	/* Write Confirm Interval: the CP waits wait_interval * 16 clocks
	 * between polls */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	adreno_ringbuffer_submit(rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}

int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	if (rb->flags & KGSL_FLAGS_STARTED) {
		/* ME_HALT */
		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

		rb->flags &= ~KGSL_FLAGS_STARTED;
	}

	return 0;
}

int adreno_ringbuffer_init(struct kgsl_device *device)
{
	int status;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	rb->device = device;
	/*
	 * It is silly to convert this to words and then back to bytes
	 * immediately below, but most of the rest of the code deals
	 * in words, so we might as well only do the math once
	 */
	rb->sizedwords = KGSL_RB_SIZE >> 2;

	/* allocate memory for ringbuffer */
	status = kgsl_allocate_contiguous(&rb->buffer_desc,
					  (rb->sizedwords << 2));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* allocate memory for polling and timestamps */
	/* This really could be at a 4 byte alignment boundary, but to use
	 * the MMU it needs to be page aligned */
	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
					  sizeof(struct kgsl_rbmemptrs));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* overlay structure on memptrs memory */
	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

	return 0;
}

int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

	kgsl_sharedmem_free(&rb->buffer_desc);
	kgsl_sharedmem_free(&rb->memptrs_desc);

	kfree(adreno_dev->pfp_fw);
	kfree(adreno_dev->pm4_fw);

	adreno_dev->pfp_fw = NULL;
	adreno_dev->pm4_fw = NULL;

	memset(rb, 0, sizeof(struct adreno_ringbuffer));

	return 0;
}

static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
			  unsigned int flags, unsigned int *cmds,
			  int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;
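	/* Accounting for the packets written below: the fixed 6 dwords
	 * are the 2-dword CP_TIMESTAMP type0 packet plus the 4-dword
	 * CP_EVENT_WRITE packet; the conditional terms cover the two
	 * 2-dword protected mode toggles, the 5-dword CP_COND_EXEC plus
	 * 2-dword CP_INTERRUPT pair, and the 2-dword kernel command
	 * identifier NOP. */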

	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		   + sizeof(uint)*(rb->wptr-total_sizedwords);

	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
	}
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	adreno_ringbuffer_submit(rb);

	/* return timestamp of issued commands */
	return timestamp;
}

void
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
			    unsigned int flags,
			    unsigned int *cmds,
			    int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	if (device->state & KGSL_STATE_HUNG)
		return;
	adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
}

int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
			      struct kgsl_context *context,
			      struct kgsl_ibdesc *ibdesc,
			      unsigned int numibs,
			      uint32_t *timestamp,
			      unsigned int flags)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *link;
	unsigned int *cmds;
	unsigned int i;
	struct adreno_context *drawctxt;

	if (device->state & KGSL_STATE_HUNG)
		return -EBUSY;
	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
	    context == NULL || ibdesc == NULL || numibs == 0)
		return -EINVAL;

	drawctxt = context->devctxt;

	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
			       " will not accept commands for this context\n",
			       drawctxt);
		return -EDEADLK;
	}
	link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL);
	cmds = link;
	if (!link) {
		KGSL_MEM_ERR(device, "Failed to allocate memory for command"
			     " submission, size %x\n", numibs * 3);
		return -ENOMEM;
	}
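	/* Each IB is emitted as a 3-dword group (which is why link is
	 * sized numibs * 3): the INDIRECT_BUFFER_PFD header, the IB's
	 * GPU address, and its size in dwords. */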
	for (i = 0; i < numibs; i++) {
		(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);

		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[i].gpuaddr;
		*cmds++ = ibdesc[i].sizedwords;
	}

	kgsl_setstate(device,
		      kgsl_pt_get_flags(device->mmu.hwpagetable,
					device->id));

	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
					       KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
					       &link[0], (cmds - link));

	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
		      context->id, (unsigned int)ibdesc, numibs, *timestamp);

	kfree(link);

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
	/*
	 * insert wait for idle after every IB1
	 * this is conservative but works reliably and is ok
	 * even for performance simulations
	 */
	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif

	return 0;
}

int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
			      unsigned int *temp_rb_buffer,
			      int *rb_size)
{
	struct kgsl_device *device = rb->device;
	unsigned int rb_rptr;
	unsigned int retired_timestamp;
	unsigned int temp_idx = 0;
	unsigned int value;
	unsigned int val1;
	unsigned int val2;
	unsigned int val3;
	unsigned int copy_rb_contents = 0;
	unsigned int cur_context;
	unsigned int j;

	GSL_RB_GET_READPTR(rb, &rb->rptr);

	retired_timestamp = device->ftbl->readtimestamp(device,
							KGSL_TIMESTAMP_RETIRED);
	KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
		     retired_timestamp);
	/*
	 * We need to go back in history by 4 dwords from the current location
	 * of read pointer as 4 dwords are read to match the end of a command.
	 * Also, take care of wrap around when moving back
	 */
	if (rb->rptr >= 4)
		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
	else
		rb_rptr = rb->buffer_desc.size -
			  ((4 - rb->rptr) * sizeof(unsigned int));
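	/* e.g. with rb->rptr == 1 the scan starts 3 dwords from the end
	 * of the buffer: rb_rptr = size - 3 * sizeof(unsigned int) */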
	/* Read the rb contents going backwards to locate end of last
	 * successfully executed command */
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		if (value == retired_timestamp) {
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
			/* match the pattern found at the end of a command */
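			/* These are the two tails adreno_ringbuffer_addcmds()
			 * emits after writing a timestamp: the CP_COND_EXEC
			 * dword count followed by the CP_INTERRUPT packet, or
			 * the CP_EVENT_WRITE + CACHE_FLUSH_TS + eoptimestamp
			 * address triple. */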
			if ((val1 == 2 &&
			     val2 == cp_type3_packet(CP_INTERRUPT, 1)
			     && val3 == CP_INT_CNTL__RB_INT_MASK) ||
			    (val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
			     && val2 == CACHE_FLUSH_TS &&
			     val3 == (rb->device->memstore.gpuaddr +
				      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
				KGSL_DRV_ERR(device,
					     "Found end of last executed "
					     "command at offset: %x\n",
					     rb_rptr / sizeof(unsigned int));
				break;
			} else {
				if (rb_rptr < (3 * sizeof(unsigned int)))
					rb_rptr = rb->buffer_desc.size -
						  (3 * sizeof(unsigned int))
						  + rb_rptr;
				else
					rb_rptr -= (3 * sizeof(unsigned int));
			}
		}

		if (rb_rptr == 0)
			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
		else
			rb_rptr -= sizeof(unsigned int);
	}

	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
		KGSL_DRV_ERR(device,
			     "GPU recovery from hang not possible because last"
			     " successful timestamp is overwritten\n");
		return -EINVAL;
	}
	/* rb_rptr is now pointing to the first dword of the command following
	 * the last successfully executed command sequence. Assumption is that
	 * GPU is hung in the command sequence pointed by rb_rptr */
	/* make sure the GPU is not hung in a command submitted by kgsl
	 * itself */
	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
			     adreno_ringbuffer_inc_wrapped(rb_rptr,
							   rb->buffer_desc.size));
	if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
		KGSL_DRV_ERR(device,
			     "GPU recovery from hang not possible because "
			     "of hang in kgsl command\n");
		return -EINVAL;
	}

	/* current_context is the context that is presently active in the
	 * GPU, i.e. the context in which the hang is caused */
	kgsl_sharedmem_readl(&device->memstore, &cur_context,
			     KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
		/* check for context switch indicator */
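		/* A switch appears on the ring as the sequence verified
		 * below: KGSL_CONTEXT_TO_MEM_IDENTIFIER, a CP_MEM_WRITE
		 * header, the current_context memstore address, then the
		 * new context value. */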
		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(val1 != (device->memstore.gpuaddr +
					KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON((copy_rb_contents == 0) &&
			       (value == cur_context));
			/*
			 * If we were copying the commands and got to this point
			 * then we need to remove the 3 commands that appear
			 * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
			 */
			if (temp_idx)
				temp_idx -= 3;
			/* if context switches to a context that did not cause
			 * hang then start saving the rb contents as those
			 * commands can be executed */
			if (value != cur_context) {
				copy_rb_contents = 1;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CMD_IDENTIFIER;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CONTEXT_TO_MEM_IDENTIFIER;
				temp_rb_buffer[temp_idx++] =
						cp_type3_packet(CP_MEM_WRITE, 2);
				temp_rb_buffer[temp_idx++] = val1;
				temp_rb_buffer[temp_idx++] = value;
			} else {
				copy_rb_contents = 0;
			}
		} else if (copy_rb_contents)
			temp_rb_buffer[temp_idx++] = value;
	}

	*rb_size = temp_idx;
	KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
	for (temp_idx = 0; temp_idx < *rb_size;) {
		char str[80];
		int idx = 0;
		if ((temp_idx + 8) <= *rb_size)
			j = 8;
		else
			j = *rb_size - temp_idx;
		for (; j != 0; j--)
			idx += scnprintf(str + idx, 80 - idx,
					 "%8.8X ", temp_rb_buffer[temp_idx++]);
		printk(KERN_ALERT "%s", str);
	}
	return 0;
}

void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
			  int num_rb_contents)
{
	int i;
	unsigned int *ringcmds;
	unsigned int rcmd_gpu;

	if (!num_rb_contents)
		return;

	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
		rb->rptr = 0;
		BUG_ON(num_rb_contents > rb->buffer_desc.size);
	}
	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
	for (i = 0; i < num_rb_contents; i++)
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
	rb->wptr += num_rb_contents;
	adreno_ringbuffer_submit(rb);
}