/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"

#include "a200_reg.h"

#define GSL_RB_NOP_SIZEDWORDS 2

/* Protected mode error checking applies below register address 0x800.
 * Note: if the CP_INTERRUPT packet is used, the boundary needs to
 * change to register address 0x7C8.
 */
#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2

/* Firmware file names.
 * The legacy file names must remain, but the macro names have been
 * replaced to match the current kgsl model:
 * a200 is yamato
 * a220 is leia
 */
#define A200_PFP_FW "yamato_pfp.fw"
#define A200_PM4_FW "yamato_pm4.fw"
#define A220_PFP_470_FW "leia_pfp_470.fw"
#define A220_PM4_470_FW "leia_pm4_470.fw"
#define A225_PFP_FW "a225_pfp.fw"
#define A225_PM4_FW "a225_pm4.fw"

static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	/* Synchronize memory before informing the hardware of the
	 * new commands.
	 */
	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

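/*
 * adreno_ringbuffer_waitspace() - wait until numcmds dwords fit in the ring.
 * If the write pointer is ahead of the read pointer (wptr_ahead), the tail
 * of the buffer is filled with a NOP packet and wptr wraps to 0 before the
 * wait begins.
 */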
static int
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
			    int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;

	/* if wptr is ahead, fill the remaining space with NOPs */
	if (wptr_ahead) {
		/* -1 for the NOP packet header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of the ringbuffer. We do not
		 * want rptr and wptr to become equal when the
		 * ringbuffer is not empty.
		 */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		adreno_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

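	/*
	 * The loop below exits when the ring is completely empty
	 * (rptr == wptr, i.e. freecmds == 0) or when the gap between
	 * wptr and rptr is large enough to hold numcmds dwords.
	 */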
	/* wait for space in the ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

	} while ((freecmds != 0) && (freecmds <= numcmds));

	return 0;
}
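/*
 * adreno_ringbuffer_allocspace() - reserve numcmds dwords in the ring and
 * return a host pointer to the reserved region. The last
 * GSL_RB_NOP_SIZEDWORDS dwords of the buffer are always held back so the
 * wrap-around NOP packet has room to land.
 */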
static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
						  unsigned int numcmds)
{
	unsigned int *ptr = NULL;
	int status = 0;

	BUG_ON(numcmds >= rb->sizedwords);

	GSL_RB_GET_READPTR(rb, &rb->rptr);
	/* check for available space */
	if (rb->wptr >= rb->rptr) {
		/* wptr ahead of or equal to rptr */
		/* reserve dwords for the nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	} else {
		/* wptr behind rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			status = adreno_ringbuffer_waitspace(rb, numcmds, 0);
		/* check for remaining space */
		/* reserve dwords for the nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	}

	if (status == 0) {
		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		rb->wptr += numcmds;
	}

	return ptr;
}

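/*
 * _load_firmware() - fetch a firmware image and copy it into a kmalloc'd
 * buffer. On success *data and *len describe the copy; the caller owns
 * the buffer and is responsible for kfree()ing it.
 */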
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			  void **data, int *len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
			     fwfile, ret);
		return ret;
	}

	*data = kmalloc(fw->size, GFP_KERNEL);

	if (*data) {
		memcpy(*data, fw->data, fw->size);
		*len = fw->size;
	} else {
		KGSL_MEM_ERR(device, "kmalloc(%zu) failed\n", fw->size);
	}

	release_firmware(fw);
	return (*data != NULL) ? 0 : -ENOMEM;
}

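/*
 * adreno_ringbuffer_load_pm4_ucode() - load the PM4 micro engine (ME)
 * microcode. The image is cached in adreno_dev on first use and then
 * written word by word through the CP_ME_RAM registers; word 0 holds
 * the version and is skipped by the write loop.
 */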
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pm4_fwfile,
				     &ptr, &len);

		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		      adreno_dev->pm4_fw[0]);

	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
				adreno_dev->pm4_fw[i]);
err:
	return ret;
}

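/*
 * adreno_ringbuffer_load_pfp_ucode() - load the prefetch parser (PFP)
 * microcode through the CP_PFP_UCODE registers. As with the PM4 image,
 * word 0 holds the version and is skipped by the write loop.
 */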
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pfp_fwfile,
				     &ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		      adreno_dev->pfp_fw[0]);

	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
				adreno_dev->pfp_fw[i]);
err:
	return ret;
}

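/*
 * adreno_ringbuffer_start() - bring up the ringbuffer: program the CP ring
 * registers, load the PM4 and PFP microcode, release the micro engine from
 * halt, submit the ME_INIT packet and idle the device to confirm that it
 * executed.
 */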
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	uint cmds_gpu;

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			 + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
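	/*
	 * For example (illustrative numbers only, not the actual
	 * KGSL_RB_SIZE): a 32KB ring has sizedwords = 8192, giving
	 * rb_bufsz = ilog2(8192 / 2) = 12.
	 */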
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	 */
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			rb->memptrs_desc.gpuaddr +
			GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR,
			device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */
	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start the micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = adreno_ringbuffer_allocspace(rb, 19);
	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
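	/* allocspace has already advanced wptr, so cmds_gpu points back at
	 * the start of the 19 dwords just reserved for the ME_INIT packet.
	 */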
335
Jordan Crouse084427d2011-07-28 08:37:58 -0600336 GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700337 /* All fields present (bits 9:0) */
338 GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
339 /* Disable/Enable Real-Time Stream processing (present but ignored) */
340 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
341 /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
342 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
343
344 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600345 SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700346 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600347 SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700348 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600349 SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700350 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600351 SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700352 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600353 SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700354 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600355 SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700356 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600357 SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700358 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600359 SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700360
361 /* Vertex and Pixel Shader Start Addresses in instructions
362 * (3 DWORDS per instruction) */
363 GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
364 /* Maximum Contexts */
365 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
366 /* Write Confirm Interval and The CP will wait the
367 * wait_interval * 16 clocks between polling */
368 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
369
370 /* NQ and External Memory Swap */
371 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
372 /* Protected mode error checking */
373 GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
374 /* Disable header dumping and Header dump address */
375 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
376 /* Header dump size */
377 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
378
379 adreno_ringbuffer_submit(rb);
380
381 /* idle device to validate ME INIT */
382 status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
383
384 if (status == 0)
385 rb->flags |= KGSL_FLAGS_STARTED;
386
387 return status;
388}
389
int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	if (rb->flags & KGSL_FLAGS_STARTED) {
		/* ME_HALT */
		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

		rb->flags &= ~KGSL_FLAGS_STARTED;
	}

	return 0;
}

int adreno_ringbuffer_init(struct kgsl_device *device)
{
	int status;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	rb->device = device;
	/*
	 * It is silly to convert this to words and then back to bytes
	 * immediately below, but most of the rest of the code deals
	 * in words, so we might as well only do the math once
	 */
	rb->sizedwords = KGSL_RB_SIZE >> 2;

	/* allocate memory for the ringbuffer */
	status = kgsl_allocate_contiguous(&rb->buffer_desc,
					  (rb->sizedwords << 2));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* Allocate memory for polling and timestamps.
	 * This really only needs 4-byte alignment, but to use the MMU
	 * it must be placed on a page boundary.
	 */
	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
					  sizeof(struct kgsl_rbmemptrs));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* overlay the structure on the memptrs memory */
	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

	return 0;
}

int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

	kgsl_sharedmem_free(&rb->buffer_desc);
	kgsl_sharedmem_free(&rb->memptrs_desc);

	kfree(adreno_dev->pfp_fw);
	kfree(adreno_dev->pm4_fw);

	adreno_dev->pfp_fw = NULL;
	adreno_dev->pm4_fw = NULL;

	memset(rb, 0, sizeof(struct adreno_ringbuffer));

	return 0;
}

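/*
 * adreno_ringbuffer_addcmds() - write a command stream into the ring and
 * return the timestamp assigned to it. On top of the caller's sizedwords,
 * space is reserved for the timestamp packets (6 dwords), plus 4 dwords
 * for the protected-mode toggles, 2 for the kernel command identifier and
 * 7 for the timestamp-compare/interrupt sequence, depending on flags.
 */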
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
			  unsigned int flags, unsigned int *cmds,
			  int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;

	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		   + sizeof(uint)*(rb->wptr-total_sizedwords);

	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
	}
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	adreno_ringbuffer_submit(rb);

	/* return the timestamp of the issued commands */
	return timestamp;
}

void
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
			    unsigned int flags,
			    unsigned int *cmds,
			    int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	if (device->state & KGSL_STATE_HUNG)
		return;
	adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
}

int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
			      struct kgsl_context *context,
			      struct kgsl_ibdesc *ibdesc,
			      unsigned int numibs,
			      uint32_t *timestamp,
			      unsigned int flags)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *link;
	unsigned int *cmds;
	unsigned int i;
	struct adreno_context *drawctxt;

	if (device->state & KGSL_STATE_HUNG)
		return -EBUSY;
	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
	    context == NULL || ibdesc == NULL || numibs == 0)
		return -EINVAL;

	drawctxt = context->devctxt;

	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang,"
			       " will not accept commands for this context\n",
			       drawctxt);
		return -EDEADLK;
	}
	link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL);
	cmds = link;
	if (!link) {
		KGSL_MEM_ERR(device, "Failed to allocate memory for command"
			     " submission, size %x\n", numibs * 3);
		return -ENOMEM;
	}
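	/* Each IB is wrapped in a 3-dword indirect-buffer packet:
	 * header, GPU address and size in dwords.
	 */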
	for (i = 0; i < numibs; i++) {
		(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);

		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[i].gpuaddr;
		*cmds++ = ibdesc[i].sizedwords;
	}

	kgsl_setstate(device,
		      kgsl_pt_get_flags(device->mmu.hwpagetable,
					device->id));

	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
					       KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
					       &link[0], (cmds - link));

	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
		      context->id, (unsigned int)ibdesc, numibs, *timestamp);

	kfree(link);

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
	/*
	 * insert wait for idle after every IB1
	 * this is conservative but works reliably and is ok
	 * even for performance simulations
	 */
	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif

	return 0;
}

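/*
 * adreno_ringbuffer_extract() - salvage ring contents after a GPU hang.
 * Walk backwards from the read pointer to find the end of the last retired
 * command, then copy forward into temp_rb_buffer only the commands that
 * belong to contexts other than the one that hung, so that they can be
 * resubmitted with adreno_ringbuffer_restore().
 */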
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
			      unsigned int *temp_rb_buffer,
			      int *rb_size)
{
	struct kgsl_device *device = rb->device;
	unsigned int rb_rptr;
	unsigned int retired_timestamp;
	unsigned int temp_idx = 0;
	unsigned int value;
	unsigned int val1;
	unsigned int val2;
	unsigned int val3;
	unsigned int copy_rb_contents = 0;
	unsigned int cur_context;
	unsigned int j;

	GSL_RB_GET_READPTR(rb, &rb->rptr);

	retired_timestamp = device->ftbl->readtimestamp(device,
							KGSL_TIMESTAMP_RETIRED);
	KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
		     retired_timestamp);
	/*
	 * We need to go back in history by 4 dwords from the current location
	 * of the read pointer, as 4 dwords are read to match the end of a
	 * command. Also take care of wrap-around when moving back.
	 */
	if (rb->rptr >= 4)
		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
	else
		rb_rptr = rb->buffer_desc.size -
			  ((4 - rb->rptr) * sizeof(unsigned int));
	/* Read the rb contents going backwards to locate the end of the
	 * last successfully executed command.
	 */
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		if (value == retired_timestamp) {
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
			/* match the pattern found at the end of a command */
			if ((val1 == 2 &&
			     val2 == cp_type3_packet(CP_INTERRUPT, 1)
			     && val3 == CP_INT_CNTL__RB_INT_MASK) ||
			    (val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
			     && val2 == CACHE_FLUSH_TS &&
			     val3 == (rb->device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
				KGSL_DRV_ERR(device,
					"Found end of last executed "
					"command at offset: %x\n",
					rb_rptr / sizeof(unsigned int));
				break;
			} else {
				if (rb_rptr < (3 * sizeof(unsigned int)))
					rb_rptr = rb->buffer_desc.size -
						  (3 * sizeof(unsigned int))
						  + rb_rptr;
				else
					rb_rptr -= (3 * sizeof(unsigned int));
			}
		}

		if (rb_rptr == 0)
			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
		else
			rb_rptr -= sizeof(unsigned int);
	}

	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because last"
			" successful timestamp is overwritten\n");
		return -EINVAL;
	}
	/* rb_rptr now points to the first dword of the command following
	 * the last successfully executed command sequence. The assumption
	 * is that the GPU is hung in the command sequence pointed to by
	 * rb_rptr.
	 */
	/* make sure the GPU is not hung in a command submitted by kgsl
	 * itself
	 */
	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
			     adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size));
	if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because "
			"of hang in kgsl command\n");
		return -EINVAL;
	}

	/* current_context is the context that is presently active in the
	 * GPU, i.e. the context in which the hang was caused
	 */
	kgsl_sharedmem_readl(&device->memstore, &cur_context,
			     KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
		/* check for the context switch indicator */
		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(val1 != (device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON((copy_rb_contents == 0) &&
			       (value == cur_context));
			/*
			 * If we were copying the commands and got to this
			 * point then we need to remove the 3 commands that
			 * appear before KGSL_CONTEXT_TO_MEM_IDENTIFIER
			 */
			if (temp_idx)
				temp_idx -= 3;
			/* if the context switches to a context that did not
			 * cause the hang, start saving the rb contents as
			 * those commands can be executed
			 */
			if (value != cur_context) {
				copy_rb_contents = 1;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CMD_IDENTIFIER;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CONTEXT_TO_MEM_IDENTIFIER;
				temp_rb_buffer[temp_idx++] =
					cp_type3_packet(CP_MEM_WRITE, 2);
				temp_rb_buffer[temp_idx++] = val1;
				temp_rb_buffer[temp_idx++] = value;
			} else {
				copy_rb_contents = 0;
			}
		} else if (copy_rb_contents)
			temp_rb_buffer[temp_idx++] = value;
	}

	*rb_size = temp_idx;
	KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
	for (temp_idx = 0; temp_idx < *rb_size;) {
		char str[80];
		int idx = 0;
		if ((temp_idx + 8) <= *rb_size)
			j = 8;
		else
			j = *rb_size - temp_idx;
		for (; j != 0; j--)
			idx += scnprintf(str + idx, 80 - idx,
				"%8.8X ", temp_rb_buffer[temp_idx++]);
		printk(KERN_ALERT "%s", str);
	}
	return 0;
}

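/*
 * adreno_ringbuffer_restore() - resubmit the commands salvaged by
 * adreno_ringbuffer_extract(). If the salvaged contents would not fit
 * between wptr and the end of the buffer, the CP read pointer is reset to
 * the top of the ring before the contents are written.
 */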
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
			  int num_rb_contents)
{
	int i;
	unsigned int *ringcmds;
	unsigned int rcmd_gpu;

	if (!num_rb_contents)
		return;

	/* wptr and num_rb_contents are dword counts, so compare against
	 * the ring size in dwords rather than its byte size
	 */
	if (num_rb_contents > (rb->sizedwords - rb->wptr)) {
		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
		rb->rptr = 0;
		BUG_ON(num_rb_contents > rb->sizedwords);
	}
	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
	for (i = 0; i < num_rb_contents; i++)
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
	rb->wptr += num_rb_contents;
	adreno_ringbuffer_submit(rb);
}