/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"

#include "a2xx_reg.h"

#define GSL_RB_NOP_SIZEDWORDS 2
/* protected mode error checking below register address 0x800
 * note: if CP_INTERRUPT packet is used then checking needs
 * to change to below register address 0x7C8
 */
#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2

/* Firmware file names
 * Legacy file names must remain, but the macro names have been changed
 * to match the current kgsl model.
 * a200 is yamato
 * a220 is leia
 */
#define A200_PFP_FW "yamato_pfp.fw"
#define A200_PM4_FW "yamato_pm4.fw"
#define A220_PFP_470_FW "leia_pfp_470.fw"
#define A220_PM4_470_FW "leia_pm4_470.fw"
#define A225_PFP_FW "a225_pfp.fw"
#define A225_PM4_FW "a225_pm4.fw"

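/*
 * Make newly written ring contents visible to the GPU. Writing the
 * updated write pointer to REG_CP_RB_WPTR is what tells the CP to fetch
 * the new commands; a wptr of 0 would be indistinguishable from an empty
 * ring, which is presumably why submitting at offset 0 is treated as a
 * BUG below.
 */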
static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	/* Let the pwrscale policy know that new commands have
	   been submitted. */
	kgsl_pwrscale_busy(rb->device);

	/* synchronize memory before informing the hardware of the
	 * new commands.
	 */
	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

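/*
 * Wait until the ring has room for numcmds dwords. If the write pointer
 * is ahead of the read pointer, the remainder of the buffer is padded
 * with a NOP packet and wptr wraps back to 0; after that (or when wptr
 * is behind rptr) we poll rptr until the gap rptr - wptr either exceeds
 * numcmds or drops to 0, which means the ring has drained completely.
 */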
static void
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
			    int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of ringbuffer. We do not
		 * want the rptr and wptr to become equal when
		 * the ringbuffer is not empty */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		adreno_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	/* wait for space in ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

	} while ((freecmds != 0) && (freecmds <= numcmds));
}

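/*
 * Reserve numcmds dwords in the ring and return a host pointer to the
 * start of the reservation. GSL_RB_NOP_SIZEDWORDS dwords are always kept
 * free at the end of the buffer so that a wrapping NOP packet header can
 * be written there if needed.
 */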
static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
						  unsigned int numcmds)
{
	unsigned int *ptr = NULL;

	BUG_ON(numcmds >= rb->sizedwords);

	GSL_RB_GET_READPTR(rb, &rb->rptr);
	/* check for available space */
	if (rb->wptr >= rb->rptr) {
		/* wptr ahead or equal to rptr */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			adreno_ringbuffer_waitspace(rb, numcmds, 1);
	} else {
		/* wptr behind rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			adreno_ringbuffer_waitspace(rb, numcmds, 0);
		/* check for remaining space */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			adreno_ringbuffer_waitspace(rb, numcmds, 1);
	}

	ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rb->wptr += numcmds;

	return ptr;
}

static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			  void **data, int *len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
			     fwfile, ret);
		return ret;
	}

	*data = kmalloc(fw->size, GFP_KERNEL);

	if (*data) {
		memcpy(*data, fw->data, fw->size);
		*len = fw->size;
	} else
		KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);

	release_firmware(fw);
	return (*data != NULL) ? 0 : -ENOMEM;
}

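/*
 * Both microcode images use the same layout: word 0 holds the ucode
 * version and the remaining words are the microcode itself, streamed
 * into the CP through a write-address/data register pair (hence the
 * copy loops below starting at index 1).
 */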
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pm4_fwfile,
				     &ptr, &len);

		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		      adreno_dev->pm4_fw[0]);

	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
				adreno_dev->pm4_fw[i]);
err:
	return ret;
}

static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pfp_fwfile,
				     &ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		      adreno_dev->pfp_fw[0]);

	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
				adreno_dev->pfp_fw[i]);
err:
	return ret;
}

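/*
 * Bring up the ringbuffer: program the RB control registers, load the
 * PM4 and PFP microcode, release the micro engine from halt, then submit
 * an ME_INIT packet and idle the device to confirm the CP is actually
 * executing commands.
 */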
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	/*cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	uint cmds_gpu;

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			 + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	 */
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			rb->memptrs_desc.gpuaddr +
			GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR,
			device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */

	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

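	/*
	 * ME_INIT is the type3 header plus 18 payload dwords, matching the
	 * 19-dword reservation below. The first payload dword (0x000003ff)
	 * marks all of the fields selected by bits 9:0 as present, and the
	 * SUBBLOCK_OFFSET values tell the micro engine where each register
	 * subblock begins.
	 */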
332 cmds = adreno_ringbuffer_allocspace(rb, 19);
333 cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
334
Jordan Crouse084427d2011-07-28 08:37:58 -0600335 GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700336 /* All fields present (bits 9:0) */
337 GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
338 /* Disable/Enable Real-Time Stream processing (present but ignored) */
339 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
340 /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
341 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
342
343 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600344 SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700345 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600346 SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700347 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600348 SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700349 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600350 SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700351 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600352 SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700353 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600354 SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700355 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600356 SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700357 GSL_RB_WRITE(cmds, cmds_gpu,
Jordan Crouse0e0486f2011-07-28 08:37:58 -0600358 SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700359
360 /* Vertex and Pixel Shader Start Addresses in instructions
361 * (3 DWORDS per instruction) */
362 GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
363 /* Maximum Contexts */
364 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
365 /* Write Confirm Interval and The CP will wait the
366 * wait_interval * 16 clocks between polling */
367 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
368
369 /* NQ and External Memory Swap */
370 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
371 /* Protected mode error checking */
372 GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
373 /* Disable header dumping and Header dump address */
374 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
375 /* Header dump size */
376 GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
377
378 adreno_ringbuffer_submit(rb);
379
380 /* idle device to validate ME INIT */
381 status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
382
383 if (status == 0)
384 rb->flags |= KGSL_FLAGS_STARTED;
385
386 return status;
387}
388
void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	if (rb->flags & KGSL_FLAGS_STARTED) {
		/* ME_HALT */
		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

		rb->flags &= ~KGSL_FLAGS_STARTED;
	}
}

int adreno_ringbuffer_init(struct kgsl_device *device)
{
	int status;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	rb->device = device;
	/*
	 * It is silly to convert this to words and then back to bytes
	 * immediately below, but most of the rest of the code deals
	 * in words, so we might as well only do the math once
	 */
	rb->sizedwords = KGSL_RB_SIZE >> 2;

	/* allocate memory for ringbuffer */
	status = kgsl_allocate_contiguous(&rb->buffer_desc,
					  (rb->sizedwords << 2));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* allocate memory for polling and timestamps */
	/* This really can be at a 4 byte alignment boundary, but for using
	 * the MMU we need to place it on a page boundary */
	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
					  sizeof(struct kgsl_rbmemptrs));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* overlay structure on memptrs memory */
	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

	return 0;
}

void adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

	kgsl_sharedmem_free(&rb->buffer_desc);
	kgsl_sharedmem_free(&rb->memptrs_desc);

	kfree(adreno_dev->pfp_fw);
	kfree(adreno_dev->pm4_fw);

	adreno_dev->pfp_fw = NULL;
	adreno_dev->pm4_fw = NULL;

	memset(rb, 0, sizeof(struct adreno_ringbuffer));
}

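/*
 * Write a command stream into the ring. Around the caller's dwords this
 * appends, depending on flags: a KGSL_CMD_IDENTIFIER marker for
 * kernel-generated commands, CP_SET_PROTECTED_MODE toggles, the fixed 6
 * extra dwords for the start-of-pipeline and end-of-pipeline timestamp
 * writes, and a CP_COND_EXEC sequence that lets the following
 * CP_INTERRUPT fire only when the memstore timestamp-compare values
 * indicate that someone is waiting on this timestamp.
 */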
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
			  unsigned int flags, unsigned int *cmds,
			  int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;

	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		   + sizeof(uint)*(rb->wptr-total_sizedwords);

	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
	}
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	adreno_ringbuffer_submit(rb);

	/* return timestamp of issued commands */
	return timestamp;
}

void
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
			    unsigned int flags,
			    unsigned int *cmds,
			    int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	if (device->state & KGSL_STATE_HUNG)
		return;
	adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
}

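/*
 * Userspace command submission entry point: each kgsl_ibdesc is wrapped
 * in a 3-dword indirect buffer packet (CP_HDR_INDIRECT_BUFFER_PFD,
 * gpuaddr, size) and the resulting list is added to the ring on behalf
 * of the submitting context.
 */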
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
			      struct kgsl_context *context,
			      struct kgsl_ibdesc *ibdesc,
			      unsigned int numibs,
			      uint32_t *timestamp,
			      unsigned int flags)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *link;
	unsigned int *cmds;
	unsigned int i;
	struct adreno_context *drawctxt;

	if (device->state & KGSL_STATE_HUNG)
		return -EBUSY;
	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
	    context == NULL || ibdesc == NULL || numibs == 0)
		return -EINVAL;

	drawctxt = context->devctxt;

	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
			       " will not accept commands for this context\n",
			       drawctxt);
		return -EDEADLK;
	}
	link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL);
	cmds = link;
	if (!link) {
		KGSL_MEM_ERR(device, "Failed to allocate memory for command"
			     " submission, size %x\n", numibs * 3);
		return -ENOMEM;
	}
	for (i = 0; i < numibs; i++) {
		(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);

		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[i].gpuaddr;
		*cmds++ = ibdesc[i].sizedwords;
	}

	kgsl_setstate(device,
		      kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
					    device->id));

	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
					       KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
					       &link[0], (cmds - link));

	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
		      context->id, (unsigned int)ibdesc, numibs, *timestamp);

	kfree(link);

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
	/*
	 * insert wait for idle after every IB1
	 * this is conservative but works reliably and is ok
	 * even for performance simulations
	 */
	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif

	return 0;
}

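/*
 * Hang recovery, step 1: scan the ring backwards from the current read
 * pointer to find the end of the last command that actually retired,
 * then walk forwards copying out every command that belongs to contexts
 * other than the one that hung, so they can be replayed after reset.
 */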
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
			      unsigned int *temp_rb_buffer,
			      int *rb_size)
{
	struct kgsl_device *device = rb->device;
	unsigned int rb_rptr;
	unsigned int retired_timestamp;
	unsigned int temp_idx = 0;
	unsigned int value;
	unsigned int val1;
	unsigned int val2;
	unsigned int val3;
	unsigned int copy_rb_contents = 0;
	unsigned int cur_context;
	unsigned int j;

	GSL_RB_GET_READPTR(rb, &rb->rptr);

	retired_timestamp = device->ftbl->readtimestamp(device,
			KGSL_TIMESTAMP_RETIRED);
	KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
		     retired_timestamp);
	/*
	 * We need to go back in history by 4 dwords from the current location
	 * of read pointer as 4 dwords are read to match the end of a command.
	 * Also, take care of wrap around when moving back
	 */
	if (rb->rptr >= 4)
		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
	else
		rb_rptr = rb->buffer_desc.size -
			((4 - rb->rptr) * sizeof(unsigned int));
	/* Read the rb contents going backwards to locate end of last
	 * successfully executed command */
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		if (value == retired_timestamp) {
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
			/* match the pattern found at the end of a command */
			if ((val1 == 2 &&
				val2 == cp_type3_packet(CP_INTERRUPT, 1)
				&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
				(val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
				&& val2 == CACHE_FLUSH_TS &&
				val3 == (rb->device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
				KGSL_DRV_ERR(device,
					"Found end of last executed "
					"command at offset: %x\n",
					rb_rptr / sizeof(unsigned int));
				break;
			} else {
				if (rb_rptr < (3 * sizeof(unsigned int)))
					rb_rptr = rb->buffer_desc.size -
						(3 * sizeof(unsigned int))
						+ rb_rptr;
				else
					rb_rptr -= (3 * sizeof(unsigned int));
			}
		}

		if (rb_rptr == 0)
			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
		else
			rb_rptr -= sizeof(unsigned int);
	}

	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because last"
			" successful timestamp is overwritten\n");
		return -EINVAL;
	}
	/* rb_rptr is now pointing to the first dword of the command following
	 * the last successfully executed command sequence. Assumption is that
	 * GPU is hung in the command sequence pointed by rb_rptr */
	/* make sure the GPU is not hung in a command submitted by kgsl
	 * itself */
	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
			adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size));
	if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because "
			"of hang in kgsl command\n");
		return -EINVAL;
	}

	/* current_context is the context that is presently active in the
	 * GPU, i.e the context in which the hang is caused */
	kgsl_sharedmem_readl(&device->memstore, &cur_context,
		KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
		/* check for context switch indicator */
		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(val1 != (device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON((copy_rb_contents == 0) &&
				(value == cur_context));
			/*
			 * If we were copying the commands and got to this point
			 * then we need to remove the 3 commands that appear
			 * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
			 */
			if (temp_idx)
				temp_idx -= 3;
			/* if context switches to a context that did not cause
			 * hang then start saving the rb contents as those
			 * commands can be executed */
			if (value != cur_context) {
				copy_rb_contents = 1;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CMD_IDENTIFIER;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CONTEXT_TO_MEM_IDENTIFIER;
				temp_rb_buffer[temp_idx++] =
					cp_type3_packet(CP_MEM_WRITE, 2);
				temp_rb_buffer[temp_idx++] = val1;
				temp_rb_buffer[temp_idx++] = value;
			} else {
				copy_rb_contents = 0;
			}
		} else if (copy_rb_contents)
			temp_rb_buffer[temp_idx++] = value;
	}

	*rb_size = temp_idx;
	KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
	for (temp_idx = 0; temp_idx < *rb_size;) {
		char str[80];
		int idx = 0;
		if ((temp_idx + 8) <= *rb_size)
			j = 8;
		else
			j = *rb_size - temp_idx;
		for (; j != 0; j--)
			idx += scnprintf(str + idx, 80 - idx,
				"%8.8X ", temp_rb_buffer[temp_idx++]);
		printk(KERN_ALERT "%s", str);
	}
	return 0;
}

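/*
 * Hang recovery, step 2: after the GPU has been reset, write the
 * commands salvaged by adreno_ringbuffer_extract() back into the ring
 * and submit them, wrapping to the start of the buffer if they no
 * longer fit at the current write pointer.
 */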
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
			  int num_rb_contents)
{
	int i;
	unsigned int *ringcmds;
	unsigned int rcmd_gpu;

	if (!num_rb_contents)
		return;

	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
		rb->rptr = 0;
		BUG_ON(num_rb_contents > rb->buffer_desc.size);
	}
	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
	for (i = 0; i < num_rb_contents; i++)
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
	rb->wptr += num_rb_contents;
	adreno_ringbuffer_submit(rb);
}