/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"

#include "a200_reg.h"

#define GSL_RB_NOP_SIZEDWORDS	2
/* protected mode error checking below register address 0x800
 * note: if CP_INTERRUPT packet is used then checking needs
 * to change to below register address 0x7C8
 */
#define GSL_RB_PROTECTED_MODE_CONTROL	0x200001F2

/* Firmware file names
 * The legacy file names must remain, but the macro names have been
 * updated to match the current kgsl model.
 * a200 is yamato
 * a220 is leia
 */
#define A200_PFP_FW	"yamato_pfp.fw"
#define A200_PM4_FW	"yamato_pm4.fw"
#define A220_PFP_470_FW	"leia_pfp_470.fw"
#define A220_PM4_470_FW	"leia_pm4_470.fw"
#define A225_PFP_FW	"a225_pfp.fw"
#define A225_PM4_FW	"a225_pm4.fw"

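/*
 * adreno_ringbuffer_submit() - tell the CP that new commands are queued.
 * The barrier orders the ringbuffer memory writes ahead of the WPTR
 * register update that makes them visible to the hardware.
 */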
static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	/* synchronize memory before informing the hardware of the
	 * new commands.
	 */
	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

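/*
 * Wait for the CP to consume enough of the ringbuffer to make room for
 * numcmds dwords.  When wptr is ahead of rptr (wptr_ahead), the unused
 * tail of the buffer is filled with a NOP packet and wptr wraps to 0
 * before the wait begins.
 */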
static int
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
			    int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, pm4_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of ringbuffer. We do not
		 * want the rptr and wptr to become equal when
		 * the ringbuffer is not empty */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		adreno_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	/* wait for space in ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

	} while ((freecmds != 0) && (freecmds <= numcmds));

	return 0;
}

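/*
 * Reserve numcmds dwords at the current write pointer and return a host
 * pointer to the reserved region, waiting for (and wrapping) the buffer
 * through adreno_ringbuffer_waitspace() when there is not enough room
 * between wptr and rptr.
 */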
static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
						  unsigned int numcmds)
{
	unsigned int *ptr = NULL;
	int status = 0;

	BUG_ON(numcmds >= rb->sizedwords);

	GSL_RB_GET_READPTR(rb, &rb->rptr);
	/* check for available space */
	if (rb->wptr >= rb->rptr) {
		/* wptr ahead or equal to rptr */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	} else {
		/* wptr behind rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			status = adreno_ringbuffer_waitspace(rb, numcmds, 0);
		/* check for remaining space */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	}

	if (status == 0) {
		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		rb->wptr += numcmds;
	}

	return ptr;
}

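/*
 * Fetch a firmware image with request_firmware() and copy it into a
 * kmalloc'd buffer owned by the caller.  *data and *len are only valid
 * when 0 is returned.
 */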
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			  void **data, int *len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
			     fwfile, ret);
		return ret;
	}

	*data = kmalloc(fw->size, GFP_KERNEL);

	if (*data) {
		memcpy(*data, fw->data, fw->size);
		*len = fw->size;
	} else
		KGSL_MEM_ERR(device, "kmalloc(%zu) failed\n", fw->size);

	release_firmware(fw);
	return (*data != NULL) ? 0 : -ENOMEM;
}

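/*
 * Select the PM4 (micro engine) firmware image for this GPU family,
 * cache it in adreno_dev on first use, then write it dword by dword
 * into CP ME RAM.  The first dword of the image is the version and is
 * not written to the hardware.
 */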
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const char *fwfile;
	int i, ret = 0;

	if (adreno_is_a220(adreno_dev)) {
		fwfile = A220_PM4_470_FW;
	} else if (adreno_is_a225(adreno_dev)) {
		fwfile = A225_PM4_FW;
	} else if (adreno_is_a20x(adreno_dev)) {
		fwfile = A200_PM4_FW;
	} else {
		KGSL_DRV_ERR(device, "Could not load PM4 file\n");
		return -EINVAL;
	}

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		unsigned int *ptr;

		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
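		/* i.e. a valid image length is a multiple of 12 bytes plus 4,
		 * for example 16, 28 or 40 bytes */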
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		adreno_dev->pm4_fw[0]);

	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
			adreno_dev->pm4_fw[i]);
err:
	return ret;
}

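/*
 * Select and cache the PFP (prefetch parser) firmware image for this
 * GPU family, then write it dword by dword into the PFP ucode RAM,
 * again skipping the leading version dword.
 */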
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const char *fwfile;
	int i, ret = 0;

	if (adreno_is_a220(adreno_dev)) {
		fwfile = A220_PFP_470_FW;
	} else if (adreno_is_a225(adreno_dev)) {
		fwfile = A225_PFP_FW;
	} else if (adreno_is_a20x(adreno_dev)) {
		fwfile = A200_PFP_FW;
	} else {
		KGSL_DRV_ERR(device, "Could not load PFP firmware\n");
		return -EINVAL;
	}

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		unsigned int *ptr;

		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		adreno_dev->pfp_fw[0]);

	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
			adreno_dev->pfp_fw[i]);
err:
	return ret;
}

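/*
 * Bring up the ringbuffer: program the CP ringbuffer and read/write
 * pointer registers, load the PM4 and PFP microcode, take the micro
 * engine out of halt, and submit the ME_INIT packet.  The device is
 * idled afterwards to validate that ME_INIT completed.
 */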
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	/*cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	uint cmds_gpu;

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			 + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
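	/* e.g. a 32 KB ring is 8192 dwords, i.e. 4096 quadwords, giving
	 * ilog2(4096) = 12; the actual value depends on KGSL_RB_SIZE */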

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	 */
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			rb->memptrs_desc.gpuaddr +
			GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR,
			device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */
	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = adreno_ringbuffer_allocspace(rb, 19);
	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);

	GSL_RB_WRITE(cmds, cmds_gpu, PM4_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	/* Vertex and Pixel Shader Start Addresses in instructions
	 * (3 DWORDS per instruction) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
	/* Write Confirm Interval; the CP will wait wait_interval * 16
	 * clocks between polls */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	adreno_ringbuffer_submit(rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}

int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	if (rb->flags & KGSL_FLAGS_STARTED) {
		/* ME_HALT */
		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

		rb->flags &= ~KGSL_FLAGS_STARTED;
	}

	return 0;
}

int adreno_ringbuffer_init(struct kgsl_device *device)
{
	int status;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	rb->device = device;
	/*
	 * It is silly to convert this to words and then back to bytes
	 * immediately below, but most of the rest of the code deals
	 * in words, so we might as well only do the math once
	 */
	rb->sizedwords = KGSL_RB_SIZE >> 2;

	/* allocate memory for ringbuffer */
	status = kgsl_allocate_contiguous(&rb->buffer_desc,
		(rb->sizedwords << 2));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* allocate memory for polling and timestamps */
	/* This really only needs 4 byte alignment, but when using the MMU
	 * it must be placed on a page boundary */
	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
		sizeof(struct kgsl_rbmemptrs));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* overlay structure on memptrs memory */
	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

	return 0;
}

int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

	kgsl_sharedmem_free(&rb->buffer_desc);
	kgsl_sharedmem_free(&rb->memptrs_desc);

	kfree(adreno_dev->pfp_fw);
	kfree(adreno_dev->pm4_fw);

	adreno_dev->pfp_fw = NULL;
	adreno_dev->pm4_fw = NULL;

	memset(rb, 0, sizeof(struct adreno_ringbuffer));

	return 0;
}

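/*
 * Write a command stream into the ringbuffer and return the timestamp
 * assigned to it.  Besides the caller's dwords this emits an optional
 * kernel-command identifier, optional protected mode toggles around the
 * commands, the start- and end-of-pipeline timestamp writes, and an
 * optional conditional interrupt based on the memstore timestamp
 * compare values.
 */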
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
			  unsigned int flags, unsigned int *cmds,
			  int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;

	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		+ sizeof(uint)*(rb->wptr-total_sizedwords);

	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
	}
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	adreno_ringbuffer_submit(rb);

	/* return timestamp of issued commands */
	return timestamp;
}

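/*
 * Submit kernel-generated commands to the ringbuffer.  Commands are
 * silently dropped when the device is marked hung.
 */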
void
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
				unsigned int flags,
				unsigned int *cmds,
				int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	if (device->state & KGSL_STATE_HUNG)
		return;
	adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
}

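/*
 * Submit user indirect buffers: build an INDIRECT_BUFFER_PFD packet for
 * each IB, switch to the submitting draw context, and queue the packets
 * through adreno_ringbuffer_addcmds(), returning the timestamp assigned
 * to the submission.
 */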
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
				struct kgsl_context *context,
				struct kgsl_ibdesc *ibdesc,
				unsigned int numibs,
				uint32_t *timestamp,
				unsigned int flags)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *link;
	unsigned int *cmds;
	unsigned int i;
	struct adreno_context *drawctxt;

	if (device->state & KGSL_STATE_HUNG)
		return -EBUSY;
	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
	      context == NULL)
		return -EINVAL;

	BUG_ON(ibdesc == 0);
	BUG_ON(numibs == 0);

	drawctxt = context->devctxt;

	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
			" will not accept commands for this context\n",
			drawctxt);
		return -EDEADLK;
	}
	link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL);
	cmds = link;
	if (!link) {
		KGSL_MEM_ERR(device, "Failed to allocate memory for command"
			" submission, size %x\n", numibs * 3);
		return -ENOMEM;
	}
	for (i = 0; i < numibs; i++) {
		(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);

		*cmds++ = PM4_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[i].gpuaddr;
		*cmds++ = ibdesc[i].sizedwords;
	}

	kgsl_setstate(device,
		kgsl_pt_get_flags(device->mmu.hwpagetable,
			device->id));

	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
					KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
					&link[0], (cmds - link));

	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
		context->id, (unsigned int)ibdesc, numibs, *timestamp);

	kfree(link);

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
	/*
	 * insert wait for idle after every IB1
	 * this is conservative but works reliably and is ok
	 * even for performance simulations
	 */
	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif

	return 0;
}

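/*
 * Used for GPU hang recovery: walk backwards from the current read
 * pointer to find the end of the last command the GPU retired, then walk
 * forward copying into temp_rb_buffer only the commands that belong to
 * contexts other than the one that caused the hang, so they can later be
 * resubmitted via adreno_ringbuffer_restore().
 */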
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
				unsigned int *temp_rb_buffer,
				int *rb_size)
{
	struct kgsl_device *device = rb->device;
	unsigned int rb_rptr;
	unsigned int retired_timestamp;
	unsigned int temp_idx = 0;
	unsigned int value;
	unsigned int val1;
	unsigned int val2;
	unsigned int val3;
	unsigned int copy_rb_contents = 0;
	unsigned int cur_context;
	unsigned int j;

	GSL_RB_GET_READPTR(rb, &rb->rptr);

	retired_timestamp = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);
	KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
		retired_timestamp);
	/*
	 * We need to go back in history by 4 dwords from the current location
	 * of read pointer as 4 dwords are read to match the end of a command.
	 * Also, take care of wrap around when moving back
	 */
	if (rb->rptr >= 4)
		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
	else
		rb_rptr = rb->buffer_desc.size -
			((4 - rb->rptr) * sizeof(unsigned int));
	/* Read the rb contents going backwards to locate end of last
	 * successfully executed command */
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		if (value == retired_timestamp) {
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
			/* match the pattern found at the end of a command */
			if ((val1 == 2 &&
				val2 == pm4_type3_packet(PM4_INTERRUPT, 1)
				&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
				(val1 == pm4_type3_packet(PM4_EVENT_WRITE, 3)
				&& val2 == CACHE_FLUSH_TS &&
				val3 == (rb->device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
				KGSL_DRV_ERR(device,
					"Found end of last executed "
					"command at offset: %x\n",
					rb_rptr / sizeof(unsigned int));
				break;
			} else {
				if (rb_rptr < (3 * sizeof(unsigned int)))
					rb_rptr = rb->buffer_desc.size -
						(3 * sizeof(unsigned int))
							+ rb_rptr;
				else
					rb_rptr -= (3 * sizeof(unsigned int));
			}
		}

		if (rb_rptr == 0)
			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
		else
			rb_rptr -= sizeof(unsigned int);
	}

	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because last"
			" successful timestamp is overwritten\n");
		return -EINVAL;
	}
	/* rb_rptr is now pointing to the first dword of the command following
	 * the last successfully executed command sequence. Assumption is that
	 * GPU is hung in the command sequence pointed by rb_rptr */
	/* make sure the GPU is not hung in a command submitted by kgsl
	 * itself */
	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
				adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size));
	if (val1 == pm4_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because "
			"of hang in kgsl command\n");
		return -EINVAL;
	}

	/* current_context is the context that is presently active in the
	 * GPU, i.e. the context in which the hang is caused */
	kgsl_sharedmem_readl(&device->memstore, &cur_context,
		KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size);
		/* check for context switch indicator */
		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size);
			BUG_ON(value != pm4_type3_packet(PM4_MEM_WRITE, 2));
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size);
			BUG_ON(val1 != (device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size);
			BUG_ON((copy_rb_contents == 0) &&
				(value == cur_context));
			/*
			 * If we were copying the commands and got to this point
			 * then we need to remove the 3 commands that appear
			 * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
			 */
			if (temp_idx)
				temp_idx -= 3;
			/* if context switches to a context that did not cause
			 * hang then start saving the rb contents as those
			 * commands can be executed */
			if (value != cur_context) {
				copy_rb_contents = 1;
				temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CMD_IDENTIFIER;
				temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CONTEXT_TO_MEM_IDENTIFIER;
				temp_rb_buffer[temp_idx++] =
					pm4_type3_packet(PM4_MEM_WRITE, 2);
				temp_rb_buffer[temp_idx++] = val1;
				temp_rb_buffer[temp_idx++] = value;
			} else {
				copy_rb_contents = 0;
			}
		} else if (copy_rb_contents)
			temp_rb_buffer[temp_idx++] = value;
	}

	*rb_size = temp_idx;
	KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
	for (temp_idx = 0; temp_idx < *rb_size;) {
		char str[80];
		int idx = 0;
		if ((temp_idx + 8) <= *rb_size)
			j = 8;
		else
			j = *rb_size - temp_idx;
		for (; j != 0; j--)
			idx += scnprintf(str + idx, 80 - idx,
				"%8.8X ", temp_rb_buffer[temp_idx++]);
		printk(KERN_ALERT "%s", str);
	}
	return 0;
}

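/*
 * Requeue commands previously saved by adreno_ringbuffer_extract():
 * write them back into the ringbuffer at the current write pointer and
 * submit them to the CP.
 */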
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
			int num_rb_contents)
{
	int i;
	unsigned int *ringcmds;
	unsigned int rcmd_gpu;

	if (!num_rb_contents)
		return;

	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
		rb->rptr = 0;
		BUG_ON(num_rb_contents > rb->buffer_desc.size);
	}
	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
	for (i = 0; i < num_rb_contents; i++)
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
	rb->wptr += num_rb_contents;
	adreno_ringbuffer_submit(rb);
}
837}