/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"

#include "a200_reg.h"

#define VALID_STATUS_COUNT_MAX	10
#define GSL_RB_NOP_SIZEDWORDS	2
/* protected mode error checking below register address 0x800
 * note: if CP_INTERRUPT packet is used then checking needs
 * to change to below register address 0x7C8
 */
#define GSL_RB_PROTECTED_MODE_CONTROL		0x200001F2

/* Firmware file names
 * The legacy file names must remain, but the macro names have been
 * updated to match the current kgsl model.
 * a200 is yamato
 * a220 is leia
 */
#define A200_PFP_FW "yamato_pfp.fw"
#define A200_PM4_FW "yamato_pm4.fw"
#define A220_PFP_470_FW "leia_pfp_470.fw"
#define A220_PM4_470_FW "leia_pm4_470.fw"
#define A225_PFP_FW "a225_pfp.fw"
#define A225_PM4_FW "a225_pm4.fw"

/* functions */
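/*
 * CP interrupt handler: read and latch REG_CP_INT_STATUS (retrying up to
 * VALID_STATUS_COUNT_MAX times because the read can transiently return 0
 * while the master status still reports a pending CP interrupt), log any CP
 * error conditions, ack only the bits the driver understands, and wake up
 * waiters when a ringbuffer/IB1 interrupt signals timestamp progress.
 */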
void kgsl_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0, num_reads = 0, master_status = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	adreno_regread(device, REG_MASTER_INT_SIGNAL, &master_status);
	while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
		(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
		adreno_regread(device, REG_CP_INT_STATUS, &status);
		adreno_regread(device, REG_MASTER_INT_SIGNAL,
			&master_status);
		num_reads++;
	}
	if (num_reads > 1)
		KGSL_DRV_WARN(device,
			"Looped %d times to read REG_CP_INT_STATUS\n",
			num_reads);
	if (!status) {
		if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
			/* This indicates that we could not read CP_INT_STAT.
			 * As a precaution just wake up processes so
			 * they can check their timestamps. Since we
			 * did not ack any interrupts this interrupt will
			 * be generated again */
			KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
			wake_up_interruptible_all(&device->wait_queue);
		} else
			KGSL_DRV_WARN(device, "Spurious interrupt detected\n");
		return;
	}

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		/* signal intr completion event */
		unsigned int enableflag = 0;
		kgsl_sharedmem_writel(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
			enableflag);
		wmb();
		KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
	}

	if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
		KGSL_CMD_CRIT(rb->device,
			"ringbuffer T0 packet in IB interrupt\n");
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	}
	if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
		KGSL_CMD_CRIT(rb->device,
			"ringbuffer opcode error interrupt\n");
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	}
	if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
		KGSL_CMD_CRIT(rb->device,
			"ringbuffer protected mode error interrupt\n");
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	}
	if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
		KGSL_CMD_CRIT(rb->device,
			"ringbuffer reserved bit error interrupt\n");
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	}
	if (status & CP_INT_CNTL__IB_ERROR_MASK) {
		KGSL_CMD_CRIT(rb->device,
			"ringbuffer IB error interrupt\n");
		kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	}
	if (status & CP_INT_CNTL__SW_INT_MASK)
		KGSL_CMD_INFO(rb->device, "ringbuffer software interrupt\n");

	if (status & CP_INT_CNTL__IB2_INT_MASK)
		KGSL_CMD_INFO(rb->device, "ringbuffer ib2 interrupt\n");

	if (status & (~KGSL_CP_INT_MASK))
		KGSL_CMD_WARN(rb->device,
			"bad bits in REG_CP_INT_STATUS %08x\n", status);

	/* only ack bits we understand */
	status &= KGSL_CP_INT_MASK;
	adreno_regwrite(device, REG_CP_INT_ACK, status);

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
		wake_up_interruptible_all(&device->wait_queue);
		atomic_notifier_call_chain(&(device->ts_notifier_list),
					   device->id,
					   NULL);
	}
}

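/*
 * Publish the CPU-side write pointer to the CP. The memory barrier ensures
 * all ringbuffer writes are visible before the hardware sees the new WPTR
 * value.
 */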
static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	/* synchronize memory before informing the hardware of the
	 * new commands.
	 */
	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

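/*
 * Block until at least numcmds dwords are free. If the write pointer is
 * ahead of the read pointer, pad the tail of the buffer with a NOP packet
 * and wrap to offset 0 first, then poll the read pointer until enough
 * space has been freed.
 */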
static int
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
				int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, pm4_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of ringbuffer. We do not
		 * want the rptr and wptr to become equal when
		 * the ringbuffer is not empty */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		adreno_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	/* wait for space in ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

	} while ((freecmds != 0) && (freecmds <= numcmds));

	return 0;
}


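/*
 * Reserve numcmds dwords at the current write pointer, waiting for space
 * (and wrapping past the NOP-reserved tail) if the request would collide
 * with the read pointer. Returns a host pointer to the reserved region.
 */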
static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
						  unsigned int numcmds)
{
	unsigned int *ptr = NULL;
	int status = 0;

	BUG_ON(numcmds >= rb->sizedwords);

	GSL_RB_GET_READPTR(rb, &rb->rptr);
	/* check for available space */
	if (rb->wptr >= rb->rptr) {
		/* wptr ahead or equal to rptr */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	} else {
		/* wptr behind rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			status = adreno_ringbuffer_waitspace(rb, numcmds, 0);
		/* check for remaining space */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	}

	if (status == 0) {
		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		rb->wptr += numcmds;
	}

	return ptr;
}

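/*
 * Fetch a firmware image with request_firmware() and copy it into a
 * kmalloc'd buffer owned by the caller; the caller is responsible for
 * freeing it (adreno_ringbuffer_close() does this for the CP ucode).
 */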
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			  void **data, int *len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
			     fwfile, ret);
		return ret;
	}

	*data = kmalloc(fw->size, GFP_KERNEL);

	if (*data) {
		memcpy(*data, fw->data, fw->size);
		*len = fw->size;
	} else
		KGSL_MEM_ERR(device, "kmalloc(%zu) failed\n", fw->size);

	release_firmware(fw);
	return (*data != NULL) ? 0 : -ENOMEM;
}

static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const char *fwfile;
	int i, ret = 0;

	if (adreno_is_a220(adreno_dev)) {
		fwfile = A220_PM4_470_FW;
	} else if (adreno_is_a225(adreno_dev)) {
		fwfile = A225_PM4_FW;
	} else if (adreno_is_a20x(adreno_dev)) {
		fwfile = A200_PM4_FW;
	} else {
		KGSL_DRV_ERR(device, "Could not load PM4 file\n");
		return -EINVAL;
	}

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		unsigned int *ptr;

		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		adreno_dev->pm4_fw[0]);

	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
				adreno_dev->pm4_fw[i]);
err:
	return ret;
}

static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	const char *fwfile;
	int i, ret = 0;

	if (adreno_is_a220(adreno_dev)) {
		fwfile = A220_PFP_470_FW;
	} else if (adreno_is_a225(adreno_dev)) {
		fwfile = A225_PFP_FW;
	} else if (adreno_is_a20x(adreno_dev)) {
		fwfile = A200_PFP_FW;
	} else {
		KGSL_DRV_ERR(device, "Could not load PFP firmware\n");
		return -EINVAL;
	}

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		unsigned int *ptr;

		ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		adreno_dev->pfp_fw[0]);

	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
				adreno_dev->pfp_fw[i]);
err:
	return ret;
}

int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	/* cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	uint cmds_gpu;

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			 + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
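	/*
	 * Worked example (illustrative only; the actual KGSL_RB_SIZE is set
	 * elsewhere in the driver): a 32 KB ringbuffer gives sizedwords =
	 * 8192, i.e. 4096 quadwords, so rb_bufsz = ilog2(4096) = 12.
	 */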

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	 */
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			rb->memptrs_desc.gpuaddr +
			GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR,
			device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */

	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

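	/*
	 * Submit the CP ME_INIT packet. The packet is 19 dwords in total:
	 * the PM4_HDR_ME_INIT header plus 18 payload dwords, which is why 19
	 * dwords are reserved below and the GPU address is computed as
	 * wptr - 19.
	 */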
	/* ME_INIT */
	cmds = adreno_ringbuffer_allocspace(rb, 19);
	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);

	GSL_RB_WRITE(cmds, cmds_gpu, PM4_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	/* Vertex and Pixel Shader Start Addresses in instructions
	 * (3 DWORDS per instruction) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
	/* Write Confirm Interval; the CP waits wait_interval * 16 clocks
	 * between polls */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	adreno_ringbuffer_submit(rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}

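/*
 * Halt the CP micro engine; submissions stop until
 * adreno_ringbuffer_start() clears the halt again.
 */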
int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	if (rb->flags & KGSL_FLAGS_STARTED) {
		/* ME_HALT */
		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

		rb->flags &= ~KGSL_FLAGS_STARTED;
	}

	return 0;
}

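/*
 * One-time ringbuffer setup: allocate the command buffer and the small
 * memptrs area used for the GPU read pointer writeback and WPTR polling.
 * The hardware registers are programmed later, in adreno_ringbuffer_start().
 */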
int adreno_ringbuffer_init(struct kgsl_device *device)
{
	int status;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	rb->device = device;
	/*
	 * It is silly to convert this to words and then back to bytes
	 * immediately below, but most of the rest of the code deals
	 * in words, so we might as well only do the math once
	 */
	rb->sizedwords = KGSL_RB_SIZE >> 2;

	/* allocate memory for ringbuffer */
	status = kgsl_allocate_contiguous(&rb->buffer_desc,
		(rb->sizedwords << 2));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* allocate memory for polling and timestamps */
	/* This really only needs 4 byte alignment, but it must sit on a
	 * page boundary so it can be mapped through the MMU */
	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
		sizeof(struct kgsl_rbmemptrs));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* overlay structure on memptrs memory */
	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

	return 0;
}

int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

	kgsl_sharedmem_free(&rb->buffer_desc);
	kgsl_sharedmem_free(&rb->memptrs_desc);

	kfree(adreno_dev->pfp_fw);
	kfree(adreno_dev->pm4_fw);

	adreno_dev->pfp_fw = NULL;
	adreno_dev->pm4_fw = NULL;

	memset(rb, 0, sizeof(struct adreno_ringbuffer));

	return 0;
}

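/*
 * Copy a block of command dwords into the ringbuffer and submit it. In
 * addition to the caller's commands this writes an optional kernel command
 * identifier, an optional protected-mode disable/re-enable pair, the
 * start-of-pipeline and end-of-pipeline timestamps, and an optional
 * conditional interrupt, then returns the timestamp assigned to the
 * submission.
 */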
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
				unsigned int flags, unsigned int *cmds,
				int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
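	/* Space accounting (derived from the packets written below): the
	 * base of 6 dwords covers the two timestamp writes, the protected
	 * mode toggle adds 4 dwords, the conditional-interrupt block adds 7,
	 * and the kernel command identifier adds 2.
	 */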
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;

	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		+ sizeof(uint)*(rb->wptr-total_sizedwords);

	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
	}
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			pm4_type3_packet(PM4_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	adreno_ringbuffer_submit(rb);

	/* return timestamp of issued commands */
	return timestamp;
}

void
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
			    unsigned int flags,
			    unsigned int *cmds,
			    int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	if (device->state & KGSL_STATE_HUNG)
		return;
	adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
}

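/*
 * Userspace command submission: wrap each indirect buffer descriptor in a
 * three dword PM4_HDR_INDIRECT_BUFFER_PFD packet (header, GPU address, size
 * in dwords), apply any pending pagetable state change and context switch,
 * then add the packets to the ringbuffer and report the resulting timestamp
 * to the caller.
 */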
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
				struct kgsl_context *context,
				struct kgsl_ibdesc *ibdesc,
				unsigned int numibs,
				uint32_t *timestamp,
				unsigned int flags)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *link;
	unsigned int *cmds;
	unsigned int i;
	struct adreno_context *drawctxt = context->devctxt;

	if (device->state & KGSL_STATE_HUNG)
		return -EBUSY;
	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
	      context == NULL)
		return -EINVAL;

	BUG_ON(ibdesc == 0);
	BUG_ON(numibs == 0);

	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
			" will not accept commands for this context\n",
			drawctxt);
		return -EDEADLK;
	}
	link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL);
	cmds = link;
	if (!link) {
		KGSL_MEM_ERR(device, "Failed to allocate memory for command"
			" submission, size %x\n", numibs * 3);
		return -ENOMEM;
	}
	for (i = 0; i < numibs; i++) {
		(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);

		*cmds++ = PM4_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[i].gpuaddr;
		*cmds++ = ibdesc[i].sizedwords;
	}

	kgsl_setstate(device,
		kgsl_pt_get_flags(device->mmu.hwpagetable,
			device->id));

	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
					KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
					&link[0], (cmds - link));

	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
		context->id, (unsigned int)ibdesc, numibs, *timestamp);

	kfree(link);

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
	/*
	 * insert wait for idle after every IB1
	 * this is conservative but works reliably and is ok
	 * even for performance simulations
	 */
	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif

	return 0;
}

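/*
 * After a GPU hang, walk the ringbuffer backwards from the current read
 * pointer to find the end of the last successfully retired command, then
 * scan forward and copy out only the commands that belong to contexts other
 * than the one that hung. The extracted dwords can be replayed with
 * adreno_ringbuffer_restore() as part of recovery.
 */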
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
				unsigned int *temp_rb_buffer,
				int *rb_size)
{
	struct kgsl_device *device = rb->device;
	unsigned int rb_rptr;
	unsigned int retired_timestamp;
	unsigned int temp_idx = 0;
	unsigned int value;
	unsigned int val1;
	unsigned int val2;
	unsigned int val3;
	unsigned int copy_rb_contents = 0;
	unsigned int cur_context;
	unsigned int j;

	GSL_RB_GET_READPTR(rb, &rb->rptr);

	retired_timestamp = device->ftbl->readtimestamp(device,
		KGSL_TIMESTAMP_RETIRED);
	KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
		retired_timestamp);
	/*
	 * We need to go back in history by 4 dwords from the current location
	 * of read pointer as 4 dwords are read to match the end of a command.
	 * Also, take care of wrap around when moving back
	 */
	if (rb->rptr >= 4)
		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
	else
		rb_rptr = rb->buffer_desc.size -
			((4 - rb->rptr) * sizeof(unsigned int));
	/* Read the rb contents going backwards to locate end of last
	 * successfully executed command */
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		if (value == retired_timestamp) {
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
			/* match the pattern found at the end of a command */
			if ((val1 == 2 &&
				val2 == pm4_type3_packet(PM4_INTERRUPT, 1)
				&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
				(val1 == pm4_type3_packet(PM4_EVENT_WRITE, 3)
				&& val2 == CACHE_FLUSH_TS &&
				val3 == (rb->device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
				KGSL_DRV_ERR(device,
					"Found end of last executed "
					"command at offset: %x\n",
					rb_rptr / sizeof(unsigned int));
				break;
			} else {
				if (rb_rptr < (3 * sizeof(unsigned int)))
					rb_rptr = rb->buffer_desc.size -
						(3 * sizeof(unsigned int))
							+ rb_rptr;
				else
					rb_rptr -= (3 * sizeof(unsigned int));
			}
		}

		if (rb_rptr == 0)
			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
		else
			rb_rptr -= sizeof(unsigned int);
	}

	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because last"
			" successful timestamp is overwritten\n");
		return -EINVAL;
	}
	/* rb_rptr is now pointing to the first dword of the command following
	 * the last successfully executed command sequence. Assumption is that
	 * GPU is hung in the command sequence pointed by rb_rptr */
	/* make sure the GPU is not hung in a command submitted by kgsl
	 * itself */
	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
		adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size));
	if (val1 == pm4_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because "
			"of hang in kgsl command\n");
		return -EINVAL;
	}

	/* current_context is the context that is presently active in the
	 * GPU, i.e the context in which the hang is caused */
	kgsl_sharedmem_readl(&device->memstore, &cur_context,
		KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size);
		/* check for context switch indicator */
		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(value != pm4_type3_packet(PM4_MEM_WRITE, 2));
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(val1 != (device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON((copy_rb_contents == 0) &&
				(value == cur_context));
			/*
			 * If we were copying the commands and got to this point
			 * then we need to remove the 3 commands that appear
			 * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
			 */
			if (temp_idx)
				temp_idx -= 3;
			/* if context switches to a context that did not cause
			 * hang then start saving the rb contents as those
			 * commands can be executed */
			if (value != cur_context) {
				copy_rb_contents = 1;
				temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CMD_IDENTIFIER;
				temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CONTEXT_TO_MEM_IDENTIFIER;
				temp_rb_buffer[temp_idx++] =
						pm4_type3_packet(PM4_MEM_WRITE, 2);
				temp_rb_buffer[temp_idx++] = val1;
				temp_rb_buffer[temp_idx++] = value;
			} else {
				copy_rb_contents = 0;
			}
		} else if (copy_rb_contents)
			temp_rb_buffer[temp_idx++] = value;
	}

	*rb_size = temp_idx;
	KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
	for (temp_idx = 0; temp_idx < *rb_size;) {
		char str[80];
		int idx = 0;
		if ((temp_idx + 8) <= *rb_size)
			j = 8;
		else
			j = *rb_size - temp_idx;
		for (; j != 0; j--)
			idx += scnprintf(str + idx, 80 - idx,
				"%8.8X ", temp_rb_buffer[temp_idx++]);
		printk(KERN_ALERT "%s", str);
	}
	return 0;
}

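/*
 * Write the commands extracted by adreno_ringbuffer_extract() back into the
 * ringbuffer and submit them so the surviving contexts' work is replayed.
 */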
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
			int num_rb_contents)
{
	int i;
	unsigned int *ringcmds;
	unsigned int rcmd_gpu;

	if (!num_rb_contents)
		return;

	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
		rb->rptr = 0;
		BUG_ON(num_rb_contents > rb->buffer_desc.size);
	}
	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
	for (i = 0; i < num_rb_contents; i++)
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
	rb->wptr += num_rb_contents;
	adreno_ringbuffer_submit(rb);
}
929}