/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"

#include "a200_reg.h"

#define GSL_RB_NOP_SIZEDWORDS	2

/*
 * Protected mode error checking covers registers below address 0x800.
 * Note: if the CP_INTERRUPT packet is used, the check boundary must
 * change to registers below address 0x7C8.
 */
#define GSL_RB_PROTECTED_MODE_CONTROL	0x200001F2

/* Firmware file names
 * The legacy file names must remain, but the macro names have been
 * updated to match the current kgsl naming model:
 * a200 is yamato
 * a220 is leia
 */
#define A200_PFP_FW "yamato_pfp.fw"
#define A200_PM4_FW "yamato_pm4.fw"
#define A220_PFP_470_FW "leia_pfp_470.fw"
#define A220_PM4_470_FW "leia_pm4_470.fw"
#define A225_PFP_FW "a225_pfp.fw"
#define A225_PM4_FW "a225_pm4.fw"

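/*
 * adreno_ringbuffer_submit - notify the CP of new commands by writing
 * the updated write pointer to REG_CP_RB_WPTR. Must only be called
 * after the commands themselves have been written to the ringbuffer.
 */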
static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	/* Synchronize memory before informing the hardware of the
	 * new commands.
	 */
	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

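/*
 * adreno_ringbuffer_waitspace - stall until the GPU has consumed
 * enough of the ringbuffer to leave room for numcmds dwords. When the
 * write pointer is ahead of the read pointer, the tail of the buffer
 * is padded with a NOP packet and the write pointer wraps to zero.
 */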
static int
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
			    int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of ringbuffer. We do not
		 * want the rptr and wptr to become equal when
		 * the ringbuffer is not empty */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		adreno_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	/* wait for space in ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

	} while ((freecmds != 0) && (freecmds <= numcmds));

	return 0;
}

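/*
 * adreno_ringbuffer_allocspace - reserve numcmds dwords in the
 * ringbuffer and return a host pointer to the reserved region, waiting
 * for the GPU to free space if necessary. Space for the wrap-around
 * NOP packet is always kept in reserve.
 */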
static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
						  unsigned int numcmds)
{
	unsigned int *ptr = NULL;
	int status = 0;

	BUG_ON(numcmds >= rb->sizedwords);

	GSL_RB_GET_READPTR(rb, &rb->rptr);
	/* check for available space */
	if (rb->wptr >= rb->rptr) {
		/* wptr ahead or equal to rptr */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	} else {
		/* wptr behind rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			status = adreno_ringbuffer_waitspace(rb, numcmds, 0);
		/* check for remaining space */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
	}

	if (status == 0) {
		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		rb->wptr += numcmds;
	}

	return ptr;
}

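/*
 * _load_firmware - fetch a firmware image with request_firmware() and
 * copy it into a kmalloc'd buffer that outlives the firmware handle.
 * On success *data and *len describe the copy; the caller owns the
 * buffer and must kfree() it.
 */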
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			  void **data, int *len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
			     fwfile, ret);
		return ret;
	}

	*data = kmalloc(fw->size, GFP_KERNEL);

	if (*data) {
		memcpy(*data, fw->data, fw->size);
		*len = fw->size;
	} else {
		KGSL_MEM_ERR(device, "kmalloc(%zu) failed\n", fw->size);
	}

	release_firmware(fw);
	return (*data != NULL) ? 0 : -ENOMEM;
}

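/*
 * adreno_ringbuffer_load_pm4_ucode - load the PM4 (micro engine)
 * firmware into the CP. The image is cached in adreno_dev on first
 * load; word 0 holds the ucode version and the remaining words are
 * streamed into the ME RAM one register write at a time.
 */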
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pm4_fwfile,
				     &ptr, &len);

		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			/* don't leak the buffer allocated by _load_firmware */
			kfree(ptr);
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		      adreno_dev->pm4_fw[0]);

	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
				adreno_dev->pm4_fw[i]);
err:
	return ret;
}

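/*
 * adreno_ringbuffer_load_pfp_ucode - load the PFP (prefetch parser)
 * firmware into the CP. As with the PM4 image, the firmware is cached
 * on first load; word 0 is the version and the rest is streamed to
 * the PFP ucode registers.
 */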
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pfp_fwfile,
				     &ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			/* don't leak the buffer allocated by _load_firmware */
			kfree(ptr);
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		      adreno_dev->pfp_fw[0]);

	adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
				adreno_dev->pfp_fw[i]);
err:
	return ret;
}

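/*
 * adreno_ringbuffer_start - bring up the CP: program the ringbuffer
 * registers, load the PM4 and PFP microcode, release the micro engine
 * from halt and submit the ME_INIT packet. init_ram controls whether
 * the timestamp state is reset as well.
 */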
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;
	uint cmds_gpu;

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram) {
		rb->timestamp = 0;
		GSL_RB_INIT_TIMESTAMP(rb);
	}

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			 + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

	/* setup WPTR delay */
	adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
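	/*
	 * For example, with a hypothetical KGSL_RB_SIZE of 32KB the
	 * buffer holds 8192 dwords (4096 quadwords), so rb_bufsz would
	 * be ilog2(4096) = 12; the actual value depends on the
	 * KGSL_RB_SIZE configured in adreno_ringbuffer.h.
	 */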

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	 */
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			rb->memptrs_desc.gpuaddr +
			GSL_RB_MEMPTRS_RPTR_OFFSET);

	/* explicitly clear all cp interrupts */
	adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR,
			device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */
	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME_INIT */
	cmds = adreno_ringbuffer_allocspace(rb, 19);
	cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);

	GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds, cmds_gpu,
		     SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));

	/* Vertex and Pixel Shader Start Addresses in instructions
	 * (3 DWORDS per instruction) */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
	/* Write Confirm Interval; the CP waits wait_interval * 16 clocks
	 * between polls */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);

	adreno_ringbuffer_submit(rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}

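/*
 * adreno_ringbuffer_stop - halt the micro engine by setting the
 * ME_HALT bit and mark the ringbuffer as stopped.
 */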
int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	if (rb->flags & KGSL_FLAGS_STARTED) {
		/* ME_HALT */
		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

		rb->flags &= ~KGSL_FLAGS_STARTED;
	}

	return 0;
}

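/*
 * adreno_ringbuffer_init - allocate the ringbuffer itself plus the
 * small shared block used for read-pointer writeback and write-pointer
 * polling, and wire both into the adreno_ringbuffer structure.
 */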
int adreno_ringbuffer_init(struct kgsl_device *device)
{
	int status;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	rb->device = device;
	/*
	 * It is silly to convert this to words and then back to bytes
	 * immediately below, but most of the rest of the code deals
	 * in words, so we might as well only do the math once
	 */
	rb->sizedwords = KGSL_RB_SIZE >> 2;

	/* allocate memory for ringbuffer */
	status = kgsl_allocate_contiguous(&rb->buffer_desc,
					  (rb->sizedwords << 2));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* allocate memory for polling and timestamps */
	/* This really can be at 4 byte alignment boundary but for using MMU
	 * we need to make it at page boundary */
	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
					  sizeof(struct kgsl_rbmemptrs));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* overlay structure on memptrs memory */
	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

	return 0;
}

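/*
 * adreno_ringbuffer_close - free the ringbuffer and memptrs
 * allocations along with the cached microcode images, then clear the
 * ringbuffer structure.
 */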
int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

	kgsl_sharedmem_free(&rb->buffer_desc);
	kgsl_sharedmem_free(&rb->memptrs_desc);

	kfree(adreno_dev->pfp_fw);
	kfree(adreno_dev->pm4_fw);

	adreno_dev->pfp_fw = NULL;
	adreno_dev->pm4_fw = NULL;

	memset(rb, 0, sizeof(struct adreno_ringbuffer));

	return 0;
}

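/*
 * adreno_ringbuffer_addcmds - wrap sizedwords dwords of commands with
 * the packets the kernel needs around every submission (command
 * identifier, optional protected-mode toggles, start- and
 * end-of-pipeline timestamp writes, and the conditional interrupt)
 * and submit the whole batch. Returns the timestamp assigned to it.
 */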
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
			  unsigned int flags, unsigned int *cmds,
			  int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords + 6;
	unsigned int i;
	unsigned int rcmd_gpu;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;

	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		   + sizeof(uint)*(rb->wptr-total_sizedwords);

	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
	}
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	rb->timestamp++;
	timestamp = rb->timestamp;

	/* start-of-pipeline and end-of-pipeline timestamps */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
	GSL_RB_WRITE(ringcmds, rcmd_gpu,
		     (rb->device->memstore.gpuaddr +
		      KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);

	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			     cp_type3_packet(CP_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	adreno_ringbuffer_submit(rb);

	/* return timestamp of issued commands */
	return timestamp;
}

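/*
 * adreno_ringbuffer_issuecmds - submit kernel-generated commands to
 * the ringbuffer. Submissions are silently dropped while the device
 * is marked hung.
 */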
void
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
			    unsigned int flags,
			    unsigned int *cmds,
			    int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	if (device->state & KGSL_STATE_HUNG)
		return;
	adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
}

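/*
 * adreno_ringbuffer_issueibcmds - submit user indirect buffers: each
 * ibdesc entry becomes an indirect buffer packet
 * (CP_HDR_INDIRECT_BUFFER_PFD), the MMU state and draw context are
 * switched as needed, and the batch is queued through
 * adreno_ringbuffer_addcmds().
 */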
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
			      struct kgsl_context *context,
			      struct kgsl_ibdesc *ibdesc,
			      unsigned int numibs,
			      uint32_t *timestamp,
			      unsigned int flags)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *link;
	unsigned int *cmds;
	unsigned int i;
	struct adreno_context *drawctxt = context->devctxt;

	if (device->state & KGSL_STATE_HUNG)
		return -EBUSY;
	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
	    context == NULL)
		return -EINVAL;

	BUG_ON(ibdesc == 0);
	BUG_ON(numibs == 0);

	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
			       " will not accept commands for this context\n",
			       drawctxt);
		return -EDEADLK;
	}
	link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL);
	cmds = link;
	if (!link) {
		KGSL_MEM_ERR(device, "Failed to allocate memory for command"
			     " submission, size %x\n", numibs * 3);
		return -ENOMEM;
	}
	for (i = 0; i < numibs; i++) {
		(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
			ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);

		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[i].gpuaddr;
		*cmds++ = ibdesc[i].sizedwords;
	}

	kgsl_setstate(device,
		      kgsl_pt_get_flags(device->mmu.hwpagetable,
					device->id));

	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
					       KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
					       &link[0], (cmds - link));

	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
		      context->id, (unsigned int)ibdesc, numibs, *timestamp);

	kfree(link);

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
	/*
	 * insert wait for idle after every IB1
	 * this is conservative but works reliably and is ok
	 * even for performance simulations
	 */
	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif

	return 0;
}

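/*
 * adreno_ringbuffer_extract - after a GPU hang, walk the ringbuffer
 * backwards from the read pointer to find the end of the last retired
 * command, then copy out only the commands belonging to contexts
 * other than the hung one so they can be replayed during recovery.
 */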
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
			      unsigned int *temp_rb_buffer,
			      int *rb_size)
{
	struct kgsl_device *device = rb->device;
	unsigned int rb_rptr;
	unsigned int retired_timestamp;
	unsigned int temp_idx = 0;
	unsigned int value;
	unsigned int val1;
	unsigned int val2;
	unsigned int val3;
	unsigned int copy_rb_contents = 0;
	unsigned int cur_context;
	unsigned int j;

	GSL_RB_GET_READPTR(rb, &rb->rptr);

	retired_timestamp = device->ftbl->readtimestamp(device,
							KGSL_TIMESTAMP_RETIRED);
	KGSL_DRV_ERR(device, "GPU successfully executed until ts: %x\n",
		     retired_timestamp);
	/*
	 * We need to go back in history by 4 dwords from the current location
	 * of read pointer as 4 dwords are read to match the end of a command.
	 * Also, take care of wrap around when moving back
	 */
	if (rb->rptr >= 4)
		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
	else
		rb_rptr = rb->buffer_desc.size -
			((4 - rb->rptr) * sizeof(unsigned int));
	/* Read the rb contents going backwards to locate end of last
	 * successfully executed command */
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		if (value == retired_timestamp) {
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
			/* match the pattern found at the end of a command */
			if ((val1 == 2 &&
			     val2 == cp_type3_packet(CP_INTERRUPT, 1)
			     && val3 == CP_INT_CNTL__RB_INT_MASK) ||
			    (val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
			     && val2 == CACHE_FLUSH_TS &&
			     val3 == (rb->device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
				KGSL_DRV_ERR(device,
					     "Found end of last executed "
					     "command at offset: %x\n",
					     rb_rptr / sizeof(unsigned int));
				break;
			} else {
				if (rb_rptr < (3 * sizeof(unsigned int)))
					rb_rptr = rb->buffer_desc.size -
						(3 * sizeof(unsigned int))
						+ rb_rptr;
				else
					rb_rptr -= (3 * sizeof(unsigned int));
			}
		}

		if (rb_rptr == 0)
			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
		else
			rb_rptr -= sizeof(unsigned int);
	}

	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
		KGSL_DRV_ERR(device,
			     "GPU recovery from hang not possible because last"
			     " successful timestamp is overwritten\n");
		return -EINVAL;
	}
	/* rb_rptr is now pointing to the first dword of the command following
	 * the last successfully executed command sequence. Assumption is that
	 * GPU is hung in the command sequence pointed by rb_rptr */
	/* make sure the GPU is not hung in a command submitted by kgsl
	 * itself */
	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
			     adreno_ringbuffer_inc_wrapped(rb_rptr,
						     rb->buffer_desc.size));
	if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
		KGSL_DRV_ERR(device,
			     "GPU recovery from hang not possible because "
			     "of hang in kgsl command\n");
		return -EINVAL;
	}

	/* current_context is the context that is presently active in the
	 * GPU, i.e the context in which the hang is caused */
	kgsl_sharedmem_readl(&device->memstore, &cur_context,
			     KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
		/* check for context switch indicator */
		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON(val1 != (device->memstore.gpuaddr +
				KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			BUG_ON((copy_rb_contents == 0) &&
			       (value == cur_context));
			/*
			 * If we were copying the commands and got to this point
			 * then we need to remove the 3 commands that appear
			 * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
			 */
			if (temp_idx)
				temp_idx -= 3;
			/* if context switches to a context that did not cause
			 * hang then start saving the rb contents as those
			 * commands can be executed */
			if (value != cur_context) {
				copy_rb_contents = 1;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
					KGSL_CMD_IDENTIFIER;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
					KGSL_CONTEXT_TO_MEM_IDENTIFIER;
				temp_rb_buffer[temp_idx++] =
					cp_type3_packet(CP_MEM_WRITE, 2);
				temp_rb_buffer[temp_idx++] = val1;
				temp_rb_buffer[temp_idx++] = value;
			} else {
				copy_rb_contents = 0;
			}
		} else if (copy_rb_contents)
			temp_rb_buffer[temp_idx++] = value;
	}

	*rb_size = temp_idx;
	KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
	for (temp_idx = 0; temp_idx < *rb_size;) {
		char str[80];
		int idx = 0;
		if ((temp_idx + 8) <= *rb_size)
			j = 8;
		else
			j = *rb_size - temp_idx;
		for (; j != 0; j--)
			idx += scnprintf(str + idx, 80 - idx,
					 "%8.8X ", temp_rb_buffer[temp_idx++]);
		printk(KERN_ALERT "%s", str);
	}
	return 0;
}

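/*
 * adreno_ringbuffer_restore - write the commands extracted by
 * adreno_ringbuffer_extract() back into the ringbuffer and resubmit
 * them, first resetting the hardware read pointer when the contents
 * would not fit between wptr and the end of the buffer.
 */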
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
			  int num_rb_contents)
{
	int i;
	unsigned int *ringcmds;
	unsigned int rcmd_gpu;

	if (!num_rb_contents)
		return;

	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
		rb->rptr = 0;
		BUG_ON(num_rb_contents > rb->buffer_desc.size);
	}
	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
	for (i = 0; i < num_rb_contents; i++)
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
	rb->wptr += num_rb_contents;
	adreno_ringbuffer_submit(rb);
}