/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "adreno_debugfs.h"

#include "a2xx_reg.h"
#include "a3xx_reg.h"

#define GSL_RB_NOP_SIZEDWORDS 2

void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	/* Let the pwrscale policy know that new commands have
	 * been submitted. */
	kgsl_pwrscale_busy(rb->device);

	/*
	 * Synchronize memory before informing the hardware of the
	 * new commands.
	 */
	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

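/*
 * adreno_ringbuffer_waitspace - stall until at least numcmds dwords are
 * free. If the write pointer would run past the end of the buffer, pad
 * the tail with a NOP packet and wrap to the start first.
 */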
static void
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
			    int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of ringbuffer. We do not
		 * want the rptr and wptr to become equal when
		 * the ringbuffer is not empty */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		adreno_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	/* wait for space in ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

	} while ((freecmds != 0) && (freecmds <= numcmds));
}

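/*
 * adreno_ringbuffer_allocspace - reserve numcmds dwords and return a
 * host pointer to the reserved region. Callers fill the region and then
 * call adreno_ringbuffer_submit(). A minimal usage sketch, mirroring
 * how adreno_ringbuffer_addcmds() below uses it (values illustrative):
 *
 *	unsigned int *cmds = adreno_ringbuffer_allocspace(rb, 2);
 *	unsigned int cmds_gpu = rb->buffer_desc.gpuaddr +
 *				sizeof(uint) * (rb->wptr - 2);
 *	GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(1));
 *	GSL_RB_WRITE(cmds, cmds_gpu, KGSL_CMD_IDENTIFIER);
 *	adreno_ringbuffer_submit(rb);
 */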
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
					   unsigned int numcmds)
{
	unsigned int *ptr = NULL;

	BUG_ON(numcmds >= rb->sizedwords);

	GSL_RB_GET_READPTR(rb, &rb->rptr);
	/* check for available space */
	if (rb->wptr >= rb->rptr) {
		/* wptr ahead or equal to rptr */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			adreno_ringbuffer_waitspace(rb, numcmds, 1);
	} else {
		/* wptr behind rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			adreno_ringbuffer_waitspace(rb, numcmds, 0);
		/* check for remaining space */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			adreno_ringbuffer_waitspace(rb, numcmds, 1);
	}

	ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rb->wptr += numcmds;

	return ptr;
}

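/*
 * _load_firmware - fetch a microcode image with request_firmware() and
 * copy it into a kmalloc'd buffer that outlives the firmware handle.
 * Returns 0 on success with *data and *len filled in, else a negative
 * errno.
 */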
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			  void **data, int *len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
			     fwfile, ret);
		return ret;
	}

	*data = kmalloc(fw->size, GFP_KERNEL);

	if (*data) {
		memcpy(*data, fw->data, fw->size);
		*len = fw->size;
	} else
		KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);

	release_firmware(fw);
	return (*data != NULL) ? 0 : -ENOMEM;
}

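/*
 * adreno_ringbuffer_load_pm4_ucode - load (and cache) the PM4 microcode
 * image, then write it word by word into the CP ME RAM. Word 0 of the
 * image is the version, so programming starts at index 1.
 */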
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pm4_fwfile,
			&ptr, &len);

		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		adreno_dev->pm4_fw[0]);

	adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
			adreno_dev->pm4_fw[i]);
err:
	return ret;
}

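/*
 * adreno_ringbuffer_load_pfp_ucode - same flow as the PM4 loader, but
 * for the prefetch parser; the ucode register offsets are per-GPU and
 * come from the gpudev table.
 */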
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i, ret = 0;

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pfp_fwfile,
			&ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		adreno_dev->pfp_fw[0]);

	adreno_regwrite(device, adreno_dev->gpudev->reg_cp_pfp_ucode_addr, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device,
			adreno_dev->gpudev->reg_cp_pfp_ucode_data,
			adreno_dev->pfp_fw[i]);
err:
	return ret;
}

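/*
 * adreno_ringbuffer_start - program the CP ringbuffer registers, load
 * the PM4 and PFP microcode, take the micro engine out of halt and run
 * the GPU-specific ME init sequence.
 */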
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	/*cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int rb_cntl;
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram)
		rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));

	if (adreno_is_a2xx(adreno_dev)) {
		adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			+ GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

		/* setup WPTR delay */
		adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
			0 /*0x70000010 */);
	}

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
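	/*
	 * Worked example, assuming a 32 KiB ring (the exact KGSL_RB_SIZE
	 * value is configuration-dependent): sizedwords = 8192, so the
	 * quadword count is 4096 and rb_bufsz = ilog2(4096) = 12.
	 */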

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	 */
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

	if (adreno_is_a2xx(adreno_dev)) {
		/* WPTR polling */
		cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
	}

	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			rb->memptrs_desc.gpuaddr +
			GSL_RB_MEMPTRS_RPTR_OFFSET);

	if (adreno_is_a3xx(adreno_dev)) {
		/* enable access protection to privileged registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);

		/* RBBM registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);

		/* CP registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);

		/* RB registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);

		/* VBIF registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
	}

	if (adreno_is_a2xx(adreno_dev)) {
		/* explicitly clear all cp interrupts */
		adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
	}

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */

	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
	if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
		adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000E0602);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME init is GPU specific, so jump into the sub-function */
	adreno_dev->gpudev->rb_init(adreno_dev, rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}

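/*
 * adreno_ringbuffer_stop - halt the micro engine so no further commands
 * are fetched from the ringbuffer.
 */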
void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	if (rb->flags & KGSL_FLAGS_STARTED) {
		/* ME_HALT */
		adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
		rb->flags &= ~KGSL_FLAGS_STARTED;
	}
}

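/*
 * adreno_ringbuffer_init - allocate the ringbuffer and the shared
 * memptrs block (read pointer and timestamp writeback) at device init.
 */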
int adreno_ringbuffer_init(struct kgsl_device *device)
{
	int status;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	rb->device = device;
	/*
	 * It is silly to convert this to words and then back to bytes
	 * immediately below, but most of the rest of the code deals
	 * in words, so we might as well only do the math once
	 */
	rb->sizedwords = KGSL_RB_SIZE >> 2;

	/* allocate memory for ringbuffer */
	status = kgsl_allocate_contiguous(&rb->buffer_desc,
		(rb->sizedwords << 2));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* allocate memory for polling and timestamps */
	/* This really can be at a 4 byte alignment boundary, but when using
	 * the MMU we need to place it at a page boundary */
	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
		sizeof(struct kgsl_rbmemptrs));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* overlay structure on memptrs memory */
	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

	return 0;
}

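/*
 * adreno_ringbuffer_close - free the ringbuffer allocations and cached
 * microcode images, then clear the structure for a clean re-init.
 */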
void adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

	kgsl_sharedmem_free(&rb->buffer_desc);
	kgsl_sharedmem_free(&rb->memptrs_desc);

	kfree(adreno_dev->pfp_fw);
	kfree(adreno_dev->pm4_fw);

	adreno_dev->pfp_fw = NULL;
	adreno_dev->pm4_fw = NULL;

	memset(rb, 0, sizeof(struct adreno_ringbuffer));
}

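/*
 * adreno_ringbuffer_addcmds - core submission path. Reserves ringbuffer
 * space for the caller's commands plus the bookkeeping packets (command
 * identifiers, protected-mode toggles, sop/eop timestamp writes and the
 * conditional interrupt), writes everything and submits. Returns the
 * timestamp associated with this submission.
 */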
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
				struct adreno_context *context,
				unsigned int flags, unsigned int *cmds,
				int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
	unsigned int *ringcmds;
	unsigned int timestamp;
	unsigned int total_sizedwords = sizedwords;
	unsigned int i;
	unsigned int rcmd_gpu;
	unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
	unsigned int gpuaddr = rb->device->memstore.gpuaddr;

	if (context != NULL) {
		/*
		 * if the context was not created with per context timestamp
		 * support, we must use the global timestamp since issueibcmds
		 * will be returning that one.
		 */
		if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
			context_id = context->id;
	}

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += context ? 7 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;

	if (adreno_is_a3xx(adreno_dev))
		total_sizedwords += 7;

	total_sizedwords += 2; /* scratchpad ts for recovery */
	if (context) {
		total_sizedwords += 3; /* sop timestamp */
		total_sizedwords += 4; /* eop timestamp */
		total_sizedwords += 3; /* global timestamp without cache
					* flush for non-zero context */
	} else {
		total_sizedwords += 4; /* global timestamp for recovery */
	}

	ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
	rcmd_gpu = rb->buffer_desc.gpuaddr
		+ sizeof(uint)*(rb->wptr-total_sizedwords);

	if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
	}
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	/* always increment the global timestamp. once. */
	rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;

	if (context && !(flags & KGSL_CMD_FLAGS_DUMMY_INTR_CMD)) {
		if (context_id == KGSL_MEMSTORE_GLOBAL)
			rb->timestamp[context->id] =
				rb->timestamp[KGSL_MEMSTORE_GLOBAL];
		else
			rb->timestamp[context_id]++;
	}
	timestamp = rb->timestamp[context_id];

	/* scratchpad ts for recovery */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);

	if (adreno_is_a3xx(adreno_dev)) {
		/*
		 * Flush HLSQ lazy updates to make sure there are no
		 * resources pending for indirect loads after the timestamp
		 */

		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_EVENT_WRITE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x07); /* HLSQ_FLUSH */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
	}

	if (context) {
		/* start-of-pipeline timestamp */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_MEM_WRITE, 2));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);

		/* end-of-pipeline timestamp */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_EVENT_WRITE, 3));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);

		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_MEM_WRITE, 2));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				eoptimestamp)));
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
	} else {
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_EVENT_WRITE, 3));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[context_id]);
	}

	if (context) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(
				context_id, ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(
				context_id, ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	if (adreno_is_a3xx(adreno_dev)) {
		/* Dummy set-constant to trigger context rollover */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_SET_CONSTANT, 2));
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			(0x4<<16)|(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	adreno_ringbuffer_submit(rb);

	return timestamp;
}

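/*
 * adreno_ringbuffer_issuecmds_intr - submit kernel commands on behalf
 * of a context purely to generate an interrupt; bails out quietly if
 * the context or device state makes submission pointless.
 */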
void
adreno_ringbuffer_issuecmds_intr(struct kgsl_device *device,
						struct kgsl_context *k_ctxt,
						unsigned int *cmds,
						int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	struct adreno_context *a_ctxt = NULL;

	if (!k_ctxt)
		return;

	a_ctxt = k_ctxt->devctxt;

	if (k_ctxt->id == KGSL_CONTEXT_INVALID ||
		a_ctxt == NULL ||
		device->state & KGSL_STATE_HUNG)
		return;

	adreno_ringbuffer_addcmds(rb, a_ctxt, KGSL_CMD_FLAGS_DUMMY_INTR_CMD,
					cmds, sizedwords);
}

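/*
 * adreno_ringbuffer_issuecmds - submit kernel-generated commands with
 * no context, so they are tracked against the global timestamp.
 */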
void
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
						unsigned int flags,
						unsigned int *cmds,
						int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	if (device->state & KGSL_STATE_HUNG)
		return;
	adreno_ringbuffer_addcmds(rb, NULL, flags, cmds, sizedwords);
}

static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
			int sizedwords);

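/*
 * _handle_type3 - validate a type-3 (opcode) packet from a userspace
 * IB: recurse into indirect buffers, accept the known-safe opcode list,
 * and reject anything that should never come from userspace.
 */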
static bool
_handle_type3(struct kgsl_device_private *dev_priv, uint *hostaddr)
{
	unsigned int opcode = cp_type3_opcode(*hostaddr);
	switch (opcode) {
	case CP_INDIRECT_BUFFER_PFD:
	case CP_INDIRECT_BUFFER_PFE:
	case CP_COND_INDIRECT_BUFFER_PFE:
	case CP_COND_INDIRECT_BUFFER_PFD:
		return _parse_ibs(dev_priv, hostaddr[1], hostaddr[2]);
	case CP_NOP:
	case CP_WAIT_FOR_IDLE:
	case CP_WAIT_REG_MEM:
	case CP_WAIT_REG_EQ:
	case CP_WAT_REG_GTE:
	case CP_WAIT_UNTIL_READ:
	case CP_WAIT_IB_PFD_COMPLETE:
	case CP_REG_RMW:
	case CP_REG_TO_MEM:
	case CP_MEM_WRITE:
	case CP_MEM_WRITE_CNTR:
	case CP_COND_EXEC:
	case CP_COND_WRITE:
	case CP_EVENT_WRITE:
	case CP_EVENT_WRITE_SHD:
	case CP_EVENT_WRITE_CFL:
	case CP_EVENT_WRITE_ZPD:
	case CP_DRAW_INDX:
	case CP_DRAW_INDX_2:
	case CP_DRAW_INDX_BIN:
	case CP_DRAW_INDX_2_BIN:
	case CP_VIZ_QUERY:
	case CP_SET_STATE:
	case CP_SET_CONSTANT:
	case CP_IM_LOAD:
	case CP_IM_LOAD_IMMEDIATE:
	case CP_LOAD_CONSTANT_CONTEXT:
	case CP_INVALIDATE_STATE:
	case CP_SET_SHADER_BASES:
	case CP_SET_BIN_MASK:
	case CP_SET_BIN_SELECT:
	case CP_SET_BIN_BASE_OFFSET:
	case CP_SET_BIN_DATA:
	case CP_CONTEXT_UPDATE:
	case CP_INTERRUPT:
	case CP_IM_STORE:
	case CP_LOAD_STATE:
		break;
	/* these shouldn't come from userspace */
	case CP_ME_INIT:
	case CP_SET_PROTECTED_MODE:
	default:
		KGSL_CMD_ERR(dev_priv->device, "bad CP opcode %0x\n", opcode);
		return false;
		break;
	}

	return true;
}

static bool
_handle_type0(struct kgsl_device_private *dev_priv, uint *hostaddr)
{
	unsigned int reg = type0_pkt_offset(*hostaddr);
	unsigned int cnt = type0_pkt_size(*hostaddr);
	if (reg < 0x0192 || (reg + cnt) >= 0x8000) {
		KGSL_CMD_ERR(dev_priv->device, "bad type0 reg: 0x%0x cnt: %d\n",
			     reg, cnt);
		return false;
	}
	return true;
}

/*
 * Traverse IBs and dump them to test vector. Detect swap by inspecting
 * register writes, keeping note of the current state, and dump
 * framebuffer config to test vector
 */
static bool _parse_ibs(struct kgsl_device_private *dev_priv,
			uint gpuaddr, int sizedwords)
{
	static uint level; /* recursion level */
	bool ret = false;
	uint *hostaddr, *hoststart;
	int dwords_left = sizedwords; /* dwords left in the current command
					 buffer */
	struct kgsl_mem_entry *entry;

	spin_lock(&dev_priv->process_priv->mem_lock);
	entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
					   gpuaddr, sizedwords * sizeof(uint));
	spin_unlock(&dev_priv->process_priv->mem_lock);
	if (entry == NULL) {
		KGSL_CMD_ERR(dev_priv->device,
			     "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
		return false;
	}

	hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr);
	if (hostaddr == NULL) {
		KGSL_CMD_ERR(dev_priv->device,
			     "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
		return false;
	}

	hoststart = hostaddr;

	level++;

	KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
		gpuaddr, sizedwords, hostaddr);

	mb();
	while (dwords_left > 0) {
		bool cur_ret = true;
		int count = 0; /* dword count including packet header */

		switch (*hostaddr >> 30) {
		case 0x0: /* type-0 */
			count = (*hostaddr >> 16)+2;
			cur_ret = _handle_type0(dev_priv, hostaddr);
			break;
		case 0x1: /* type-1 */
			count = 2;
			break;
		case 0x3: /* type-3 */
			count = ((*hostaddr >> 16) & 0x3fff) + 2;
			cur_ret = _handle_type3(dev_priv, hostaddr);
			break;
		default:
			KGSL_CMD_ERR(dev_priv->device, "unexpected type: "
				"type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
				*hostaddr >> 30, *hostaddr, hostaddr,
				gpuaddr+4*(sizedwords-dwords_left));
			cur_ret = false;
			count = dwords_left;
			break;
		}

		if (!cur_ret) {
			KGSL_CMD_ERR(dev_priv->device,
				"bad sub-type: #:%d/%d, v:0x%08x"
				" @ 0x%p[gb:0x%08x], level:%d\n",
				sizedwords-dwords_left, sizedwords, *hostaddr,
				hostaddr, gpuaddr+4*(sizedwords-dwords_left),
				level);

			if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
				>= 2)
				print_hex_dump(KERN_ERR,
					level == 1 ? "IB1:" : "IB2:",
					DUMP_PREFIX_OFFSET, 32, 4, hoststart,
					sizedwords*4, 0);
			goto done;
		}

		/* jump to next packet */
		dwords_left -= count;
		hostaddr += count;
		if (dwords_left < 0) {
			KGSL_CMD_ERR(dev_priv->device,
				"bad count: c:%d, #:%d/%d, "
				"v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
				count, sizedwords-(dwords_left+count),
				sizedwords, *(hostaddr-count), hostaddr-count,
				gpuaddr+4*(sizedwords-(dwords_left+count)),
				level);
			if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
				>= 2)
				print_hex_dump(KERN_ERR,
					level == 1 ? "IB1:" : "IB2:",
					DUMP_PREFIX_OFFSET, 32, 4, hoststart,
					sizedwords*4, 0);
			goto done;
		}
	}

	ret = true;
done:
	if (!ret)
		KGSL_DRV_ERR(dev_priv->device,
			"parsing failed: gpuaddr:0x%08x, "
			"host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);

	level--;

	return ret;
}

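/*
 * adreno_ringbuffer_issueibcmds - userspace submission entry point.
 * Optionally validates each IB (ib_check_level), wraps the IB pointers
 * in INDIRECT_BUFFER_PFD packets, skips the preamble IB when no context
 * switch has occurred, switches the draw context and queues the result
 * through adreno_ringbuffer_addcmds().
 */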
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
				struct kgsl_context *context,
				struct kgsl_ibdesc *ibdesc,
				unsigned int numibs,
				uint32_t *timestamp,
				unsigned int flags)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *link;
	unsigned int *cmds;
	unsigned int i;
	struct adreno_context *drawctxt;
	unsigned int start_index = 0;

	if (device->state & KGSL_STATE_HUNG)
		return -EBUSY;
	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
	      context == NULL || ibdesc == 0 || numibs == 0)
		return -EINVAL;

	drawctxt = context->devctxt;

	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
		KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
			" will not accept commands for context %d\n",
			drawctxt, drawctxt->id);
		return -EDEADLK;
	}

	cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
				GFP_KERNEL);
	if (!link) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
			sizeof(unsigned int) * (numibs * 3 + 4));
		return -ENOMEM;
	}

	/* When preamble is enabled, the preamble buffer with state
	 * restoration commands is stored in the first node of the IB chain.
	 * We can skip it if a context switch hasn't occurred */

	if (drawctxt->flags & CTXT_FLAGS_PREAMBLE &&
		adreno_dev->drawctxt_active == drawctxt)
		start_index = 1;

	if (!start_index) {
		*cmds++ = cp_nop_packet(1);
		*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
	} else {
		*cmds++ = cp_nop_packet(4);
		*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[0].gpuaddr;
		*cmds++ = ibdesc[0].sizedwords;
	}
	for (i = start_index; i < numibs; i++) {
		if (unlikely(adreno_dev->ib_check_level >= 1 &&
		    !_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
				ibdesc[i].sizedwords))) {
			kfree(link);
			return -EINVAL;
		}
		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[i].gpuaddr;
		*cmds++ = ibdesc[i].sizedwords;
	}

	*cmds++ = cp_nop_packet(1);
	*cmds++ = KGSL_END_OF_IB_IDENTIFIER;

	kgsl_setstate(&device->mmu,
		kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
				      device->id));

	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
					drawctxt,
					KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
					&link[0], (cmds - link));

	KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
		context->id, (unsigned int)ibdesc, numibs, *timestamp);

	kfree(link);

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
	/*
	 * insert wait for idle after every IB1
	 * this is conservative but works reliably and is ok
	 * even for performance simulations
	 */
	adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif

	return 0;
}

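/*
 * adreno_ringbuffer_extract - after a hang, walk the ringbuffer
 * backwards from the read pointer to find the end of the last retired
 * command, then copy forward every command belonging to contexts other
 * than the hanging one so they can be replayed on recovery.
 */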
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
				unsigned int *temp_rb_buffer,
				int *rb_size)
{
	struct kgsl_device *device = rb->device;
	unsigned int rb_rptr;
	unsigned int retired_timestamp;
	unsigned int temp_idx = 0;
	unsigned int value;
	unsigned int val1;
	unsigned int val2;
	unsigned int val3;
	unsigned int copy_rb_contents = 0;
	struct kgsl_context *context;
	unsigned int context_id;

	GSL_RB_GET_READPTR(rb, &rb->rptr);

	/* current_context is the context that is presently active in the
	 * GPU, i.e. the context in which the hang is caused */
	kgsl_sharedmem_readl(&device->memstore, &context_id,
		KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
			current_context));
	KGSL_DRV_ERR(device, "Last context id: %d\n", context_id);
	context = idr_find(&device->context_idr, context_id);
	if (context == NULL) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because last"
			" context id is invalid.\n");
		return -EINVAL;
	}
	retired_timestamp = kgsl_readtimestamp(device, context,
					       KGSL_TIMESTAMP_RETIRED);
	KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
			retired_timestamp);
	/*
	 * We need to go back in history by 4 dwords from the current location
	 * of read pointer as 4 dwords are read to match the end of a command.
	 * Also, take care of wrap around when moving back
	 */
	if (rb->rptr >= 4)
		rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
	else
		rb_rptr = rb->buffer_desc.size -
			((4 - rb->rptr) * sizeof(unsigned int));
	/* Read the rb contents going backwards to locate end of last
	 * successfully executed command */
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		if (value == retired_timestamp) {
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
			kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
			/* match the pattern found at the end of a command */
			if ((val1 == 2 &&
				val2 == cp_type3_packet(CP_INTERRUPT, 1)
				&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
				(val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
				&& val2 == CACHE_FLUSH_TS &&
				val3 == (rb->device->memstore.gpuaddr +
				KGSL_MEMSTORE_OFFSET(context_id,
					eoptimestamp)))) {
				rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
							rb->buffer_desc.size);
				KGSL_DRV_ERR(device,
					"Found end of last executed "
					"command at offset: %x\n",
					rb_rptr / sizeof(unsigned int));
				break;
			} else {
				if (rb_rptr < (3 * sizeof(unsigned int)))
					rb_rptr = rb->buffer_desc.size -
						(3 * sizeof(unsigned int))
							+ rb_rptr;
				else
					rb_rptr -= (3 * sizeof(unsigned int));
			}
		}

		if (rb_rptr == 0)
			rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
		else
			rb_rptr -= sizeof(unsigned int);
	}

	if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because last"
			" successful timestamp is overwritten\n");
		return -EINVAL;
	}
	/* rb_rptr is now pointing to the first dword of the command following
	 * the last successfully executed command sequence. Assumption is that
	 * GPU is hung in the command sequence pointed by rb_rptr */
	/* make sure the GPU is not hung in a command submitted by kgsl
	 * itself */
	kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
	kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
		adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size));
	if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
		KGSL_DRV_ERR(device,
			"GPU recovery from hang not possible because "
			"of hang in kgsl command\n");
		return -EINVAL;
	}

	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size);
		/* check for context switch indicator */
		if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size);
			BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
			kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size);
			BUG_ON(val1 != (device->memstore.gpuaddr +
				KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
					current_context)));
			kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
			rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
						rb->buffer_desc.size);

			/*
			 * If other context switches were already lost and
			 * the current context is the one that is hanging,
			 * then we cannot recover. Print an error message
			 * and leave.
			 */

			if ((copy_rb_contents == 0) && (value == context_id)) {
				KGSL_DRV_ERR(device, "GPU recovery could not "
					"find the previous context\n");
				return -EINVAL;
			}

			/*
			 * If we were copying the commands and got to this point
			 * then we need to remove the 3 commands that appear
			 * before KGSL_CONTEXT_TO_MEM_IDENTIFIER
			 */
			if (temp_idx)
				temp_idx -= 3;
			/* if context switches to a context that did not cause
			 * hang then start saving the rb contents as those
			 * commands can be executed */
			if (value != context_id) {
				copy_rb_contents = 1;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CMD_IDENTIFIER;
				temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
				temp_rb_buffer[temp_idx++] =
						KGSL_CONTEXT_TO_MEM_IDENTIFIER;
				temp_rb_buffer[temp_idx++] =
					cp_type3_packet(CP_MEM_WRITE, 2);
				temp_rb_buffer[temp_idx++] = val1;
				temp_rb_buffer[temp_idx++] = value;
			} else {
				copy_rb_contents = 0;
			}
		} else if (copy_rb_contents)
			temp_rb_buffer[temp_idx++] = value;
	}

	*rb_size = temp_idx;
	return 0;
}

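/*
 * adreno_ringbuffer_restore - write the commands salvaged by
 * adreno_ringbuffer_extract() back into the ringbuffer and submit them,
 * resetting the read pointer first if they would not fit at the tail.
 */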
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
			int num_rb_contents)
{
	int i;
	unsigned int *ringcmds;
	unsigned int rcmd_gpu;

	if (!num_rb_contents)
		return;

	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
		rb->rptr = 0;
		BUG_ON(num_rb_contents > rb->buffer_desc.size);
	}
	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
	for (i = 0; i < num_rb_contents; i++)
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
	rb->wptr += num_rb_contents;
	adreno_ringbuffer_submit(rb);
}