/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"

#include "a2xx_reg.h"
#include "a3xx_reg.h"

#define GSL_RB_NOP_SIZEDWORDS 2

/*
 * CP DEBUG settings for all cores:
 * DYNAMIC_CLK_DISABLE [27] - turn off the dynamic clock control
 * PROG_END_PTR_ENABLE [25] - Allow 128 bit writes to the VBIF
 */

#define CP_DEBUG_DEFAULT ((1 << 27) | (1 << 25))

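/*
 * adreno_ringbuffer_submit() - publish queued commands to the CP
 * @rb: the ringbuffer to submit
 *
 * Tells the pwrscale policy that the GPU is busy, orders the command
 * writes with a memory barrier and then writes the new write pointer
 * to the hardware.
 */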
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
        BUG_ON(rb->wptr == 0);

        /* Let the pwrscale policy know that new commands have
           been submitted. */
        kgsl_pwrscale_busy(rb->device);

        /* Synchronize memory before informing the hardware of the
         * new commands.
         */
        mb();

        adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

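/*
 * adreno_ringbuffer_waitspace() - wait until numcmds dwords are free
 * @rb: the ringbuffer to wait on
 * @context: the context submitting commands, or NULL
 * @numcmds: number of dwords that must become available
 * @wptr_ahead: nonzero if the write pointer must first wrap to the start
 *
 * If the write pointer is ahead of the read pointer, pad the tail of the
 * buffer with a NOP packet and wrap.  Then poll the read pointer until
 * enough space opens up, running fault tolerance detection while polling.
 * Returns 0 on success or -EDEADLK if the context caused a GPU hang.
 */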
static int
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb,
                                struct adreno_context *context,
                                unsigned int numcmds, int wptr_ahead)
{
        int nopcount;
        unsigned int freecmds;
        unsigned int *cmds;
        uint cmds_gpu;
        unsigned long wait_time;
        unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
        unsigned long wait_time_part;
        unsigned int prev_reg_val[FT_DETECT_REGS_COUNT];

        memset(prev_reg_val, 0, sizeof(prev_reg_val));

        /* if wptr ahead, fill the remaining with NOPs */
        if (wptr_ahead) {
                /* -1 for header */
                nopcount = rb->sizedwords - rb->wptr - 1;

                cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
                cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

                GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

                /* Make sure that rptr is not 0 before submitting
                 * commands at the end of ringbuffer. We do not
                 * want the rptr and wptr to become equal when
                 * the ringbuffer is not empty */
                do {
                        GSL_RB_GET_READPTR(rb, &rb->rptr);
                } while (!rb->rptr);

                rb->wptr++;

                adreno_ringbuffer_submit(rb);

                rb->wptr = 0;
        }

        wait_time = jiffies + wait_timeout;
        wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
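
        /*
         * freecmds below relies on unsigned wrap-around: when wptr is
         * behind rptr the subtraction yields the real gap (for example,
         * rptr = 0x1000 and wptr = 0x800 leaves 0x800 free dwords), and
         * when rptr wraps back behind wptr it yields a huge value that
         * always exceeds numcmds, ending the wait.  freecmds == 0 means
         * rptr == wptr, i.e. the ring is empty.
         */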
        /* wait for space in ringbuffer */
        while (1) {
                GSL_RB_GET_READPTR(rb, &rb->rptr);

                freecmds = rb->rptr - rb->wptr;

                if (freecmds == 0 || freecmds > numcmds)
                        break;

                /* Don't wait for the full timeout; detect hangs faster. */
                if (time_after(jiffies, wait_time_part)) {
                        wait_time_part = jiffies +
                                msecs_to_jiffies(KGSL_TIMEOUT_PART);
                        if ((adreno_ft_detect(rb->device,
                                prev_reg_val))) {
                                KGSL_DRV_ERR(rb->device,
                                "Hang detected while waiting for freespace in "
                                "ringbuffer rptr: 0x%x, wptr: 0x%x\n",
                                rb->rptr, rb->wptr);
                                goto err;
                        }
                }

                if (time_after(jiffies, wait_time)) {
                        KGSL_DRV_ERR(rb->device,
                        "Timed out while waiting for freespace in ringbuffer "
                        "rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr);
                        goto err;
                }

                continue;

err:
                if (!adreno_dump_and_exec_ft(rb->device)) {
                        if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
                                KGSL_CTXT_WARN(rb->device,
                                "Context %p caused a gpu hang. Will not accept commands for context %d\n",
                                context, context->id);
                                return -EDEADLK;
                        }
                        wait_time = jiffies + wait_timeout;
                } else {
                        /* GPU is hung and fault tolerance failed */
                        BUG();
                }
        }
        return 0;
}

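/*
 * adreno_ringbuffer_allocspace() - reserve numcmds dwords in the ring
 * @rb: the ringbuffer to allocate from
 * @context: the context the allocation is for, or NULL
 * @numcmds: number of dwords to reserve
 *
 * Waits for space (wrapping if necessary) and advances the write
 * pointer.  Returns a host pointer to the reserved dwords, or NULL if
 * the space could not be obtained.
 */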
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
                                        struct adreno_context *context,
                                        unsigned int numcmds)
{
        unsigned int *ptr = NULL;
        int ret = 0;
        BUG_ON(numcmds >= rb->sizedwords);

        GSL_RB_GET_READPTR(rb, &rb->rptr);
        /* check for available space */
        if (rb->wptr >= rb->rptr) {
                /* wptr ahead or equal to rptr */
                /* reserve dwords for nop packet */
                if ((rb->wptr + numcmds) > (rb->sizedwords -
                                GSL_RB_NOP_SIZEDWORDS))
                        ret = adreno_ringbuffer_waitspace(rb, context,
                                                        numcmds, 1);
        } else {
                /* wptr behind rptr */
                if ((rb->wptr + numcmds) >= rb->rptr)
                        ret = adreno_ringbuffer_waitspace(rb, context,
                                                        numcmds, 0);
                /* check for remaining space */
                /* reserve dwords for nop packet */
                if (!ret && (rb->wptr + numcmds) > (rb->sizedwords -
                                GSL_RB_NOP_SIZEDWORDS))
                        ret = adreno_ringbuffer_waitspace(rb, context,
                                                        numcmds, 1);
        }

        if (!ret) {
                ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
                rb->wptr += numcmds;
        }

        return ptr;
}

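/*
 * _load_firmware() - read a firmware image into a kernel buffer
 * @device: the KGSL device that needs the firmware
 * @fwfile: name of the firmware image to request
 * @data: output pointer for the kmalloc'd copy of the image
 * @len: output length of the image in bytes
 *
 * Returns 0 on success, or a negative error code if the image could
 * not be loaded or the copy could not be allocated.
 */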
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
                        void **data, int *len)
{
        const struct firmware *fw = NULL;
        int ret;

        ret = request_firmware(&fw, fwfile, device->dev);

        if (ret) {
                KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
                        fwfile, ret);
                return ret;
        }

        *data = kmalloc(fw->size, GFP_KERNEL);

        if (*data) {
                memcpy(*data, fw->data, fw->size);
                *len = fw->size;
        } else
                KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);

        release_firmware(fw);
        return (*data != NULL) ? 0 : -ENOMEM;
}

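/*
 * adreno_ringbuffer_read_pm4_ucode() - read and validate the PM4 image
 * @device: the KGSL device to read the microcode for
 *
 * A valid PM4 image is 3-dword-aligned microcode preceded by one dword
 * of version information, so (len % 12) must equal 4; the version
 * number itself lives at index 1 of the dword array.
 */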
int adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int ret = 0;

        if (adreno_dev->pm4_fw == NULL) {
                int len;
                void *ptr;

                ret = _load_firmware(device, adreno_dev->pm4_fwfile,
                        &ptr, &len);

                if (ret)
                        goto err;

                /* PM4 size is 3 dword aligned plus 1 dword of version */
                if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
                        KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
                        ret = -EINVAL;
                        kfree(ptr);
                        goto err;
                }

                adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
                adreno_dev->pm4_fw = ptr;
                adreno_dev->pm4_fw_version = adreno_dev->pm4_fw[1];
        }

err:
        return ret;
}

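/*
 * adreno_ringbuffer_load_pm4_ucode() - write the PM4 image to the CP
 * @device: the KGSL device to program
 *
 * Reads the image if it is not already cached, then streams every
 * dword after the first into the CP microengine RAM.
 */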
int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int i;

        if (adreno_dev->pm4_fw == NULL) {
                int ret = adreno_ringbuffer_read_pm4_ucode(device);
                if (ret)
                        return ret;
        }

        KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
                adreno_dev->pm4_fw_version);

        adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT);
        adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
        for (i = 1; i < adreno_dev->pm4_fw_size; i++)
                adreno_regwrite(device, REG_CP_ME_RAM_DATA,
                        adreno_dev->pm4_fw[i]);

        return 0;
}

int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int ret = 0;

        if (adreno_dev->pfp_fw == NULL) {
                int len;
                void *ptr;

                ret = _load_firmware(device, adreno_dev->pfp_fwfile,
                        &ptr, &len);
                if (ret)
                        goto err;

                /* PFP size should be dword aligned */
                if (len % sizeof(uint32_t) != 0) {
                        KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
                        ret = -EINVAL;
                        kfree(ptr);
                        goto err;
                }

                adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
                adreno_dev->pfp_fw = ptr;
                adreno_dev->pfp_fw_version = adreno_dev->pfp_fw[5];
        }

err:
        return ret;
}

int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int i;

        if (adreno_dev->pfp_fw == NULL) {
                int ret = adreno_ringbuffer_read_pfp_ucode(device);
                if (ret)
                        return ret;
        }

        KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
                adreno_dev->pfp_fw_version);

        adreno_regwrite(device, adreno_dev->gpudev->reg_cp_pfp_ucode_addr, 0);
        for (i = 1; i < adreno_dev->pfp_fw_size; i++)
                adreno_regwrite(device,
                        adreno_dev->gpudev->reg_cp_pfp_ucode_data,
                        adreno_dev->pfp_fw[i]);

        return 0;
}

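/*
 * adreno_ringbuffer_start() - initialize the CP and start the ringbuffer
 * @rb: the ringbuffer to start
 *
 * Programs the ringbuffer control registers, loads the PM4 and PFP
 * microcode, releases the microengine from halt and idles the device
 * to confirm that ME init succeeded.
 */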
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb)
{
        int status;
        /*cp_rb_cntl_u cp_rb_cntl; */
        union reg_cp_rb_cntl cp_rb_cntl;
        unsigned int rb_cntl;
        struct kgsl_device *device = rb->device;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

        if (rb->flags & KGSL_FLAGS_STARTED)
                return 0;

        kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
                        sizeof(struct kgsl_rbmemptrs));

        kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
                        (rb->sizedwords << 2));

        if (adreno_is_a2xx(adreno_dev)) {
                adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
                        (rb->memptrs_desc.gpuaddr
                        + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

                /* setup WPTR delay */
                adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
                        0 /*0x70000010 */);
        }

        /* setup REG_CP_RB_CNTL */
        adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
        cp_rb_cntl.val = rb_cntl;

        /*
         * The size of the ringbuffer in the hardware is the log2
         * representation of the size in quadwords (sizedwords / 2)
         */
        cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
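        /*
         * For example, assuming a 128 KiB ring (32768 dwords), the value
         * programmed here would be ilog2(16384) = 14.
         */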

        /*
         * Specify the quadwords to read before updating mem RPTR.
         * Like above, pass the log2 representation of the blocksize
         * in quadwords.
         */
        cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

        if (adreno_is_a2xx(adreno_dev)) {
                /* WPTR polling */
                cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
        }

        /* mem RPTR writebacks */
        cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

        adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

        adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

        adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
                        rb->memptrs_desc.gpuaddr +
                        GSL_RB_MEMPTRS_RPTR_OFFSET);

        if (adreno_is_a3xx(adreno_dev)) {
                /* enable access protection to privileged registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);

                /* RBBM registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);

                /* CP registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);

                /* RB registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);

                /* VBIF registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
        }

        if (adreno_is_a2xx(adreno_dev)) {
                /* explicitly clear all cp interrupts */
                adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
        }

        /* setup scratch/timestamp */
        adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
                        KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
                        soptimestamp));

        adreno_regwrite(device, REG_SCRATCH_UMSK,
                        GSL_RB_MEMPTRS_SCRATCH_MASK);

        /* load the CP ucode */
        status = adreno_ringbuffer_load_pm4_ucode(device);
        if (status != 0)
                return status;

        /* load the prefetch parser ucode */
        status = adreno_ringbuffer_load_pfp_ucode(device);
        if (status != 0)
                return status;

        /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
        if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
                adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000E0602);

        rb->rptr = 0;
        rb->wptr = 0;

        /* clear ME_HALT to start micro engine */
        adreno_regwrite(device, REG_CP_ME_CNTL, 0);

        /* ME init is GPU specific, so jump into the sub-function */
        status = adreno_dev->gpudev->rb_init(adreno_dev, rb);
        if (status)
                return status;

        /* idle device to validate ME INIT */
        status = adreno_idle(device);

        if (status == 0)
                rb->flags |= KGSL_FLAGS_STARTED;

        return status;
}

void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
        struct kgsl_device *device = rb->device;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

        if (rb->flags & KGSL_FLAGS_STARTED) {
                if (adreno_is_a200(adreno_dev))
                        adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

                rb->flags &= ~KGSL_FLAGS_STARTED;
        }
}

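/*
 * adreno_ringbuffer_init() - allocate the ringbuffer and memptrs memory
 * @device: the KGSL device the ringbuffer belongs to
 *
 * Allocates contiguous GPU-readonly memory for the ring itself and a
 * separate buffer for the read pointer and timestamp writebacks.
 */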
int adreno_ringbuffer_init(struct kgsl_device *device)
{
        int status;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

        rb->device = device;
        /*
         * It is silly to convert this to words and then back to bytes
         * immediately below, but most of the rest of the code deals
         * in words, so we might as well only do the math once
         */
        rb->sizedwords = KGSL_RB_SIZE >> 2;

        rb->buffer_desc.flags = KGSL_MEMFLAGS_GPUREADONLY;
        /* allocate memory for ringbuffer */
        status = kgsl_allocate_contiguous(&rb->buffer_desc,
                (rb->sizedwords << 2));

        if (status != 0) {
                adreno_ringbuffer_close(rb);
                return status;
        }

        /* allocate memory for polling and timestamps */
        /* This really could be at a 4 byte alignment boundary, but for
         * MMU use it needs to be at a page boundary */
        status = kgsl_allocate_contiguous(&rb->memptrs_desc,
                sizeof(struct kgsl_rbmemptrs));

        if (status != 0) {
                adreno_ringbuffer_close(rb);
                return status;
        }

        /* overlay structure on memptrs memory */
        rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

        return 0;
}

void adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

        kgsl_sharedmem_free(&rb->buffer_desc);
        kgsl_sharedmem_free(&rb->memptrs_desc);

        kfree(adreno_dev->pfp_fw);
        kfree(adreno_dev->pm4_fw);

        adreno_dev->pfp_fw = NULL;
        adreno_dev->pm4_fw = NULL;

        memset(rb, 0, sizeof(struct adreno_ringbuffer));
}

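/*
 * adreno_ringbuffer_addcmds() - copy a command stream into the ring
 * @rb: the ringbuffer to write to
 * @context: the submitting context, or NULL for internal commands
 * @flags: KGSL_CMD_FLAGS_* controlling what is emitted around the commands
 * @cmds: array of command dwords to copy
 * @sizedwords: number of dwords in @cmds
 *
 * Wraps the commands with the identifiers, timestamp writes, conditional
 * interrupt packets and hardware workarounds that the rest of the driver
 * depends on, then submits the whole sequence to the CP.
 */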
static int
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
                                struct adreno_context *context,
                                unsigned int flags, unsigned int *cmds,
                                int sizedwords)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
        unsigned int *ringcmds;
        unsigned int total_sizedwords = sizedwords;
        unsigned int i;
        unsigned int rcmd_gpu;
        unsigned int context_id;
        unsigned int gpuaddr = rb->device->memstore.gpuaddr;
        unsigned int timestamp;

        /*
         * If the context was not created with per context timestamp
         * support, we must use the global timestamp since issueibcmds
         * will be returning that one; internal submissions likewise
         * use the global timestamp.
         */
        if ((context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) &&
                !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
                context_id = context->id;
        else
                context_id = KGSL_MEMSTORE_GLOBAL;

553 /* reserve space to temporarily turn off protected mode
554 * error checking if needed
555 */
556 total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
Shubhraprakash Dasc3ad5802012-05-30 18:10:06 -0600557 /* 2 dwords to store the start of command sequence */
558 total_sizedwords += 2;
Carter Cooper728bd152013-05-28 17:00:06 -0600559 /* internal ib command identifier for the ringbuffer */
560 total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
561
Jordan Crouseef02fc02013-03-05 11:19:31 -0700562 /* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
563 total_sizedwords += context ? 13 : 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700564
Shubhraprakash Dasd9e2cc12013-05-28 17:05:38 -0600565 if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
566 (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
567 KGSL_CMD_FLAGS_GET_INT)))
568 total_sizedwords += 2;
569
Jordan Crouseb4d31bd2012-02-01 22:11:12 -0700570 if (adreno_is_a3xx(adreno_dev))
571 total_sizedwords += 7;
572
Anshuman Danica4e1a72012-11-06 22:19:50 +0530573 if (adreno_is_a2xx(adreno_dev))
574 total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */
575
Shubhraprakash Dasd9e2cc12013-05-28 17:05:38 -0600576 total_sizedwords += 2; /* scratchpad ts for recovery */
577 total_sizedwords += 3; /* sop timestamp */
578 total_sizedwords += 4; /* eop timestamp */
Anshuman Dani9ce83972013-05-28 17:01:10 -0600579
Shubhraprakash Dasd9e2cc12013-05-28 17:05:38 -0600580 if (KGSL_MEMSTORE_GLOBAL != context_id)
Rajesh Kemisettic5699302012-04-21 21:09:05 +0530581 total_sizedwords += 3; /* global timestamp without cache
582 * flush for non-zero context */
Tarun Karra6479d072013-03-27 19:37:55 -0700583
Tarun Karraac549fd2013-03-27 19:37:55 -0700584 if (flags & KGSL_CMD_FLAGS_EOF)
585 total_sizedwords += 2;
586
Richard Ruigrok28eabf62013-09-13 18:09:49 -0600587 /* Add space for the power on shader fixup if we need it */
588 if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP)
589 total_sizedwords += 5;
590
Shubhraprakash Dasd316ff82012-08-02 12:43:48 -0700591 ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
Carter Cooper9cf77b62013-05-28 17:04:26 -0600592 if (!ringcmds)
593 return -ENOSPC;
Shubhraprakash Dasb2abc452012-06-08 16:33:03 -0600594
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700595 rcmd_gpu = rb->buffer_desc.gpuaddr
596 + sizeof(uint)*(rb->wptr-total_sizedwords);
597
Richard Ruigrok28eabf62013-09-13 18:09:49 -0600598 if (flags & KGSL_CMD_FLAGS_PWRON_FIXUP) {
599 GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
600 GSL_RB_WRITE(ringcmds, rcmd_gpu,
601 KGSL_PWRON_FIXUP_IDENTIFIER);
602 GSL_RB_WRITE(ringcmds, rcmd_gpu,
603 CP_HDR_INDIRECT_BUFFER_PFD);
604 GSL_RB_WRITE(ringcmds, rcmd_gpu,
605 adreno_dev->pwron_fixup.gpuaddr);
606 GSL_RB_WRITE(ringcmds, rcmd_gpu,
607 adreno_dev->pwron_fixup_dwords);
608 }
609
Shubhraprakash Dasc3ad5802012-05-30 18:10:06 -0600610 GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
611 GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
612
Carter Cooper728bd152013-05-28 17:00:06 -0600613 if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
614 GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
615 GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_INTERNAL_IDENTIFIER);
616 }
617
Carter Cooper9cf77b62013-05-28 17:04:26 -0600618 /* always increment the global timestamp. once. */
619 rb->global_ts++;
620
621 if (KGSL_MEMSTORE_GLOBAL != context_id)
622 timestamp = context->timestamp;
623 else
624 timestamp = rb->global_ts;
625
Shubhraprakash Dasd9e2cc12013-05-28 17:05:38 -0600626 /* scratchpad ts for recovery */
627 GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
628 GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->global_ts);
629
630 /* start-of-pipeline timestamp */
631 GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_MEM_WRITE, 2));
632 GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
633 KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
634 GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
635
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700636 if (flags & KGSL_CMD_FLAGS_PMODE) {
637 /* disable protected mode error checking */
638 GSL_RB_WRITE(ringcmds, rcmd_gpu,
Jordan Crouse084427d2011-07-28 08:37:58 -0600639 cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700640 GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
641 }
642
643 for (i = 0; i < sizedwords; i++) {
644 GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
645 cmds++;
646 }
647
648 if (flags & KGSL_CMD_FLAGS_PMODE) {
649 /* re-enable protected mode error checking */
650 GSL_RB_WRITE(ringcmds, rcmd_gpu,
Jordan Crouse084427d2011-07-28 08:37:58 -0600651 cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700652 GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
653 }
654
        /* HW workaround for an MMU page fault caused by memory
         * being freed before the GPU is done with it.
         */
        if (adreno_is_a2xx(adreno_dev)) {
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
        }

        if (adreno_is_a3xx(adreno_dev)) {
                /*
                 * Flush HLSQ lazy updates to make sure there are no
                 * resources pending for indirect loads after the timestamp
                 */

                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_EVENT_WRITE, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x07); /* HLSQ_FLUSH */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
        }

        /*
         * end-of-pipeline timestamp.  If per context timestamps are not
         * enabled, then context_id will be KGSL_MEMSTORE_GLOBAL so all
         * eop timestamps will work out.
         */
        GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
        GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
        GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
        GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);

        if (KGSL_MEMSTORE_GLOBAL != context_id) {
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_MEM_WRITE, 2));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                        KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
                                eoptimestamp)));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->global_ts);
        }
        if (context) {
                /* Conditional execution based on memory values */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_COND_EXEC, 4));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                        KGSL_MEMSTORE_OFFSET(
                                context_id, ts_cmp_enable)) >> 2);
                GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                        KGSL_MEMSTORE_OFFSET(
                                context_id, ref_wait_ts)) >> 2);
                GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
                /* # of conditional command DWORDs */
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 8);

                /* Clear the ts_cmp_enable for the context */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_MEM_WRITE, 2));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
                        KGSL_MEMSTORE_OFFSET(
                                context_id, ts_cmp_enable));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);

                /* Clear the ts_cmp_enable for the global timestamp */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_MEM_WRITE, 2));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
                        KGSL_MEMSTORE_OFFSET(
                                KGSL_MEMSTORE_GLOBAL, ts_cmp_enable));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);

                /* Trigger the interrupt */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_INTERRUPT, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
        }

        /*
         * If per context timestamps are enabled and any of the kgsl
         * internal commands want an interrupt to be generated, trigger it
         */
        if ((context) && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) &&
                (flags & (KGSL_CMD_FLAGS_INTERNAL_ISSUE |
                KGSL_CMD_FLAGS_GET_INT))) {
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_INTERRUPT, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        CP_INT_CNTL__RB_INT_MASK);
        }

        if (adreno_is_a3xx(adreno_dev)) {
                /* Dummy set-constant to trigger context rollover */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        cp_type3_packet(CP_SET_CONSTANT, 2));
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        (0x4<<16)|(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
        }

        if (flags & KGSL_CMD_FLAGS_EOF) {
                GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_END_OF_FRAME_IDENTIFIER);
        }

        adreno_ringbuffer_submit(rb);

        return 0;
}

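/*
 * adreno_ringbuffer_issuecmds() - issue driver-internal commands
 * @device: the KGSL device the ringbuffer belongs to
 * @drawctxt: the context the commands are issued on behalf of, or NULL
 * @flags: KGSL_CMD_FLAGS_* for the submission
 * @cmds: array of command dwords
 * @sizedwords: number of dwords in @cmds
 *
 * Wrapper around adreno_ringbuffer_addcmds() for commands generated
 * inside the driver; these are always marked INTERNAL_ISSUE and so
 * retire on the global timestamp.
 */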
unsigned int
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
                        struct adreno_context *drawctxt,
                        unsigned int flags,
                        unsigned int *cmds,
                        int sizedwords)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

        if (device->state & KGSL_STATE_HUNG)
                return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
                                        KGSL_TIMESTAMP_RETIRED);

        flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE;

        return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds,
                sizedwords);
}

static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
                        int sizedwords);

static bool
_handle_type3(struct kgsl_device_private *dev_priv, uint *hostaddr)
{
        unsigned int opcode = cp_type3_opcode(*hostaddr);
        switch (opcode) {
        case CP_INDIRECT_BUFFER_PFD:
        case CP_INDIRECT_BUFFER_PFE:
        case CP_COND_INDIRECT_BUFFER_PFE:
        case CP_COND_INDIRECT_BUFFER_PFD:
                return _parse_ibs(dev_priv, hostaddr[1], hostaddr[2]);
        case CP_NOP:
        case CP_WAIT_FOR_IDLE:
        case CP_WAIT_REG_MEM:
        case CP_WAIT_REG_EQ:
        case CP_WAT_REG_GTE:
        case CP_WAIT_UNTIL_READ:
        case CP_WAIT_IB_PFD_COMPLETE:
        case CP_REG_RMW:
        case CP_REG_TO_MEM:
        case CP_MEM_WRITE:
        case CP_MEM_WRITE_CNTR:
        case CP_COND_EXEC:
        case CP_COND_WRITE:
        case CP_EVENT_WRITE:
        case CP_EVENT_WRITE_SHD:
        case CP_EVENT_WRITE_CFL:
        case CP_EVENT_WRITE_ZPD:
        case CP_DRAW_INDX:
        case CP_DRAW_INDX_2:
        case CP_DRAW_INDX_BIN:
        case CP_DRAW_INDX_2_BIN:
        case CP_VIZ_QUERY:
        case CP_SET_STATE:
        case CP_SET_CONSTANT:
        case CP_IM_LOAD:
        case CP_IM_LOAD_IMMEDIATE:
        case CP_LOAD_CONSTANT_CONTEXT:
        case CP_INVALIDATE_STATE:
        case CP_SET_SHADER_BASES:
        case CP_SET_BIN_MASK:
        case CP_SET_BIN_SELECT:
        case CP_SET_BIN_BASE_OFFSET:
        case CP_SET_BIN_DATA:
        case CP_CONTEXT_UPDATE:
        case CP_INTERRUPT:
        case CP_IM_STORE:
        case CP_LOAD_STATE:
                break;
        /* these shouldn't come from userspace */
        case CP_ME_INIT:
        case CP_SET_PROTECTED_MODE:
        default:
                KGSL_CMD_ERR(dev_priv->device, "bad CP opcode %0x\n", opcode);
                return false;
                break;
        }

        return true;
}

static bool
_handle_type0(struct kgsl_device_private *dev_priv, uint *hostaddr)
{
        unsigned int reg = type0_pkt_offset(*hostaddr);
        unsigned int cnt = type0_pkt_size(*hostaddr);
        if (reg < 0x0192 || (reg + cnt) >= 0x8000) {
                KGSL_CMD_ERR(dev_priv->device, "bad type0 reg: 0x%0x cnt: %d\n",
                        reg, cnt);
                return false;
        }
        return true;
}
/*
 * Traverse IBs and dump them to the test vector. Detect a swap by
 * inspecting register writes, keeping note of the current state, and
 * dump the framebuffer config to the test vector.
 */
static bool _parse_ibs(struct kgsl_device_private *dev_priv,
                        uint gpuaddr, int sizedwords)
{
        static uint level; /* recursion level */
        bool ret = false;
        uint *hostaddr, *hoststart;
        int dwords_left = sizedwords; /* dwords left in the current command
                                         buffer */
        struct kgsl_mem_entry *entry;

        entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
                        gpuaddr, sizedwords * sizeof(uint));
        if (entry == NULL) {
                KGSL_CMD_ERR(dev_priv->device,
                        "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
                return false;
        }

        hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr);
        if (hostaddr == NULL) {
                KGSL_CMD_ERR(dev_priv->device,
                        "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
                return false;
        }

        hoststart = hostaddr;

        level++;

        KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
                gpuaddr, sizedwords, hostaddr);

        mb();
        while (dwords_left > 0) {
                bool cur_ret = true;
                int count = 0; /* dword count including packet header */

                switch (*hostaddr >> 30) {
                case 0x0: /* type-0 */
                        count = (*hostaddr >> 16)+2;
                        cur_ret = _handle_type0(dev_priv, hostaddr);
                        break;
                case 0x1: /* type-1 */
                        count = 2;
                        break;
                case 0x3: /* type-3 */
                        count = ((*hostaddr >> 16) & 0x3fff) + 2;
                        cur_ret = _handle_type3(dev_priv, hostaddr);
                        break;
                default:
                        KGSL_CMD_ERR(dev_priv->device, "unexpected type: "
                                "type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
                                *hostaddr >> 30, *hostaddr, hostaddr,
                                gpuaddr+4*(sizedwords-dwords_left));
                        cur_ret = false;
                        count = dwords_left;
                        break;
                }

                if (!cur_ret) {
                        KGSL_CMD_ERR(dev_priv->device,
                                "bad sub-type: #:%d/%d, v:0x%08x"
                                " @ 0x%p[gb:0x%08x], level:%d\n",
                                sizedwords-dwords_left, sizedwords, *hostaddr,
                                hostaddr, gpuaddr+4*(sizedwords-dwords_left),
                                level);

                        if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
                                >= 2)
                                print_hex_dump(KERN_ERR,
                                        level == 1 ? "IB1:" : "IB2:",
                                        DUMP_PREFIX_OFFSET, 32, 4, hoststart,
                                        sizedwords*4, 0);
                        goto done;
                }

                /* jump to next packet */
                dwords_left -= count;
                hostaddr += count;
                if (dwords_left < 0) {
                        KGSL_CMD_ERR(dev_priv->device,
                                "bad count: c:%d, #:%d/%d, "
                                "v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
                                count, sizedwords-(dwords_left+count),
                                sizedwords, *(hostaddr-count), hostaddr-count,
                                gpuaddr+4*(sizedwords-(dwords_left+count)),
                                level);
                        if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
                                >= 2)
                                print_hex_dump(KERN_ERR,
                                        level == 1 ? "IB1:" : "IB2:",
                                        DUMP_PREFIX_OFFSET, 32, 4, hoststart,
                                        sizedwords*4, 0);
                        goto done;
                }
        }

        ret = true;
done:
        if (!ret)
                KGSL_DRV_ERR(dev_priv->device,
                        "parsing failed: gpuaddr:0x%08x, "
                        "host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);

        level--;

        return ret;
}

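/*
 * adreno_ringbuffer_issueibcmds() - submit indirect buffers from userspace
 * @dev_priv: the file private data issuing the commands
 * @context: the KGSL context for the submission
 * @ibdesc: array of indirect buffer descriptors
 * @numibs: number of descriptors in @ibdesc
 * @timestamp: in/out timestamp for the submission
 * @flags: submission flags from userspace
 *
 * Builds a chain of INDIRECT_BUFFER_PFD commands for the IBs (optionally
 * validating them first), switches to the target context and hands the
 * chain to adreno_ringbuffer_addcmds().
 */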
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
                                struct kgsl_context *context,
                                struct kgsl_ibdesc *ibdesc,
                                unsigned int numibs,
                                uint32_t *timestamp,
                                unsigned int flags)
{
        struct kgsl_device *device = dev_priv->device;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        unsigned int *link = 0;
        unsigned int *cmds;
        unsigned int i;
        struct adreno_context *drawctxt = NULL;
        unsigned int start_index = 0;
        int ret = 0;

        if (device->state & KGSL_STATE_HUNG) {
                ret = -EBUSY;
                goto done;
        }

        if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
                context == NULL || ibdesc == 0 || numibs == 0) {
                ret = -EINVAL;
                goto done;
        }
        drawctxt = context->devctxt;

        if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
                KGSL_CTXT_ERR(device, "proc %s failed fault tolerance,"
                        " will not accept commands for context %d\n",
                        drawctxt->pid_name, drawctxt->id);
                ret = -EDEADLK;
                goto done;
        }

        if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
                KGSL_CTXT_ERR(device,
                        "proc %s triggered fault tolerance,"
                        " skipping commands for context %d until EOF\n",
                        drawctxt->pid_name, drawctxt->id);
                if (flags & KGSL_CMD_FLAGS_EOF)
                        drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
                numibs = 0;
        }

        cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
                                GFP_KERNEL);
        if (!link) {
                ret = -ENOMEM;
                goto done;
        }

        /* When the preamble is enabled, the preamble buffer with state
         * restoration commands is stored in the first node of the IB chain.
         * We can skip it if a context switch hasn't occurred */

        if (drawctxt->flags & CTXT_FLAGS_PREAMBLE &&
                adreno_dev->drawctxt_active == drawctxt)
                start_index = 1;

        if (!start_index) {
                *cmds++ = cp_nop_packet(1);
                *cmds++ = KGSL_START_OF_IB_IDENTIFIER;
        } else {
                *cmds++ = cp_nop_packet(4);
                *cmds++ = KGSL_START_OF_IB_IDENTIFIER;
                *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
                *cmds++ = ibdesc[0].gpuaddr;
                *cmds++ = ibdesc[0].sizedwords;
        }
        for (i = start_index; i < numibs; i++) {
                if (unlikely(adreno_dev->ib_check_level >= 1 &&
                        !_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
                                ibdesc[i].sizedwords))) {
                        ret = -EINVAL;
                        goto done;
                }

                if (ibdesc[i].sizedwords == 0) {
                        ret = -EINVAL;
                        goto done;
                }

                *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
                *cmds++ = ibdesc[i].gpuaddr;
                *cmds++ = ibdesc[i].sizedwords;
        }

        *cmds++ = cp_nop_packet(1);
        *cmds++ = KGSL_END_OF_IB_IDENTIFIER;

        kgsl_setstate(&device->mmu, context->id,
                        kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
                        device->id));

        adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

        if (drawctxt->flags & CTXT_FLAGS_USER_GENERATED_TS) {
                if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) {
                        KGSL_DRV_ERR(device,
                                "Invalid user generated ts <%d:0x%x>, "
                                "less than last issued ts <%d:0x%x>\n",
                                drawctxt->id, *timestamp, drawctxt->id,
                                drawctxt->timestamp);
                        ret = -ERANGE;
                        goto done;
                }
                drawctxt->timestamp = *timestamp;
        } else
                drawctxt->timestamp++;

        flags &= KGSL_CMD_FLAGS_EOF;

        /*
         * For some targets, we need to execute a dummy shader operation
         * after a power collapse
         */

        if (test_and_clear_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv) &&
                test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv))
                flags |= KGSL_CMD_FLAGS_PWRON_FIXUP;

        ret = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
                                        drawctxt,
                                        flags,
                                        &link[0], (cmds - link));
        if (ret)
                goto done;

        if (drawctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
                *timestamp = drawctxt->timestamp;
        else
                *timestamp = adreno_dev->ringbuffer.global_ts;

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
        /*
         * insert wait for idle after every IB1
         * this is conservative but works reliably and is ok
         * even for performance simulations
         */
        adreno_idle(device);
#endif

        /*
         * If the context hung and was recovered then return an error so
         * that the application may handle it
         */
        if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
                drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
                ret = -EPROTO;
        }

done:
        kgsl_trace_issueibcmds(device, context ? context->id : 0, ibdesc,
                                numibs, *timestamp, flags, ret,
                                drawctxt ? drawctxt->type : 0);

        kfree(link);
        return ret;
}

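/*
 * _turn_preamble_on_for_ib_seq() - force the preamble for an IB sequence
 * @rb: the ringbuffer being walked
 * @rb_rptr: offset of the start of the IB sequence in the ring
 *
 * Walks the ring from rb_rptr looking for the start of the IB sequence;
 * if the preamble was skipped at submit time (marked by a cp_nop_packet(4)
 * header) the header is rewritten to cp_nop_packet(1) so that the state
 * restoration commands execute when the sequence is replayed.
 */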
static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
                                unsigned int rb_rptr)
{
        unsigned int temp_rb_rptr = rb_rptr;
        unsigned int size = rb->buffer_desc.size;
        unsigned int val[2];
        int i = 0;
        bool check = false;
        bool cmd_start = false;

        /* Go till the start of the ib sequence and turn on preamble */
        while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
                kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
                if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
                        /* decrement i */
                        i = (i + 1) % 2;
                        if (val[i] == cp_nop_packet(4)) {
                                temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
                                                temp_rb_rptr, size);
                                kgsl_sharedmem_writel(&rb->buffer_desc,
                                        temp_rb_rptr, cp_nop_packet(1));
                        }
                        KGSL_FT_INFO(rb->device,
                        "Turned preamble on at offset 0x%x\n",
                        temp_rb_rptr / 4);
                        break;
                }
                /* If we reach the beginning of the next command sequence,
                 * exit.  The first command encountered is the current one,
                 * so don't break on that. */
                if (KGSL_CMD_IDENTIFIER == val[i]) {
                        if (cmd_start)
                                break;
                        cmd_start = true;
                }

                i = (i + 1) % 2;
                if (1 == i)
                        check = true;
                temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
                                                                size);
        }
}

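/*
 * adreno_ringbuffer_extract() - sort the ring contents after a GPU hang
 * @rb: the hung ringbuffer
 * @ft_data: fault tolerance data with the replay start point and buffers
 *
 * Walks the ring from the start of the replay commands and separates
 * them into three snapshots: everything (rb_buffer), commands from
 * innocent contexts (good_rb_buffer) and commands from the hanging
 * context (bad_rb_buffer), so that fault tolerance can replay only the
 * good ones.
 */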
void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
                                struct adreno_ft_data *ft_data)
{
        struct kgsl_device *device = rb->device;
        unsigned int rb_rptr = ft_data->start_of_replay_cmds;
        unsigned int good_rb_idx = 0, bad_rb_idx = 0, temp_rb_idx = 0;
        unsigned int last_good_cmd_end_idx = 0, last_bad_cmd_end_idx = 0;
        unsigned int cmd_start_idx = 0;
        unsigned int val1 = 0;
        int copy_rb_contents = 0;
        unsigned int temp_rb_rptr;
        struct kgsl_context *k_ctxt;
        struct adreno_context *a_ctxt;
        unsigned int size = rb->buffer_desc.size;
        unsigned int *temp_rb_buffer = ft_data->rb_buffer;
        int *rb_size = &ft_data->rb_size;
        unsigned int *bad_rb_buffer = ft_data->bad_rb_buffer;
        int *bad_rb_size = &ft_data->bad_rb_size;
        unsigned int *good_rb_buffer = ft_data->good_rb_buffer;
        int *good_rb_size = &ft_data->good_rb_size;

        /*
         * If the start index from where commands need to be copied is invalid
         * then no need to save off any commands
         */
        if (0xFFFFFFFF == ft_data->start_of_replay_cmds)
                return;

        k_ctxt = kgsl_context_get(device, ft_data->context_id);

        if (k_ctxt) {
                a_ctxt = k_ctxt->devctxt;
                if (a_ctxt->flags & CTXT_FLAGS_PREAMBLE)
                        _turn_preamble_on_for_ib_seq(rb, rb_rptr);
                kgsl_context_put(k_ctxt);
        }
        k_ctxt = NULL;

        /* Walk the rb from the context switch. Omit any commands
         * for an invalid context. */
        while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
                kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);

                if (KGSL_CMD_IDENTIFIER == val1) {
                        /* Start is the NOP dword that comes before
                         * KGSL_CMD_IDENTIFIER */
                        cmd_start_idx = temp_rb_idx - 1;
                        if ((copy_rb_contents) && (good_rb_idx))
                                last_good_cmd_end_idx = good_rb_idx - 1;
                        if ((!copy_rb_contents) && (bad_rb_idx))
                                last_bad_cmd_end_idx = bad_rb_idx - 1;
                }

                /* check for context switch indicator */
                if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
                        unsigned int temp_idx, val2;
                        /* advance by 3 dwords (wrapping if needed) to get
                         * to the context_id */
                        temp_rb_rptr = (rb_rptr + 3 * sizeof(unsigned int)) %
                                        size;
                        kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
                                                temp_rb_rptr);

                        /* if context switches to a context that did not cause
                         * hang then start saving the rb contents as those
                         * commands can be executed */
                        k_ctxt = kgsl_context_get(rb->device, val2);

                        if (k_ctxt) {
                                a_ctxt = k_ctxt->devctxt;

                                /* If we are changing to a good context and
                                 * were not copying commands then copy over
                                 * commands to the good context */
                                if (!copy_rb_contents && ((k_ctxt &&
                                        !(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
                                        !k_ctxt)) {
                                        for (temp_idx = cmd_start_idx;
                                                temp_idx < temp_rb_idx;
                                                temp_idx++)
                                                good_rb_buffer[good_rb_idx++] =
                                                        temp_rb_buffer[temp_idx];
                                        ft_data->last_valid_ctx_id = val2;
                                        copy_rb_contents = 1;
                                        /* remove the good commands from bad
                                         * buffer */
                                        bad_rb_idx = last_bad_cmd_end_idx;
                                } else if (copy_rb_contents && k_ctxt &&
                                        (a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {

                                        /* If we are changing back to a bad
                                         * context from a good ctxt and were not
                                         * copying commands to the bad ctxt then
                                         * copy over commands to the bad
                                         * context */
                                        for (temp_idx = cmd_start_idx;
                                                temp_idx < temp_rb_idx;
                                                temp_idx++)
                                                bad_rb_buffer[bad_rb_idx++] =
                                                        temp_rb_buffer[temp_idx];
                                        /* If we are changing to a bad context
                                         * then remove the dwords we copied for
                                         * this sequence from the good buffer */
                                        good_rb_idx = last_good_cmd_end_idx;
                                        copy_rb_contents = 0;
                                }
                        }
                        kgsl_context_put(k_ctxt);
                }

                if (copy_rb_contents)
                        good_rb_buffer[good_rb_idx++] = val1;
                else
                        bad_rb_buffer[bad_rb_idx++] = val1;

                /* Copy both good and bad commands to temp buffer */
                temp_rb_buffer[temp_rb_idx++] = val1;

                rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
        }
        *good_rb_size = good_rb_idx;
        *bad_rb_size = bad_rb_idx;
        *rb_size = temp_rb_idx;
}

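/*
 * adreno_ringbuffer_restore() - refill the ring after fault tolerance
 * @rb: the ringbuffer to restore
 * @rb_buff: buffer of command dwords extracted before the reset
 * @num_rb_contents: number of dwords to write back
 *
 * Copies the salvaged commands back into the ring and submits them.  If
 * they do not fit before the end of the buffer, the read and write
 * pointers are reset to the start first.
 */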
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
                        int num_rb_contents)
{
        int i;
        unsigned int *ringcmds;
        unsigned int rcmd_gpu;

        if (!num_rb_contents)
                return;

        if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
                adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
                rb->rptr = 0;
                BUG_ON(num_rb_contents > rb->buffer_desc.size);
        }
        ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
        rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
        for (i = 0; i < num_rb_contents; i++)
                GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
        rb->wptr += num_rb_contents;
        adreno_ringbuffer_submit(rb);
}