/* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_trace.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"

#include "a2xx_reg.h"
#include "a3xx_reg.h"

#define GSL_RB_NOP_SIZEDWORDS 2
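/* dwords reserved at the end of the ring for the wrap-around NOP packet */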

/*
 * CP DEBUG settings for all cores:
 * DYNAMIC_CLK_DISABLE [27] - turn off the dynamic clock control
 * PROG_END_PTR_ENABLE [25] - Allow 128 bit writes to the VBIF
 */

#define CP_DEBUG_DEFAULT ((1 << 27) | (1 << 25))
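/* i.e. CP_DEBUG_DEFAULT evaluates to 0x0A000000 */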

void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	/* Let the pwrscale policy know that new commands have
	 * been submitted. */
	kgsl_pwrscale_busy(rb->device);

	/* synchronize memory before informing the hardware of the
	 * new commands.
	 */
	mb();

	adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

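/*
 * adreno_ringbuffer_waitspace() - wait until numcmds dwords are free in the
 * ring. If wptr is ahead of rptr, the tail of the buffer is padded with a
 * NOP packet and wptr wraps back to 0 before waiting.
 */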
static int
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb,
				struct adreno_context *context,
				unsigned int numcmds, int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;
	uint cmds_gpu;
	unsigned long wait_time;
	unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
	unsigned long wait_time_part;
	unsigned int prev_reg_val[ft_detect_regs_count];

	memset(prev_reg_val, 0, sizeof(prev_reg_val));

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;

		GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

		/* Make sure that rptr is not 0 before submitting
		 * commands at the end of ringbuffer. We do not
		 * want the rptr and wptr to become equal when
		 * the ringbuffer is not empty */
		do {
			GSL_RB_GET_READPTR(rb, &rb->rptr);
		} while (!rb->rptr);

		rb->wptr++;

		adreno_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	wait_time = jiffies + wait_timeout;
	wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);
	/* wait for space in ringbuffer */
	while (1) {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;

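		/*
		 * freecmds == 0 means rptr has caught up with wptr and the
		 * ring is empty; freecmds > numcmds means the gap between
		 * wptr and rptr is large enough for this command.
		 */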
		if (freecmds == 0 || freecmds > numcmds)
			break;

		/* Don't wait for the full timeout; detect a hang faster. */
		if (time_after(jiffies, wait_time_part)) {
			wait_time_part = jiffies +
				msecs_to_jiffies(KGSL_TIMEOUT_PART);
			if (adreno_ft_detect(rb->device, prev_reg_val)) {
				KGSL_DRV_ERR(rb->device,
					"Hang detected while waiting for free space in "
					"ringbuffer rptr: 0x%x, wptr: 0x%x\n",
					rb->rptr, rb->wptr);
				goto err;
			}
		}

		if (time_after(jiffies, wait_time)) {
			KGSL_DRV_ERR(rb->device,
				"Timed out while waiting for free space in ringbuffer "
				"rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr);
			goto err;
		}

		continue;

err:
		if (!adreno_dump_and_exec_ft(rb->device)) {
			if (context && context->flags & CTXT_FLAGS_GPU_HANG) {
				KGSL_CTXT_WARN(rb->device,
				"Context %p caused a gpu hang. Will not accept commands for context %d\n",
				context, context->id);
				return -EDEADLK;
			}
			wait_time = jiffies + wait_timeout;
		} else {
			/* GPU is hung and fault tolerance failed */
			BUG();
		}
	}
	return 0;
}

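/*
 * adreno_ringbuffer_allocspace() - reserve numcmds dwords in the ring and
 * return a host pointer to the reserved space, or NULL if space could not
 * be reclaimed. The caller writes its commands through the returned pointer
 * and then calls adreno_ringbuffer_submit().
 */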
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
					struct adreno_context *context,
					unsigned int numcmds)
{
	unsigned int *ptr = NULL;
	int ret = 0;
	BUG_ON(numcmds >= rb->sizedwords);

	GSL_RB_GET_READPTR(rb, &rb->rptr);
	/* check for available space */
	if (rb->wptr >= rb->rptr) {
		/* wptr ahead or equal to rptr */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			ret = adreno_ringbuffer_waitspace(rb, context,
							numcmds, 1);
	} else {
		/* wptr behind rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			ret = adreno_ringbuffer_waitspace(rb, context,
							numcmds, 0);
		/* check for remaining space */
		/* reserve dwords for nop packet */
		if (!ret && (rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			ret = adreno_ringbuffer_waitspace(rb, context,
							numcmds, 1);
	}

	if (!ret) {
		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		rb->wptr += numcmds;
	}

	return ptr;
}

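/*
 * _load_firmware() - fetch a microcode image with request_firmware() and
 * copy it into a kmalloc'd buffer owned by the caller.
 */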
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
			void **data, int *len)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, fwfile, device->dev);

	if (ret) {
		KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
			fwfile, ret);
		return ret;
	}

	*data = kmalloc(fw->size, GFP_KERNEL);

	if (*data) {
		memcpy(*data, fw->data, fw->size);
		*len = fw->size;
	} else
		KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);

	release_firmware(fw);
	return (*data != NULL) ? 0 : -ENOMEM;
}

int adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret = 0;

	if (adreno_dev->pm4_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pm4_fwfile,
			&ptr, &len);

		if (ret)
			goto err;

		/* PM4 size is 3 dword aligned plus 1 dword of version */
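		/* i.e. a valid blob is 1 version dword + N*3 dwords, so len % 12 == 4 */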
		if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
		adreno_dev->pm4_fw = ptr;
		adreno_dev->pm4_fw_version = adreno_dev->pm4_fw[1];
	}

err:
	return ret;
}


int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i;

	if (adreno_dev->pm4_fw == NULL) {
		int ret = adreno_ringbuffer_read_pm4_ucode(device);
		if (ret)
			return ret;
	}

	KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
		adreno_dev->pm4_fw_version);

	adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT);
	adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < adreno_dev->pm4_fw_size; i++)
		adreno_regwrite(device, REG_CP_ME_RAM_DATA,
			adreno_dev->pm4_fw[i]);

	return 0;
}

int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret = 0;

	if (adreno_dev->pfp_fw == NULL) {
		int len;
		void *ptr;

		ret = _load_firmware(device, adreno_dev->pfp_fwfile,
			&ptr, &len);
		if (ret)
			goto err;

		/* PFP size should be dword aligned */
		if (len % sizeof(uint32_t) != 0) {
			KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
			ret = -EINVAL;
			kfree(ptr);
			goto err;
		}

		adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
		adreno_dev->pfp_fw = ptr;
		adreno_dev->pfp_fw_version = adreno_dev->pfp_fw[5];
	}

err:
	return ret;
}

int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int i;

	if (adreno_dev->pfp_fw == NULL) {
		int ret = adreno_ringbuffer_read_pfp_ucode(device);
		if (ret)
			return ret;
	}

	KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
		adreno_dev->pfp_fw_version);

	adreno_regwrite(device, adreno_dev->gpudev->reg_cp_pfp_ucode_addr, 0);
	for (i = 1; i < adreno_dev->pfp_fw_size; i++)
		adreno_regwrite(device,
			adreno_dev->gpudev->reg_cp_pfp_ucode_data,
			adreno_dev->pfp_fw[i]);

	return 0;
}

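/*
 * adreno_ringbuffer_start() - program the CP ringbuffer registers, load the
 * PM4 and PFP microcode and start the micro engine.
 */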
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
	int status;
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int rb_cntl;
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (rb->flags & KGSL_FLAGS_STARTED)
		return 0;

	if (init_ram)
		rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;

	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			sizeof(struct kgsl_rbmemptrs));

	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			(rb->sizedwords << 2));

	if (adreno_is_a2xx(adreno_dev)) {
		adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
			(rb->memptrs_desc.gpuaddr
			+ GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

		/* setup WPTR delay */
		adreno_regwrite(device, REG_CP_RB_WPTR_DELAY,
			0 /*0x70000010 */);
	}

	/* setup REG_CP_RB_CNTL */
	adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;

	/*
	 * The size of the ringbuffer in the hardware is the log2
	 * representation of the size in quadwords (sizedwords / 2)
	 */
	cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
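	/* e.g. a 32 KB ring: 8192 dwords = 4096 quadwords, rb_bufsz = ilog2(4096) = 12 */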

	/*
	 * Specify the quadwords to read before updating mem RPTR.
	 * Like above, pass the log2 representation of the blocksize
	 * in quadwords.
	 */
	cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
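	/* KGSL_RB_BLKSIZE is in bytes; >> 3 converts to quadwords (8 bytes each) */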

	if (adreno_is_a2xx(adreno_dev)) {
		/* WPTR polling */
		cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
	}

	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

	adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

	adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

	adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
			rb->memptrs_desc.gpuaddr +
			GSL_RB_MEMPTRS_RPTR_OFFSET);

	if (adreno_is_a3xx(adreno_dev)) {
		/* enable access protection to privileged registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);

		/* RBBM registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);

		/* CP registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);

		/* RB registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);

		/* VBIF registers */
		adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
	}

	if (adreno_is_a2xx(adreno_dev)) {
		/* explicitly clear all cp interrupts */
		adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
	}

	/* setup scratch/timestamp */
	adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				soptimestamp));

	adreno_regwrite(device, REG_SCRATCH_UMSK,
			GSL_RB_MEMPTRS_SCRATCH_MASK);

	/* load the CP ucode */
	status = adreno_ringbuffer_load_pm4_ucode(device);
	if (status != 0)
		return status;

	/* load the prefetch parser ucode */
	status = adreno_ringbuffer_load_pfp_ucode(device);
	if (status != 0)
		return status;

	/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
	if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
		adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000E0602);

	rb->rptr = 0;
	rb->wptr = 0;

	/* clear ME_HALT to start micro engine */
	adreno_regwrite(device, REG_CP_ME_CNTL, 0);

	/* ME init is GPU specific, so jump into the sub-function */
	adreno_dev->gpudev->rb_init(adreno_dev, rb);

	/* idle device to validate ME INIT */
	status = adreno_idle(device);

	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;

	return status;
}

void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
	struct kgsl_device *device = rb->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (rb->flags & KGSL_FLAGS_STARTED) {
		if (adreno_is_a200(adreno_dev))
			adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);

		rb->flags &= ~KGSL_FLAGS_STARTED;
	}
}

int adreno_ringbuffer_init(struct kgsl_device *device)
{
	int status;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	rb->device = device;
	/*
	 * It is silly to convert this to words and then back to bytes
	 * immediately below, but most of the rest of the code deals
	 * in words, so we might as well only do the math once
	 */
	rb->sizedwords = KGSL_RB_SIZE >> 2;

	/* allocate memory for ringbuffer */
	status = kgsl_allocate_contiguous(&rb->buffer_desc,
		(rb->sizedwords << 2));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* allocate memory for polling and timestamps */
	/* This really only needs 4 byte alignment, but to use the MMU
	 * it must be placed on a page boundary */
	status = kgsl_allocate_contiguous(&rb->memptrs_desc,
		sizeof(struct kgsl_rbmemptrs));

	if (status != 0) {
		adreno_ringbuffer_close(rb);
		return status;
	}

	/* overlay structure on memptrs memory */
	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

	return 0;
}

void adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

	kgsl_sharedmem_free(&rb->buffer_desc);
	kgsl_sharedmem_free(&rb->memptrs_desc);

	kfree(adreno_dev->pfp_fw);
	kfree(adreno_dev->pm4_fw);

	adreno_dev->pfp_fw = NULL;
	adreno_dev->pm4_fw = NULL;

	memset(rb, 0, sizeof(struct adreno_ringbuffer));
}

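/*
 * adreno_ringbuffer_addcmds() - copy a command stream into the ring,
 * bracketed by KGSL identifiers and followed by timestamp writes and the
 * conditional interrupt packet. Returns the timestamp for the submission.
 */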
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
				struct adreno_context *context,
				unsigned int flags, unsigned int *cmds,
				int sizedwords, uint32_t timestamp)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
	unsigned int *ringcmds;
	unsigned int total_sizedwords = sizedwords;
	unsigned int i;
	unsigned int rcmd_gpu;
	unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
	unsigned int gpuaddr = rb->device->memstore.gpuaddr;

	/*
	 * if the context was not created with per context timestamp
	 * support, we must use the global timestamp since issueibcmds
	 * will be returning that one.
	 */
	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
		context_id = context->id;

	if ((context && context->flags & CTXT_FLAGS_USER_GENERATED_TS) &&
		(!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))) {
		if (timestamp_cmp(rb->timestamp[context_id],
				timestamp) >= 0) {
			KGSL_DRV_ERR(rb->device,
				"Invalid user generated ts <%d:0x%x>, "
				"less than last issued ts <%d:0x%x>\n",
				context_id, timestamp, context_id,
				rb->timestamp[context_id]);
			return -ERANGE;
		}
	}

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	/* 2 dwords to store the start of command sequence */
	total_sizedwords += 2;

	/* internal ib command identifier for the ringbuffer */
	total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;

	/* Add CP_COND_EXEC commands to generate CP_INTERRUPT */
	total_sizedwords += context ? 13 : 0;

	if (adreno_is_a3xx(adreno_dev))
		total_sizedwords += 7;

	if (adreno_is_a2xx(adreno_dev))
		total_sizedwords += 2; /* CP_WAIT_FOR_IDLE */

	total_sizedwords += 2; /* scratchpad ts for fault tolerance */

	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS &&
		!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
		total_sizedwords += 3; /* sop timestamp */
		total_sizedwords += 4; /* eop timestamp */
		total_sizedwords += 3; /* global timestamp without cache
					* flush for non-zero context */
	} else {
		total_sizedwords += 4; /* global timestamp for fault tolerance */
	}

	if (flags & KGSL_CMD_FLAGS_EOF)
		total_sizedwords += 2;

	ringcmds = adreno_ringbuffer_allocspace(rb, context, total_sizedwords);
	if (!ringcmds) {
		/*
		 * We could not allocate space in ringbuffer, just return the
		 * last timestamp
		 */
		return rb->timestamp[context_id];
	}

	rcmd_gpu = rb->buffer_desc.gpuaddr
		+ sizeof(uint)*(rb->wptr-total_sizedwords);

	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);

	if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_INTERNAL_IDENTIFIER);
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	for (i = 0; i < sizedwords; i++) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
		cmds++;
	}

	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
	}

	/* always increment the global timestamp. once. */
	rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;

	/* Do not update context's timestamp for internal submissions */
	if (context && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
		if (context_id == KGSL_MEMSTORE_GLOBAL)
			rb->timestamp[context->id] =
				rb->timestamp[KGSL_MEMSTORE_GLOBAL];
		else if (context->flags & CTXT_FLAGS_USER_GENERATED_TS)
			rb->timestamp[context_id] = timestamp;
		else
			rb->timestamp[context_id]++;
	}
	timestamp = rb->timestamp[context_id];

	/* HW workaround for an MMU page fault
	 * due to memory getting freed early, before
	 * the GPU is done with it.
	 */
	if (adreno_is_a2xx(adreno_dev)) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
	}

	/* scratchpad ts for fault tolerance */
	GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
	GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);

	if (adreno_is_a3xx(adreno_dev)) {
		/*
		 * Flush HLSQ lazy updates to make sure there are no
		 * resources pending for indirect loads after the timestamp
		 */

		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_EVENT_WRITE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x07); /* HLSQ_FLUSH */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
	}

	if (context && context->flags & CTXT_FLAGS_PER_CONTEXT_TS
		&& !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
		/* start-of-pipeline timestamp */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_MEM_WRITE, 2));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(context_id, soptimestamp)));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);

		/* end-of-pipeline timestamp */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_EVENT_WRITE, 3));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp)));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);

		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_MEM_WRITE, 2));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				eoptimestamp)));
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
	} else {
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_EVENT_WRITE, 3));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
				eoptimestamp)));
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
	}
	if (context) {
		/* Conditional execution based on memory values */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_COND_EXEC, 4));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(
				context_id, ts_cmp_enable)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
			KGSL_MEMSTORE_OFFSET(
				context_id, ref_wait_ts)) >> 2);
		GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
		/* # of conditional command DWORDs */
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 8);

		/* Clear the ts_cmp_enable for the context */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_MEM_WRITE, 2));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
			KGSL_MEMSTORE_OFFSET(
				context_id, ts_cmp_enable));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);

		/* Clear the ts_cmp_enable for the global timestamp */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_MEM_WRITE, 2));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, gpuaddr +
			KGSL_MEMSTORE_OFFSET(
				KGSL_MEMSTORE_GLOBAL, ts_cmp_enable));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x0);

		/* Trigger the interrupt */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_INTERRUPT, 1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
	}

	if (adreno_is_a3xx(adreno_dev)) {
		/* Dummy set-constant to trigger context rollover */
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			cp_type3_packet(CP_SET_CONSTANT, 2));
		GSL_RB_WRITE(ringcmds, rcmd_gpu,
			(0x4<<16)|(A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
	}

	if (flags & KGSL_CMD_FLAGS_EOF) {
		GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
		GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_END_OF_FRAME_IDENTIFIER);
	}

	adreno_ringbuffer_submit(rb);

	return timestamp;
}

unsigned int
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
			struct adreno_context *drawctxt,
			unsigned int flags,
			unsigned int *cmds,
			int sizedwords)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

	if (device->state & KGSL_STATE_HUNG)
		return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
					KGSL_TIMESTAMP_RETIRED);

	flags |= KGSL_CMD_FLAGS_INTERNAL_ISSUE;

	return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds,
					sizedwords, 0);
}

static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
			int sizedwords);

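/*
 * _handle_type3() - sanity check a type-3 packet submitted from userspace;
 * recurses into indirect buffers and rejects opcodes that should only ever
 * be issued by the kernel.
 */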
static bool
_handle_type3(struct kgsl_device_private *dev_priv, uint *hostaddr)
{
	unsigned int opcode = cp_type3_opcode(*hostaddr);
	switch (opcode) {
	case CP_INDIRECT_BUFFER_PFD:
	case CP_INDIRECT_BUFFER_PFE:
	case CP_COND_INDIRECT_BUFFER_PFE:
	case CP_COND_INDIRECT_BUFFER_PFD:
		return _parse_ibs(dev_priv, hostaddr[1], hostaddr[2]);
	case CP_NOP:
	case CP_WAIT_FOR_IDLE:
	case CP_WAIT_REG_MEM:
	case CP_WAIT_REG_EQ:
	case CP_WAT_REG_GTE:
	case CP_WAIT_UNTIL_READ:
	case CP_WAIT_IB_PFD_COMPLETE:
	case CP_REG_RMW:
	case CP_REG_TO_MEM:
	case CP_MEM_WRITE:
	case CP_MEM_WRITE_CNTR:
	case CP_COND_EXEC:
	case CP_COND_WRITE:
	case CP_EVENT_WRITE:
	case CP_EVENT_WRITE_SHD:
	case CP_EVENT_WRITE_CFL:
	case CP_EVENT_WRITE_ZPD:
	case CP_DRAW_INDX:
	case CP_DRAW_INDX_2:
	case CP_DRAW_INDX_BIN:
	case CP_DRAW_INDX_2_BIN:
	case CP_VIZ_QUERY:
	case CP_SET_STATE:
	case CP_SET_CONSTANT:
	case CP_IM_LOAD:
	case CP_IM_LOAD_IMMEDIATE:
	case CP_LOAD_CONSTANT_CONTEXT:
	case CP_INVALIDATE_STATE:
	case CP_SET_SHADER_BASES:
	case CP_SET_BIN_MASK:
	case CP_SET_BIN_SELECT:
	case CP_SET_BIN_BASE_OFFSET:
	case CP_SET_BIN_DATA:
	case CP_CONTEXT_UPDATE:
	case CP_INTERRUPT:
	case CP_IM_STORE:
	case CP_LOAD_STATE:
		break;
	/* these shouldn't come from userspace */
	case CP_ME_INIT:
	case CP_SET_PROTECTED_MODE:
	default:
		KGSL_CMD_ERR(dev_priv->device, "bad CP opcode %0x\n", opcode);
		return false;
	}

	return true;
}

static bool
_handle_type0(struct kgsl_device_private *dev_priv, uint *hostaddr)
{
	unsigned int reg = type0_pkt_offset(*hostaddr);
	unsigned int cnt = type0_pkt_size(*hostaddr);
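	/* reject writes that touch registers outside the 0x0192..0x7fff range */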
	if (reg < 0x0192 || (reg + cnt) >= 0x8000) {
		KGSL_CMD_ERR(dev_priv->device, "bad type0 reg: 0x%0x cnt: %d\n",
			reg, cnt);
		return false;
	}
	return true;
}

/*
 * Traverse IBs and dump them to test vector. Detect swap by inspecting
 * register writes, keeping note of the current state, and dump
 * framebuffer config to test vector
 */
static bool _parse_ibs(struct kgsl_device_private *dev_priv,
			uint gpuaddr, int sizedwords)
{
	static uint level; /* recursion level */
	bool ret = false;
	uint *hostaddr, *hoststart;
	int dwords_left = sizedwords; /* dwords left in the current command
					 buffer */
	struct kgsl_mem_entry *entry;

	spin_lock(&dev_priv->process_priv->mem_lock);
	entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
			gpuaddr, sizedwords * sizeof(uint));
	spin_unlock(&dev_priv->process_priv->mem_lock);
	if (entry == NULL) {
		KGSL_CMD_ERR(dev_priv->device,
			"no mapping for gpuaddr: 0x%08x\n", gpuaddr);
		return false;
	}

	hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr);
	if (hostaddr == NULL) {
		KGSL_CMD_ERR(dev_priv->device,
			"no mapping for gpuaddr: 0x%08x\n", gpuaddr);
		return false;
	}

	hoststart = hostaddr;

	level++;

	KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
		gpuaddr, sizedwords, hostaddr);

	mb();
	while (dwords_left > 0) {
		bool cur_ret = true;
		int count = 0; /* dword count including packet header */

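		/* bits [31:30] of a PM4 packet header select the packet type */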
		switch (*hostaddr >> 30) {
		case 0x0: /* type-0 */
			count = (*hostaddr >> 16)+2;
			cur_ret = _handle_type0(dev_priv, hostaddr);
			break;
		case 0x1: /* type-1 */
			count = 2;
			break;
		case 0x3: /* type-3 */
			count = ((*hostaddr >> 16) & 0x3fff) + 2;
			cur_ret = _handle_type3(dev_priv, hostaddr);
			break;
		default:
			KGSL_CMD_ERR(dev_priv->device, "unexpected type: "
				"type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
				*hostaddr >> 30, *hostaddr, hostaddr,
				gpuaddr+4*(sizedwords-dwords_left));
			cur_ret = false;
			count = dwords_left;
			break;
		}

		if (!cur_ret) {
			KGSL_CMD_ERR(dev_priv->device,
				"bad sub-type: #:%d/%d, v:0x%08x"
				" @ 0x%p[gb:0x%08x], level:%d\n",
				sizedwords-dwords_left, sizedwords, *hostaddr,
				hostaddr, gpuaddr+4*(sizedwords-dwords_left),
				level);

			if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
				>= 2)
				print_hex_dump(KERN_ERR,
					level == 1 ? "IB1:" : "IB2:",
					DUMP_PREFIX_OFFSET, 32, 4, hoststart,
					sizedwords*4, 0);
			goto done;
		}

		/* jump to next packet */
		dwords_left -= count;
		hostaddr += count;
		if (dwords_left < 0) {
			KGSL_CMD_ERR(dev_priv->device,
				"bad count: c:%d, #:%d/%d, "
				"v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
				count, sizedwords-(dwords_left+count),
				sizedwords, *(hostaddr-count), hostaddr-count,
				gpuaddr+4*(sizedwords-(dwords_left+count)),
				level);
			if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
				>= 2)
				print_hex_dump(KERN_ERR,
					level == 1 ? "IB1:" : "IB2:",
					DUMP_PREFIX_OFFSET, 32, 4, hoststart,
					sizedwords*4, 0);
			goto done;
		}
	}

	ret = true;
done:
	if (!ret)
		KGSL_DRV_ERR(dev_priv->device,
			"parsing failed: gpuaddr:0x%08x, "
			"host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);

	level--;

	return ret;
}

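/*
 * adreno_ringbuffer_issueibcmds() - build an indirect-buffer command list
 * for a user submission, optionally verify the IBs, then add it to the
 * ringbuffer via adreno_ringbuffer_addcmds().
 */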
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
				struct kgsl_context *context,
				struct kgsl_ibdesc *ibdesc,
				unsigned int numibs,
				uint32_t *timestamp,
				unsigned int flags)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int *link = 0;
	unsigned int *cmds;
	unsigned int i;
	struct adreno_context *drawctxt = NULL;
	unsigned int start_index = 0;
	int ret = 0;

	if (device->state & KGSL_STATE_HUNG) {
		ret = -EBUSY;
		goto done;
	}

	if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
		context == NULL || ibdesc == 0 || numibs == 0) {
		ret = -EINVAL;
		goto done;
	}
	drawctxt = context->devctxt;

	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
		KGSL_CTXT_ERR(device, "proc %s failed fault tolerance,"
			" will not accept commands for context %d\n",
			drawctxt->pid_name, drawctxt->id);
		ret = -EDEADLK;
		goto done;
	}

	if (drawctxt->flags & CTXT_FLAGS_SKIP_EOF) {
		KGSL_CTXT_ERR(device,
			"proc %s triggered fault tolerance,"
			" skipping commands for context till EOF %d\n",
			drawctxt->pid_name, drawctxt->id);
		if (flags & KGSL_CMD_FLAGS_EOF)
			drawctxt->flags &= ~CTXT_FLAGS_SKIP_EOF;
		numibs = 0;
	}

	cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
				GFP_KERNEL);
	if (!link) {
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * When preamble is enabled, the preamble buffer with the state
	 * restoration commands is stored in the first node of the IB chain.
	 * We can skip it if a context switch hasn't occurred.
	 */

	if (drawctxt->flags & CTXT_FLAGS_PREAMBLE &&
		adreno_dev->drawctxt_active == drawctxt)
		start_index = 1;

	if (!start_index) {
		*cmds++ = cp_nop_packet(1);
		*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
	} else {
		*cmds++ = cp_nop_packet(4);
		*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[0].gpuaddr;
		*cmds++ = ibdesc[0].sizedwords;
	}
	for (i = start_index; i < numibs; i++) {
		if (unlikely(adreno_dev->ib_check_level >= 1 &&
			!_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
				ibdesc[i].sizedwords))) {
			ret = -EINVAL;
			goto done;
		}

		if (ibdesc[i].sizedwords == 0) {
			ret = -EINVAL;
			goto done;
		}

		*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
		*cmds++ = ibdesc[i].gpuaddr;
		*cmds++ = ibdesc[i].sizedwords;
	}

	*cmds++ = cp_nop_packet(1);
	*cmds++ = KGSL_END_OF_IB_IDENTIFIER;

	kgsl_setstate(&device->mmu, context->id,
			kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
			device->id));

	adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

	*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
					drawctxt,
					(flags & KGSL_CMD_FLAGS_EOF),
					&link[0], (cmds - link), *timestamp);

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
	/*
	 * insert wait for idle after every IB1
	 * this is conservative but works reliably and is ok
	 * even for performance simulations
	 */
	adreno_idle(device);
#endif

	/*
	 * If the context hung and was recovered then return an error so that
	 * the application may handle it
	 */
	if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_FT) {
		drawctxt->flags &= ~CTXT_FLAGS_GPU_HANG_FT;
		ret = -EPROTO;
	}

done:
	trace_kgsl_issueibcmds(device, context->id, ibdesc, numibs,
		*timestamp, flags, ret, drawctxt->type);

	kfree(link);
	return ret;
}

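/*
 * Scan forward from rb_rptr to wptr and rewrite the cp_nop_packet(4) that
 * skips the preamble IB of the command sequence to a cp_nop_packet(1), so
 * the preamble (state restore) executes when the commands are replayed.
 */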
static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
				unsigned int rb_rptr)
{
	unsigned int temp_rb_rptr = rb_rptr;
	unsigned int size = rb->buffer_desc.size;
	unsigned int val[2];
	int i = 0;
	bool check = false;
	bool cmd_start = false;

	/* Go till the start of the ib sequence and turn on preamble */
	while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
		if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
			/* switch to the previously read dword */
			i = (i + 1) % 2;
			if (val[i] == cp_nop_packet(4)) {
				temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
						temp_rb_rptr, size);
				kgsl_sharedmem_writel(&rb->buffer_desc,
					temp_rb_rptr, cp_nop_packet(1));
			}
			KGSL_FT_INFO(rb->device,
				"Turned preamble on at offset 0x%x\n",
				temp_rb_rptr / 4);
			break;
		}
		/* If we reach the beginning of the next command sequence
		 * then exit. The first command encountered is the current
		 * one, so don't break on that. */
		if (KGSL_CMD_IDENTIFIER == val[i]) {
			if (cmd_start)
				break;
			cmd_start = true;
		}

		i = (i + 1) % 2;
		if (1 == i)
			check = true;
		temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
							size);
	}
}

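/*
 * adreno_ringbuffer_extract() - walk the ring from the last-known-good
 * point, saving commands from innocent contexts and from the hung context
 * into separate fault tolerance buffers for replay.
 */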
void adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
				struct adreno_ft_data *ft_data)
{
	struct kgsl_device *device = rb->device;
	unsigned int rb_rptr = ft_data->start_of_replay_cmds;
	unsigned int good_rb_idx = 0, bad_rb_idx = 0, temp_rb_idx = 0;
	unsigned int last_good_cmd_end_idx = 0, last_bad_cmd_end_idx = 0;
	unsigned int cmd_start_idx = 0;
	unsigned int val1 = 0;
	int copy_rb_contents = 0;
	unsigned int temp_rb_rptr;
	struct kgsl_context *k_ctxt;
	struct adreno_context *a_ctxt;
	unsigned int size = rb->buffer_desc.size;
	unsigned int *temp_rb_buffer = ft_data->rb_buffer;
	int *rb_size = &ft_data->rb_size;
	unsigned int *bad_rb_buffer = ft_data->bad_rb_buffer;
	int *bad_rb_size = &ft_data->bad_rb_size;
	unsigned int *good_rb_buffer = ft_data->good_rb_buffer;
	int *good_rb_size = &ft_data->good_rb_size;

	/*
	 * If the start index from where commands need to be copied is invalid
	 * then no need to save off any commands
	 */
	if (0xFFFFFFFF == ft_data->start_of_replay_cmds)
		return;

	k_ctxt = kgsl_context_get(device, ft_data->context_id);

	if (k_ctxt) {
		a_ctxt = k_ctxt->devctxt;
		if (a_ctxt->flags & CTXT_FLAGS_PREAMBLE)
			_turn_preamble_on_for_ib_seq(rb, rb_rptr);
		kgsl_context_put(k_ctxt);
	}
	k_ctxt = NULL;

	/* Walk the rb from the context switch. Omit any commands
	 * for an invalid context. */
	while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
		kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);

		if (KGSL_CMD_IDENTIFIER == val1) {
			/* Start is the NOP dword that comes before
			 * KGSL_CMD_IDENTIFIER */
			cmd_start_idx = temp_rb_idx - 1;
			if ((copy_rb_contents) && (good_rb_idx))
				last_good_cmd_end_idx = good_rb_idx - 1;
			if ((!copy_rb_contents) && (bad_rb_idx))
				last_bad_cmd_end_idx = bad_rb_idx - 1;
		}

		/* check for context switch indicator */
		if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
			unsigned int temp_idx, val2;
			/* increment by 3 to get to the context_id */
			temp_rb_rptr = rb_rptr + (3 * sizeof(unsigned int)) %
					size;
			kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
						temp_rb_rptr);

			/* if context switches to a context that did not cause
			 * hang then start saving the rb contents as those
			 * commands can be executed */
			k_ctxt = kgsl_context_get(rb->device, val2);

			if (k_ctxt) {
				a_ctxt = k_ctxt->devctxt;

				/* If we are changing to a good context and were not
				 * copying commands then copy over commands to the good
				 * context */
				if (!copy_rb_contents && ((k_ctxt &&
					!(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
					!k_ctxt)) {
					for (temp_idx = cmd_start_idx;
						temp_idx < temp_rb_idx;
						temp_idx++)
						good_rb_buffer[good_rb_idx++] =
							temp_rb_buffer[temp_idx];
					ft_data->last_valid_ctx_id = val2;
					copy_rb_contents = 1;
					/* remove the good commands from bad buffer */
					bad_rb_idx = last_bad_cmd_end_idx;
				} else if (copy_rb_contents && k_ctxt &&
					(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) {

					/* If we are changing back to a bad context
					 * from good ctxt and were not copying commands
					 * to bad ctxt then copy over commands to
					 * the bad context */
					for (temp_idx = cmd_start_idx;
						temp_idx < temp_rb_idx;
						temp_idx++)
						bad_rb_buffer[bad_rb_idx++] =
							temp_rb_buffer[temp_idx];
					/* If we are changing to bad context then
					 * remove the dwords we copied for this
					 * sequence from the good buffer */
					good_rb_idx = last_good_cmd_end_idx;
					copy_rb_contents = 0;
				}
			}
			kgsl_context_put(k_ctxt);
		}

		if (copy_rb_contents)
			good_rb_buffer[good_rb_idx++] = val1;
		else
			bad_rb_buffer[bad_rb_idx++] = val1;

		/* Copy both good and bad commands to temp buffer */
		temp_rb_buffer[temp_rb_idx++] = val1;

		rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
	}
	*good_rb_size = good_rb_idx;
	*bad_rb_size = bad_rb_idx;
	*rb_size = temp_rb_idx;
}

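/*
 * adreno_ringbuffer_restore() - write previously extracted commands back
 * into the ring after fault tolerance and resubmit them to the CP.
 */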
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
			int num_rb_contents)
{
	int i;
	unsigned int *ringcmds;
	unsigned int rcmd_gpu;

	if (!num_rb_contents)
		return;

	if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
		adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
		rb->rptr = 0;
		BUG_ON(num_rb_contents > rb->buffer_desc.size);
	}
	ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
	rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
	for (i = 0; i < num_rb_contents; i++)
		GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
	rb->wptr += num_rb_contents;
	adreno_ringbuffer_submit(rb);
}