/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_trace.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "adreno_debugfs.h"

#include "a2xx_reg.h"
#include "a3xx_reg.h"

/* Dwords reserved at the end of the ringbuffer for the wrap-around NOP */
#define GSL_RB_NOP_SIZEDWORDS 2

#define CP_DEBUG_DEFAULT 0xA000000

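/*
 * adreno_ringbuffer_submit() - make queued commands visible to the GPU by
 * publishing the CPU-side write pointer to the CP's RB_WPTR register.
 */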
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
        BUG_ON(rb->wptr == 0);

        kgsl_pwrscale_busy(rb->device);

        /* make sure the ringbuffer writes land before moving the wptr */
        mb();

        adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}

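/*
 * adreno_ringbuffer_waitspace() - wait until the GPU read pointer has
 * advanced far enough to leave room for numcmds dwords.  If wptr_ahead is
 * set, the remainder of the buffer is filled with a NOP packet and the
 * write pointer wraps back to the start first.
 */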
static void
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
                            int wptr_ahead)
{
        int nopcount;
        unsigned int freecmds;
        unsigned int *cmds;
        uint cmds_gpu;
        unsigned long wait_time;
        unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
        unsigned long wait_time_part;
        unsigned int prev_reg_val[hang_detect_regs_count];

        memset(prev_reg_val, 0, sizeof(prev_reg_val));

        if (wptr_ahead) {
                /* fill the remainder of the ringbuffer with a NOP packet */
                nopcount = rb->sizedwords - rb->wptr - 1;

                cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
                cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint) * rb->wptr;

                GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));

                /*
                 * Make sure rptr is not 0 before submitting commands at the
                 * end of the ringbuffer, so rptr and wptr cannot become
                 * equal while the buffer still has contents.
                 */
                do {
                        GSL_RB_GET_READPTR(rb, &rb->rptr);
                } while (!rb->rptr);

                rb->wptr++;

                adreno_ringbuffer_submit(rb);

                rb->wptr = 0;
        }

        wait_time = jiffies + wait_timeout;
        wait_time_part = jiffies + msecs_to_jiffies(KGSL_TIMEOUT_PART);

        /* wait for space in the ringbuffer */
        while (1) {
                GSL_RB_GET_READPTR(rb, &rb->rptr);

                freecmds = rb->rptr - rb->wptr;

                if (freecmds == 0 || freecmds > numcmds)
                        break;

                /* check for a hang on every partial timeout */
                if (time_after(jiffies, wait_time_part)) {
                        wait_time_part = jiffies +
                                msecs_to_jiffies(KGSL_TIMEOUT_PART);
                        if (adreno_hang_detect(rb->device, prev_reg_val)) {
                                KGSL_DRV_ERR(rb->device,
                                "Hang detected while waiting for freespace in "
                                "ringbuffer rptr: 0x%x, wptr: 0x%x\n",
                                rb->rptr, rb->wptr);
                                goto err;
                        }
                }

                if (time_after(jiffies, wait_time)) {
                        KGSL_DRV_ERR(rb->device,
                        "Timed out while waiting for freespace in ringbuffer "
                        "rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr);
                        goto err;
                }

                continue;

err:
                if (!adreno_dump_and_recover(rb->device)) {
                        wait_time = jiffies + wait_timeout;
                } else {
                        /* GPU is hung and recovery failed */
                        BUG();
                }
        }
}

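/*
 * adreno_ringbuffer_allocspace() - reserve numcmds dwords in the
 * ringbuffer, waiting for space (and wrapping) as needed.  Returns a host
 * pointer to the reserved region and advances the write pointer past it.
 */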
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
                                           unsigned int numcmds)
{
        unsigned int *ptr = NULL;

        BUG_ON(numcmds >= rb->sizedwords);

        GSL_RB_GET_READPTR(rb, &rb->rptr);

        if (rb->wptr >= rb->rptr) {
                /* wptr ahead of or equal to rptr */
                /* reserve dwords for the wrap-around nop packet */
                if ((rb->wptr + numcmds) > (rb->sizedwords -
                                GSL_RB_NOP_SIZEDWORDS))
                        adreno_ringbuffer_waitspace(rb, numcmds, 1);
        } else {
                /* wptr behind rptr */
                if ((rb->wptr + numcmds) >= rb->rptr)
                        adreno_ringbuffer_waitspace(rb, numcmds, 0);
                /* check for remaining space before the buffer end */
                if ((rb->wptr + numcmds) > (rb->sizedwords -
                                GSL_RB_NOP_SIZEDWORDS))
                        adreno_ringbuffer_waitspace(rb, numcmds, 1);
        }

        ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
        rb->wptr += numcmds;

        return ptr;
}

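/*
 * _load_firmware() - fetch a firmware image via request_firmware() and
 * return a kmalloc'd copy of its contents along with its length.
 */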
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
                          void **data, int *len)
{
        const struct firmware *fw = NULL;
        int ret;

        ret = request_firmware(&fw, fwfile, device->dev);

        if (ret) {
                KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
                             fwfile, ret);
                return ret;
        }

        *data = kmalloc(fw->size, GFP_KERNEL);

        if (*data) {
                memcpy(*data, fw->data, fw->size);
                *len = fw->size;
        } else
                KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);

        release_firmware(fw);
        return (*data != NULL) ? 0 : -ENOMEM;
}

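/*
 * adreno_ringbuffer_read_pm4_ucode() - load the PM4 microcode file into
 * memory and cache it on the adreno device.  The image is expected to be
 * one header dword followed by whole 3-dword records, which is what the
 * size check below enforces.
 */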
int adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int ret = 0;

        if (adreno_dev->pm4_fw == NULL) {
                int len;
                void *ptr;

                ret = _load_firmware(device, adreno_dev->pm4_fwfile,
                                     &ptr, &len);

                if (ret)
                        goto err;

                /* PM4 size is 3 dword aligned plus 1 dword of version */
                if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
                        KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
                        ret = -EINVAL;
                        kfree(ptr);
                        goto err;
                }

                adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
                adreno_dev->pm4_fw = ptr;
                adreno_dev->pm4_fw_version = adreno_dev->pm4_fw[1];
        }

err:
        return ret;
}

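/*
 * adreno_ringbuffer_load_pm4_ucode() - write the cached PM4 microcode into
 * the CP microengine RAM, reading it from disk first if necessary.
 */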
int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int i;

        if (adreno_dev->pm4_fw == NULL) {
                int ret = adreno_ringbuffer_read_pm4_ucode(device);
                if (ret)
                        return ret;
        }

        KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
                      adreno_dev->pm4_fw_version);

        if (adreno_is_a3xx(adreno_dev))
                adreno_regwrite(device, REG_CP_DEBUG, CP_DEBUG_DEFAULT);
        else
                adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
        adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
        /* the header dword at pm4_fw[0] is not written to the ME RAM */
        for (i = 1; i < adreno_dev->pm4_fw_size; i++)
                adreno_regwrite(device, REG_CP_ME_RAM_DATA,
                                adreno_dev->pm4_fw[i]);

        return 0;
}

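/*
 * adreno_ringbuffer_read_pfp_ucode() - load the PFP (prefetch parser)
 * microcode file into memory and cache it on the adreno device.
 */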
int adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int ret = 0;

        if (adreno_dev->pfp_fw == NULL) {
                int len;
                void *ptr;

                ret = _load_firmware(device, adreno_dev->pfp_fwfile,
                                     &ptr, &len);
                if (ret)
                        goto err;

                /* PFP size should be a whole number of dwords */
                if (len % sizeof(uint32_t) != 0) {
                        KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
                        ret = -EINVAL;
                        kfree(ptr);
                        goto err;
                }

                adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
                adreno_dev->pfp_fw = ptr;
                adreno_dev->pfp_fw_version = adreno_dev->pfp_fw[5];
        }

err:
        return ret;
}

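/*
 * adreno_ringbuffer_load_pfp_ucode() - write the cached PFP microcode into
 * the prefetch parser's ucode RAM, reading it from disk first if necessary.
 */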
int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int i;

        if (adreno_dev->pfp_fw == NULL) {
                int ret = adreno_ringbuffer_read_pfp_ucode(device);
                if (ret)
                        return ret;
        }

        KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
                      adreno_dev->pfp_fw_version);

        adreno_regwrite(device, adreno_dev->gpudev->reg_cp_pfp_ucode_addr, 0);
        for (i = 1; i < adreno_dev->pfp_fw_size; i++)
                adreno_regwrite(device,
                                adreno_dev->gpudev->reg_cp_pfp_ucode_data,
                                adreno_dev->pfp_fw[i]);
        return 0;
}

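/*
 * adreno_ringbuffer_start() - program the CP ringbuffer registers, load the
 * PM4/PFP microcode and bring the command processor out of halt.
 */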
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
        int status;

        union reg_cp_rb_cntl cp_rb_cntl;
        unsigned int rb_cntl;
        struct kgsl_device *device = rb->device;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

        if (rb->flags & KGSL_FLAGS_STARTED)
                return 0;

        if (init_ram)
                rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;

        kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
                           sizeof(struct kgsl_rbmemptrs));

        kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
                           (rb->sizedwords << 2));

        if (adreno_is_a2xx(adreno_dev)) {
                adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
                                (rb->memptrs_desc.gpuaddr
                                 + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));

                /* setup WPTR delay */
                adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0);
        }

        /* setup rb_cntl via a local copy of the register */
        adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
        cp_rb_cntl.val = rb_cntl;

        /* ringbuffer size: log2 of the size in quadwords */
        cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);

        /* quadwords to read before updating mem RPTR, log2 encoded */
        cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);

        if (adreno_is_a2xx(adreno_dev)) {
                /* WPTR polling */
                cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN;
        }

        /* mem RPTR writebacks */
        cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;

        adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);

        adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);

        adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
                        rb->memptrs_desc.gpuaddr +
                        GSL_RB_MEMPTRS_RPTR_OFFSET);

        if (adreno_is_a3xx(adreno_dev)) {
                /* enable access protection to privileged registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);

                /* RBBM registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_0, 0x63000040);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_1, 0x62000080);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_2, 0x600000CC);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_3, 0x60000108);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_4, 0x64000140);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_5, 0x66000400);

                /* CP registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_6, 0x65000700);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_7, 0x610007D8);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_8, 0x620007E0);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_9, 0x61001178);
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_A, 0x64001180);

                /* RB registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_B, 0x60003300);

                /* VBIF registers */
                adreno_regwrite(device, A3XX_CP_PROTECT_REG_C, 0x6B00C000);
        }

        if (adreno_is_a2xx(adreno_dev)) {
                /* explicitly clear all cp interrupts */
                adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
        }

        /* setup scratch/timestamp */
        adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
                        KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
                                             soptimestamp));

        adreno_regwrite(device, REG_SCRATCH_UMSK,
                        GSL_RB_MEMPTRS_SCRATCH_MASK);

        /* load the CP ucode */
        status = adreno_ringbuffer_load_pm4_ucode(device);
        if (status != 0)
                return status;

        /* load the prefetch parser ucode */
        status = adreno_ringbuffer_load_pfp_ucode(device);
        if (status != 0)
                return status;

        /* CP queue thresholds */
        if (adreno_is_a305(adreno_dev) || adreno_is_a320(adreno_dev))
                adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000E0602);

        rb->rptr = 0;
        rb->wptr = 0;

        /* clear ME_HALT to start the micro engine */
        adreno_regwrite(device, REG_CP_ME_CNTL, 0);

        /* ME init is GPU specific, so jump into the sub-function */
        adreno_dev->gpudev->rb_init(adreno_dev, rb);

        /* idle the device to validate ME init */
        status = adreno_idle(device);

        if (status == 0)
                rb->flags |= KGSL_FLAGS_STARTED;

        return status;
}

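/*
 * adreno_ringbuffer_stop() - halt the CP microengine and mark the
 * ringbuffer stopped.
 */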
void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
        if (rb->flags & KGSL_FLAGS_STARTED) {
                /* set ME_HALT */
                adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
                rb->flags &= ~KGSL_FLAGS_STARTED;
        }
}

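/*
 * adreno_ringbuffer_init() - allocate contiguous memory for the ringbuffer
 * and its read-pointer writeback block; the hardware setup itself happens
 * in adreno_ringbuffer_start().
 */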
int adreno_ringbuffer_init(struct kgsl_device *device)
{
        int status;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

        rb->device = device;
        rb->sizedwords = KGSL_RB_SIZE >> 2;

        /* allocate memory for ringbuffer */
        status = kgsl_allocate_contiguous(&rb->buffer_desc,
                                          (rb->sizedwords << 2));

        if (status != 0) {
                adreno_ringbuffer_close(rb);
                return status;
        }

        /* allocate memory for polling and timestamps */
        status = kgsl_allocate_contiguous(&rb->memptrs_desc,
                                          sizeof(struct kgsl_rbmemptrs));

        if (status != 0) {
                adreno_ringbuffer_close(rb);
                return status;
        }

        /* overlay the structure on the memptrs memory */
        rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;

        return 0;
}

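/*
 * adreno_ringbuffer_close() - free the ringbuffer memory and the cached
 * microcode images.
 */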
void adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);

        kgsl_sharedmem_free(&rb->buffer_desc);
        kgsl_sharedmem_free(&rb->memptrs_desc);

        kfree(adreno_dev->pfp_fw);
        kfree(adreno_dev->pm4_fw);

        adreno_dev->pfp_fw = NULL;
        adreno_dev->pm4_fw = NULL;

        memset(rb, 0, sizeof(struct adreno_ringbuffer));
}

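/*
 * adreno_ringbuffer_addcmds() - copy a command stream into the ringbuffer,
 * bracketed by a command identifier, optional protected-mode toggles and
 * the timestamp/interrupt packets, then submit it to the CP.  Returns the
 * timestamp assigned to the command batch.
 */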
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
                          struct adreno_context *context,
                          unsigned int flags, unsigned int *cmds,
                          int sizedwords)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
        unsigned int *ringcmds;
        unsigned int timestamp;
        unsigned int total_sizedwords = sizedwords;
        unsigned int i;
        unsigned int rcmd_gpu;
        unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
        unsigned int gpuaddr = rb->device->memstore.gpuaddr;

        if (context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS))
                context_id = context->id;

        /* protected mode toggles around the user commands */
        total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
        /* CP_COND_EXEC + CP_INTERRUPT block below */
        total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
        /* 2 dwords for the start-of-command identifier */
        total_sizedwords += 2;

        /* HLSQ flush (4) plus the dummy SET_CONSTANT (3) on a3xx */
        if (adreno_is_a3xx(adreno_dev))
                total_sizedwords += 7;

        total_sizedwords += 2; /* scratchpad ts for recovery */
        if (context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
                total_sizedwords += 3; /* sop timestamp */
                total_sizedwords += 4; /* eop timestamp */
                total_sizedwords += 3; /* global timestamp without cache
                                        * flush for the non-global context */
        } else {
                total_sizedwords += 4; /* global timestamp for recovery */
        }

        ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
        if (context && (context->flags & CTXT_FLAGS_GPU_HANG)) {
                KGSL_CTXT_WARN(rb->device,
                "Context %p caused a gpu hang. Will not accept commands for context %d\n",
                context, context->id);
                return rb->timestamp[context_id];
        }

        rcmd_gpu = rb->buffer_desc.gpuaddr
                   + sizeof(uint) * (rb->wptr - total_sizedwords);

        GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
        GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);

        if (flags & KGSL_CMD_FLAGS_PMODE) {
                /* disable protected mode error checking */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
        }

        for (i = 0; i < sizedwords; i++) {
                GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
                cmds++;
        }

        if (flags & KGSL_CMD_FLAGS_PMODE) {
                /* re-enable protected mode error checking */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
        }

        /* always increment the global timestamp, exactly once */
        rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
        if (context) {
                if (context_id == KGSL_MEMSTORE_GLOBAL)
                        rb->timestamp[context_id] =
                                rb->timestamp[KGSL_MEMSTORE_GLOBAL];
                else
                        rb->timestamp[context_id]++;
        }
        timestamp = rb->timestamp[context_id];

        /* scratchpad ts for recovery */
        GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
        GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);

        if (adreno_is_a3xx(adreno_dev)) {
                /*
                 * Flush HLSQ lazy updates so no resources are pending for
                 * indirect loads after the timestamp
                 */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_EVENT_WRITE, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x07); /* HLSQ_FLUSH */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_WAIT_FOR_IDLE, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
        }

        if (context && (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)) {
                /* start-of-pipeline timestamp */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_MEM_WRITE, 2));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                        KGSL_MEMSTORE_OFFSET(context->id, soptimestamp)));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);

                /* end-of-pipeline timestamp */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_EVENT_WRITE, 3));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
                GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                        KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp)));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);

                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_MEM_WRITE, 2));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                        KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
                                             eoptimestamp)));
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
        } else {
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_EVENT_WRITE, 3));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
                GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                        KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
                                             eoptimestamp)));
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
        }

        if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
                /* conditional execution based on memory values */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_COND_EXEC, 4));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                        KGSL_MEMSTORE_OFFSET(
                                context_id, ts_cmp_enable)) >> 2);
                GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
                        KGSL_MEMSTORE_OFFSET(
                                context_id, ref_wait_ts)) >> 2);
                GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
                /* # of conditional command dwords */
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_INTERRUPT, 1));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
        }

        if (adreno_is_a3xx(adreno_dev)) {
                /* dummy set-constant to trigger context rollover */
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                             cp_type3_packet(CP_SET_CONSTANT, 2));
                GSL_RB_WRITE(ringcmds, rcmd_gpu,
                        (0x4 << 16) | (A3XX_HLSQ_CL_KERNEL_GROUP_X_REG - 0x2000));
                GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
        }

        adreno_ringbuffer_submit(rb);

        return timestamp;
}

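/*
 * adreno_ringbuffer_issuecmds() - kernel-side entry point for adding
 * commands to the ringbuffer; short-circuits with the last retired
 * timestamp if the device is hung.
 */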
unsigned int
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
                            struct adreno_context *drawctxt,
                            unsigned int flags,
                            unsigned int *cmds,
                            int sizedwords)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;

        if (device->state & KGSL_STATE_HUNG)
                return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
                                          KGSL_TIMESTAMP_RETIRED);
        return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds, sizedwords);
}

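/*
 * The IB parser below is only active when ib_check_level is non-zero.  It
 * walks user-supplied indirect buffers and rejects any packet that is not
 * on the whitelist in _handle_type3() or that writes registers outside the
 * range allowed by _handle_type0().
 */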
static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
                       int sizedwords);

static bool
_handle_type3(struct kgsl_device_private *dev_priv, uint *hostaddr)
{
        unsigned int opcode = cp_type3_opcode(*hostaddr);
        switch (opcode) {
        case CP_INDIRECT_BUFFER_PFD:
        case CP_INDIRECT_BUFFER_PFE:
        case CP_COND_INDIRECT_BUFFER_PFE:
        case CP_COND_INDIRECT_BUFFER_PFD:
                return _parse_ibs(dev_priv, hostaddr[1], hostaddr[2]);
        case CP_NOP:
        case CP_WAIT_FOR_IDLE:
        case CP_WAIT_REG_MEM:
        case CP_WAIT_REG_EQ:
        case CP_WAT_REG_GTE: /* sic: spelled this way in adreno_pm4types.h */
        case CP_WAIT_UNTIL_READ:
        case CP_WAIT_IB_PFD_COMPLETE:
        case CP_REG_RMW:
        case CP_REG_TO_MEM:
        case CP_MEM_WRITE:
        case CP_MEM_WRITE_CNTR:
        case CP_COND_EXEC:
        case CP_COND_WRITE:
        case CP_EVENT_WRITE:
        case CP_EVENT_WRITE_SHD:
        case CP_EVENT_WRITE_CFL:
        case CP_EVENT_WRITE_ZPD:
        case CP_DRAW_INDX:
        case CP_DRAW_INDX_2:
        case CP_DRAW_INDX_BIN:
        case CP_DRAW_INDX_2_BIN:
        case CP_VIZ_QUERY:
        case CP_SET_STATE:
        case CP_SET_CONSTANT:
        case CP_IM_LOAD:
        case CP_IM_LOAD_IMMEDIATE:
        case CP_LOAD_CONSTANT_CONTEXT:
        case CP_INVALIDATE_STATE:
        case CP_SET_SHADER_BASES:
        case CP_SET_BIN_MASK:
        case CP_SET_BIN_SELECT:
        case CP_SET_BIN_BASE_OFFSET:
        case CP_SET_BIN_DATA:
        case CP_CONTEXT_UPDATE:
        case CP_INTERRUPT:
        case CP_IM_STORE:
        case CP_LOAD_STATE:
                break;
        /* these shouldn't come from userspace */
        case CP_ME_INIT:
        case CP_SET_PROTECTED_MODE:
        default:
                KGSL_CMD_ERR(dev_priv->device, "bad CP opcode %0x\n", opcode);
                return false;
        }

        return true;
}

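/*
 * _handle_type0() - validate a type-0 (register write) packet.  Only writes
 * that fall entirely inside the allowed register window pass the check.
 */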
static bool
_handle_type0(struct kgsl_device_private *dev_priv, uint *hostaddr)
{
        unsigned int reg = type0_pkt_offset(*hostaddr);
        unsigned int cnt = type0_pkt_size(*hostaddr);
        if (reg < 0x0192 || (reg + cnt) >= 0x8000) {
                KGSL_CMD_ERR(dev_priv->device, "bad type0 reg: 0x%0x cnt: %d\n",
                             reg, cnt);
                return false;
        }
        return true;
}

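/*
 * _parse_ibs() - parse the packets in an IB, recursively descending into
 * any sub-IBs it references.  Returns false as soon as an invalid packet
 * is found, hex-dumping the offending buffer when ib_check_level >= 2.
 */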
static bool _parse_ibs(struct kgsl_device_private *dev_priv,
                       uint gpuaddr, int sizedwords)
{
        static uint level; /* recursion level */
        bool ret = false;
        uint *hostaddr, *hoststart;
        int dwords_left = sizedwords;
        struct kgsl_mem_entry *entry;

        spin_lock(&dev_priv->process_priv->mem_lock);
        entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
                                           gpuaddr, sizedwords * sizeof(uint));
        spin_unlock(&dev_priv->process_priv->mem_lock);
        if (entry == NULL) {
                KGSL_CMD_ERR(dev_priv->device,
                             "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
                return false;
        }

        hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(&entry->memdesc, gpuaddr);
        if (hostaddr == NULL) {
                KGSL_CMD_ERR(dev_priv->device,
                             "no mapping for gpuaddr: 0x%08x\n", gpuaddr);
                return false;
        }

        hoststart = hostaddr;

        level++;

        KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
                      gpuaddr, sizedwords, hostaddr);

        mb();
        while (dwords_left > 0) {
                bool cur_ret = true;
                int count = 0; /* dword count including packet header */

                switch (*hostaddr >> 30) {
                case 0x0: /* type-0 */
                        count = (*hostaddr >> 16) + 2;
                        cur_ret = _handle_type0(dev_priv, hostaddr);
                        break;
                case 0x1: /* type-1 */
                        count = 2;
                        break;
                case 0x3: /* type-3 */
                        count = ((*hostaddr >> 16) & 0x3fff) + 2;
                        cur_ret = _handle_type3(dev_priv, hostaddr);
                        break;
                default:
                        KGSL_CMD_ERR(dev_priv->device, "unexpected type: "
                                "type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
                                *hostaddr >> 30, *hostaddr, hostaddr,
                                gpuaddr + 4 * (sizedwords - dwords_left));
                        cur_ret = false;
                        count = dwords_left;
                        break;
                }

                if (!cur_ret) {
                        KGSL_CMD_ERR(dev_priv->device,
                                "bad sub-type: #:%d/%d, v:0x%08x"
                                " @ 0x%p[gb:0x%08x], level:%d\n",
                                sizedwords - dwords_left, sizedwords,
                                *hostaddr, hostaddr,
                                gpuaddr + 4 * (sizedwords - dwords_left),
                                level);

                        if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
                                >= 2)
                                print_hex_dump(KERN_ERR,
                                        level == 1 ? "IB1:" : "IB2:",
                                        DUMP_PREFIX_OFFSET, 32, 4, hoststart,
                                        sizedwords * 4, 0);
                        goto done;
                }

                /* jump to the next packet */
                dwords_left -= count;
                hostaddr += count;
                if (dwords_left < 0) {
                        KGSL_CMD_ERR(dev_priv->device,
                                "bad count: c:%d, #:%d/%d, "
                                "v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
                                count, sizedwords - (dwords_left + count),
                                sizedwords, *(hostaddr - count),
                                hostaddr - count,
                                gpuaddr + 4 * (sizedwords - (dwords_left + count)),
                                level);
                        if (ADRENO_DEVICE(dev_priv->device)->ib_check_level
                                >= 2)
                                print_hex_dump(KERN_ERR,
                                        level == 1 ? "IB1:" : "IB2:",
                                        DUMP_PREFIX_OFFSET, 32, 4, hoststart,
                                        sizedwords * 4, 0);
                        goto done;
                }
        }

        ret = true;
done:
        if (!ret)
                KGSL_DRV_ERR(dev_priv->device,
                        "parsing failed: gpuaddr:0x%08x, "
                        "host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);

        level--;

        return ret;
}

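/*
 * adreno_ringbuffer_issueibcmds() - userspace entry point for command
 * submission: validates the IB descriptors, wraps them in
 * INDIRECT_BUFFER_PFD packets, switches the draw context and queues the
 * result on the ringbuffer.
 */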
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
                              struct kgsl_context *context,
                              struct kgsl_ibdesc *ibdesc,
                              unsigned int numibs,
                              uint32_t *timestamp,
                              unsigned int flags)
{
        struct kgsl_device *device = dev_priv->device;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        unsigned int *link;
        unsigned int *cmds;
        unsigned int i;
        struct adreno_context *drawctxt;
        unsigned int start_index = 0;
#ifdef CONFIG_MSM_KGSL_GPU_USAGE_SYSTRACE
        struct kgsl_pwrctrl *pwr = &device->pwrctrl;
#endif

        if (device->state & KGSL_STATE_HUNG)
                return -EBUSY;
        if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
            context == NULL || ibdesc == NULL || numibs == 0)
                return -EINVAL;

        drawctxt = context->devctxt;

        if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
                KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
                               " will not accept commands for context %d\n",
                               drawctxt, drawctxt->id);
                return -EDEADLK;
        }

        cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 4),
                              GFP_KERNEL);
        if (!link) {
                KGSL_CORE_ERR("kzalloc(%d) failed\n",
                              sizeof(unsigned int) * (numibs * 3 + 4));
                return -ENOMEM;
        }

        /* skip the preamble IB if the context is already current */
        if (drawctxt->flags & CTXT_FLAGS_PREAMBLE &&
            adreno_dev->drawctxt_active == drawctxt)
                start_index = 1;

        if (!start_index) {
                *cmds++ = cp_nop_packet(1);
                *cmds++ = KGSL_START_OF_IB_IDENTIFIER;
        } else {
                *cmds++ = cp_nop_packet(4);
                *cmds++ = KGSL_START_OF_IB_IDENTIFIER;
                *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
                *cmds++ = ibdesc[0].gpuaddr;
                *cmds++ = ibdesc[0].sizedwords;
        }
        for (i = start_index; i < numibs; i++) {
                if (unlikely(adreno_dev->ib_check_level >= 1 &&
                             !_parse_ibs(dev_priv, ibdesc[i].gpuaddr,
                                         ibdesc[i].sizedwords))) {
                        kfree(link);
                        return -EINVAL;
                }
                *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
                *cmds++ = ibdesc[i].gpuaddr;
                *cmds++ = ibdesc[i].sizedwords;
        }

        *cmds++ = cp_nop_packet(1);
        *cmds++ = KGSL_END_OF_IB_IDENTIFIER;

        kgsl_setstate(&device->mmu, context->id,
                      kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
                                            device->id));

#ifdef CONFIG_MSM_KGSL_GPU_USAGE_SYSTRACE
        if (device->id == 0 && device->prev_pid != -1 &&
            device->prev_pid != task_tgid_nr(current)) {
                trace_kgsl_usage(device, KGSL_PWRFLAGS_ON,
                                 dev_priv->process_priv->pid,
                                 device->gputime.total, device->gputime.busy,
                                 pwr->active_pwrlevel,
                                 pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
                device->prev_pid = task_tgid_nr(current);
        }
#endif

#ifdef CONFIG_MSM_KGSL_GPU_USAGE
        if (device->current_process_priv == NULL ||
            device->current_process_priv->pid != dev_priv->process_priv->pid)
                device->current_process_priv = dev_priv->process_priv;
#endif

        adreno_drawctxt_switch(adreno_dev, drawctxt, flags);

        *timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
                                               drawctxt, 0,
                                               &link[0], (cmds - link));

        KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
                      context->id, (unsigned int)ibdesc, numibs, *timestamp);

        kfree(link);

#ifdef CONFIG_MSM_KGSL_CFF_DUMP
        /* conservatively wait for idle after every IB1 when CFF dumping */
        adreno_idle(device);
#endif
        if (drawctxt->flags & CTXT_FLAGS_GPU_HANG_RECOVERED)
                return -EDEADLK;
        else
                return 0;
}

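/*
 * _find_start_of_cmd_seq() - scan the ringbuffer from *ptr toward the wptr
 * (forward when inc is true, backward otherwise) for a KGSL_CMD_IDENTIFIER
 * marker and return the offset of the start of that command sequence.
 */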
static int _find_start_of_cmd_seq(struct adreno_ringbuffer *rb,
                                  unsigned int *ptr,
                                  bool inc)
{
        int status = -EINVAL;
        unsigned int val1;
        unsigned int size = rb->buffer_desc.size;
        unsigned int start_ptr = *ptr;

        while ((start_ptr / sizeof(unsigned int)) != rb->wptr) {
                if (inc)
                        start_ptr = adreno_ringbuffer_inc_wrapped(start_ptr,
                                                                  size);
                else
                        start_ptr = adreno_ringbuffer_dec_wrapped(start_ptr,
                                                                  size);
                kgsl_sharedmem_readl(&rb->buffer_desc, &val1, start_ptr);
                if (KGSL_CMD_IDENTIFIER == val1) {
                        /* back up to the NOP header preceding the marker */
                        if ((start_ptr / sizeof(unsigned int)) != rb->wptr)
                                start_ptr = adreno_ringbuffer_dec_wrapped(
                                                        start_ptr, size);
                        *ptr = start_ptr;
                        status = 0;
                        break;
                }
        }
        return status;
}

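/*
 * _find_cmd_seq_after_eop_ts() - locate the first command sequence that
 * follows the write of the given global eop timestamp, using a three-dword
 * sliding window to match the CP_MEM_WRITE/CACHE_FLUSH_TS pattern emitted
 * by adreno_ringbuffer_addcmds().
 */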
static int _find_cmd_seq_after_eop_ts(struct adreno_ringbuffer *rb,
                                      unsigned int *rb_rptr,
                                      unsigned int global_eop,
                                      bool inc)
{
        int status = -EINVAL;
        unsigned int temp_rb_rptr = *rb_rptr;
        unsigned int size = rb->buffer_desc.size;
        unsigned int val[3];
        int i = 0;
        bool check = false;

        if (inc && temp_rb_rptr / sizeof(unsigned int) != rb->wptr)
                return status;

        do {
                /* when scanning backward, decrement before reading */
                if (!inc)
                        temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
                                                temp_rb_rptr, size);
                kgsl_sharedmem_readl(&rb->buffer_desc, &val[i],
                                     temp_rb_rptr);

                /*
                 * match the timestamp write pattern: packet header,
                 * memstore address, timestamp value (the order seen
                 * depends on the scan direction)
                 */
                if (check && ((inc && val[i] == global_eop) ||
                              (!inc && (val[i] ==
                                        cp_type3_packet(CP_MEM_WRITE, 2) ||
                                        val[i] == CACHE_FLUSH_TS)))) {
                        /* look one dword back in the window */
                        i = (i + 2) % 3;
                        if (val[i] == rb->device->memstore.gpuaddr +
                            KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
                                                 eoptimestamp)) {
                                int j = ((i + 2) % 3);
                                if ((inc && (val[j] == CACHE_FLUSH_TS ||
                                             val[j] == cp_type3_packet(
                                                        CP_MEM_WRITE, 2))) ||
                                    (!inc && val[j] == global_eop)) {
                                        /* found the eop timestamp write */
                                        status = 0;
                                        break;
                                }
                        }
                        /* restore the window index */
                        i = (i + 1) % 3;
                }
                if (inc)
                        temp_rb_rptr = adreno_ringbuffer_inc_wrapped(
                                                temp_rb_rptr, size);
                /* the window is valid once 3 dwords have been read */
                i = (i + 1) % 3;
                if (2 == i)
                        check = true;
        } while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr);

        if (!status) {
                status = _find_start_of_cmd_seq(rb, &temp_rb_rptr, false);
                if (!status) {
                        *rb_rptr = temp_rb_rptr;
                        KGSL_DRV_ERR(rb->device,
                        "Offset of cmd sequence after eop timestamp: 0x%x\n",
                        temp_rb_rptr / sizeof(unsigned int));
                }
        }
        if (status)
                KGSL_DRV_ERR(rb->device,
                "Failed to find the command sequence after eop timestamp\n");
        return status;
}

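/*
 * _find_hanging_ib_sequence() - scan forward for the command sequence that
 * submitted the hanging IB (ib1), giving up if a context switch is crossed
 * before the IB is found.
 */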
static int _find_hanging_ib_sequence(struct adreno_ringbuffer *rb,
                                     unsigned int *rb_rptr,
                                     unsigned int ib1)
{
        int status = -EINVAL;
        unsigned int temp_rb_rptr = *rb_rptr;
        unsigned int size = rb->buffer_desc.size;
        unsigned int val[2];
        int i = 0;
        bool check = false;
        bool ctx_switch = false;

        while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
                kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);

                if (check && val[i] == ib1) {
                        /* the previous dword should be the IB header */
                        i = (i + 1) % 2;
                        if (adreno_cmd_is_ib(val[i])) {
                                /* rewind to the start of this sequence */
                                status = _find_start_of_cmd_seq(rb,
                                                &temp_rb_rptr, false);
                                KGSL_DRV_ERR(rb->device,
                                "Found the hanging IB at offset 0x%x\n",
                                temp_rb_rptr / sizeof(unsigned int));
                                break;
                        }
                        /* no match, restore the window index */
                        i = (i + 1) % 2;
                }
                if (val[i] == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
                        if (ctx_switch) {
                                KGSL_DRV_ERR(rb->device,
                                "Context switch encountered before bad "
                                "IB found\n");
                                break;
                        }
                        ctx_switch = true;
                }
                i = (i + 1) % 2;
                /* nothing to check until 2 dwords have been read */
                if (1 == i)
                        check = true;
                temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
                                                             size);
        }
        if (!status)
                *rb_rptr = temp_rb_rptr;
        return status;
}

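/*
 * _turn_preamble_on_for_ib_seq() - rewrite the cp_nop_packet(4) that skips
 * the preamble IB into a cp_nop_packet(1), so the preamble is executed when
 * the sequence at rb_rptr is replayed after recovery.
 */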
static void _turn_preamble_on_for_ib_seq(struct adreno_ringbuffer *rb,
                                         unsigned int rb_rptr)
{
        unsigned int temp_rb_rptr = rb_rptr;
        unsigned int size = rb->buffer_desc.size;
        unsigned int val[2];
        int i = 0;
        bool check = false;
        bool cmd_start = false;

        /* go to the start of the IB sequence and turn the preamble on */
        while (temp_rb_rptr / sizeof(unsigned int) != rb->wptr) {
                kgsl_sharedmem_readl(&rb->buffer_desc, &val[i], temp_rb_rptr);
                if (check && KGSL_START_OF_IB_IDENTIFIER == val[i]) {
                        /* the previous dword holds the NOP header */
                        i = (i + 1) % 2;
                        if (val[i] == cp_nop_packet(4)) {
                                temp_rb_rptr = adreno_ringbuffer_dec_wrapped(
                                                        temp_rb_rptr, size);
                                kgsl_sharedmem_writel(&rb->buffer_desc,
                                                      temp_rb_rptr,
                                                      cp_nop_packet(1));
                        }
                        KGSL_DRV_ERR(rb->device,
                        "Turned preamble on at offset 0x%x\n",
                        temp_rb_rptr / 4);
                        break;
                }
                /*
                 * Exit when the start of the next command sequence is
                 * reached.  The first KGSL_CMD_IDENTIFIER seen belongs to
                 * the current sequence, so do not break on it.
                 */
                if (KGSL_CMD_IDENTIFIER == val[i]) {
                        if (cmd_start)
                                break;
                        cmd_start = true;
                }

                i = (i + 1) % 2;
                if (1 == i)
                        check = true;
                temp_rb_rptr = adreno_ringbuffer_inc_wrapped(temp_rb_rptr,
                                                             size);
        }
}

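/*
 * _copy_valid_rb_content() - walk the ringbuffer from rb_rptr to the wptr,
 * copying everything into bad_rb_buffer while accumulating only the command
 * sequences of non-hung contexts into temp_rb_buffer for replay.
 */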
static void _copy_valid_rb_content(struct adreno_ringbuffer *rb,
                unsigned int rb_rptr, unsigned int *temp_rb_buffer,
                int *rb_size, unsigned int *bad_rb_buffer,
                int *bad_rb_size,
                int *last_valid_ctx_id)
{
        unsigned int good_rb_idx = 0, cmd_start_idx = 0;
        unsigned int val1 = 0;
        struct kgsl_context *k_ctxt;
        struct adreno_context *a_ctxt;
        unsigned int bad_rb_idx = 0;
        int copy_rb_contents = 0;
        unsigned int temp_rb_rptr;
        unsigned int size = rb->buffer_desc.size;
        unsigned int good_cmd_start_idx = 0;

        /* walk the rb, omitting commands that belong to hung contexts */
        while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
                kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);

                if (KGSL_CMD_IDENTIFIER == val1) {
                        /* the sequence starts at the preceding NOP dword */
                        cmd_start_idx = bad_rb_idx - 1;
                        if (copy_rb_contents)
                                good_cmd_start_idx = good_rb_idx - 1;
                }

                /* check for the context switch indicator */
                if (val1 == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
                        unsigned int temp_idx, val2;

                        /* the context id is written 3 dwords after the
                         * identifier; wrap the offset if needed */
                        temp_rb_rptr = (rb_rptr + 3 * sizeof(unsigned int)) %
                                        size;
                        kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
                                             temp_rb_rptr);

                        k_ctxt = idr_find(&rb->device->context_idr, val2);
                        if (k_ctxt) {
                                a_ctxt = k_ctxt->devctxt;

                                /* switching to a context that did not hang:
                                 * start saving its commands for replay */
                                if (!copy_rb_contents && ((k_ctxt &&
                                    !(a_ctxt->flags & CTXT_FLAGS_GPU_HANG)) ||
                                    !k_ctxt)) {
                                        for (temp_idx = cmd_start_idx;
                                             temp_idx < bad_rb_idx;
                                             temp_idx++)
                                                temp_rb_buffer[good_rb_idx++] =
                                                        bad_rb_buffer[temp_idx];
                                        *last_valid_ctx_id = val2;
                                        copy_rb_contents = 1;
                                } else if (copy_rb_contents && k_ctxt &&
                                           (a_ctxt->flags &
                                            CTXT_FLAGS_GPU_HANG)) {
                                        /* switching to a bad context: drop
                                         * the dwords already copied for
                                         * this sequence */
                                        good_rb_idx = good_cmd_start_idx;
                                        copy_rb_contents = 0;
                                }
                        }
                }

                if (copy_rb_contents)
                        temp_rb_buffer[good_rb_idx++] = val1;
                /* every dword, good or bad, goes to the bad buffer */
                bad_rb_buffer[bad_rb_idx++] = val1;

                rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, size);
        }
        *rb_size = good_rb_idx;
        *bad_rb_size = bad_rb_idx;
}

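/*
 * adreno_ringbuffer_extract() - after a hang, extract both a replayable
 * ("good") copy and a full ("bad") copy of the outstanding ringbuffer
 * contents into the recovery data, starting just after the last retired
 * global eop timestamp.
 */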
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
                              struct adreno_recovery_data *rec_data)
{
        int status;
        struct kgsl_device *device = rb->device;
        unsigned int rb_rptr = rb->wptr * sizeof(unsigned int);
        struct kgsl_context *context;
        struct adreno_context *adreno_context;

        context = idr_find(&device->context_idr, rec_data->context_id);

        /* look for the command stream right after the global eop */
        status = _find_cmd_seq_after_eop_ts(rb, &rb_rptr,
                                            rec_data->global_eop + 1, false);
        if (status)
                goto done;

        if (context) {
                adreno_context = context->devctxt;

                if (adreno_context->flags & CTXT_FLAGS_PREAMBLE) {
                        if (rec_data->ib1) {
                                status = _find_hanging_ib_sequence(rb,
                                                &rb_rptr, rec_data->ib1);
                                if (status)
                                        goto copy_rb_contents;
                        }
                        _turn_preamble_on_for_ib_seq(rb, rb_rptr);
                } else {
                        status = -EINVAL;
                }
        }

copy_rb_contents:
        _copy_valid_rb_content(rb, rb_rptr, rec_data->rb_buffer,
                               &rec_data->rb_size,
                               rec_data->bad_rb_buffer,
                               &rec_data->bad_rb_size,
                               &rec_data->last_valid_ctx_id);
        /* if the hanging IB sequence was not found, nothing from the bad
         * context can be replayed, so size its buffer to 0 */
        if (status) {
                rec_data->bad_rb_size = 0;
                status = 0;
        }
        if (!context)
                rec_data->rb_size = 0;
done:
        return status;
}

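/*
 * adreno_ringbuffer_restore() - write extracted commands back into the
 * ringbuffer after recovery and submit them, resetting the read pointer
 * first if there is not enough room before the end of the buffer.
 */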
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
                          int num_rb_contents)
{
        int i;
        unsigned int *ringcmds;
        unsigned int rcmd_gpu;

        if (!num_rb_contents)
                return;

        if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
                adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
                rb->rptr = 0;
                BUG_ON(num_rb_contents > rb->buffer_desc.size);
        }
        ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
        rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
        for (i = 0; i < num_rb_contents; i++)
                GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
        rb->wptr += num_rb_contents;
        adreno_ringbuffer_submit(rb);
}