/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/* #define DEBUG */
#define ALIGN_CPU

#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <mach/socinfo.h>

#include "kgsl.h"
#include "kgsl_cffdump.h"
#include "kgsl_debugfs.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "adreno_pm4types.h"

static struct rchan *chan;
static struct dentry *dir;
static int suspended;
static size_t dropped;
static size_t subbuf_size = 256*1024;
static size_t n_subbufs = 64;

/* forward declarations */
static void destroy_channel(void);
static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs);

static spinlock_t cffdump_lock;
static ulong serial_nr;
static ulong total_bytes;
static ulong total_syncmem;
static long last_sec;

#define MEMBUF_SIZE 64

#define CFF_OP_WRITE_REG 0x00000002
struct cff_op_write_reg {
	unsigned char op;
	uint addr;
	uint value;
} __packed;

#define CFF_OP_POLL_REG 0x00000004
struct cff_op_poll_reg {
	unsigned char op;
	uint addr;
	uint value;
	uint mask;
} __packed;

#define CFF_OP_WAIT_IRQ 0x00000005
struct cff_op_wait_irq {
	unsigned char op;
} __packed;

#define CFF_OP_RMW 0x0000000a

#define CFF_OP_WRITE_MEM 0x0000000b
struct cff_op_write_mem {
	unsigned char op;
	uint addr;
	uint value;
} __packed;

#define CFF_OP_WRITE_MEMBUF 0x0000000c
struct cff_op_write_membuf {
	unsigned char op;
	uint addr;
	ushort count;
	uint buffer[MEMBUF_SIZE];
} __packed;

#define CFF_OP_MEMORY_BASE 0x0000000d
struct cff_op_memory_base {
	unsigned char op;
	uint base;
	uint size;
	uint gmemsize;
} __packed;

#define CFF_OP_HANG 0x0000000e
struct cff_op_hang {
	unsigned char op;
} __packed;

#define CFF_OP_EOF 0xffffffff
struct cff_op_eof {
	unsigned char op;
} __packed;

#define CFF_OP_VERIFY_MEM_FILE 0x00000007
#define CFF_OP_WRITE_SURFACE_PARAMS 0x00000011
struct cff_op_user_event {
	unsigned char op;
	unsigned int op1;
	unsigned int op2;
	unsigned int op3;
	unsigned int op4;
	unsigned int op5;
} __packed;

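/*
 * Encode up to three input bytes as four base64 characters, padding the
 * output with '=' when fewer than three input bytes remain.
 */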
static void b64_encodeblock(unsigned char in[3], unsigned char out[4], int len)
{
	static const char tob64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno"
		"pqrstuvwxyz0123456789+/";

	out[0] = tob64[in[0] >> 2];
	out[1] = tob64[((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4)];
	out[2] = (unsigned char) (len > 1 ? tob64[((in[1] & 0x0f) << 2)
		| ((in[2] & 0xc0) >> 6)] : '=');
	out[3] = (unsigned char) (len > 2 ? tob64[in[2] & 0x3f] : '=');
}

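/*
 * Base64-encode in_size bytes from in_buf into out_buf so the packed CFF
 * opcode structs can be emitted as plain-text log lines.
 */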
static void b64_encode(const unsigned char *in_buf, int in_size,
	unsigned char *out_buf, int out_bufsize, int *out_size)
{
	unsigned char in[3], out[4];
	int i, len;

	*out_size = 0;
	while (in_size > 0) {
		len = 0;
		for (i = 0; i < 3; ++i) {
			if (in_size-- > 0) {
				in[i] = *in_buf++;
				++len;
			} else
				in[i] = 0;
		}
		if (len) {
			b64_encodeblock(in, out, len);
			if (out_bufsize < 4) {
				pr_warn("kgsl: cffdump: %s: out of buffer\n",
					__func__);
				return;
			}
			for (i = 0; i < 4; ++i)
				*out_buf++ = out[i];
			*out_size += 4;
			out_bufsize -= 4;
		}
	}
}

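/*
 * Format a log line into a per-cpu temporary buffer (with interrupts
 * disabled so the buffer is not reused underneath us) and push the
 * resulting line into the relay channel.
 */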
#define KLOG_TMPBUF_SIZE (1024)
static void klog_printk(const char *fmt, ...)
{
	/* per-cpu klog formatting temporary buffer */
	static char klog_buf[NR_CPUS][KLOG_TMPBUF_SIZE];

	va_list args;
	int len;
	char *cbuf;
	unsigned long flags;

	local_irq_save(flags);
	cbuf = klog_buf[smp_processor_id()];
	va_start(args, fmt);
	len = vsnprintf(cbuf, KLOG_TMPBUF_SIZE, fmt, args);
	total_bytes += len;
	va_end(args);
	relay_write(chan, cbuf, len);
	local_irq_restore(flags);
}

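/*
 * Flush the pending run of contiguous memory writes: emit a single
 * CFF_OP_WRITE_MEMBUF packet (or a plain CFF_OP_WRITE_MEM when only one
 * value is buffered), base64-encode it and write it to the log.
 */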
static struct cff_op_write_membuf cff_op_write_membuf;
static void cffdump_membuf(int id, unsigned char *out_buf, int out_bufsize)
{
	void *data;
	int len, out_size;
	struct cff_op_write_mem cff_op_write_mem;

	uint addr = cff_op_write_membuf.addr
		- sizeof(uint)*cff_op_write_membuf.count;

	if (!cff_op_write_membuf.count) {
		pr_warn("kgsl: cffdump: membuf: count == 0, skipping");
		return;
	}

	if (cff_op_write_membuf.count != 1) {
		cff_op_write_membuf.op = CFF_OP_WRITE_MEMBUF;
		cff_op_write_membuf.addr = addr;
		len = sizeof(cff_op_write_membuf) -
			sizeof(uint)*(MEMBUF_SIZE - cff_op_write_membuf.count);
		data = &cff_op_write_membuf;
	} else {
		cff_op_write_mem.op = CFF_OP_WRITE_MEM;
		cff_op_write_mem.addr = addr;
		cff_op_write_mem.value = cff_op_write_membuf.buffer[0];
		data = &cff_op_write_mem;
		len = sizeof(cff_op_write_mem);
	}
	b64_encode(data, len, out_buf, out_bufsize, &out_size);
	out_buf[out_size] = 0;
	klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
	cff_op_write_membuf.count = 0;
	cff_op_write_membuf.addr = 0;
}

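/*
 * Serialize one CFF opcode into its packed struct and emit it as a
 * "<serial>:<id>;<base64 payload>" line.  CFF_OP_WRITE_MEM operations to
 * consecutive addresses are coalesced into cff_op_write_membuf and only
 * flushed when the run breaks or the buffer fills.  A summary of the byte
 * counters is logged roughly every ten seconds.
 */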
static void cffdump_printline(int id, uint opcode, uint op1, uint op2,
	uint op3, uint op4, uint op5)
{
	struct cff_op_write_reg cff_op_write_reg;
	struct cff_op_poll_reg cff_op_poll_reg;
	struct cff_op_wait_irq cff_op_wait_irq;
	struct cff_op_memory_base cff_op_memory_base;
	struct cff_op_hang cff_op_hang;
	struct cff_op_eof cff_op_eof;
	struct cff_op_user_event cff_op_user_event;
	unsigned char out_buf[sizeof(cff_op_write_membuf)/3*4 + 16];
	void *data;
	int len = 0, out_size;
	long cur_secs;

	spin_lock(&cffdump_lock);
	if (opcode == CFF_OP_WRITE_MEM) {
		if ((cff_op_write_membuf.addr != op1 &&
			cff_op_write_membuf.count)
			|| (cff_op_write_membuf.count == MEMBUF_SIZE))
			cffdump_membuf(id, out_buf, sizeof(out_buf));

		cff_op_write_membuf.buffer[cff_op_write_membuf.count++] = op2;
		cff_op_write_membuf.addr = op1 + sizeof(uint);
		spin_unlock(&cffdump_lock);
		return;
	} else if (cff_op_write_membuf.count)
		cffdump_membuf(id, out_buf, sizeof(out_buf));
	spin_unlock(&cffdump_lock);

	switch (opcode) {
	case CFF_OP_WRITE_REG:
		cff_op_write_reg.op = opcode;
		cff_op_write_reg.addr = op1;
		cff_op_write_reg.value = op2;
		data = &cff_op_write_reg;
		len = sizeof(cff_op_write_reg);
		break;

	case CFF_OP_POLL_REG:
		cff_op_poll_reg.op = opcode;
		cff_op_poll_reg.addr = op1;
		cff_op_poll_reg.value = op2;
		cff_op_poll_reg.mask = op3;
		data = &cff_op_poll_reg;
		len = sizeof(cff_op_poll_reg);
		break;

	case CFF_OP_WAIT_IRQ:
		cff_op_wait_irq.op = opcode;
		data = &cff_op_wait_irq;
		len = sizeof(cff_op_wait_irq);
		break;

	case CFF_OP_MEMORY_BASE:
		cff_op_memory_base.op = opcode;
		cff_op_memory_base.base = op1;
		cff_op_memory_base.size = op2;
		cff_op_memory_base.gmemsize = op3;
		data = &cff_op_memory_base;
		len = sizeof(cff_op_memory_base);
		break;

	case CFF_OP_HANG:
		cff_op_hang.op = opcode;
		data = &cff_op_hang;
		len = sizeof(cff_op_hang);
		break;

	case CFF_OP_EOF:
		cff_op_eof.op = opcode;
		data = &cff_op_eof;
		len = sizeof(cff_op_eof);
		break;

	case CFF_OP_WRITE_SURFACE_PARAMS:
	case CFF_OP_VERIFY_MEM_FILE:
		cff_op_user_event.op = opcode;
		cff_op_user_event.op1 = op1;
		cff_op_user_event.op2 = op2;
		cff_op_user_event.op3 = op3;
		cff_op_user_event.op4 = op4;
		cff_op_user_event.op5 = op5;
		data = &cff_op_user_event;
		len = sizeof(cff_op_user_event);
		break;
	}

	if (len) {
		b64_encode(data, len, out_buf, sizeof(out_buf), &out_size);
		out_buf[out_size] = 0;
		klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
	} else
		pr_warn("kgsl: cffdump: unhandled opcode: %d\n", opcode);

	cur_secs = get_seconds();
	if ((cur_secs - last_sec) > 10 || (last_sec - cur_secs) > 10) {
		pr_info("kgsl: cffdump: total [bytes:%lu kB, syncmem:%lu kB], "
			"seq#: %lu\n", total_bytes/1024, total_syncmem/1024,
			serial_nr);
		last_sec = cur_secs;
	}
}

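/*
 * Set up CFF dumping: pin the caller to CPU 0 when ALIGN_CPU is defined
 * (presumably so the relay output stays in a single per-cpu buffer), then
 * create the "cff" debugfs directory and the relay channel behind it.
 */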
void kgsl_cffdump_init()
{
	struct dentry *debugfs_dir = kgsl_get_debugfs_dir();

#ifdef ALIGN_CPU
	cpumask_t mask;

	cpumask_clear(&mask);
	cpumask_set_cpu(0, &mask);
	sched_setaffinity(0, &mask);
#endif
	if (!debugfs_dir || IS_ERR(debugfs_dir)) {
		KGSL_CORE_ERR("Debugfs directory is bad\n");
		return;
	}

	kgsl_cff_dump_enable = 1;

	spin_lock_init(&cffdump_lock);

	dir = debugfs_create_dir("cff", debugfs_dir);
	if (!dir) {
		KGSL_CORE_ERR("debugfs_create_dir failed\n");
		return;
	}

	chan = create_channel(subbuf_size, n_subbufs);
}

void kgsl_cffdump_destroy()
{
	if (chan)
		relay_flush(chan);
	destroy_channel();
	if (dir)
		debugfs_remove(dir);
}

void kgsl_cffdump_open(enum kgsl_deviceid device_id)
{
	kgsl_cffdump_memory_base(device_id, KGSL_PAGETABLE_BASE,
			CONFIG_MSM_KGSL_PAGE_TABLE_SIZE, SZ_256K);
}

void kgsl_cffdump_memory_base(enum kgsl_deviceid device_id, unsigned int base,
			unsigned int range, unsigned gmemsize)
{
	cffdump_printline(device_id, CFF_OP_MEMORY_BASE, base,
			range, gmemsize, 0, 0);
}

void kgsl_cffdump_hang(enum kgsl_deviceid device_id)
{
	cffdump_printline(device_id, CFF_OP_HANG, 0, 0, 0, 0, 0);
}

void kgsl_cffdump_close(enum kgsl_deviceid device_id)
{
	cffdump_printline(device_id, CFF_OP_EOF, 0, 0, 0, 0, 0);
}

void kgsl_cffdump_user_event(unsigned int cff_opcode, unsigned int op1,
		unsigned int op2, unsigned int op3,
		unsigned int op4, unsigned int op5)
{
	cffdump_printline(-1, cff_opcode, op1, op2, op3, op4, op5);
}

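/*
 * Dump the current contents of a GPU buffer as a stream of
 * CFF_OP_WRITE_MEM operations.  If no memdesc is supplied the region is
 * looked up in the process memory list, and the CPU cache can be
 * invalidated first so stale data is not captured.
 */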
void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
	const struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes,
	bool clean_cache)
{
	const void *src;
	uint host_size;

	if (!kgsl_cff_dump_enable)
		return;

	total_syncmem += sizebytes;

	if (memdesc == NULL) {
		struct kgsl_mem_entry *entry;
		spin_lock(&dev_priv->process_priv->mem_lock);
		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
			gpuaddr, sizebytes);
		spin_unlock(&dev_priv->process_priv->mem_lock);
		if (entry == NULL) {
			KGSL_CORE_ERR("did not find mapping "
				"for gpuaddr: 0x%08x\n", gpuaddr);
			return;
		}
		memdesc = &entry->memdesc;
	}
	BUG_ON(memdesc->gpuaddr == 0);
	BUG_ON(gpuaddr == 0);

	src = kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr, &host_size);
	if (src == NULL || host_size < sizebytes) {
		KGSL_CORE_ERR("did not find mapping for "
			"gpuaddr: 0x%08x, m->host: 0x%p, phys: 0x%08x\n",
			gpuaddr, memdesc->hostptr, memdesc->physaddr);
		return;
	}

	if (clean_cache) {
		/* Ensure that this memory region is not read from the
		 * cache but fetched fresh */

		mb();

		kgsl_cache_range_op((struct kgsl_memdesc *)memdesc,
				KGSL_CACHE_OP_INV);
	}

	while (sizebytes > 3) {
		cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src,
			0, 0, 0);
		gpuaddr += 4;
		src += 4;
		sizebytes -= 4;
	}
	if (sizebytes > 0)
		cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src,
			0, 0, 0);
}

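/*
 * Emit CFF_OP_WRITE_MEM operations that fill a memory range with a
 * constant 32-bit value.
 */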
void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes)
{
	if (!kgsl_cff_dump_enable)
		return;

	while (sizebytes > 3) {
		/* Use 32bit memory writes as long as there's at least
		 * 4 bytes left */
		cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value,
			0, 0, 0);
		addr += 4;
		sizebytes -= 4;
	}
	if (sizebytes > 0)
		cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value,
			0, 0, 0);
}

void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
	uint value)
{
	if (!kgsl_cff_dump_enable)
		return;

	cffdump_printline(device_id, CFF_OP_WRITE_REG, addr, value,
			0, 0, 0);
}

void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
	uint value, uint mask)
{
	if (!kgsl_cff_dump_enable)
		return;

	cffdump_printline(device_id, CFF_OP_POLL_REG, addr, value,
			mask, 0, 0);
}

void kgsl_cffdump_slavewrite(uint addr, uint value)
{
	if (!kgsl_cff_dump_enable)
		return;

	cffdump_printline(-1, CFF_OP_WRITE_REG, addr, value, 0, 0, 0);
}

int kgsl_cffdump_waitirq(void)
{
	if (!kgsl_cff_dump_enable)
		return 0;

	cffdump_printline(-1, CFF_OP_WAIT_IRQ, 0, 0, 0, 0, 0);

	return 1;
}
EXPORT_SYMBOL(kgsl_cffdump_waitirq);

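/*
 * Handle a type-3 PM4 packet while parsing a command buffer: for
 * CP_INDIRECT_BUFFER packets, record the IB's address and size so each IB
 * is parsed only once, and recurse into it via kgsl_cffdump_parse_ibs().
 */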
#define ADDRESS_STACK_SIZE 256
#define GET_PM4_TYPE3_OPCODE(x) ((*(x) >> 8) & 0xFF)
static unsigned int kgsl_cffdump_addr_count;

static bool kgsl_cffdump_handle_type3(struct kgsl_device_private *dev_priv,
	uint *hostaddr, bool check_only)
{
	static uint addr_stack[ADDRESS_STACK_SIZE];
	static uint size_stack[ADDRESS_STACK_SIZE];

	switch (GET_PM4_TYPE3_OPCODE(hostaddr)) {
	case CP_INDIRECT_BUFFER_PFD:
	case CP_INDIRECT_BUFFER:
	{
		/* traverse indirect buffers */
		int i;
		uint ibaddr = hostaddr[1];
		uint ibsize = hostaddr[2];

		/* has this address already been encountered? */
		for (i = 0;
			i < kgsl_cffdump_addr_count && addr_stack[i] != ibaddr;
			++i)
			;

		if (kgsl_cffdump_addr_count == i) {
			addr_stack[kgsl_cffdump_addr_count] = ibaddr;
			size_stack[kgsl_cffdump_addr_count++] = ibsize;

			if (kgsl_cffdump_addr_count >= ADDRESS_STACK_SIZE) {
				KGSL_CORE_ERR("stack overflow\n");
				return false;
			}

			return kgsl_cffdump_parse_ibs(dev_priv, NULL,
				ibaddr, ibsize, check_only);
		} else if (size_stack[i] != ibsize) {
			KGSL_CORE_ERR("gpuaddr: 0x%08x, "
				"wc: %u, with size wc: %u already on the "
				"stack\n", ibaddr, ibsize, size_stack[i]);
			return false;
		}
	}
	break;
	}

	return true;
}

/*
 * Traverse IBs and dump them to the test vector. Detect a swap by
 * inspecting register writes, keeping note of the current state, and dump
 * the framebuffer config to the test vector.
 */
bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
	const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
	bool check_only)
{
	static uint level; /* recursion level */
	bool ret = true;
	uint host_size;
	uint *hostaddr, *hoststart;
	int dwords_left = sizedwords; /* dwords left in the current command
					 buffer */

	if (level == 0)
		kgsl_cffdump_addr_count = 0;

	if (memdesc == NULL) {
		struct kgsl_mem_entry *entry;
		spin_lock(&dev_priv->process_priv->mem_lock);
		entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
			gpuaddr, sizedwords * sizeof(uint));
		spin_unlock(&dev_priv->process_priv->mem_lock);
		if (entry == NULL) {
			KGSL_CORE_ERR("did not find mapping "
				"for gpuaddr: 0x%08x\n", gpuaddr);
			return true;
		}
		memdesc = &entry->memdesc;
	}

	hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr, &host_size);
	if (hostaddr == NULL) {
		KGSL_CORE_ERR("did not find mapping for "
			"gpuaddr: 0x%08x\n", gpuaddr);
		return true;
	}

	hoststart = hostaddr;

	level++;

	mb();
	kgsl_cache_range_op((struct kgsl_memdesc *)memdesc,
			KGSL_CACHE_OP_INV);
#ifdef DEBUG
	pr_info("kgsl: cffdump: ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
		gpuaddr, sizedwords, hostaddr);
#endif

	while (dwords_left > 0) {
		int count = 0; /* dword count including packet header */
		bool cur_ret = true;

		switch (*hostaddr >> 30) {
		case 0x0: /* type-0 */
			count = (*hostaddr >> 16)+2;
			break;
		case 0x1: /* type-1 */
			count = 2;
			break;
		case 0x3: /* type-3 */
			count = ((*hostaddr >> 16) & 0x3fff) + 2;
			cur_ret = kgsl_cffdump_handle_type3(dev_priv,
				hostaddr, check_only);
			break;
		default:
			pr_warn("kgsl: cffdump: parse-ib: unexpected type: "
				"type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
				*hostaddr >> 30, *hostaddr, hostaddr,
				gpuaddr+4*(sizedwords-dwords_left));
			cur_ret = false;
			count = dwords_left;
			break;
		}

#ifdef DEBUG
		if (!cur_ret) {
			pr_info("kgsl: cffdump: bad sub-type: #:%d/%d, v:0x%08x"
				" @ 0x%p[gb:0x%08x], level:%d\n",
				sizedwords-dwords_left, sizedwords, *hostaddr,
				hostaddr, gpuaddr+4*(sizedwords-dwords_left),
				level);

			print_hex_dump(KERN_ERR, level == 1 ? "IB1:" : "IB2:",
				DUMP_PREFIX_OFFSET, 32, 4, hoststart,
				sizedwords*4, 0);
		}
#endif
		ret = ret && cur_ret;

		/* jump to next packet */
		dwords_left -= count;
		hostaddr += count;
		cur_ret = dwords_left >= 0;

#ifdef DEBUG
		if (!cur_ret) {
			pr_info("kgsl: cffdump: bad count: c:%d, #:%d/%d, "
				"v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
				count, sizedwords-(dwords_left+count),
				sizedwords, *(hostaddr-count), hostaddr-count,
				gpuaddr+4*(sizedwords-(dwords_left+count)),
				level);

			print_hex_dump(KERN_ERR, level == 1 ? "IB1:" : "IB2:",
				DUMP_PREFIX_OFFSET, 32, 4, hoststart,
				sizedwords*4, 0);
		}
#endif

		ret = ret && cur_ret;
	}

	if (!ret)
		pr_info("kgsl: cffdump: parsing failed: gpuaddr:0x%08x, "
			"host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);

	if (!check_only) {
#ifdef DEBUG
		uint offset = gpuaddr - memdesc->gpuaddr;
		pr_info("kgsl: cffdump: ib-dump: hostptr:%p, gpuaddr:%08x, "
			"physaddr:%08x, offset:%d, size:%d", hoststart,
			gpuaddr, memdesc->physaddr + offset, offset,
			sizedwords*4);
#endif
		kgsl_cffdump_syncmem(dev_priv, memdesc, gpuaddr, sizedwords*4,
			false);
	}

	level--;

	return ret;
}

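/*
 * relay subbuf_start callback: if the channel buffer is full, drop the
 * incoming data (counting the drops) and warn once; otherwise reserve the
 * new sub-buffer and keep logging.
 */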
static int subbuf_start_handler(struct rchan_buf *buf,
	void *subbuf, void *prev_subbuf, uint prev_padding)
{
	pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf"
		"=%p, prev_padding=%08x)\n", subbuf, prev_subbuf, prev_padding);

	if (relay_buf_full(buf)) {
		if (!suspended) {
			suspended = 1;
			pr_warn("kgsl: cffdump: relay: cpu %d buffer full!!!\n",
				smp_processor_id());
		}
		dropped++;
		return 0;
	} else if (suspended) {
		suspended = 0;
		pr_warn("kgsl: cffdump: relay: cpu %d buffer no longer full.\n",
			smp_processor_id());
	}

	subbuf_start_reserve(buf, 0);
	return 1;
}

static struct dentry *create_buf_file_handler(const char *filename,
	struct dentry *parent, int mode, struct rchan_buf *buf,
	int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				&relay_file_operations);
}

/*
 * remove_buf_file() callback. Removes the relay file from debugfs.
 */
static int remove_buf_file_handler(struct dentry *dentry)
{
	pr_info("kgsl: cffdump: %s()\n", __func__);
	debugfs_remove(dentry);
	return 0;
}

/*
 * relay callbacks
 */
static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_handler,
	.create_buf_file = create_buf_file_handler,
	.remove_buf_file = remove_buf_file_handler,
};

/**
 * create_channel - creates channel /debug/kgsl/cff/cpuXXX
 *
 * Creates channel along with associated produced/consumed control files
 *
 * Returns channel on success, NULL otherwise
 */
static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs)
{
	struct rchan *chan;

	pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, "
		"n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir);

	chan = relay_open("cpu", dir, subbuf_size,
			n_subbufs, &relay_callbacks, NULL);
	if (!chan) {
		KGSL_CORE_ERR("relay_open failed\n");
		return NULL;
	}

	suspended = 0;
	dropped = 0;

	return chan;
}

/**
 * destroy_channel - destroys channel /debug/kgsl/cff/cpuXXX
 *
 * Destroys channel along with associated produced/consumed control files
 */
static void destroy_channel(void)
{
	pr_info("kgsl: cffdump: relay: destroy_channel\n");
	if (chan) {
		relay_close(chan);
		chan = NULL;
	}
}