/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_snapshot.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "a2xx_reg.h"
#include "a3xx_reg.h"

/* Number of dwords of ringbuffer history to record */
#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100

/* Maintain a list of the objects we see during parsing */

#define SNAPSHOT_OBJ_BUFSIZE 64

#define SNAPSHOT_OBJ_TYPE_IB 0

/* Keep track of how many bytes are frozen after a snapshot and tell the user */
static int snapshot_frozen_objsize;

static struct kgsl_snapshot_obj {
	int type;
	uint32_t gpuaddr;
	uint32_t ptbase;
	void *ptr;
	int dwords;
} objbuf[SNAPSHOT_OBJ_BUFSIZE];

/* Pointer to the next open entry in the object list */
static int objbufptr;

/* Push a new buffer object onto the list */
static void push_object(struct kgsl_device *device, int type, uint32_t ptbase,
	uint32_t gpuaddr, int dwords)
{
	int index;
	void *ptr;

	/*
	 * Sometimes IBs can be reused in the same dump. Because we parse from
	 * oldest to newest, if we come across an IB that has already been used,
	 * assume that it has been reused and update the list with the newest
	 * size.
	 */

	for (index = 0; index < objbufptr; index++) {
		if (objbuf[index].gpuaddr == gpuaddr &&
			objbuf[index].ptbase == ptbase) {
			objbuf[index].dwords = dwords;
			return;
		}
	}

	if (objbufptr == SNAPSHOT_OBJ_BUFSIZE) {
		KGSL_DRV_ERR(device, "snapshot: too many snapshot objects\n");
		return;
	}

	/*
	 * adreno_convertaddr verifies that the IB size is valid - at least in
	 * the context of it being smaller than the allocated memory space
	 */
	ptr = adreno_convertaddr(device, ptbase, gpuaddr, dwords << 2);

	if (ptr == NULL) {
		KGSL_DRV_ERR(device,
			"snapshot: Can't find GPU address for %x\n", gpuaddr);
		return;
	}

	/* Put it on the list of things to parse */
	objbuf[objbufptr].type = type;
	objbuf[objbufptr].gpuaddr = gpuaddr;
	objbuf[objbufptr].ptbase = ptbase;
	objbuf[objbufptr].dwords = dwords;
	objbuf[objbufptr++].ptr = ptr;
}

/*
 * Return 1 if the specified object is already on the list of buffers
 * to be dumped
 */

static int find_object(int type, unsigned int gpuaddr, unsigned int ptbase)
{
	int index;

	for (index = 0; index < objbufptr; index++) {
		if (objbuf[index].gpuaddr == gpuaddr &&
			objbuf[index].ptbase == ptbase &&
			objbuf[index].type == type)
			return 1;
	}

	return 0;
}

/*
 * This structure keeps track of type0 writes to VSC_PIPE_DATA_ADDRESS_x and
 * VSC_PIPE_DATA_LENGTH_x. When a draw initiator is issued these registers
 * point to buffers that we need to freeze for a snapshot
 */

static struct {
	unsigned int base;
	unsigned int size;
} vsc_pipe[8];

/*
 * This is the cached value of type0 writes to VSC_SIZE_ADDRESS, which
 * contains the buffer address of the visibility stream size buffer during a
 * binning pass
 */

static unsigned int vsc_size_address;

/*
 * This struct keeps track of type0 writes to the VFD_FETCH_INSTR_0_X and
 * VFD_FETCH_INSTR_1_X registers. When a draw initiator is issued the addresses
 * and sizes in these registers point to VBOs that we need to freeze for a
 * snapshot
 */

static struct {
	unsigned int base;
	unsigned int stride;
} vbo[16];

/*
 * This is the cached value of type0 writes to VFD_INDEX_MAX. This will be used
 * to calculate the size of the VBOs when the draw initiator is issued
 */

static unsigned int vfd_index_max;

/*
 * This is the cached value of type0 writes to VFD_CONTROL_0 which tells us how
 * many VBOs are active when the draw initiator is issued
 */

static unsigned int vfd_control_0;

/*
 * Cached value of type0 writes to SP_VS_PVT_MEM_ADDR and SP_FS_PVT_MEM_ADDR.
 * This is a buffer that contains private stack information for the shader
 */

static unsigned int sp_vs_pvt_mem_addr;
static unsigned int sp_fs_pvt_mem_addr;

/*
 * Cached value of SP_VS_OBJ_START_REG and SP_FS_OBJ_START_REG.
 */
static unsigned int sp_vs_obj_start_reg;
static unsigned int sp_fs_obj_start_reg;

/*
 * Each load state block has two possible types. Each type has a different
 * number of dwords per unit. Use this handy lookup table to make sure
 * we dump the right amount of data from the indirect buffer
 */

static int load_state_unit_sizes[7][2] = {
	{ 2, 4 },
	{ 0, 1 },
	{ 2, 4 },
	{ 0, 1 },
	{ 8, 2 },
	{ 8, 2 },
	{ 8, 2 },
};
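
/*
 * Rows in the table above are indexed by the packet's state block value;
 * column 0 is used when the packet's type field is 0 and column 1 otherwise.
 * As a worked example of the size math in ib_parse_load_state() below, 16
 * units of a 2-dword unit type freeze 16 * 2 * 4 = 128 bytes.
 */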

static int ib_parse_load_state(struct kgsl_device *device, unsigned int *pkt,
	unsigned int ptbase)
{
	unsigned int block, source, type;
	int ret = 0;

	/*
	 * The object here is to find indirect shaders, i.e. shaders loaded
	 * from GPU memory instead of directly in the command. These should be
	 * added to the list of memory objects to dump. So look at the load
	 * state source: if the block is indirect (source = 4), then add the
	 * memory address to the list. The size of the object differs depending
	 * on the type per the load_state_unit_sizes array above.
	 */

	if (type3_pkt_size(pkt[0]) < 2)
		return 0;

	/*
	 * pkt[1] 18:16 - source
	 * pkt[1] 21:19 - state block
	 * pkt[1] 31:22 - size in units
	 * pkt[2] 0:1 - type
	 * pkt[2] 31:2 - GPU memory address
	 */

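	/*
	 * Example decode: pkt[1] == 0x04140000 specifies 16 units from state
	 * block 2 with source 4 (indirect), and pkt[2] & 0xFFFFFFFC is the
	 * dword-aligned GPU address of the data.
	 */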
	block = (pkt[1] >> 19) & 0x07;
	source = (pkt[1] >> 16) & 0x07;
	type = pkt[2] & 0x03;

	if (source == 4) {
		int unitsize;

		if (type == 0)
			unitsize = load_state_unit_sizes[block][0];
		else
			unitsize = load_state_unit_sizes[block][1];

		/* Freeze the GPU buffer containing the shader */

		ret = kgsl_snapshot_get_object(device, ptbase,
			pkt[2] & 0xFFFFFFFC,
			(((pkt[1] >> 22) & 0x03FF) * unitsize) << 2,
			SNAPSHOT_GPU_OBJECT_SHADER);

		if (ret < 0)
			return -EINVAL;

		snapshot_frozen_objsize += ret;
	}

	return ret;
}

/*
 * This opcode sets the base addresses for the visibility stream buffer and the
 * visibility stream size buffer.
 */

static int ib_parse_set_bin_data(struct kgsl_device *device, unsigned int *pkt,
	unsigned int ptbase)
{
	int ret;

	if (type3_pkt_size(pkt[0]) < 2)
		return 0;

	/* Visibility stream buffer */
	ret = kgsl_snapshot_get_object(device, ptbase, pkt[1], 0,
		SNAPSHOT_GPU_OBJECT_GENERIC);

	if (ret < 0)
		return -EINVAL;

	snapshot_frozen_objsize += ret;

	/* Visibility stream size buffer (fixed size 8 dwords) */
	ret = kgsl_snapshot_get_object(device, ptbase, pkt[2], 32,
		SNAPSHOT_GPU_OBJECT_GENERIC);

	if (ret >= 0)
		snapshot_frozen_objsize += ret;

	return ret;
}

/*
 * This opcode writes to GPU memory - if the buffer is written to, there is a
 * good chance that it would be valuable to capture in the snapshot, so mark all
 * buffers that are written to as frozen
 */

static int ib_parse_mem_write(struct kgsl_device *device, unsigned int *pkt,
	unsigned int ptbase)
{
	int ret;

	if (type3_pkt_size(pkt[0]) < 1)
		return 0;

	/*
	 * The address is where the data in the rest of this packet is written
	 * to, but since that might be an offset into the larger buffer we need
	 * to get the whole thing. Pass a size of 0 to kgsl_snapshot_get_object
	 * to capture the entire buffer.
	 */

	ret = kgsl_snapshot_get_object(device, ptbase, pkt[1] & 0xFFFFFFFC, 0,
		SNAPSHOT_GPU_OBJECT_GENERIC);

	if (ret >= 0)
		snapshot_frozen_objsize += ret;

	return ret;
}

/*
 * The DRAW_INDX opcode sends a draw initiator which starts a draw operation in
 * the GPU, so this is the point where all the registers and buffers become
 * "valid". The DRAW_INDX may also have an index buffer pointer that should be
 * frozen with the others
 */

static int ib_parse_draw_indx(struct kgsl_device *device, unsigned int *pkt,
	unsigned int ptbase)
{
	int ret = 0, i;

	if (type3_pkt_size(pkt[0]) < 3)
		return 0;

	/* DRAW_INDX may have an index buffer pointer */

	if (type3_pkt_size(pkt[0]) > 3) {
		ret = kgsl_snapshot_get_object(device, ptbase, pkt[4], pkt[5],
			SNAPSHOT_GPU_OBJECT_GENERIC);
		if (ret < 0)
			return -EINVAL;

		snapshot_frozen_objsize += ret;
	}

	/*
	 * All of the type0 writes are valid at a draw initiator, so freeze
	 * the various buffers that we are tracking
	 */

	/* First up the visibility stream buffer */

	for (i = 0; i < ARRAY_SIZE(vsc_pipe); i++) {
		if (vsc_pipe[i].base != 0 && vsc_pipe[i].size != 0) {
			ret = kgsl_snapshot_get_object(device, ptbase,
				vsc_pipe[i].base, vsc_pipe[i].size,
				SNAPSHOT_GPU_OBJECT_GENERIC);
			if (ret < 0)
				return -EINVAL;

			snapshot_frozen_objsize += ret;
		}
	}

	/* Next the visibility stream size buffer */

	if (vsc_size_address) {
		ret = kgsl_snapshot_get_object(device, ptbase,
			vsc_size_address, 32,
			SNAPSHOT_GPU_OBJECT_GENERIC);
		if (ret < 0)
			return -EINVAL;

		snapshot_frozen_objsize += ret;
	}

	/* Next private shader buffer memory */
	if (sp_vs_pvt_mem_addr) {
		ret = kgsl_snapshot_get_object(device, ptbase,
			sp_vs_pvt_mem_addr, 8192,
			SNAPSHOT_GPU_OBJECT_GENERIC);
		if (ret < 0)
			return -EINVAL;

		snapshot_frozen_objsize += ret;
		sp_vs_pvt_mem_addr = 0;
	}

	if (sp_fs_pvt_mem_addr) {
		ret = kgsl_snapshot_get_object(device, ptbase,
			sp_fs_pvt_mem_addr, 8192,
			SNAPSHOT_GPU_OBJECT_GENERIC);
		if (ret < 0)
			return -EINVAL;

		snapshot_frozen_objsize += ret;
		sp_fs_pvt_mem_addr = 0;
	}

	if (sp_vs_obj_start_reg) {
		ret = kgsl_snapshot_get_object(device, ptbase,
			sp_vs_obj_start_reg & 0xFFFFFFE0, 0,
			SNAPSHOT_GPU_OBJECT_GENERIC);
		if (ret < 0)
			return -EINVAL;
		snapshot_frozen_objsize += ret;
		sp_vs_obj_start_reg = 0;
	}

	if (sp_fs_obj_start_reg) {
		ret = kgsl_snapshot_get_object(device, ptbase,
			sp_fs_obj_start_reg & 0xFFFFFFE0, 0,
			SNAPSHOT_GPU_OBJECT_GENERIC);
		if (ret < 0)
			return -EINVAL;
		snapshot_frozen_objsize += ret;
		sp_fs_obj_start_reg = 0;
	}

	/* Finally: VBOs */

	/* The number of active VBOs is stored in VFD_CONTROL_0[31:27] */
	for (i = 0; i < (vfd_control_0) >> 27; i++) {
		int size;

		/*
		 * The size of the VBO is the stride stored in
		 * VFD_FETCH_INSTR_0_X.BUFSTRIDE * VFD_INDEX_MAX. The base
		 * is stored in VFD_FETCH_INSTR_1_X
		 */

		if (vbo[i].base != 0) {
			size = vbo[i].stride * vfd_index_max;

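			/*
			 * size above is the expected VBO footprint, but a
			 * length of 0 is passed so the entire allocation
			 * backing the VBO is frozen.
			 */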
			ret = kgsl_snapshot_get_object(device, ptbase,
				vbo[i].base,
				0, SNAPSHOT_GPU_OBJECT_GENERIC);
			if (ret < 0)
				return -EINVAL;

			snapshot_frozen_objsize += ret;
		}

		vbo[i].base = 0;
		vbo[i].stride = 0;
	}

	vfd_control_0 = 0;
	vfd_index_max = 0;

	return ret;
}

/*
 * Parse all the type3 opcode packets that may contain important information,
 * such as additional GPU buffers to grab or a draw initiator
 */

static int ib_parse_type3(struct kgsl_device *device, unsigned int *ptr,
	unsigned int ptbase)
{
	int opcode = cp_type3_opcode(*ptr);

	if (opcode == CP_LOAD_STATE)
		return ib_parse_load_state(device, ptr, ptbase);
	else if (opcode == CP_SET_BIN_DATA)
		return ib_parse_set_bin_data(device, ptr, ptbase);
	else if (opcode == CP_MEM_WRITE)
		return ib_parse_mem_write(device, ptr, ptbase);
	else if (opcode == CP_DRAW_INDX)
		return ib_parse_draw_indx(device, ptr, ptbase);

	return 0;
}

/*
 * Parse type0 packets found in the stream. Some of the registers that are
 * written are clues for GPU buffers that we need to freeze. Register writes
 * are considered valid when a draw initiator is issued, so just cache the
 * values here and freeze them when a CP_DRAW_INDX is seen. This protects
 * against needlessly caching buffers that won't be used during a draw call
 */

static void ib_parse_type0(struct kgsl_device *device, unsigned int *ptr,
	unsigned int ptbase)
{
	int size = type0_pkt_size(*ptr);
	int offset = type0_pkt_offset(*ptr);
	int i;

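	/*
	 * A type0 packet writes a run of consecutive registers: ptr[i + 1] is
	 * the value written to register (offset + i) for each payload dword.
	 */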
	for (i = 0; i < size - 1; i++, offset++) {

		/* Visibility stream buffer */

		if (offset >= A3XX_VSC_PIPE_DATA_ADDRESS_0 &&
			offset <= A3XX_VSC_PIPE_DATA_LENGTH_7) {
			int index = offset - A3XX_VSC_PIPE_DATA_ADDRESS_0;

			/* Each bank of address and length registers are
			 * interleaved with an empty register:
			 *
			 * address 0
			 * length 0
			 * empty
			 * address 1
			 * length 1
			 * empty
			 * ...
			 */

			if ((index % 3) == 0)
				vsc_pipe[index / 3].base = ptr[i + 1];
			else if ((index % 3) == 1)
				vsc_pipe[index / 3].size = ptr[i + 1];
		} else if ((offset >= A3XX_VFD_FETCH_INSTR_0_0) &&
			(offset <= A3XX_VFD_FETCH_INSTR_1_F)) {
			int index = offset - A3XX_VFD_FETCH_INSTR_0_0;

			/*
			 * FETCH_INSTR_0_X and FETCH_INSTR_1_X banks are
			 * interleaved as above but without the empty register
			 * in between
			 */

			if ((index % 2) == 0)
				vbo[index >> 1].stride =
					(ptr[i + 1] >> 7) & 0x1FF;
			else
				vbo[index >> 1].base = ptr[i + 1];
		} else {
			/*
			 * Cache various support registers for calculating
			 * buffer sizes
			 */

			switch (offset) {
			case A3XX_VFD_CONTROL_0:
				vfd_control_0 = ptr[i + 1];
				break;
			case A3XX_VFD_INDEX_MAX:
				vfd_index_max = ptr[i + 1];
				break;
			case A3XX_VSC_SIZE_ADDRESS:
				vsc_size_address = ptr[i + 1];
				break;
			case A3XX_SP_VS_PVT_MEM_ADDR_REG:
				sp_vs_pvt_mem_addr = ptr[i + 1];
				break;
			case A3XX_SP_FS_PVT_MEM_ADDR_REG:
				sp_fs_pvt_mem_addr = ptr[i + 1];
				break;
			case A3XX_SP_VS_OBJ_START_REG:
				sp_vs_obj_start_reg = ptr[i + 1];
				break;
			case A3XX_SP_FS_OBJ_START_REG:
				sp_fs_obj_start_reg = ptr[i + 1];
				break;
			}
		}
	}
}

static inline int parse_ib(struct kgsl_device *device, unsigned int ptbase,
	unsigned int gpuaddr, unsigned int dwords);

/* Add an IB as a GPU object, but first, parse it to find more goodies within */

static int ib_add_gpu_object(struct kgsl_device *device, unsigned int ptbase,
	unsigned int gpuaddr, unsigned int dwords)
{
	int i, ret, rem = dwords;
	unsigned int *src;

	/*
	 * If the object is already in the list, we don't need to parse it again
	 */

	if (kgsl_snapshot_have_object(device, ptbase, gpuaddr, dwords << 2))
		return 0;

	src = (unsigned int *) adreno_convertaddr(device, ptbase, gpuaddr,
		dwords << 2);

	if (src == NULL)
		return -EINVAL;

	for (i = 0; rem > 0; rem--, i++) {
		int pktsize;

		/* If the packet isn't a type 0 or a type 3, then don't bother
		 * parsing it - it is likely corrupted */

		if (!pkt_is_type0(src[i]) && !pkt_is_type3(src[i]))
			break;

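		/*
		 * The same size macro is used for both packet types below;
		 * this relies on type0 and type3 headers encoding their
		 * payload length in the same bit field.
		 */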
		pktsize = type3_pkt_size(src[i]);

		if (!pktsize || (pktsize + 1) > rem)
			break;

		if (pkt_is_type3(src[i])) {
			if (adreno_cmd_is_ib(src[i])) {
				unsigned int gpuaddr = src[i + 1];
				unsigned int size = src[i + 2];

				ret = parse_ib(device, ptbase, gpuaddr, size);

				/* If adding the IB failed then stop parsing */
				if (ret < 0)
					goto done;
			} else {
				ret = ib_parse_type3(device, &src[i], ptbase);
				/*
				 * If the parse function failed (probably
				 * because of a bad decode) then bail out and
				 * just capture the binary IB data
				 */

				if (ret < 0)
					goto done;
			}
		} else if (pkt_is_type0(src[i])) {
			ib_parse_type0(device, &src[i], ptbase);
		}

		i += pktsize;
		rem -= pktsize;
	}

done:
	ret = kgsl_snapshot_get_object(device, ptbase, gpuaddr, dwords << 2,
		SNAPSHOT_GPU_OBJECT_IB);

	if (ret >= 0)
		snapshot_frozen_objsize += ret;

	return ret;
}

/*
 * We want to store the last executed IB1 and IB2 in the static region to ensure
 * that we get at least some information out of the snapshot even if we can't
 * access the dynamic data from the sysfs file. Push all other IBs on the
 * dynamic list
 */
static inline int parse_ib(struct kgsl_device *device, unsigned int ptbase,
	unsigned int gpuaddr, unsigned int dwords)
{
	unsigned int ib1base, ib2base;
	int ret = 0;

	/*
	 * Check the IB address - if it is either the last executed IB1 or the
	 * last executed IB2 then push it into the static blob otherwise put
	 * it in the dynamic list
	 */

	kgsl_regread(device, REG_CP_IB1_BASE, &ib1base);
	kgsl_regread(device, REG_CP_IB2_BASE, &ib2base);

	if (gpuaddr == ib1base || gpuaddr == ib2base)
		push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
			gpuaddr, dwords);
	else
		ret = ib_add_gpu_object(device, ptbase, gpuaddr, dwords);

	return ret;
}

/* Snapshot the ringbuffer memory */
static int snapshot_rb(struct kgsl_device *device, void *snapshot,
	int remain, void *priv)
{
	struct kgsl_snapshot_rb *header = snapshot;
	unsigned int *data = snapshot + sizeof(*header);
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
	unsigned int ptbase, rptr, *rbptr, ibbase;
	int index, size, i;
	int parse_ibs = 0, ib_parse_start;

	/* Get the physical address of the MMU pagetable */
	ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);

	/* Get the current read pointer for the RB */
	kgsl_regread(device, REG_CP_RB_RPTR, &rptr);

	/* Address of the last processed IB */
	kgsl_regread(device, REG_CP_IB1_BASE, &ibbase);

	/*
	 * Figure out the window of ringbuffer data to dump. First we need to
	 * find where the last processed IB was submitted. Start walking back
	 * from the rptr
	 */

	index = rptr;
	rbptr = rb->buffer_desc.hostptr;

	do {
		index--;

		if (index < 0) {
			index = rb->sizedwords - 3;

			/* We wrapped without finding what we wanted */
			if (index < rb->wptr) {
				index = rb->wptr;
				break;
			}
		}

		if (adreno_cmd_is_ib(rbptr[index]) &&
			rbptr[index + 1] == ibbase)
			break;
	} while (index != rb->wptr);

	/*
	 * index points at the last submitted IB. We can only trust that the
	 * memory between the context switch and the hanging IB is valid, so
	 * the next step is to find the context switch before the submission
	 */

	while (index != rb->wptr) {
		index--;

		if (index < 0) {
			index = rb->sizedwords - 2;

			/*
			 * Wrapped without finding the context switch. This is
			 * harmless - we should still have enough data to dump a
			 * valid state
			 */

			if (index < rb->wptr) {
				index = rb->wptr;
				break;
			}
		}

		/* Break if the current packet is a context switch identifier */
		if ((rbptr[index] == cp_nop_packet(1)) &&
			(rbptr[index + 1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER))
			break;
	}

	/*
	 * Index represents the start of the window of interest. We will try
	 * to dump all buffers between here and the rptr
	 */

	ib_parse_start = index;

	/*
	 * Dump the entire ringbuffer - the parser can choose how much of it to
	 * process
	 */

	size = (rb->sizedwords << 2);

	if (remain < size + sizeof(*header)) {
		KGSL_DRV_ERR(device,
			"snapshot: Not enough memory for the rb section");
		return 0;
	}

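	/*
	 * The entire ring is copied below, starting at wptr and wrapping
	 * around, so start/end/count simply describe the full buffer.
	 */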
	/* Write the sub-header for the section */
	header->start = rb->wptr;
	header->end = rb->wptr;
	header->wptr = rb->wptr;
	header->rbsize = rb->sizedwords;
	header->count = rb->sizedwords;

	/*
	 * Loop through the RB, copying the data and looking for indirect
	 * buffers and MMU pagetable changes
	 */

	index = rb->wptr;
	for (i = 0; i < rb->sizedwords; i++) {
		*data = rbptr[index];

		/*
		 * Only parse IBs between the start and the rptr or the next
		 * context switch, whichever comes first
		 */

		if (parse_ibs == 0 && index == ib_parse_start)
			parse_ibs = 1;
		else if (index == rptr || adreno_rb_ctxtswitch(&rbptr[index]))
			parse_ibs = 0;

		if (parse_ibs && adreno_cmd_is_ib(rbptr[index])) {
			unsigned int ibaddr = rbptr[index + 1];
			unsigned int ibsize = rbptr[index + 2];

			/*
			 * This will return non NULL if the IB happens to be
			 * part of the context memory (i.e. context switch
			 * command buffers)
			 */

			struct kgsl_memdesc *memdesc =
				adreno_find_ctxtmem(device, ptbase, ibaddr,
					ibsize << 2);

			/* IOMMU uses a NOP IB placed in setstate memory */
			if (NULL == memdesc)
				if (kgsl_gpuaddr_in_memdesc(
					&device->mmu.setstate_memory,
					ibaddr, ibsize << 2))
					memdesc = &device->mmu.setstate_memory;
			/*
			 * The IB from CP_IB1_BASE and the IBs for legacy
			 * context switch go into the snapshot; all
			 * others get marked as GPU objects
			 */

			if (memdesc != NULL)
				push_object(device, SNAPSHOT_OBJ_TYPE_IB,
					ptbase, ibaddr, ibsize);
			else
				parse_ib(device, ptbase, ibaddr, ibsize);
		}

		index = index + 1;

		if (index == rb->sizedwords)
			index = 0;

		data++;
	}

	/* Return the size of the section */
	return size + sizeof(*header);
}

static int snapshot_capture_mem_list(struct kgsl_device *device, void *snapshot,
	int remain, void *priv)
{
	struct kgsl_snapshot_replay_mem_list *header = snapshot;
	struct kgsl_process_private *private;
	unsigned int ptbase;
	struct rb_node *node;
	struct kgsl_mem_entry *entry = NULL;
	int num_mem;
	unsigned int *data = snapshot + sizeof(*header);

	ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);
	mutex_lock(&kgsl_driver.process_mutex);
	list_for_each_entry(private, &kgsl_driver.process_list, list) {
		if (kgsl_mmu_pt_equal(&device->mmu, private->pagetable,
			ptbase))
			break;
	}
	mutex_unlock(&kgsl_driver.process_mutex);
	if (!private) {
		KGSL_DRV_ERR(device,
			"Failed to get pointer to process private structure\n");
		return 0;
	}
	/* We need to know the number of memory objects that the process has */
	spin_lock(&private->mem_lock);
	for (node = rb_first(&private->mem_rb), num_mem = 0; node; ) {
		entry = rb_entry(node, struct kgsl_mem_entry, node);
		node = rb_next(&entry->node);
		num_mem++;
	}

	if (remain < ((num_mem * 3 * sizeof(unsigned int)) +
			sizeof(*header))) {
		KGSL_DRV_ERR(device,
			"snapshot: Not enough memory for the mem list section");
		spin_unlock(&private->mem_lock);
		return 0;
	}
	header->num_entries = num_mem;
	header->ptbase = ptbase;
	/*
	 * Walk through the memory list and store the
	 * tuples (gpuaddr, size, memtype) in the snapshot
	 */
	for (node = rb_first(&private->mem_rb); node; ) {
		entry = rb_entry(node, struct kgsl_mem_entry, node);
		node = rb_next(&entry->node);

		*data++ = entry->memdesc.gpuaddr;
		*data++ = entry->memdesc.size;
		*data++ = (entry->memdesc.priv & KGSL_MEMTYPE_MASK) >>
			KGSL_MEMTYPE_SHIFT;
	}
	spin_unlock(&private->mem_lock);
	return sizeof(*header) + (num_mem * 3 * sizeof(unsigned int));
}

/* Snapshot the memory for an indirect buffer */
static int snapshot_ib(struct kgsl_device *device, void *snapshot,
	int remain, void *priv)
{
	struct kgsl_snapshot_ib *header = snapshot;
	struct kgsl_snapshot_obj *obj = priv;
	unsigned int *src = obj->ptr;
	unsigned int *dst = snapshot + sizeof(*header);
	int i, ret;

	if (remain < (obj->dwords << 2) + sizeof(*header)) {
		KGSL_DRV_ERR(device,
			"snapshot: Not enough memory for the ib section");
		return 0;
	}

	/* Write the sub-header for the section */
	header->gpuaddr = obj->gpuaddr;
	header->ptbase = obj->ptbase;
	header->size = obj->dwords;

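	/*
	 * Parsing the IB contents below may push more objects onto objbuf;
	 * the dump loop in adreno_snapshot() picks those up because it
	 * re-checks objbufptr on every iteration.
	 */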
	/* Write the contents of the ib */
	for (i = 0; i < obj->dwords; i++, src++, dst++) {
		*dst = *src;

		if (pkt_is_type3(*src)) {
			if ((obj->dwords - i) < type3_pkt_size(*src) + 1)
				continue;

			if (adreno_cmd_is_ib(*src))
				ret = parse_ib(device, obj->ptbase, src[1],
					src[2]);
			else
				ret = ib_parse_type3(device, src, obj->ptbase);

			/* Stop parsing if the type3 decode fails */
			if (ret < 0)
				break;
		}
	}

	return (obj->dwords << 2) + sizeof(*header);
}

/* Dump another item on the current pending list */
static void *dump_object(struct kgsl_device *device, int obj, void *snapshot,
	int *remain)
{
	switch (objbuf[obj].type) {
	case SNAPSHOT_OBJ_TYPE_IB:
		snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_IB, snapshot, remain,
			snapshot_ib, &objbuf[obj]);
		break;
	default:
		KGSL_DRV_ERR(device,
			"snapshot: Invalid snapshot object type: %d\n",
			objbuf[obj].type);
		break;
	}

	return snapshot;
}

/* adreno_snapshot - Snapshot the Adreno GPU state
 * @device - KGSL device to snapshot
 * @snapshot - Pointer to the start of memory to write into
 * @remain - A pointer to how many bytes of memory are remaining in the snapshot
 * @hang - set if this snapshot was automatically triggered by a GPU hang
 * This is a hook function called by kgsl_snapshot to snapshot the
 * Adreno specific information for the GPU snapshot. In turn, this function
 * calls the GPU specific snapshot function to get core specific information.
 */

void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
		int hang)
{
	int i;
	uint32_t ptbase, ibbase, ibsize;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Reset the list of objects */
	objbufptr = 0;

	snapshot_frozen_objsize = 0;

	/* Clear the caches for the visibility stream and VBO parsing */

	vfd_control_0 = 0;
	vfd_index_max = 0;
	vsc_size_address = 0;

	memset(vsc_pipe, 0, sizeof(vsc_pipe));
	memset(vbo, 0, sizeof(vbo));

	/* Get the physical address of the MMU pagetable */
	ptbase = kgsl_mmu_get_current_ptbase(&device->mmu);

	/* Dump the ringbuffer */
	snapshot = kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_RB,
		snapshot, remain, snapshot_rb, NULL);

	/*
	 * Add a section that lists (gpuaddr, size, memtype) tuples of the
	 * hanging process
	 */
	snapshot = kgsl_snapshot_add_section(device,
			KGSL_SNAPSHOT_SECTION_MEMLIST, snapshot, remain,
			snapshot_capture_mem_list, NULL);
	/*
	 * Make sure that the last IB1 that was being executed is dumped.
	 * Since this was the last IB1 that was processed, we should have
	 * already added it to the list during the ringbuffer parse but we
	 * want to be double plus sure.
	 */

	kgsl_regread(device, REG_CP_IB1_BASE, &ibbase);
	kgsl_regread(device, REG_CP_IB1_BUFSZ, &ibsize);

	/*
	 * The problem is that the IB size from the register is the unprocessed
	 * size of the buffer, not the original size, so if we didn't catch this
	 * buffer being directly used in the RB, then we might not be able to
	 * dump the whole thing. Print a warning message so we can try to
	 * figure out how often this really happens.
	 */

	if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) {
		push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
			ibbase, ibsize);
		KGSL_DRV_ERR(device, "CP_IB1_BASE not found in the ringbuffer. "
			"Dumping %x dwords of the buffer.\n", ibsize);
	}

	kgsl_regread(device, REG_CP_IB2_BASE, &ibbase);
	kgsl_regread(device, REG_CP_IB2_BUFSZ, &ibsize);

	/*
	 * Add the last parsed IB2 to the list. The IB2 should be found as we
	 * parse the objects below, but we try to add it to the list first, so
	 * it too can be parsed. Don't print an error message in this case - if
	 * the IB2 is found during parsing, the list will be updated with the
	 * correct size.
	 */

	if (!find_object(SNAPSHOT_OBJ_TYPE_IB, ibbase, ptbase) && ibsize) {
		push_object(device, SNAPSHOT_OBJ_TYPE_IB, ptbase,
			ibbase, ibsize);
	}

	/*
	 * Go through the list of found objects and dump each one. As the IBs
	 * are parsed, more objects might be found, and objbufptr will increase
	 */
	for (i = 0; i < objbufptr; i++)
		snapshot = dump_object(device, i, snapshot, remain);

	/* Add GPU specific sections - registers mainly, but other stuff too */
	if (adreno_dev->gpudev->snapshot)
		snapshot = adreno_dev->gpudev->snapshot(adreno_dev, snapshot,
			remain, hang);

	if (snapshot_frozen_objsize)
		KGSL_DRV_ERR(device, "GPU snapshot froze %dKb of GPU buffers\n",
			snapshot_frozen_objsize / 1024);

	return snapshot;
}