/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#ifndef __ADRENO_H
#define __ADRENO_H

#include "kgsl_device.h"
#include "adreno_drawctxt.h"
#include "adreno_ringbuffer.h"
#include "kgsl_iommu.h"
#include <mach/ocmem.h>

#define DEVICE_3D_NAME "kgsl-3d"
#define DEVICE_3D0_NAME "kgsl-3d0"

#define ADRENO_DEVICE(device) \
		KGSL_CONTAINER_OF(device, struct adreno_device, dev)

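/*
 * Usage sketch (illustrative): ADRENO_DEVICE() is a container_of-style
 * conversion, so a hypothetical caller that holds a struct kgsl_device
 * *device known to be embedded in an adreno_device could write:
 *
 *	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 *
 * This is only valid for kgsl devices created as the dev member of a
 * struct adreno_device.
 */
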
#define ADRENO_CHIPID_CORE(_id) (((_id) >> 24) & 0xFF)
#define ADRENO_CHIPID_MAJOR(_id) (((_id) >> 16) & 0xFF)
#define ADRENO_CHIPID_MINOR(_id) (((_id) >> 8) & 0xFF)
#define ADRENO_CHIPID_PATCH(_id) ((_id) & 0xFF)

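/*
 * Worked example (illustrative): for a hypothetical chip_id of 0x03000520,
 * the macros above decode to ADRENO_CHIPID_CORE = 0x03,
 * ADRENO_CHIPID_MAJOR = 0x00, ADRENO_CHIPID_MINOR = 0x05 and
 * ADRENO_CHIPID_PATCH = 0x20, i.e. core in bits 31:24, major in 23:16,
 * minor in 15:8 and patch in 7:0.
 */
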
/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_NONE		0x00000000
#define KGSL_CMD_FLAGS_PMODE		0x00000001
#define KGSL_CMD_FLAGS_INTERNAL_ISSUE	0x00000002

/* Command identifiers */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
#define KGSL_CMD_IDENTIFIER		0x2EEDFACE
#define KGSL_START_OF_IB_IDENTIFIER	0x2EADEABE
#define KGSL_END_OF_IB_IDENTIFIER	0x2ABEDEAD

#ifdef CONFIG_MSM_SCM
#define ADRENO_DEFAULT_PWRSCALE_POLICY  (&kgsl_pwrscale_policy_tz)
#elif defined CONFIG_MSM_SLEEP_STATS_DEVICE
#define ADRENO_DEFAULT_PWRSCALE_POLICY  (&kgsl_pwrscale_policy_idlestats)
#else
#define ADRENO_DEFAULT_PWRSCALE_POLICY  NULL
#endif

void adreno_debugfs_init(struct kgsl_device *device);

#define ADRENO_ISTORE_START 0x5000 /* Istore offset */

#define ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW	50

/* One cannot wait forever for the core to idle, so set an upper limit to the
 * amount of time to wait for the core to go idle
 */

#define ADRENO_IDLE_TIMEOUT (20 * 1000)

enum adreno_gpurev {
	ADRENO_REV_UNKNOWN = 0,
	ADRENO_REV_A200 = 200,
	ADRENO_REV_A203 = 203,
	ADRENO_REV_A205 = 205,
	ADRENO_REV_A220 = 220,
	ADRENO_REV_A225 = 225,
	ADRENO_REV_A305 = 305,
	ADRENO_REV_A320 = 320,
	ADRENO_REV_A330 = 330,
};

struct adreno_gpudev;

struct adreno_device {
	struct kgsl_device dev;    /* Must be first field in this struct */
	unsigned int chip_id;
	enum adreno_gpurev gpurev;
	unsigned long gmem_base;
	unsigned int gmem_size;
	struct adreno_context *drawctxt_active;
	const char *pfp_fwfile;
	unsigned int *pfp_fw;
	size_t pfp_fw_size;
	unsigned int pfp_fw_version;
	const char *pm4_fwfile;
	unsigned int *pm4_fw;
	size_t pm4_fw_size;
	unsigned int pm4_fw_version;
	struct adreno_ringbuffer ringbuffer;
	unsigned int mharb;
	struct adreno_gpudev *gpudev;
	unsigned int wait_timeout;
	unsigned int istore_size;
	unsigned int pix_shader_start;
	unsigned int instruction_size;
	unsigned int ib_check_level;
	unsigned int fast_hang_detect;
	unsigned int gpulist_index;
	struct ocmem_buf *ocmem_hdl;
	unsigned int ocmem_base;
};

struct adreno_gpudev {
	/*
	 * These registers are in a different location on A3XX, so define
	 * them in the structure and use them as variables.
	 */
	unsigned int reg_rbbm_status;
	unsigned int reg_cp_pfp_ucode_data;
	unsigned int reg_cp_pfp_ucode_addr;
	/* keeps track of when we need to execute the draw workaround code */
	int ctx_switches_since_last_draw;

	/* GPU specific function hooks */
	int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
	void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
	void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
	void (*ctxt_draw_workaround)(struct adreno_device *,
					struct adreno_context *);
	irqreturn_t (*irq_handler)(struct adreno_device *);
	void (*irq_control)(struct adreno_device *, int);
	void * (*snapshot)(struct adreno_device *, void *, int *, int);
	void (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
	void (*start)(struct adreno_device *);
	unsigned int (*busy_cycles)(struct adreno_device *);
};

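/*
 * Dispatch sketch (illustrative, assumed usage): each GPU family provides
 * one instance of this structure (see adreno_a2xx_gpudev and
 * adreno_a3xx_gpudev below) with its own register offsets and hooks, and
 * core-independent code can then dispatch through the pointer stored in
 * the device, for example:
 *
 *	adreno_dev->gpudev->start(adreno_dev);
 *
 * so the same caller works on both A2XX and A3XX parts.
 */
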
/*
 * struct adreno_recovery_data - Structure that contains all information to
 * perform GPU recovery from hangs
 * @ib1 - IB1 that the GPU was executing when the hang happened
 * @context_id - Context which caused the hang
 * @global_eop - eoptimestamp at time of hang
 * @rb_buffer - Buffer that holds the commands from good contexts
 * @rb_size - Number of valid dwords in rb_buffer
 * @bad_rb_buffer - Buffer that holds commands from the hanging context
 * @bad_rb_size - Number of valid dwords in bad_rb_buffer
 * @last_valid_ctx_id - The last context from which commands were placed in
 * the ringbuffer before the GPU hung
 * @fault - Indicates whether the hang was caused by a pagefault
 * @start_of_replay_cmds - Offset in the ringbuffer from where commands can be
 * replayed during recovery
 * @replay_for_snapshot - Offset in the ringbuffer where IBs can be saved for
 * replaying with snapshot
 */
struct adreno_recovery_data {
	unsigned int ib1;
	unsigned int context_id;
	unsigned int global_eop;
	unsigned int *rb_buffer;
	unsigned int rb_size;
	unsigned int *bad_rb_buffer;
	unsigned int bad_rb_size;
	unsigned int last_valid_ctx_id;
	int fault;
	unsigned int start_of_replay_cmds;
	unsigned int replay_for_snapshot;
};

extern struct adreno_gpudev adreno_a2xx_gpudev;
extern struct adreno_gpudev adreno_a3xx_gpudev;

/* A2XX register sets defined in adreno_a2xx.c */
extern const unsigned int a200_registers[];
extern const unsigned int a220_registers[];
extern const unsigned int a225_registers[];
extern const unsigned int a200_registers_count;
extern const unsigned int a220_registers_count;
extern const unsigned int a225_registers_count;

/* A3XX register set defined in adreno_a3xx.c */
extern const unsigned int a3xx_registers[];
extern const unsigned int a3xx_registers_count;

extern const unsigned int a3xx_hlsq_registers[];
extern const unsigned int a3xx_hlsq_registers_count;

extern const unsigned int a330_registers[];
extern const unsigned int a330_registers_count;

extern unsigned int hang_detect_regs[];
extern const unsigned int hang_detect_regs_count;

int adreno_idle(struct kgsl_device *device);
void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
				unsigned int *value);
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
				unsigned int value);

int adreno_dump(struct kgsl_device *device, int manual);

struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
						unsigned int pt_base,
						unsigned int gpuaddr,
						unsigned int size);

uint8_t *adreno_convertaddr(struct kgsl_device *device,
	unsigned int pt_base, unsigned int gpuaddr, unsigned int size);

struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
	unsigned int pt_base, unsigned int gpuaddr, unsigned int size);

void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
		int hang);

int adreno_dump_and_recover(struct kgsl_device *device);

unsigned int adreno_hang_detect(struct kgsl_device *device,
						unsigned int *prev_reg_val);

static inline int adreno_is_a200(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev == ADRENO_REV_A200);
}

static inline int adreno_is_a203(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev == ADRENO_REV_A203);
}

static inline int adreno_is_a205(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev == ADRENO_REV_A205);
}

static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev <= 209);
}

static inline int adreno_is_a220(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev == ADRENO_REV_A220);
}

static inline int adreno_is_a225(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev == ADRENO_REV_A225);
}

static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev == ADRENO_REV_A220 ||
		adreno_dev->gpurev == ADRENO_REV_A225);
}

static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev <= 299);
}

static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev >= 300);
}

static inline int adreno_is_a305(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev == ADRENO_REV_A305);
}

static inline int adreno_is_a320(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev == ADRENO_REV_A320);
}

static inline int adreno_is_a330(struct adreno_device *adreno_dev)
{
	return (adreno_dev->gpurev == ADRENO_REV_A330);
}

static inline int adreno_rb_ctxtswitch(unsigned int *cmd)
{
	return (cmd[0] == cp_nop_packet(1) &&
		cmd[1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER);
}

/**
 * adreno_encode_istore_size - encode istore size in CP format
 * @adreno_dev - The 3D device.
 *
 * Encode the istore size into the format expected by the
 * CP_SET_SHADER_BASES and CP_ME_INIT commands:
 * bits 31:29 - istore size as encoded by this function
 * bits 27:16 - vertex shader start offset in instructions
 * bits 11:0 - pixel shader start offset in instructions.
 */
static inline int adreno_encode_istore_size(struct adreno_device *adreno_dev)
{
	unsigned int size;
	/* in a225 the CP microcode multiplies the encoded
	 * value by 3 while decoding.
	 */
	if (adreno_is_a225(adreno_dev))
		size = adreno_dev->istore_size/3;
	else
		size = adreno_dev->istore_size;

	return (ilog2(size) - 5) << 29;
}

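/*
 * Worked example (illustrative): assuming a hypothetical istore_size of
 * 0x1000 instructions on a non-A225 core, size remains 0x1000,
 * ilog2(0x1000) is 12, and the function returns (12 - 5) << 29, i.e. the
 * value 7 placed in bits 31:29 as described above.
 */
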
static inline int __adreno_add_idle_indirect_cmds(unsigned int *cmds,
						unsigned int nop_gpuaddr)
{
	/* Adding an indirect buffer ensures that the prefetch stalls until
	 * the commands in the indirect buffer have completed. We need to
	 * stall prefetch with a nop indirect buffer when updating pagetables
	 * because it provides more stable synchronization */
	*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
	*cmds++ = nop_gpuaddr;
	*cmds++ = 2;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;
	return 5;
}

static inline int adreno_add_change_mh_phys_limit_cmds(unsigned int *cmds,
						unsigned int new_phys_limit,
						unsigned int nop_gpuaddr)
{
	unsigned int *start = cmds;

	*cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
	*cmds++ = new_phys_limit;
	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
	return cmds - start;
}

static inline int adreno_add_bank_change_cmds(unsigned int *cmds,
					int cur_ctx_bank,
					unsigned int nop_gpuaddr)
{
	unsigned int *start = cmds;

	*cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
	*cmds++ = (cur_ctx_bank ? 0 : 0x20);
	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
	return cmds - start;
}

/*
 * adreno_add_read_cmds - Add pm4 packets to perform read
 * @device - Pointer to device structure
 * @cmds - Pointer to memory where read commands need to be added
 * @addr - gpu address of the read
 * @val - The GPU will wait until the data at address addr becomes
 * equal to val
 * @nop_gpuaddr - gpu address of a nop indirect buffer used to stall prefetch
 */
static inline int adreno_add_read_cmds(struct kgsl_device *device,
				unsigned int *cmds, unsigned int addr,
				unsigned int val, unsigned int nop_gpuaddr)
{
	unsigned int *start = cmds;

	*cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
	/* MEM SPACE = memory, FUNCTION = equals */
	*cmds++ = 0x13;
	*cmds++ = addr;
	*cmds++ = val;
	*cmds++ = 0xFFFFFFFF;
	*cmds++ = 0xFFFFFFFF;
	cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
	return cmds - start;
}

/*
 * adreno_add_idle_cmds - Add pm4 packets for GPU idle
 * @adreno_dev - Pointer to device structure
 * @cmds - Pointer to memory where idle commands need to be added
 */
static inline int adreno_add_idle_cmds(struct adreno_device *adreno_dev,
				unsigned int *cmds)
{
	unsigned int *start = cmds;

	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
	*cmds++ = 0x00000000;

	if ((adreno_dev->gpurev == ADRENO_REV_A305) ||
		(adreno_dev->gpurev == ADRENO_REV_A320)) {
		*cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
		*cmds++ = 0x00000000;
	}

	return cmds - start;
}

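/*
 * Usage sketch (illustrative): the adreno_add_*_cmds helpers above all write
 * PM4 packets into a caller-provided buffer and return the number of dwords
 * written, so a hypothetical caller building a command stream can chain them:
 *
 *	unsigned int *cmds = start;
 *	cmds += adreno_add_read_cmds(device, cmds, gpuaddr, expected_val,
 *					nop_gpuaddr);
 *	cmds += adreno_add_idle_cmds(adreno_dev, cmds);
 *	size = cmds - start;
 *
 * Here start, gpuaddr, expected_val and nop_gpuaddr are placeholder values
 * owned by the caller.
 */
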
#endif /* __ADRENO_H */