blob: 881e39c970734a60f2790869e93736e20e7fd1e6 [file] [log] [blame]
Steve Kondikf7652b32013-11-26 15:20:51 -08001/* Copyright (c) 2008-2013, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#ifndef __ADRENO_H
14#define __ADRENO_H
15
16#include "kgsl_device.h"
17#include "adreno_drawctxt.h"
18#include "adreno_ringbuffer.h"
19#include "kgsl_iommu.h"
20#include <mach/ocmem.h>
21
/* Device names used to register the 3D core with KGSL */
#define DEVICE_3D_NAME "kgsl-3d"
#define DEVICE_3D0_NAME "kgsl-3d0"

/* Convert an embedded kgsl_device pointer back to its adreno_device */
#define ADRENO_DEVICE(device) \
		KGSL_CONTAINER_OF(device, struct adreno_device, dev)

/* Convert an embedded kgsl_context pointer back to its adreno_context */
#define ADRENO_CONTEXT(device) \
		KGSL_CONTAINER_OF(device, struct adreno_context, base)

/* Extract the individual fields of the 32 bit chip id */
#define ADRENO_CHIPID_CORE(_id) (((_id) >> 24) & 0xFF)
#define ADRENO_CHIPID_MAJOR(_id) (((_id) >> 16) & 0xFF)
#define ADRENO_CHIPID_MINOR(_id) (((_id) >> 8) & 0xFF)
#define ADRENO_CHIPID_PATCH(_id) ((_id) & 0xFF)
35
/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_NONE             0
#define KGSL_CMD_FLAGS_PMODE		BIT(0)
#define KGSL_CMD_FLAGS_INTERNAL_ISSUE	BIT(1)
#define KGSL_CMD_FLAGS_WFI		BIT(2)
#define KGSL_CMD_FLAGS_PWRON_FIXUP	BIT(3)

/*
 * Command identifiers - distinctive magic values embedded in the command
 * stream so that specific packet boundaries can be recognized later
 * (presumably by the dump/snapshot paths - TODO confirm at use sites)
 */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER	0x2EADBEEF
#define KGSL_CMD_IDENTIFIER		0x2EEDFACE
#define KGSL_CMD_INTERNAL_IDENTIFIER	0x2EEDD00D
#define KGSL_START_OF_IB_IDENTIFIER	0x2EADEABE
#define KGSL_END_OF_IB_IDENTIFIER	0x2ABEDEAD
#define KGSL_END_OF_FRAME_IDENTIFIER	0x2E0F2E0F
#define KGSL_NOP_IB_IDENTIFIER	        0x20F20F20
#define KGSL_PWRON_FIXUP_IDENTIFIER	0x2AFAFAFA
52
/*
 * Select the default power scale policy: TrustZone-based when SCM is
 * available, idlestats when the sleep-stats device is enabled, else none.
 */
#ifdef CONFIG_MSM_SCM
#define ADRENO_DEFAULT_PWRSCALE_POLICY  (&kgsl_pwrscale_policy_tz)
#elif defined CONFIG_MSM_SLEEP_STATS_DEVICE
#define ADRENO_DEFAULT_PWRSCALE_POLICY  (&kgsl_pwrscale_policy_idlestats)
#else
#define ADRENO_DEFAULT_PWRSCALE_POLICY  NULL
#endif

void adreno_debugfs_init(struct kgsl_device *device);

#define ADRENO_ISTORE_START 0x5000 /* Istore offset */

/* Context switches allowed before the draw workaround runs (see
 * adreno_gpudev.ctx_switches_since_last_draw) */
#define ADRENO_NUM_CTX_SWITCH_ALLOWED_BEFORE_DRAW	50

/* One cannot wait forever for the core to idle, so set an upper limit to the
 * amount of time to wait for the core to go idle
 */
#define ADRENO_IDLE_TIMEOUT (20 * 1000)	/* presumably msec - TODO confirm */
72
/*
 * enum adreno_gpurev - canonical GPU core revisions. The numeric value
 * mirrors the marketing core number, except A305C (306) and A305B (335)
 * which use out-of-family values.
 */
enum adreno_gpurev {
	ADRENO_REV_UNKNOWN = 0,
	ADRENO_REV_A200 = 200,
	ADRENO_REV_A203 = 203,
	ADRENO_REV_A205 = 205,
	ADRENO_REV_A220 = 220,
	ADRENO_REV_A225 = 225,
	ADRENO_REV_A305 = 305,
	ADRENO_REV_A305C = 306,
	ADRENO_REV_A320 = 320,
	ADRENO_REV_A330 = 330,
	ADRENO_REV_A305B = 335,
	ADRENO_REV_A420 = 420,
};
87
/*
 * enum coresight_debug_reg - logical coresight debug registers passed to
 * the gpudev coresight_config_debug_reg() hook
 */
enum coresight_debug_reg {
	DEBUG_BUS_CTL,
	TRACE_STOP_CNT,
	TRACE_START_CNT,
	TRACE_PERIOD_CNT,
	TRACE_CMD,
	TRACE_BUS_CTL,
};
96
/* Fault type bits stored in adreno_dispatcher.fault */
#define ADRENO_SOFT_FAULT BIT(0)
#define ADRENO_HARD_FAULT BIT(1)
#define ADRENO_TIMEOUT_FAULT BIT(2)
#define ADRENO_IOMMU_PAGE_FAULT BIT(3)

/*
 * Maximum size of the dispatcher ringbuffer - the actual inflight size will be
 * smaller than this but this size will allow for a larger range of inflight
 * sizes that can be chosen at runtime
 */
#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
109
/**
 * struct adreno_dispatcher - container for the adreno GPU dispatcher
 * @mutex: Mutex to protect the structure
 * @state: Current state of the dispatcher (active or paused)
 * @timer: Timer to monitor the progress of the command batches
 * @fault_timer: Timer used while commands are inflight (presumably to detect
 * a stalled GPU - TODO confirm against adreno_dispatch.c)
 * @inflight: Number of command batch operations pending in the ringbuffer
 * @fault: Non-zero if a fault was detected (ADRENO_*_FAULT bits)
 * @pending: Priority list of contexts waiting to submit command batches
 * @plist_lock: Spin lock to protect the pending queue
 * @cmdqueue: Queue of command batches currently in flight
 * @head: index of the head of the cmdqueue. This is the oldest pending
 * operation
 * @tail: index of the tail of the cmdqueue. This is the most recently
 * submitted operation
 * @work: work_struct to put the dispatcher in a work queue
 * @kobj: kobject for the dispatcher directory in the device sysfs node
 */
struct adreno_dispatcher {
	struct mutex mutex;
	unsigned int state;
	struct timer_list timer;
	struct timer_list fault_timer;
	unsigned int inflight;
	atomic_t fault;
	struct plist_head pending;
	spinlock_t plist_lock;
	struct kgsl_cmdbatch *cmdqueue[ADRENO_DISPATCH_CMDQUEUE_SIZE];
	unsigned int head;
	unsigned int tail;
	struct work_struct work;
	struct kobject kobj;
};
142
struct adreno_gpudev;

/*
 * struct adreno_device - Adreno-specific GPU device state. Wraps the
 * generic kgsl_device and adds the firmware images, ringbuffer,
 * dispatcher and fault tolerance configuration for one 3D core.
 */
struct adreno_device {
	struct kgsl_device dev;    /* Must be first field in this struct */
	unsigned long priv;		/* enum adreno_device_flags bits */
	unsigned int chip_id;		/* raw chip id, see ADRENO_CHIPID_* */
	enum adreno_gpurev gpurev;	/* decoded core revision */
	unsigned long gmem_base;
	unsigned int gmem_size;
	struct adreno_context *drawctxt_active;	/* currently active context */
	/* PFP microcode image and version */
	const char *pfp_fwfile;
	unsigned int *pfp_fw;
	size_t pfp_fw_size;
	unsigned int pfp_fw_version;
	/* PM4 microcode image and version */
	const char *pm4_fwfile;
	unsigned int *pm4_fw;
	size_t pm4_fw_size;
	unsigned int pm4_fw_version;
	struct adreno_ringbuffer ringbuffer;
	unsigned int mharb;
	struct adreno_gpudev *gpudev;	/* per-generation hooks and offsets */
	unsigned int wait_timeout;
	/* microcode jump table indices/addresses - presumably used when
	 * reloading firmware after power collapse; TODO confirm */
	unsigned int pm4_jt_idx;
	unsigned int pm4_jt_addr;
	unsigned int pfp_jt_idx;
	unsigned int pfp_jt_addr;
	unsigned int istore_size;
	unsigned int pix_shader_start;
	unsigned int instruction_size;
	unsigned int ib_check_level;
	/* fault tolerance settings (see KGSL_FT_* flags below) */
	unsigned int fast_hang_detect;
	unsigned int ft_policy;
	unsigned int long_ib_detect;
	unsigned int long_ib;
	unsigned int long_ib_ts;
	unsigned int ft_pf_policy;	/* KGSL_FT_PAGEFAULT_* flags */
	unsigned int gpulist_index;
	struct ocmem_buf *ocmem_hdl;	/* OCMEM allocation handle */
	unsigned int ocmem_base;
	unsigned int gpu_cycles;
	struct adreno_dispatcher dispatcher;
	struct kgsl_memdesc pwron_fixup;	/* power-on shader fixup buffer */
	unsigned int pwron_fixup_dwords;
};
187
/**
 * enum adreno_device_flags - Private flags for the adreno_device
 * @ADRENO_DEVICE_PWRON - Set during init after a power collapse
 * @ADRENO_DEVICE_PWRON_FIXUP - Set if the target requires the shader fixup
 * after power collapse
 * @ADRENO_DEVICE_INITIALIZED - presumably set once device initialization
 * has completed - TODO confirm against the code that sets it
 */
enum adreno_device_flags {
	ADRENO_DEVICE_PWRON = 0,
	ADRENO_DEVICE_PWRON_FIXUP = 1,
	ADRENO_DEVICE_INITIALIZED = 2,
};
199
/* Flags for adreno_perfcounter_get()/put() */
#define PERFCOUNTER_FLAG_NONE 0x0
#define PERFCOUNTER_FLAG_KERNEL 0x1

/* Structs to maintain the list of active performance counters */

/**
 * struct adreno_perfcount_register: register state
 * @countable: countable the register holds
 * @kernelcount: number of kernel users of the register
 * @usercount: number of user space users of the register
 * @offset: register hardware offset
 * @load_bit: The bit number in LOAD register which corresponds to this counter
 * @select: The countable register offset
 */
struct adreno_perfcount_register {
	unsigned int countable;
	unsigned int kernelcount;
	unsigned int usercount;
	unsigned int offset;
	int load_bit;
	unsigned int select;
};
222
/**
 * struct adreno_perfcount_group: registers for a hardware counter group
 * @regs: available registers for this group
 * @reg_count: total registers for this group
 */
struct adreno_perfcount_group {
	struct adreno_perfcount_register *regs;
	unsigned int reg_count;
};
232
/**
 * struct adreno_perfcounters: all available perfcounter groups
 * @groups: available groups for this device
 * @group_count: total groups for this device
 */
struct adreno_perfcounters {
	struct adreno_perfcount_group *groups;
	unsigned int group_count;
};

/* Build a {regs, reg_count} group initializer from a core's counter array */
#define ADRENO_PERFCOUNTER_GROUP(core, name) { core##_perfcounters_##name, \
	ARRAY_SIZE(core##_perfcounters_##name) }
245
/**
 * enum adreno_regs: List of registers that are used in kgsl driver for all
 * 3D devices. Each device type has a different offset value for the same
 * register, so an array of register offsets is declared for every device
 * and is indexed by the enumeration values defined in this enum
 */
enum adreno_regs {
	ADRENO_REG_CP_DEBUG,
	ADRENO_REG_CP_ME_RAM_WADDR,
	ADRENO_REG_CP_ME_RAM_DATA,
	ADRENO_REG_CP_PFP_UCODE_DATA,
	ADRENO_REG_CP_PFP_UCODE_ADDR,
	ADRENO_REG_CP_WFI_PEND_CTR,
	ADRENO_REG_CP_RB_BASE,
	ADRENO_REG_CP_RB_RPTR_ADDR,
	ADRENO_REG_CP_RB_RPTR,
	ADRENO_REG_CP_RB_WPTR,
	ADRENO_REG_CP_PROTECT_CTRL,
	ADRENO_REG_CP_ME_CNTL,
	ADRENO_REG_CP_RB_CNTL,
	ADRENO_REG_CP_IB1_BASE,
	ADRENO_REG_CP_IB1_BUFSZ,
	ADRENO_REG_CP_IB2_BASE,
	ADRENO_REG_CP_IB2_BUFSZ,
	ADRENO_REG_CP_TIMESTAMP,
	ADRENO_REG_CP_ME_RAM_RADDR,
	ADRENO_REG_SCRATCH_ADDR,
	ADRENO_REG_SCRATCH_UMSK,
	ADRENO_REG_SCRATCH_REG2,
	ADRENO_REG_RBBM_STATUS,
	ADRENO_REG_RBBM_PERFCTR_CTL,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD0,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD1,
	ADRENO_REG_RBBM_PERFCTR_LOAD_CMD2,
	ADRENO_REG_RBBM_PERFCTR_PWR_1_LO,
	ADRENO_REG_RBBM_INT_0_MASK,
	ADRENO_REG_RBBM_INT_0_STATUS,
	ADRENO_REG_RBBM_AHB_ERROR_STATUS,
	ADRENO_REG_RBBM_PM_OVERRIDE2,
	ADRENO_REG_RBBM_AHB_CMD,
	ADRENO_REG_RBBM_INT_CLEAR_CMD,
	ADRENO_REG_VPC_DEBUG_RAM_SEL,
	ADRENO_REG_VPC_DEBUG_RAM_READ,
	ADRENO_REG_VSC_PIPE_DATA_ADDRESS_0,
	ADRENO_REG_VSC_PIPE_DATA_LENGTH_7,
	ADRENO_REG_VSC_SIZE_ADDRESS,
	ADRENO_REG_VFD_CONTROL_0,
	ADRENO_REG_VFD_FETCH_INSTR_0_0,
	ADRENO_REG_VFD_FETCH_INSTR_1_F,
	ADRENO_REG_VFD_INDEX_MAX,
	ADRENO_REG_SP_VS_PVT_MEM_ADDR_REG,
	ADRENO_REG_SP_FS_PVT_MEM_ADDR_REG,
	ADRENO_REG_SP_VS_OBJ_START_REG,
	ADRENO_REG_SP_FS_OBJ_START_REG,
	ADRENO_REG_PA_SC_AA_CONFIG,
	ADRENO_REG_SQ_GPR_MANAGEMENT,
	ADRENO_REG_SQ_INST_STORE_MANAGMENT,
	ADRENO_REG_TC_CNTL_STATUS,
	ADRENO_REG_TP0_CHICKEN,
	ADRENO_REG_RBBM_RBBM_CTL,
	ADRENO_REG_REGISTER_MAX,	/* must be last: sizes the offset array */
};
308
/**
 * struct adreno_reg_offsets: Holds an array of register offsets
 * @offsets: Offset array of size defined by enum adreno_regs
 * @offset_0: This is the index of the register in the offset array whose
 * value is 0. 0 is a valid register offset and during initialization of the
 * offset array we need to know if an offset value is correctly defined to 0
 */
struct adreno_reg_offsets {
	unsigned int *const offsets;
	enum adreno_regs offset_0;
};

/* Sentinel for registers that do not exist on a given core */
#define ADRENO_REG_UNUSED	0xFFFFFFFF
/* Designated-initializer helper for populating an offsets array */
#define ADRENO_REG_DEFINE(_offset, _reg) [_offset] = _reg
323
/*
 * struct adreno_vbif_data - Describes a vbif register/value pair
 * @reg: Offset to vbif register
 * @val: The value that should be programmed in the register at reg
 */
struct adreno_vbif_data {
	unsigned int reg;
	unsigned int val;
};
333
/*
 * struct adreno_vbif_platform - Holds an array of vbif reg value pairs
 * for a particular core
 * @devfunc: Pointer to the platform/core identification function
 * (one of the adreno_is_* predicates)
 * @vbif: Array of reg value pairs for vbif registers, terminated by a
 * zero register offset (see adreno_vbif_start())
 */
struct adreno_vbif_platform {
	int(*devfunc)(struct adreno_device *);
	const struct adreno_vbif_data *vbif;
};
344
/*
 * struct adreno_gpudev - per-generation description and function table
 * (one instance each for A2xx/A3xx/A4xx, see the externs below)
 */
struct adreno_gpudev {
	/*
	 * These registers are in a different location on different devices,
	 * so define them in the structure and use them as variables.
	 */
	const struct adreno_reg_offsets *reg_offsets;
	/* keeps track of when we need to execute the draw workaround code */
	int ctx_switches_since_last_draw;

	/* performance counter groups implemented by this core */
	struct adreno_perfcounters *perfcounters;

	/* GPU specific function hooks */
	int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
	int (*ctxt_save)(struct adreno_device *, struct adreno_context *);
	int (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
	int (*ctxt_draw_workaround)(struct adreno_device *,
					struct adreno_context *);
	irqreturn_t (*irq_handler)(struct adreno_device *);
	void (*irq_control)(struct adreno_device *, int);
	unsigned int (*irq_pending)(struct adreno_device *);
	void * (*snapshot)(struct adreno_device *, void *, int *, int);
	int (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
	int (*perfcounter_init)(struct adreno_device *);
	void (*perfcounter_close)(struct adreno_device *);
	void (*start)(struct adreno_device *);
	unsigned int (*busy_cycles)(struct adreno_device *);
	int (*perfcounter_enable)(struct adreno_device *, unsigned int group,
		unsigned int counter, unsigned int countable);
	uint64_t (*perfcounter_read)(struct adreno_device *adreno_dev,
		unsigned int group, unsigned int counter);
	/* coresight trace bus hooks */
	int (*coresight_enable) (struct kgsl_device *device);
	void (*coresight_disable) (struct kgsl_device *device);
	void (*coresight_config_debug_reg) (struct kgsl_device *device,
			int debug_reg, unsigned int val);
	void (*postmortem_dump)(struct adreno_device *adreno_dev);
	void (*soft_reset)(struct adreno_device *device);
};
382
/* Number of registers sampled by the fault tolerance hang detector */
#define FT_DETECT_REGS_COUNT 12

/*
 * struct log_field - one flag/description pair consumed by
 * adreno_dump_fields() when logging decoded register state
 * @show: true if this field should be printed
 * @display: human readable label for the field
 */
struct log_field {
	bool show;
	const char *display;
};
389
/* Fault Tolerance policy flags (bit numbers, used with BIT()) */
#define  KGSL_FT_OFF                      0
#define  KGSL_FT_REPLAY                   1
#define  KGSL_FT_SKIPIB                   2
#define  KGSL_FT_SKIPFRAME                3
#define  KGSL_FT_DISABLE                  4
#define  KGSL_FT_TEMP_DISABLE             5
/* REPLAY and SKIPIB are distinct bits, so '+' here is equivalent to '|' */
#define  KGSL_FT_DEFAULT_POLICY (BIT(KGSL_FT_REPLAY) + BIT(KGSL_FT_SKIPIB))

/* This internal bit is used to skip the PM dump on replayed command batches */
#define  KGSL_FT_SKIP_PMDUMP              31

/* Pagefault policy flags */
#define KGSL_FT_PAGEFAULT_INT_ENABLE         BIT(0)
#define KGSL_FT_PAGEFAULT_GPUHALT_ENABLE     BIT(1)
#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE   BIT(2)
#define KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT    BIT(3)
#define KGSL_FT_PAGEFAULT_DEFAULT_POLICY     KGSL_FT_PAGEFAULT_INT_ENABLE

/* bit/name pairs for pretty-printing the fault tolerance policy */
#define ADRENO_FT_TYPES \
	{ BIT(KGSL_FT_OFF), "off" }, \
	{ BIT(KGSL_FT_REPLAY), "replay" }, \
	{ BIT(KGSL_FT_SKIPIB), "skipib" }, \
	{ BIT(KGSL_FT_SKIPFRAME), "skipframe" }, \
	{ BIT(KGSL_FT_DISABLE), "disable" }, \
	{ BIT(KGSL_FT_TEMP_DISABLE), "temp" }
416
/* Per-generation function tables */
extern struct adreno_gpudev adreno_a2xx_gpudev;
extern struct adreno_gpudev adreno_a3xx_gpudev;
extern struct adreno_gpudev adreno_a4xx_gpudev;

/* A2XX register sets defined in adreno_a2xx.c */
extern const unsigned int a200_registers[];
extern const unsigned int a220_registers[];
extern const unsigned int a225_registers[];
extern const unsigned int a200_registers_count;
extern const unsigned int a220_registers_count;
extern const unsigned int a225_registers_count;

/* A3XX register set defined in adreno_a3xx.c */
extern const unsigned int a3xx_registers[];
extern const unsigned int a3xx_registers_count;

extern const unsigned int a3xx_hlsq_registers[];
extern const unsigned int a3xx_hlsq_registers_count;

extern const unsigned int a330_registers[];
extern const unsigned int a330_registers_count;

/* A4XX register set defined in adreno_a4xx.c */
extern const unsigned int a4xx_registers[];
extern const unsigned int a4xx_registers_count;

/* registers polled by the hang detector; FT_DETECT_REGS_COUNT entries -
 * TODO confirm against the definition site */
extern unsigned int ft_detect_regs[];
444
/* Coresight trace bus setup/teardown */
int adreno_coresight_enable(struct coresight_device *csdev);
void adreno_coresight_disable(struct coresight_device *csdev);
void adreno_coresight_remove(struct platform_device *pdev);
int adreno_coresight_init(struct platform_device *pdev);

/* Wait for, or query, GPU idle */
int adreno_idle(struct kgsl_device *device);
bool adreno_isidle(struct kgsl_device *device);

/* Read a word from shader (istore) memory */
void adreno_shadermem_regread(struct kgsl_device *device,
						unsigned int offsetwords,
						unsigned int *value);

/* Postmortem dump helpers */
int adreno_dump(struct kgsl_device *device, int manual);
void adreno_dump_fields(struct kgsl_device *device,
			const char *start, const struct log_field *lines,
			int num);
unsigned int adreno_a3xx_rbbm_clock_ctl_default(struct adreno_device
							*adreno_dev);

/* GPU address to memory descriptor lookups */
struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
						phys_addr_t pt_base,
						unsigned int gpuaddr,
						unsigned int size,
						struct kgsl_mem_entry **entry);

uint8_t *adreno_convertaddr(struct kgsl_device *device,
		phys_addr_t pt_base, unsigned int gpuaddr, unsigned int size,
		struct kgsl_mem_entry **entry);

struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
	phys_addr_t pt_base, unsigned int gpuaddr, unsigned int size);

/* Capture a GPU state snapshot into the provided buffer */
void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
		int hang);

/* Dispatcher lifecycle and command batch submission */
void adreno_dispatcher_start(struct adreno_device *adreno_dev);
int adreno_dispatcher_init(struct adreno_device *adreno_dev);
void adreno_dispatcher_close(struct adreno_device *adreno_dev);
int adreno_dispatcher_idle(struct adreno_device *adreno_dev,
		unsigned int timeout);
void adreno_dispatcher_irq_fault(struct kgsl_device *device);
void adreno_dispatcher_stop(struct adreno_device *adreno_dev);

int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
		struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
		uint32_t *timestamp);

void adreno_dispatcher_schedule(struct kgsl_device *device);
void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
void adreno_dispatcher_resume(struct adreno_device *adreno_dev);
void adreno_dispatcher_queue_context(struct kgsl_device *device,
		struct adreno_context *drawctxt);
int adreno_reset(struct kgsl_device *device);

/* Fault tolerance sysfs nodes */
int adreno_ft_init_sysfs(struct kgsl_device *device);
void adreno_ft_uninit_sysfs(struct kgsl_device *device);

/* Reserve / release a hardware performance counter */
int adreno_perfcounter_get(struct adreno_device *adreno_dev,
	unsigned int groupid, unsigned int countable, unsigned int *offset,
	unsigned int flags);

int adreno_perfcounter_put(struct adreno_device *adreno_dev,
	unsigned int groupid, unsigned int countable, unsigned int flags);

int adreno_soft_reset(struct kgsl_device *device);

int adreno_a3xx_pwron_fixup_init(struct adreno_device *adreno_dev);
512
513static inline int adreno_is_a200(struct adreno_device *adreno_dev)
514{
515 return (adreno_dev->gpurev == ADRENO_REV_A200);
516}
517
518static inline int adreno_is_a203(struct adreno_device *adreno_dev)
519{
520 return (adreno_dev->gpurev == ADRENO_REV_A203);
521}
522
523static inline int adreno_is_a205(struct adreno_device *adreno_dev)
524{
525 return (adreno_dev->gpurev == ADRENO_REV_A205);
526}
527
528static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
529{
530 return (adreno_dev->gpurev <= 209);
531}
532
533static inline int adreno_is_a220(struct adreno_device *adreno_dev)
534{
535 return (adreno_dev->gpurev == ADRENO_REV_A220);
536}
537
538static inline int adreno_is_a225(struct adreno_device *adreno_dev)
539{
540 return (adreno_dev->gpurev == ADRENO_REV_A225);
541}
542
543static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
544{
545 return (adreno_dev->gpurev == ADRENO_REV_A220 ||
546 adreno_dev->gpurev == ADRENO_REV_A225);
547}
548
549static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
550{
551 return (adreno_dev->gpurev <= 299);
552}
553
554static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
555{
556 return ((adreno_dev->gpurev >= 300) && (adreno_dev->gpurev < 400));
557}
558
559static inline int adreno_is_a305(struct adreno_device *adreno_dev)
560{
561 return (adreno_dev->gpurev == ADRENO_REV_A305);
562}
563
564static inline int adreno_is_a305b(struct adreno_device *adreno_dev)
565{
566 return (adreno_dev->gpurev == ADRENO_REV_A305B);
567}
568
569static inline int adreno_is_a305c(struct adreno_device *adreno_dev)
570{
571 return (adreno_dev->gpurev == ADRENO_REV_A305C);
572}
573
574static inline int adreno_is_a320(struct adreno_device *adreno_dev)
575{
576 return (adreno_dev->gpurev == ADRENO_REV_A320);
577}
578
579static inline int adreno_is_a330(struct adreno_device *adreno_dev)
580{
581 return (adreno_dev->gpurev == ADRENO_REV_A330);
582}
583
584static inline int adreno_is_a330v2(struct adreno_device *adreno_dev)
585{
586 return ((adreno_dev->gpurev == ADRENO_REV_A330) &&
587 (ADRENO_CHIPID_PATCH(adreno_dev->chip_id) > 0));
588}
589
590
591static inline int adreno_is_a4xx(struct adreno_device *adreno_dev)
592{
593 return (adreno_dev->gpurev >= 400);
594}
595
596static inline int adreno_is_a420(struct adreno_device *adreno_dev)
597{
598 return (adreno_dev->gpurev == ADRENO_REV_A420);
599}
600
601static inline int adreno_rb_ctxtswitch(unsigned int *cmd)
602{
603 return (cmd[0] == cp_nop_packet(1) &&
604 cmd[1] == KGSL_CONTEXT_TO_MEM_IDENTIFIER);
605}
606
607/**
608 * adreno_context_timestamp() - Return the last queued timestamp for the context
609 * @k_ctxt: Pointer to the KGSL context to query
610 * @rb: Pointer to the ringbuffer structure for the GPU
611 *
612 * Return the last queued context for the given context. This is used to verify
613 * that incoming requests are not using an invalid (unsubmitted) timestamp
614 */
615static inline int adreno_context_timestamp(struct kgsl_context *k_ctxt,
616 struct adreno_ringbuffer *rb)
617{
618 if (k_ctxt) {
619 struct adreno_context *a_ctxt = ADRENO_CONTEXT(k_ctxt);
620 return a_ctxt->timestamp;
621 }
622 return rb->global_ts;
623}
624
625/**
626 * adreno_encode_istore_size - encode istore size in CP format
627 * @adreno_dev - The 3D device.
628 *
629 * Encode the istore size into the format expected that the
630 * CP_SET_SHADER_BASES and CP_ME_INIT commands:
631 * bits 31:29 - istore size as encoded by this function
632 * bits 27:16 - vertex shader start offset in instructions
633 * bits 11:0 - pixel shader start offset in instructions.
634 */
635static inline int adreno_encode_istore_size(struct adreno_device *adreno_dev)
636{
637 unsigned int size;
638 /* in a225 the CP microcode multiplies the encoded
639 * value by 3 while decoding.
640 */
641 if (adreno_is_a225(adreno_dev))
642 size = adreno_dev->istore_size/3;
643 else
644 size = adreno_dev->istore_size;
645
646 return (ilog2(size) - 5) << 29;
647}
648
649static inline int __adreno_add_idle_indirect_cmds(unsigned int *cmds,
650 unsigned int nop_gpuaddr)
651{
652 /* Adding an indirect buffer ensures that the prefetch stalls until
653 * the commands in indirect buffer have completed. We need to stall
654 * prefetch with a nop indirect buffer when updating pagetables
655 * because it provides stabler synchronization */
656 *cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
657 *cmds++ = nop_gpuaddr;
658 *cmds++ = 2;
659 *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
660 *cmds++ = 0x00000000;
661 return 5;
662}
663
664static inline int adreno_add_change_mh_phys_limit_cmds(unsigned int *cmds,
665 unsigned int new_phys_limit,
666 unsigned int nop_gpuaddr)
667{
668 unsigned int *start = cmds;
669
670 *cmds++ = cp_type0_packet(MH_MMU_MPU_END, 1);
671 *cmds++ = new_phys_limit;
672 cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
673 return cmds - start;
674}
675
676static inline int adreno_add_bank_change_cmds(unsigned int *cmds,
677 int cur_ctx_bank,
678 unsigned int nop_gpuaddr)
679{
680 unsigned int *start = cmds;
681
682 *cmds++ = cp_type0_packet(REG_CP_STATE_DEBUG_INDEX, 1);
683 *cmds++ = (cur_ctx_bank ? 0 : 0x20);
684 cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
685 return cmds - start;
686}
687
688/*
689 * adreno_read_cmds - Add pm4 packets to perform read
690 * @device - Pointer to device structure
691 * @cmds - Pointer to memory where read commands need to be added
692 * @addr - gpu address of the read
693 * @val - The GPU will wait until the data at address addr becomes
694 * equal to value
695 */
696static inline int adreno_add_read_cmds(struct kgsl_device *device,
697 unsigned int *cmds, unsigned int addr,
698 unsigned int val, unsigned int nop_gpuaddr)
699{
700 unsigned int *start = cmds;
701
702 *cmds++ = cp_type3_packet(CP_WAIT_REG_MEM, 5);
703 /* MEM SPACE = memory, FUNCTION = equals */
704 *cmds++ = 0x13;
705 *cmds++ = addr;
706 *cmds++ = val;
707 *cmds++ = 0xFFFFFFFF;
708 *cmds++ = 0xFFFFFFFF;
709 cmds += __adreno_add_idle_indirect_cmds(cmds, nop_gpuaddr);
710 return cmds - start;
711}
712
713/*
714 * adreno_idle_cmds - Add pm4 packets for GPU idle
715 * @adreno_dev - Pointer to device structure
716 * @cmds - Pointer to memory where idle commands need to be added
717 */
718static inline int adreno_add_idle_cmds(struct adreno_device *adreno_dev,
719 unsigned int *cmds)
720{
721 unsigned int *start = cmds;
722
723 *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
724 *cmds++ = 0x00000000;
725
726 if ((adreno_dev->gpurev == ADRENO_REV_A305) ||
727 (adreno_dev->gpurev == ADRENO_REV_A305C) ||
728 (adreno_dev->gpurev == ADRENO_REV_A320)) {
729 *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
730 *cmds++ = 0x00000000;
731 }
732
733 return cmds - start;
734}
735
736/*
737 * adreno_wait_reg_eq() - Add a CP_WAIT_REG_EQ command
738 * @cmds: Pointer to memory where commands are to be added
739 * @addr: Regiater address to poll for
740 * @val: Value to poll for
741 * @mask: The value against which register value is masked
742 * @interval: wait interval
743 */
744static inline int adreno_wait_reg_eq(unsigned int *cmds, unsigned int addr,
745 unsigned int val, unsigned int mask,
746 unsigned int interval)
747{
748 unsigned int *start = cmds;
749 *cmds++ = cp_type3_packet(CP_WAIT_REG_EQ, 4);
750 *cmds++ = addr;
751 *cmds++ = val;
752 *cmds++ = mask;
753 *cmds++ = interval;
754 return cmds - start;
755}
756
757/*
758 * adreno_checkreg_off() - Checks the validity of a register enum
759 * @adreno_dev: Pointer to adreno device
760 * @offset_name: The register enum that is checked
761 */
762static inline bool adreno_checkreg_off(struct adreno_device *adreno_dev,
763 enum adreno_regs offset_name)
764{
765 if (offset_name >= ADRENO_REG_REGISTER_MAX ||
766 ADRENO_REG_UNUSED ==
767 adreno_dev->gpudev->reg_offsets->offsets[offset_name]) {
768 BUG_ON(1);
769 }
770 return true;
771}
772
/*
 * adreno_readreg() - Read a register by getting its offset from the
 * offset array defined in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be read
 * @val: Register value read is placed here
 */
static inline void adreno_readreg(struct adreno_device *adreno_dev,
				enum adreno_regs offset_name, unsigned int *val)
{
	struct kgsl_device *device = &adreno_dev->dev;
	/* checkreg BUGs on an invalid enum, so this is effectively always true */
	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_regread(device,
			adreno_dev->gpudev->reg_offsets->offsets[offset_name],
			val);
}
789
/*
 * adreno_writereg() - Write a register by getting its offset from the
 * offset array defined in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum that is to be written
 * @val: Value to write
 */
static inline void adreno_writereg(struct adreno_device *adreno_dev,
				enum adreno_regs offset_name, unsigned int val)
{
	struct kgsl_device *device = &adreno_dev->dev;
	/* checkreg BUGs on an invalid enum, so this is effectively always true */
	if (adreno_checkreg_off(adreno_dev, offset_name))
		kgsl_regwrite(device,
		adreno_dev->gpudev->reg_offsets->offsets[offset_name], val);
}
805
/*
 * adreno_getreg() - Returns the offset value of a register from the
 * register offset array in the gpudev node
 * @adreno_dev: Pointer to the adreno device
 * @offset_name: The register enum whose offset is returned
 */
static inline unsigned int adreno_getreg(struct adreno_device *adreno_dev,
				enum adreno_regs offset_name)
{
	/*
	 * NOTE(review): adreno_checkreg_off() always returns true (it BUGs
	 * on invalid input), so this fallback branch is currently dead code.
	 */
	if (!adreno_checkreg_off(adreno_dev, offset_name))
		return ADRENO_REG_REGISTER_MAX;
	return adreno_dev->gpudev->reg_offsets->offsets[offset_name];
}
819
/**
 * adreno_gpu_fault() - Return the current fault state of the GPU
 * @adreno_dev: A pointer to the adreno_device to query
 *
 * Return 0 if there is no fault or positive with the last type of fault that
 * occurred
 */
static inline unsigned int adreno_gpu_fault(struct adreno_device *adreno_dev)
{
	/* pair with the smp_wmb() in adreno_set_gpu_fault() */
	smp_rmb();
	return atomic_read(&adreno_dev->dispatcher.fault);
}
832
/**
 * adreno_set_gpu_fault() - Set the current fault status of the GPU
 * @adreno_dev: A pointer to the adreno_device to set
 * @state: fault state to set (an ADRENO_*_FAULT bit)
 *
 */
static inline void adreno_set_gpu_fault(struct adreno_device *adreno_dev,
	int state)
{
	/* only set the fault bit w/o overwriting other bits */
	/*
	 * NOTE(review): atomic_add() is only equivalent to a bit OR when
	 * @state's bit is not already set in dispatcher.fault; adding an
	 * already-set bit would carry into the other fault bits. Verify that
	 * callers never report the same fault twice before it is cleared.
	 */
	atomic_add(state, &adreno_dev->dispatcher.fault);
	smp_wmb();
}
846
847/*
848 * adreno_vbif_start() - Program VBIF registers, called in device start
849 * @device: Pointer to device whose vbif data is to be programmed
850 * @vbif_platforms: list register value pair of vbif for a family
851 * of adreno cores
852 * @num_platforms: Number of platforms contained in vbif_platforms
853 */
854static inline void adreno_vbif_start(struct kgsl_device *device,
855 const struct adreno_vbif_platform *vbif_platforms,
856 int num_platforms)
857{
858 int i;
859 const struct adreno_vbif_data *vbif = NULL;
860 struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
861
862 for (i = 0; i < num_platforms; i++) {
863 if (vbif_platforms[i].devfunc(adreno_dev)) {
864 vbif = vbif_platforms[i].vbif;
865 break;
866 }
867 }
868 BUG_ON(vbif == NULL);
869 while (vbif->reg != 0) {
870 kgsl_regwrite(device, vbif->reg, vbif->val);
871 vbif++;
872 }
873}
874
875#endif /*__ADRENO_H */