#ifndef _MSM_KGSL_H
#define _MSM_KGSL_H

/*
 * The KGSL version has proven not to be very useful in userspace if features
 * are cherry picked into other trees out of order so it is frozen as of 3.14.
 * It is left here for backwards compatibility and as a reminder that
 * software releases are never linear. Also, I like pie.
 */

#define KGSL_VERSION_MAJOR        3
#define KGSL_VERSION_MINOR        14

/* context flags */
#define KGSL_CONTEXT_SAVE_GMEM          0x00000001
#define KGSL_CONTEXT_NO_GMEM_ALLOC      0x00000002
#define KGSL_CONTEXT_SUBMIT_IB_LIST     0x00000004
#define KGSL_CONTEXT_CTX_SWITCH         0x00000008
#define KGSL_CONTEXT_PREAMBLE           0x00000010
#define KGSL_CONTEXT_TRASH_STATE        0x00000020
#define KGSL_CONTEXT_PER_CONTEXT_TS     0x00000040
#define KGSL_CONTEXT_USER_GENERATED_TS  0x00000080
#define KGSL_CONTEXT_END_OF_FRAME       0x00000100
#define KGSL_CONTEXT_NO_FAULT_TOLERANCE 0x00000200
/* bits [12:15] are reserved for future use */
#define KGSL_CONTEXT_TYPE_MASK          0x01F00000
#define KGSL_CONTEXT_TYPE_SHIFT         20

#define KGSL_CONTEXT_TYPE_ANY           0
#define KGSL_CONTEXT_TYPE_GL            1
#define KGSL_CONTEXT_TYPE_CL            2
#define KGSL_CONTEXT_TYPE_C2D           3
#define KGSL_CONTEXT_TYPE_RS            4

#define KGSL_CONTEXT_INVALID 0xffffffff
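
/*
 * Example (editor's illustration, not part of the original header): the
 * context type is packed into the same flags word used at context create
 * time via the TYPE mask/shift above. A plausible way to build the flags
 * for a GL context with per-context timestamps:
 *
 *        unsigned int flags = KGSL_CONTEXT_PER_CONTEXT_TS |
 *                ((KGSL_CONTEXT_TYPE_GL << KGSL_CONTEXT_TYPE_SHIFT) &
 *                 KGSL_CONTEXT_TYPE_MASK);
 */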

/* --- Memory allocation flags --- */

/* General allocation hints */
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
#define KGSL_MEMFLAGS_USE_CPU_MAP 0x10000000

/* Memory caching hints */
#define KGSL_CACHEMODE_MASK  0x0C000000
#define KGSL_CACHEMODE_SHIFT 26

#define KGSL_CACHEMODE_WRITECOMBINE 0
#define KGSL_CACHEMODE_UNCACHED     1
#define KGSL_CACHEMODE_WRITETHROUGH 2
#define KGSL_CACHEMODE_WRITEBACK    3

/* Memory types for which allocations are made */
#define KGSL_MEMTYPE_MASK  0x0000FF00
#define KGSL_MEMTYPE_SHIFT 8

#define KGSL_MEMTYPE_OBJECTANY            0
#define KGSL_MEMTYPE_FRAMEBUFFER          1
#define KGSL_MEMTYPE_RENDERBUFFER         2
#define KGSL_MEMTYPE_ARRAYBUFFER          3
#define KGSL_MEMTYPE_ELEMENTARRAYBUFFER   4
#define KGSL_MEMTYPE_VERTEXARRAYBUFFER    5
#define KGSL_MEMTYPE_TEXTURE              6
#define KGSL_MEMTYPE_SURFACE              7
#define KGSL_MEMTYPE_EGL_SURFACE          8
#define KGSL_MEMTYPE_GL                   9
#define KGSL_MEMTYPE_CL                   10
#define KGSL_MEMTYPE_CL_BUFFER_MAP        11
#define KGSL_MEMTYPE_CL_BUFFER_NOMAP      12
#define KGSL_MEMTYPE_CL_IMAGE_MAP         13
#define KGSL_MEMTYPE_CL_IMAGE_NOMAP       14
#define KGSL_MEMTYPE_CL_KERNEL_STACK      15
#define KGSL_MEMTYPE_COMMAND              16
#define KGSL_MEMTYPE_2D                   17
#define KGSL_MEMTYPE_EGL_IMAGE            18
#define KGSL_MEMTYPE_EGL_SHADOW           19
#define KGSL_MEMTYPE_MULTISAMPLE          20
#define KGSL_MEMTYPE_KERNEL               255

/*
 * Alignment hint, passed as the power of 2 exponent.
 * i.e. 4k (2^12) would be 12, 64k (2^16) would be 16.
 */
#define KGSL_MEMALIGN_MASK  0x00FF0000
#define KGSL_MEMALIGN_SHIFT 16
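
/*
 * Example (editor's sketch, not part of the original header): the cache
 * mode, memory type and alignment hints all share the one 'flags' word
 * passed to the allocation ioctls. One way a caller might request a
 * write-combined, 8 KB aligned texture allocation:
 *
 *        unsigned int flags =
 *                (KGSL_CACHEMODE_WRITECOMBINE << KGSL_CACHEMODE_SHIFT) |
 *                (KGSL_MEMTYPE_TEXTURE << KGSL_MEMTYPE_SHIFT) |
 *                (13 << KGSL_MEMALIGN_SHIFT);   // 2^13 = 8 KB alignment
 */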

/* --- generic KGSL flag values --- */

#define KGSL_FLAGS_NORMALMODE   0x00000000
#define KGSL_FLAGS_SAFEMODE     0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED  0x00000004
#define KGSL_FLAGS_STARTED      0x00000008
#define KGSL_FLAGS_ACTIVE       0x00000010
#define KGSL_FLAGS_RESERVED0    0x00000020
#define KGSL_FLAGS_RESERVED1    0x00000040
#define KGSL_FLAGS_RESERVED2    0x00000080
#define KGSL_FLAGS_SOFT_RESET   0x00000100
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200

/* Clock flags to show which clocks should be controlled by a given platform */
#define KGSL_CLK_SRC       0x00000001
#define KGSL_CLK_CORE      0x00000002
#define KGSL_CLK_IFACE     0x00000004
#define KGSL_CLK_MEM       0x00000008
#define KGSL_CLK_MEM_IFACE 0x00000010
#define KGSL_CLK_AXI       0x00000020

/* Server Side Sync Timeout in milliseconds */
#define KGSL_SYNCOBJ_SERVER_TIMEOUT 2000

/*
 * Reset status values for context
 */
enum kgsl_ctx_reset_stat {
        KGSL_CTX_STAT_NO_ERROR                   = 0x00000000,
        KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT   = 0x00000001,
        KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT = 0x00000002,
        KGSL_CTX_STAT_UNKNOWN_CONTEXT_RESET_EXT  = 0x00000003
};

#define KGSL_CONVERT_TO_MBPS(val) \
        (val*1000*1000U)

/* device id */
enum kgsl_deviceid {
        KGSL_DEVICE_3D0 = 0x00000000,
        KGSL_DEVICE_2D0 = 0x00000001,
        KGSL_DEVICE_2D1 = 0x00000002,
        KGSL_DEVICE_MAX = 0x00000003
};

enum kgsl_user_mem_type {
        KGSL_USER_MEM_TYPE_PMEM   = 0x00000000,
        KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001,
        KGSL_USER_MEM_TYPE_ADDR   = 0x00000002,
        KGSL_USER_MEM_TYPE_ION    = 0x00000003,
        KGSL_USER_MEM_TYPE_MAX    = 0x00000004,
};

struct kgsl_devinfo {

        unsigned int device_id;
        /* chip revision id
         * coreid:8 majorrev:8 minorrev:8 patch:8
         */
        unsigned int chip_id;
        unsigned int mmu_enabled;
        unsigned int gmem_gpubaseaddr;
        /*
         * This field contains the adreno revision
         * number 200, 205, 220, etc...
         */
        unsigned int gpu_id;
        unsigned int gmem_sizebytes;
};

/* this structure defines the region of memory that can be mmap()ed from this
   driver. The timestamp fields are volatile because they are written by the
   GPU
*/
struct kgsl_devmemstore {
        volatile unsigned int soptimestamp;
        unsigned int sbz;
        volatile unsigned int eoptimestamp;
        unsigned int sbz2;
        volatile unsigned int ts_cmp_enable;
        unsigned int sbz3;
        volatile unsigned int ref_wait_ts;
        unsigned int sbz4;
        unsigned int current_context;
        unsigned int sbz5;
};

#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
        ((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
         offsetof(struct kgsl_devmemstore, field))

/* timestamp id */
enum kgsl_timestamp_type {
        KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
        KGSL_TIMESTAMP_RETIRED  = 0x00000002, /* end-of-pipeline timestamp */
        KGSL_TIMESTAMP_QUEUED   = 0x00000003,
};

/* property types - used with kgsl_device_getproperty */
enum kgsl_property_type {
        KGSL_PROP_DEVICE_INFO     = 0x00000001,
        KGSL_PROP_DEVICE_SHADOW   = 0x00000002,
        KGSL_PROP_DEVICE_POWER    = 0x00000003,
        KGSL_PROP_SHMEM           = 0x00000004,
        KGSL_PROP_SHMEM_APERTURES = 0x00000005,
        KGSL_PROP_MMU_ENABLE      = 0x00000006,
        KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
        KGSL_PROP_VERSION         = 0x00000008,
        KGSL_PROP_GPU_RESET_STAT  = 0x00000009,
        KGSL_PROP_PWRCTRL         = 0x0000000E,
};

struct kgsl_shadowprop {
        unsigned int gpuaddr;
        unsigned int size;
        unsigned int flags; /* contains KGSL_FLAGS_ values */
};

struct kgsl_version {
        unsigned int drv_major;
        unsigned int drv_minor;
        unsigned int dev_major;
        unsigned int dev_minor;
};

/* Performance counter groups */

#define KGSL_PERFCOUNTER_GROUP_CP       0x0
#define KGSL_PERFCOUNTER_GROUP_RBBM     0x1
#define KGSL_PERFCOUNTER_GROUP_PC       0x2
#define KGSL_PERFCOUNTER_GROUP_VFD      0x3
#define KGSL_PERFCOUNTER_GROUP_HLSQ     0x4
#define KGSL_PERFCOUNTER_GROUP_VPC      0x5
#define KGSL_PERFCOUNTER_GROUP_TSE      0x6
#define KGSL_PERFCOUNTER_GROUP_RAS      0x7
#define KGSL_PERFCOUNTER_GROUP_UCHE     0x8
#define KGSL_PERFCOUNTER_GROUP_TP       0x9
#define KGSL_PERFCOUNTER_GROUP_SP       0xA
#define KGSL_PERFCOUNTER_GROUP_RB       0xB
#define KGSL_PERFCOUNTER_GROUP_PWR      0xC
#define KGSL_PERFCOUNTER_GROUP_VBIF     0xD
#define KGSL_PERFCOUNTER_GROUP_VBIF_PWR 0xE

#define KGSL_PERFCOUNTER_NOT_USED 0xFFFFFFFF

/* structure holds list of ibs */
struct kgsl_ibdesc {
        unsigned int gpuaddr;
        void *hostptr;
        unsigned int sizedwords;
        unsigned int ctrl;
};

/* ioctls */
#define KGSL_IOC_TYPE 0x09

/* get misc info about the GPU
   type should be a value from enum kgsl_property_type
   value points to a structure that varies based on type
   sizebytes is sizeof() that structure
   for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
   this structure contains hardware versioning info.
   for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
   this is used to find mmap() offset and sizes for mapping
   struct kgsl_devmemstore into userspace.
*/
struct kgsl_device_getproperty {
        unsigned int type;
        void *value;
        unsigned int sizebytes;
};

#define IOCTL_KGSL_DEVICE_GETPROPERTY \
        _IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
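
/*
 * Usage sketch (editor's addition): query the hardware info by opening the
 * 3D device node and issuing the ioctl. The "/dev/kgsl-3d0" path and the
 * installed header location are assumptions here, not something this
 * header defines, and use() is just a placeholder for caller code.
 *
 *        #include <fcntl.h>
 *        #include <sys/ioctl.h>
 *        #include <linux/msm_kgsl.h>
 *
 *        struct kgsl_devinfo info = {0};
 *        struct kgsl_device_getproperty prop = {
 *                .type = KGSL_PROP_DEVICE_INFO,
 *                .value = &info,
 *                .sizebytes = sizeof(info),
 *        };
 *        int fd = open("/dev/kgsl-3d0", O_RDWR);
 *        if (fd >= 0 && ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop) == 0)
 *                use(info.chip_id, info.gpu_id);
 */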

/* IOCTL_KGSL_DEVICE_READ (0x3) - removed 03/2012
 */

/* block until the GPU has executed past a given timestamp
 * timeout is in milliseconds.
 */
struct kgsl_device_waittimestamp {
        unsigned int timestamp;
        unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
        _IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

struct kgsl_device_waittimestamp_ctxtid {
        unsigned int context_id;
        unsigned int timestamp;
        unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
        _IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)
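
/*
 * Usage sketch (editor's addition): block until the GPU has retired a
 * timestamp previously returned by IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS on
 * the same context. The 10 second timeout is only an illustrative choice.
 *
 *        struct kgsl_device_waittimestamp_ctxtid wait = {
 *                .context_id = ctx_id,   // from IOCTL_KGSL_DRAWCTXT_CREATE
 *                .timestamp = ts,        // from issueibcmds
 *                .timeout = 10000,       // milliseconds
 *        };
 *        int ret = ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait);
 */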

/* issue indirect commands to the GPU.
 * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
 * ibaddr and sizedwords must specify a subset of a buffer created
 * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
 * flags may be a mask of KGSL_CONTEXT_ values
 * timestamp is a returned counter value which can be passed to
 * other ioctls to determine when the commands have been executed by
 * the GPU.
 */
struct kgsl_ringbuffer_issueibcmds {
        unsigned int drawctxt_id;
        unsigned int ibdesc_addr;
        unsigned int numibs;
        unsigned int timestamp; /* output param */
        unsigned int flags;
};

#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
        _IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
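
/*
 * Usage sketch (editor's addition): with KGSL_CONTEXT_SUBMIT_IB_LIST set,
 * ibdesc_addr is treated as a pointer to an array of struct kgsl_ibdesc
 * and numibs is the number of entries. That interpretation is an
 * assumption about driver behavior, not something this header spells out.
 *
 *        struct kgsl_ibdesc ib = {
 *                .gpuaddr = ib_gpuaddr,     // GPU address of the command buffer
 *                .sizedwords = ib_dwords,
 *        };
 *        struct kgsl_ringbuffer_issueibcmds cmds = {
 *                .drawctxt_id = ctx_id,
 *                .ibdesc_addr = (unsigned int)&ib,
 *                .numibs = 1,
 *                .flags = KGSL_CONTEXT_SUBMIT_IB_LIST,
 *        };
 *        if (ioctl(fd, IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, &cmds) == 0)
 *                ts = cmds.timestamp;       // pass to the waittimestamp ioctls
 */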

/* read the most recently executed timestamp value
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_readtimestamp {
        unsigned int type;
        unsigned int timestamp; /* output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
        _IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
        _IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)

/* free memory when the GPU reaches a given timestamp.
 * gpuaddr specifies a memory region created by a
 * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
 * type should be a value from enum kgsl_timestamp_type
 */
struct kgsl_cmdstream_freememontimestamp {
        unsigned int gpuaddr;
        unsigned int type;
        unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
        _IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* Previous versions of this header had incorrectly defined
   IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
   of a write-only ioctl. To ensure binary compatibility, the following
   #define will be used to intercept the incorrect ioctl
*/

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
        _IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)

/* create a draw context, which is used to preserve GPU state.
 * The flags field may contain a mask of KGSL_CONTEXT_* values
 */
struct kgsl_drawctxt_create {
        unsigned int flags;
        unsigned int drawctxt_id; /* output param */
};

#define IOCTL_KGSL_DRAWCTXT_CREATE \
        _IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
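
/*
 * Usage sketch (editor's addition): create a GL-type context that uses
 * per-context timestamps, then use the returned drawctxt_id with the
 * issueibcmds and waittimestamp ioctls above.
 *
 *        struct kgsl_drawctxt_create ctxt = {
 *                .flags = KGSL_CONTEXT_PER_CONTEXT_TS |
 *                         (KGSL_CONTEXT_TYPE_GL << KGSL_CONTEXT_TYPE_SHIFT),
 *        };
 *        if (ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &ctxt) == 0)
 *                ctx_id = ctxt.drawctxt_id;
 */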

/* destroy a draw context */
struct kgsl_drawctxt_destroy {
        unsigned int drawctxt_id;
};

#define IOCTL_KGSL_DRAWCTXT_DESTROY \
        _IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)

/* add a block of pmem, fb, ashmem or user allocated address
 * into the GPU address space */
struct kgsl_map_user_mem {
        int fd;
        unsigned int gpuaddr;   /* output param */
        unsigned int len;
        unsigned int offset;
        unsigned int hostptr;   /* input param */
        enum kgsl_user_mem_type memtype;
        unsigned int flags;
};

#define IOCTL_KGSL_MAP_USER_MEM \
        _IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
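
/*
 * Usage sketch (editor's addition): import an ION buffer into the GPU
 * address space. 'ion_fd' and 'buf_len' are placeholders for a buffer
 * obtained elsewhere; which memory types a given kernel accepts is not
 * something this header guarantees.
 *
 *        struct kgsl_map_user_mem map = {
 *                .fd = ion_fd,
 *                .len = buf_len,
 *                .offset = 0,
 *                .memtype = KGSL_USER_MEM_TYPE_ION,
 *        };
 *        if (ioctl(fd, IOCTL_KGSL_MAP_USER_MEM, &map) == 0)
 *                gpuaddr = map.gpuaddr;
 */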

struct kgsl_cmdstream_readtimestamp_ctxtid {
        unsigned int context_id;
        unsigned int type;
        unsigned int timestamp; /* output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
        _IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)

struct kgsl_cmdstream_freememontimestamp_ctxtid {
        unsigned int context_id;
        unsigned int gpuaddr;
        unsigned int type;
        unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
        _IOW(KGSL_IOC_TYPE, 0x17, \
        struct kgsl_cmdstream_freememontimestamp_ctxtid)

/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
        int pmem_fd;
        unsigned int gpuaddr;   /* output param */
        unsigned int len;
        unsigned int offset;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
        _IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)

/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
        unsigned int gpuaddr;
};

#define IOCTL_KGSL_SHAREDMEM_FREE \
        _IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)

struct kgsl_cff_user_event {
        unsigned char cff_opcode;
        unsigned int op1;
        unsigned int op2;
        unsigned int op3;
        unsigned int op4;
        unsigned int op5;
        unsigned int __pad[2];
};

#define IOCTL_KGSL_CFF_USER_EVENT \
        _IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)

struct kgsl_gmem_desc {
        unsigned int x;
        unsigned int y;
        unsigned int width;
        unsigned int height;
        unsigned int pitch;
};

struct kgsl_buffer_desc {
        void *hostptr;
        unsigned int gpuaddr;
        int size;
        unsigned int format;
        unsigned int pitch;
        unsigned int enabled;
};

struct kgsl_bind_gmem_shadow {
        unsigned int drawctxt_id;
        struct kgsl_gmem_desc gmem_desc;
        unsigned int shadow_x;
        unsigned int shadow_y;
        struct kgsl_buffer_desc shadow_buffer;
        unsigned int buffer_id;
};

#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
        _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)

/* add a block of memory into the GPU address space */

/*
 * IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC deprecated 09/2012
 * use IOCTL_KGSL_GPUMEM_ALLOC instead
 */

struct kgsl_sharedmem_from_vmalloc {
        unsigned int gpuaddr;   /* output param */
        unsigned int hostptr;
        unsigned int flags;
};

#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
        _IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)

/*
 * This is being deprecated in favor of IOCTL_KGSL_GPUMEM_SYNC_CACHE, which
 * supports both directions (flush and invalidate). This code will still
 * work, but by definition it will do a flush of the cache which might not be
 * what you want to have happen on a buffer following a GPU operation. It is
 * safer to go with IOCTL_KGSL_GPUMEM_SYNC_CACHE
 */

#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
        _IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)

struct kgsl_drawctxt_set_bin_base_offset {
        unsigned int drawctxt_id;
        unsigned int offset;
};

#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
        _IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)

enum kgsl_cmdwindow_type {
        KGSL_CMDWINDOW_MIN     = 0x00000000,
        KGSL_CMDWINDOW_2D      = 0x00000000,
        KGSL_CMDWINDOW_3D      = 0x00000001, /* legacy */
        KGSL_CMDWINDOW_MMU     = 0x00000002,
        KGSL_CMDWINDOW_ARBITER = 0x000000FF,
        KGSL_CMDWINDOW_MAX     = 0x000000FF,
};

/* write to the command window */
struct kgsl_cmdwindow_write {
        enum kgsl_cmdwindow_type target;
        unsigned int addr;
        unsigned int data;
};

#define IOCTL_KGSL_CMDWINDOW_WRITE \
        _IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)

struct kgsl_gpumem_alloc {
        unsigned long gpuaddr;
        size_t size;
        unsigned int flags;
};

#define IOCTL_KGSL_GPUMEM_ALLOC \
        _IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)

struct kgsl_cff_syncmem {
        unsigned int gpuaddr;
        unsigned int len;
        unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_CFF_SYNCMEM \
        _IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)

/*
 * A timestamp event allows the user space to register an action following an
 * expired timestamp. Note IOCTL_KGSL_TIMESTAMP_EVENT has been redefined to
 * _IOWR to support fences which need to return a fd for the priv parameter.
 */

struct kgsl_timestamp_event {
        int type;                /* Type of event (see list below) */
        unsigned int timestamp;  /* Timestamp to trigger event on */
        unsigned int context_id; /* Context for the timestamp */
        void *priv;              /* Pointer to the event specific blob */
        size_t len;              /* Size of the event specific blob */
};

#define IOCTL_KGSL_TIMESTAMP_EVENT_OLD \
        _IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)

/* A genlock timestamp event releases an existing lock on timestamp expire */

#define KGSL_TIMESTAMP_EVENT_GENLOCK 1

struct kgsl_timestamp_event_genlock {
        int handle; /* Handle of the genlock lock to release */
};

/* A fence timestamp event signals a sync fence when the timestamp expires */

#define KGSL_TIMESTAMP_EVENT_FENCE 2

struct kgsl_timestamp_event_fence {
        int fence_fd; /* Fence to signal */
};

/*
 * Set a property within the kernel. Uses the same structure as
 * IOCTL_KGSL_DEVICE_GETPROPERTY
 */

#define IOCTL_KGSL_SETPROPERTY \
        _IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#define IOCTL_KGSL_TIMESTAMP_EVENT \
        _IOWR(KGSL_IOC_TYPE, 0x33, struct kgsl_timestamp_event)
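
/*
 * Usage sketch (editor's addition): ask the kernel to create a sync fence
 * that signals when a context timestamp retires. The fence fd is returned
 * through the event-specific blob; the exact fence semantics belong to the
 * kernel's sync framework and are assumed here, not defined by this header.
 *
 *        struct kgsl_timestamp_event_fence fence = { .fence_fd = -1 };
 *        struct kgsl_timestamp_event event = {
 *                .type = KGSL_TIMESTAMP_EVENT_FENCE,
 *                .timestamp = ts,
 *                .context_id = ctx_id,
 *                .priv = &fence,
 *                .len = sizeof(fence),
 *        };
 *        if (ioctl(fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event) == 0)
 *                sync_fd = fence.fence_fd;
 */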

/**
 * struct kgsl_gpumem_alloc_id - argument to IOCTL_KGSL_GPUMEM_ALLOC_ID
 * @id: returned id value for this allocation.
 * @flags: mask of KGSL_MEM* values requested and actual flags on return.
 * @size: requested size of the allocation and actual size on return.
 * @mmapsize: returned size to pass to mmap() which may be larger than 'size'
 * @gpuaddr: returned GPU address for the allocation
 *
 * Allocate memory for access by the GPU. The flags and size fields are echoed
 * back by the kernel, so that the caller can know if the request was
 * adjusted.
 *
 * Supported flags:
 * KGSL_MEMFLAGS_GPUREADONLY: the GPU will be unable to write to the buffer
 * KGSL_MEMTYPE*: usage hint for debugging aid
 * KGSL_MEMALIGN*: alignment hint, may be ignored or adjusted by the kernel.
 * KGSL_MEMFLAGS_USE_CPU_MAP: If set on call and return, the returned GPU
 * address will be 0. Calling mmap() will set the GPU address.
 */
struct kgsl_gpumem_alloc_id {
        unsigned int id;
        unsigned int flags;
        unsigned int size;
        unsigned int mmapsize;
        unsigned long gpuaddr;
/* private: reserved for future use*/
        unsigned int __pad[2];
};

#define IOCTL_KGSL_GPUMEM_ALLOC_ID \
        _IOWR(KGSL_IOC_TYPE, 0x34, struct kgsl_gpumem_alloc_id)
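
/*
 * Usage sketch (editor's addition): allocate a buffer the GPU can use and
 * free it again by id. The flag composition mirrors the example after the
 * KGSL_MEMALIGN defines above.
 *
 *        struct kgsl_gpumem_alloc_id alloc = {
 *                .size = 4096,
 *                .flags = (KGSL_CACHEMODE_WRITECOMBINE << KGSL_CACHEMODE_SHIFT) |
 *                         (KGSL_MEMTYPE_TEXTURE << KGSL_MEMTYPE_SHIFT),
 *        };
 *        if (ioctl(fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &alloc) == 0) {
 *                // ... use alloc.gpuaddr / mmap alloc.mmapsize bytes ...
 *                struct kgsl_gpumem_free_id free_id = { .id = alloc.id };
 *                ioctl(fd, IOCTL_KGSL_GPUMEM_FREE_ID, &free_id);
 *        }
 */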

/**
 * struct kgsl_gpumem_free_id - argument to IOCTL_KGSL_GPUMEM_FREE_ID
 * @id: GPU allocation id to free
 *
 * Free an allocation by id, in case a GPU address has not been assigned or
 * is unknown. Freeing an allocation by id with this ioctl and freeing it by
 * GPU address with IOCTL_KGSL_SHAREDMEM_FREE are equivalent.
 */
struct kgsl_gpumem_free_id {
        unsigned int id;
/* private: reserved for future use*/
        unsigned int __pad;
};

#define IOCTL_KGSL_GPUMEM_FREE_ID \
        _IOWR(KGSL_IOC_TYPE, 0x35, struct kgsl_gpumem_free_id)

/**
 * struct kgsl_gpumem_get_info - argument to IOCTL_KGSL_GPUMEM_GET_INFO
 * @gpuaddr: GPU address to query. Also set on return.
 * @id: GPU allocation id to query. Also set on return.
 * @flags: returned mask of KGSL_MEM* values.
 * @size: returned size of the allocation.
 * @mmapsize: returned size to pass to mmap(), which may be larger than 'size'
 * @useraddr: returned address of the userspace mapping for this buffer
 *
 * This ioctl allows querying of all user visible attributes of an existing
 * allocation, by either the GPU address or the id returned by a previous
 * call to IOCTL_KGSL_GPUMEM_ALLOC_ID. Legacy allocation ioctls may not
 * return all attributes so this ioctl can be used to look them up if needed.
 *
 */
struct kgsl_gpumem_get_info {
        unsigned long gpuaddr;
        unsigned int id;
        unsigned int flags;
        unsigned int size;
        unsigned int mmapsize;
        unsigned long useraddr;
/* private: reserved for future use*/
        unsigned int __pad[4];
};

#define IOCTL_KGSL_GPUMEM_GET_INFO \
        _IOWR(KGSL_IOC_TYPE, 0x36, struct kgsl_gpumem_get_info)

/**
 * struct kgsl_gpumem_sync_cache - argument to IOCTL_KGSL_GPUMEM_SYNC_CACHE
 * @gpuaddr: GPU address of the buffer to sync.
 * @id: id of the buffer to sync. Either gpuaddr or id is sufficient.
 * @op: a mask of KGSL_GPUMEM_CACHE_* values
 *
 * Sync the L2 cache for memory headed to and from the GPU - this replaces
 * IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE since it can handle cache management for
 * both directions.
 *
 */
struct kgsl_gpumem_sync_cache {
        unsigned int gpuaddr;
        unsigned int id;
        unsigned int op;
/* private: reserved for future use*/
        unsigned int __pad[2]; /* For future binary compatibility */
};

#define KGSL_GPUMEM_CACHE_CLEAN (1 << 0)
#define KGSL_GPUMEM_CACHE_TO_GPU KGSL_GPUMEM_CACHE_CLEAN

#define KGSL_GPUMEM_CACHE_INV (1 << 1)
#define KGSL_GPUMEM_CACHE_FROM_GPU KGSL_GPUMEM_CACHE_INV

#define KGSL_GPUMEM_CACHE_FLUSH \
        (KGSL_GPUMEM_CACHE_CLEAN | KGSL_GPUMEM_CACHE_INV)

#define IOCTL_KGSL_GPUMEM_SYNC_CACHE \
        _IOW(KGSL_IOC_TYPE, 0x37, struct kgsl_gpumem_sync_cache)
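
/*
 * Usage sketch (editor's addition): after the CPU writes into a cached
 * buffer, clean it toward the GPU by id; after the GPU writes, invalidate
 * with KGSL_GPUMEM_CACHE_FROM_GPU before the CPU reads.
 *
 *        struct kgsl_gpumem_sync_cache sync = {
 *                .id = alloc_id,
 *                .op = KGSL_GPUMEM_CACHE_TO_GPU,
 *        };
 *        ioctl(fd, IOCTL_KGSL_GPUMEM_SYNC_CACHE, &sync);
 */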

/**
 * struct kgsl_perfcounter_get - argument to IOCTL_KGSL_PERFCOUNTER_GET
 * @groupid: Performance counter group ID
 * @countable: Countable to select within the group
 * @offset: Return offset of the reserved counter
 *
 * Get an available performance counter from a specified groupid. The offset
 * of the performance counter will be returned after successfully assigning
 * the countable to the counter for the specified group. An error will be
 * returned and an offset of 0 if the groupid is invalid or there are no
 * more counters left. After successfully getting a perfcounter, the user
 * must issue IOCTL_KGSL_PERFCOUNTER_PUT with the same groupid and countable
 * when finished with the perfcounter to clean up perfcounter resources.
 *
 */
struct kgsl_perfcounter_get {
        unsigned int groupid;
        unsigned int countable;
        unsigned int offset;
/* private: reserved for future use */
        unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_GET \
        _IOWR(KGSL_IOC_TYPE, 0x38, struct kgsl_perfcounter_get)

/**
 * struct kgsl_perfcounter_put - argument to IOCTL_KGSL_PERFCOUNTER_PUT
 * @groupid: Performance counter group ID
 * @countable: Countable to release within the group
 *
 * Put an allocated performance counter to allow others to have access to the
 * resource that was previously taken. This is only to be called after
 * successfully getting a performance counter with IOCTL_KGSL_PERFCOUNTER_GET.
 *
 */
struct kgsl_perfcounter_put {
        unsigned int groupid;
        unsigned int countable;
/* private: reserved for future use */
        unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_PUT \
        _IOW(KGSL_IOC_TYPE, 0x39, struct kgsl_perfcounter_put)
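
/*
 * Usage sketch (editor's addition): reserve a countable in a group, and
 * release it when done. Which countable numbers are meaningful for a given
 * group is hardware specific and not defined by this header; '5' below is
 * just a placeholder.
 *
 *        struct kgsl_perfcounter_get get = {
 *                .groupid = KGSL_PERFCOUNTER_GROUP_SP,
 *                .countable = 5,
 *        };
 *        if (ioctl(fd, IOCTL_KGSL_PERFCOUNTER_GET, &get) == 0) {
 *                // ... sample the counter at get.offset, or via PERFCOUNTER_READ ...
 *                struct kgsl_perfcounter_put put = {
 *                        .groupid = get.groupid,
 *                        .countable = get.countable,
 *                };
 *                ioctl(fd, IOCTL_KGSL_PERFCOUNTER_PUT, &put);
 *        }
 */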

/**
 * struct kgsl_perfcounter_query - argument to IOCTL_KGSL_PERFCOUNTER_QUERY
 * @groupid: Performance counter group ID
 * @countables: Return array of the active countables in the group
 * @count: Size of the countables array
 * @max_counters: Return total number of counters for the group ID
 *
 * Query the available performance counters given a groupid. The array
 * *countables is used to return the current active countables in counters.
 * The size of the array is passed in so the kernel will only write at most
 * 'count' entries for the group id. The total number of available counters
 * for the group ID is returned in max_counters.
 * If the array or size passed in are invalid, then only the maximum number
 * of counters will be returned, no data will be written to *countables.
 * If the groupid is invalid an error code will be returned.
 *
 */
struct kgsl_perfcounter_query {
        unsigned int groupid;
        /* Array to return the current countable for up to count counters */
        unsigned int *countables;
        unsigned int count;
        unsigned int max_counters;
/* private: reserved for future use */
        unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_QUERY \
        _IOWR(KGSL_IOC_TYPE, 0x3A, struct kgsl_perfcounter_query)

/**
 * struct kgsl_perfcounter_read_group - one entry of IOCTL_KGSL_PERFCOUNTER_READ
 * @groupid: Performance counter group ID
 * @countable: Performance counter countable ID
 * @value: Return performance counter read
 *
 * Read the current value of a performance counter given by the groupid
 * and countable. struct kgsl_perfcounter_read carries an array of these
 * groupid/countable pairs and its length in 'count'.
 *
 */

struct kgsl_perfcounter_read_group {
        unsigned int groupid;
        unsigned int countable;
        uint64_t value;
};

struct kgsl_perfcounter_read {
        struct kgsl_perfcounter_read_group *reads;
        unsigned int count;
/* private: reserved for future use */
        unsigned int __pad[2]; /* For future binary compatibility */
};

#define IOCTL_KGSL_PERFCOUNTER_READ \
        _IOWR(KGSL_IOC_TYPE, 0x3B, struct kgsl_perfcounter_read)
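
/*
 * Usage sketch (editor's addition): read back the current values of two
 * previously reserved counters in one call; the countable numbers are
 * placeholders.
 *
 *        struct kgsl_perfcounter_read_group reads[2] = {
 *                { .groupid = KGSL_PERFCOUNTER_GROUP_SP, .countable = 5 },
 *                { .groupid = KGSL_PERFCOUNTER_GROUP_TP, .countable = 2 },
 *        };
 *        struct kgsl_perfcounter_read req = {
 *                .reads = reads,
 *                .count = 2,
 *        };
 *        if (ioctl(fd, IOCTL_KGSL_PERFCOUNTER_READ, &req) == 0)
 *                total = reads[0].value + reads[1].value;
 */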

#ifdef __KERNEL__
#ifdef CONFIG_MSM_KGSL_DRM
int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
                        unsigned long *len);
#else
#define kgsl_gem_obj_addr(...) 0
#endif
#endif
#endif /* _MSM_KGSL_H */