/*
 * include/linux/ion.h
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef _LINUX_ION_H
#define _LINUX_ION_H

#include <linux/ioctl.h>
#include <linux/types.h>

struct ion_handle;
/**
 * enum ion_heap_type - list of all possible types of heaps
 * @ION_HEAP_TYPE_SYSTEM:        memory allocated via vmalloc
 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
 * @ION_HEAP_TYPE_CARVEOUT:      memory allocated from a prereserved
 *                               carveout heap, allocations are physically
 *                               contiguous
 * @ION_HEAP_TYPE_CUSTOM:        start of the range reserved for device
 *                               specific heap types; platform heaps such as
 *                               the IOMMU and content protection (CP)
 *                               carveout heaps are defined by the platform,
 *                               see <mach/ion.h>
 * @ION_NUM_HEAPS:               helper for iterating over heap types
 */
enum ion_heap_type {
        ION_HEAP_TYPE_SYSTEM,
        ION_HEAP_TYPE_SYSTEM_CONTIG,
        ION_HEAP_TYPE_CARVEOUT,
        ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
                                 are at the end of this enum */
        ION_NUM_HEAPS,
};

#define ION_HEAP_SYSTEM_MASK            (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK     (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK          (1 << ION_HEAP_TYPE_CARVEOUT)

/**
 * heap flags - the lower 16 bits are used by core ion, the upper 16
 * bits are reserved for use by the heaps themselves.
 */
#define ION_FLAG_CACHED 1       /* mappings of this buffer should be
                                   cached, ion will do cache
                                   maintenance when the buffer is
                                   mapped for dma */

#ifdef __KERNEL__
#include <linux/err.h>
#include <mach/ion.h>
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;

/* This should be removed some day when phys_addr_t's are fully
   plumbed in the kernel, and all instances of ion_phys_addr_t should
   be converted to phys_addr_t. For the time being many kernel interfaces
   do not accept phys_addr_t's that would otherwise have to be used. */
#define ion_phys_addr_t unsigned long
#define ion_virt_addr_t unsigned long

/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:       type of the heap from ion_heap_type enum
 * @id:         unique identifier for the heap. When allocating, heaps with
 *              lower ids will be tried first.
 * @name:       used for debug purposes
 * @base:       base address of heap in physical memory if applicable
 * @size:       size of the heap in bytes if applicable
 * @memory_type: memory type used for the heap
 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
 * @extra_data: extra data specific to each heap type
 */
struct ion_platform_heap {
        enum ion_heap_type type;
        unsigned int id;
        const char *name;
        ion_phys_addr_t base;
        size_t size;
        enum ion_memory_types memory_type;
        unsigned int has_outer_cache;
        void *extra_data;
};

/**
 * struct ion_platform_data - array of platform heaps passed from board file
 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
 * @nr:         number of structures in the array
 * @request_region: function to be called when the number of allocations goes
 *              from 0 -> 1
 * @release_region: function to be called when the number of allocations goes
 *              from 1 -> 0
 * @setup_region: function to be called upon ion registration
 * @heaps:      array of platform_heap structures
 *
 * Provided by the board file in the form of platform data to a platform device.
 */
struct ion_platform_data {
        unsigned int has_outer_cache;
        int nr;
        int (*request_region)(void *);
        int (*release_region)(void *);
        void *(*setup_region)(void);
        struct ion_platform_heap heaps[];
};
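
/*
 * Example (illustrative sketch, not taken from a real board file): a board
 * file might describe a single carveout heap roughly as below.  The id,
 * name, base and size values are made up; heaps[] is a flexible array
 * member, so initializing it in place relies on a GCC extension (static
 * initialization of a flexible array member).
 *
 *      static struct ion_platform_data example_ion_pdata = {
 *              .nr = 1,
 *              .heaps = {
 *                      {
 *                              .type = ION_HEAP_TYPE_CARVEOUT,
 *                              .id   = 1,
 *                              .name = "example-carveout",
 *                              .base = 0x80000000,
 *                              .size = SZ_8M,
 *                      },
 *              },
 *      };
 *
 * The resulting ion_platform_data is then attached to the ion platform
 * device as its platform_data.
 */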

#ifdef CONFIG_ION

/**
 * ion_reserve() - reserve memory for ion heaps if applicable
 * @data:       platform data specifying starting physical address and
 *              size
 *
 * Calls memblock reserve to set aside memory for heaps that are
 * located at specific memory addresses or of specific sizes not
 * managed by the kernel
 */
void ion_reserve(struct ion_platform_data *data);

/**
 * ion_client_create() - allocate a client and return it
 * @dev:        the global ion device
 * @heap_mask:  mask of heaps this client can allocate from
 * @name:       used for debugging
 */
struct ion_client *ion_client_create(struct ion_device *dev,
                                     unsigned int heap_mask, const char *name);

/**
 * msm_ion_client_create - allocate a client using the ion_device specified in
 * drivers/gpu/ion/msm/msm_ion.c
 *
 * heap_mask and name are the same as ion_client_create, return values
 * are the same as ion_client_create.
 */
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
                                        const char *name);

/**
 * ion_client_destroy() - frees a client and all its handles
 * @client:     the client
 *
 * Free the provided client and all its resources including
 * any handles it is holding.
 */
void ion_client_destroy(struct ion_client *client);

/**
 * ion_alloc - allocate ion memory
 * @client:     the client
 * @len:        size of the allocation
 * @align:      requested allocation alignment, many hardware blocks have
 *              alignment requirements of some kind
 * @heap_mask:  mask of heaps to allocate from, if multiple bits are set
 *              heaps will be tried in order from lowest to highest order bit
 * @flags:      heap flags, the low 16 bits are consumed by ion, the high 16
 *              bits are passed on to the respective heap and are heap
 *              specific
 *
 * Allocate memory in one of the heaps provided in heap_mask and return
 * an opaque handle to it.
 */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
                             size_t align, unsigned int heap_mask,
                             unsigned int flags);

/**
 * ion_free - free a handle
 * @client:     the client
 * @handle:     the handle to free
 *
 * Free the provided handle.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle);

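/*
 * Example (illustrative sketch): a driver allocating and later freeing a
 * page-aligned buffer from the system heap.  The client name and sizes are
 * arbitrary.
 *
 *      struct ion_client *client;
 *      struct ion_handle *handle;
 *
 *      client = msm_ion_client_create(ION_HEAP_SYSTEM_MASK, "example");
 *      if (IS_ERR_OR_NULL(client))
 *              return PTR_ERR(client);
 *
 *      handle = ion_alloc(client, SZ_4K, SZ_4K, ION_HEAP_SYSTEM_MASK, 0);
 *      if (IS_ERR_OR_NULL(handle)) {
 *              ion_client_destroy(client);
 *              return PTR_ERR(handle);
 *      }
 *
 *      ...
 *
 *      ion_free(client, handle);
 *      ion_client_destroy(client);
 */
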
/**
 * ion_phys - returns the physical address and len of a handle
 * @client:     the client
 * @handle:     the handle
 * @addr:       a pointer to put the address in
 * @len:        a pointer to put the length in
 *
 * This function queries the heap for a particular handle to get the
 * handle's physical address. Its output is only correct if
 * a heap returns physically contiguous memory -- in other cases
 * this api should not be implemented -- ion_sg_table should be used
 * instead. Returns -EINVAL if the handle is invalid. This has
 * no implications on the reference counting of the handle --
 * the returned value may not be valid if the caller is not
 * holding a reference.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
             ion_phys_addr_t *addr, size_t *len);

/**
 * ion_sg_table - return an sg_table describing a handle
 * @client:     the client
 * @handle:     the handle
 *
 * This function returns the sg_table describing
 * a particular ion handle.
 */
struct sg_table *ion_sg_table(struct ion_client *client,
                              struct ion_handle *handle);

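/*
 * Example (illustrative sketch): walking the pages behind a handle.  For
 * physically contiguous heaps ion_phys() is enough; otherwise iterate the
 * sg_table with for_each_sg() from <linux/scatterlist.h>.  do_something()
 * is a placeholder for the caller's own per-page work.
 *
 *      struct sg_table *table = ion_sg_table(client, handle);
 *      struct scatterlist *sg;
 *      int i;
 *
 *      if (IS_ERR_OR_NULL(table))
 *              return PTR_ERR(table);
 *
 *      for_each_sg(table->sgl, sg, table->nents, i) {
 *              struct page *page = sg_page(sg);
 *
 *              do_something(page, sg->length);
 *      }
 */
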
/**
 * ion_map_kernel - create mapping for the given handle
 * @client:     the client
 * @handle:     handle to map
 *
 * Map the given handle into the kernel and return a kernel address that
 * can be used to access this memory. Whether the mapping is cached is
 * determined by the flags the buffer was allocated with (ION_FLAG_CACHED).
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_unmap_kernel() - destroy a kernel mapping for a handle
 * @client:     the client
 * @handle:     handle to unmap
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);

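/*
 * Example (illustrative sketch): touching a buffer from the CPU.
 * buffer_len stands for whatever length the buffer was allocated with.
 *
 *      void *vaddr = ion_map_kernel(client, handle);
 *
 *      if (IS_ERR_OR_NULL(vaddr))
 *              return PTR_ERR(vaddr);
 *      memset(vaddr, 0, buffer_len);
 *      ion_unmap_kernel(client, handle);
 */
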
/**
 * ion_share_dma_buf() - given an ion client, create a dma-buf fd
 * @client:     the client
 * @handle:     the handle
 */
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_import_dma_buf() - given a dma-buf fd from the ion exporter get handle
 * @client:     the client
 * @fd:         the dma-buf fd
 *
 * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
 * import that fd and return a handle representing it. If a dma-buf from
 * another exporter is passed in this function will return ERR_PTR(-EINVAL)
 */
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);

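/*
 * Example (illustrative sketch): sharing a buffer between two kernel
 * clients through a dma-buf fd.  client_a/client_b and handle_a are
 * assumed to have been set up as in the earlier examples.
 *
 *      int fd = ion_share_dma_buf(client_a, handle_a);
 *      struct ion_handle *handle_b;
 *
 *      if (fd < 0)
 *              return fd;
 *      handle_b = ion_import_dma_buf(client_b, fd);
 *      if (IS_ERR_OR_NULL(handle_b))
 *              return PTR_ERR(handle_b);
 *
 * In practice the fd is more commonly handed to userspace or another
 * process than re-imported in the same driver.
 */
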
/**
 * ion_handle_get_flags - get the flags for a given handle
 *
 * @client - client who allocated the handle
 * @handle - handle to get the flags
 * @flags - pointer to store the flags
 *
 * Gets the current flags for a handle. These flags indicate various options
 * of the buffer (caching, security, etc.)
 */
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
                         unsigned long *flags);

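/*
 * Example (illustrative sketch, assuming the returned flags carry the
 * allocation flags such as ION_FLAG_CACHED): checking whether a buffer was
 * allocated cached before deciding on cache maintenance.
 *
 *      bool cached;
 *      unsigned long flags;
 *
 *      if (ion_handle_get_flags(client, handle, &flags))
 *              return -EINVAL;
 *      cached = !!(flags & ION_FLAG_CACHED);
 */
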
/**
 * ion_map_iommu - map the given handle into an iommu
 *
 * @client - client who allocated the handle
 * @handle - handle to map
 * @domain_num - domain number to map to
 * @partition_num - partition number to allocate iova from
 * @align - alignment for the iova
 * @iova_length - length of iova to map. If the iova length is
 *              greater than the handle length, the remaining
 *              address space will be mapped to a dummy buffer.
 * @iova - pointer to store the iova address
 * @buffer_size - pointer to store the size of the buffer
 * @flags - flags for options to map
 * @iommu_flags - flags specific to the iommu.
 *
 * Maps the handle into the iova space specified via domain number. Iova
 * will be allocated from the partition specified via partition_num.
 * Returns 0 on success, negative value on error.
 */
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
                  int domain_num, int partition_num, unsigned long align,
                  unsigned long iova_length, unsigned long *iova,
                  unsigned long *buffer_size,
                  unsigned long flags, unsigned long iommu_flags);

/**
 * ion_handle_get_size - get the allocated size of a given handle
 *
 * @client - client who allocated the handle
 * @handle - handle to get the size
 * @size - pointer to store the size
 *
 * Gives the allocated size of a handle. Returns 0 on success, negative
 * value on error.
 *
 * NOTE: This is intended to be used only to get a size to pass to map_iommu.
 * You should *NOT* rely on this for any other usage.
 */
int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
                        unsigned long *size);

/**
 * ion_unmap_iommu - unmap the handle from an iommu
 *
 * @client - client who allocated the handle
 * @handle - handle to unmap
 * @domain_num - domain to unmap from
 * @partition_num - partition to unmap from
 *
 * Decrement the reference count on the iommu mapping. If the count is
 * 0, the mapping will be removed from the iommu.
 */
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
                     int domain_num, int partition_num);

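/*
 * Example (illustrative sketch): mapping a buffer into a device iommu
 * domain.  example_domain and example_partition are placeholders for the
 * platform specific domain/partition ids the caller already knows.
 *
 *      unsigned long iova, buffer_size, len;
 *      int ret;
 *
 *      ret = ion_handle_get_size(client, handle, &len);
 *      if (ret)
 *              return ret;
 *      ret = ion_map_iommu(client, handle, example_domain, example_partition,
 *                          SZ_4K, len, &iova, &buffer_size, 0, 0);
 *      if (ret)
 *              return ret;
 *
 *      ...program the device with iova...
 *
 *      ion_unmap_iommu(client, handle, example_domain, example_partition);
 */
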

/**
 * ion_secure_heap - secure a heap
 *
 * @dev - the ion device containing the heap
 * @heap_id - heap id to secure.
 * @version - version of content protection
 * @data - extra data needed for protection
 *
 * Secure a heap
 * Returns 0 on success
 */
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
                        void *data);

/**
 * ion_unsecure_heap - un-secure a heap
 *
 * @dev - the ion device containing the heap
 * @heap_id - heap id to un-secure.
 * @version - version of content protection
 * @data - extra data needed for protection
 *
 * Un-secure a heap
 * Returns 0 on success
 */
int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
                        void *data);

/**
 * msm_ion_do_cache_op - do cache operations.
 *
 * @client - pointer to ION client.
 * @handle - pointer to buffer handle.
 * @vaddr - virtual address to operate on.
 * @len - Length of data to do cache operation on.
 * @cmd - Cache operation to perform:
 *              ION_IOC_CLEAN_CACHES
 *              ION_IOC_INV_CACHES
 *              ION_IOC_CLEAN_INV_CACHES
 *
 * Returns 0 on success
 */
int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
                        void *vaddr, unsigned long len, unsigned int cmd);

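/*
 * Example (illustrative sketch): cleaning the CPU caches for a cached
 * buffer before handing it to hardware.  vaddr comes from ion_map_kernel()
 * and buffer_len stands for the mapped length.
 *
 *      int ret = msm_ion_do_cache_op(client, handle, vaddr, buffer_len,
 *                                    ION_IOC_CLEAN_CACHES);
 *      if (ret)
 *              pr_err("cache clean failed: %d\n", ret);
 */
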
#else
static inline void ion_reserve(struct ion_platform_data *data)
{

}

static inline struct ion_client *ion_client_create(struct ion_device *dev,
                                     unsigned int heap_mask, const char *name)
{
        return ERR_PTR(-ENODEV);
}

static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
                                        const char *name)
{
        return ERR_PTR(-ENODEV);
}

static inline void ion_client_destroy(struct ion_client *client) { }

static inline struct ion_handle *ion_alloc(struct ion_client *client,
                                        size_t len, size_t align,
                                        unsigned int heap_mask,
                                        unsigned int flags)
{
        return ERR_PTR(-ENODEV);
}

static inline void ion_free(struct ion_client *client,
        struct ion_handle *handle) { }

static inline int ion_phys(struct ion_client *client,
        struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len)
{
        return -ENODEV;
}

static inline struct sg_table *ion_sg_table(struct ion_client *client,
                              struct ion_handle *handle)
{
        return ERR_PTR(-ENODEV);
}

static inline void *ion_map_kernel(struct ion_client *client,
        struct ion_handle *handle)
{
        return ERR_PTR(-ENODEV);
}

static inline void ion_unmap_kernel(struct ion_client *client,
        struct ion_handle *handle) { }

static inline int ion_share_dma_buf(struct ion_client *client,
        struct ion_handle *handle)
{
        return -ENODEV;
}

static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client,
        int fd)
{
        return ERR_PTR(-ENODEV);
}

static inline int ion_handle_get_flags(struct ion_client *client,
        struct ion_handle *handle, unsigned long *flags)
{
        return -ENODEV;
}

static inline int ion_map_iommu(struct ion_client *client,
                        struct ion_handle *handle, int domain_num,
                        int partition_num, unsigned long align,
                        unsigned long iova_length, unsigned long *iova,
                        unsigned long *buffer_size,
                        unsigned long flags,
                        unsigned long iommu_flags)
{
        return -ENODEV;
}

static inline void ion_unmap_iommu(struct ion_client *client,
                        struct ion_handle *handle, int domain_num,
                        int partition_num)
{
        return;
}

static inline int ion_secure_heap(struct ion_device *dev, int heap_id,
                                        int version, void *data)
{
        return -ENODEV;
}

static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id,
                                        int version, void *data)
{
        return -ENODEV;
}

static inline int msm_ion_do_cache_op(struct ion_client *client,
                        struct ion_handle *handle, void *vaddr,
                        unsigned long len, unsigned int cmd)
{
        return -ENODEV;
}

#endif /* CONFIG_ION */
#endif /* __KERNEL__ */

/**
 * DOC: Ion Userspace API
 *
 * Create a client by opening /dev/ion.
 * Most operations are handled via the following ioctls.
 *
 */

/**
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len:        size of the allocation
 * @align:      required alignment of the allocation
 * @heap_mask:  mask of heaps to allocate from
 * @flags:      flags passed to heap
 * @handle:     pointer that will be populated with a cookie to use to refer
 *              to this allocation
 *
 * Provided by userspace as an argument to the ioctl
 */
struct ion_allocation_data {
        size_t len;
        size_t align;
        unsigned int heap_mask;
        unsigned int flags;
        struct ion_handle *handle;
};

/**
 * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
 * @handle:     a handle
 * @fd:         a file descriptor representing that handle
 *
 * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
 * the handle returned from ion alloc, and the kernel returns the file
 * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
 * provides the file descriptor and the kernel returns the handle.
 */
struct ion_fd_data {
        struct ion_handle *handle;
        int fd;
};

/**
 * struct ion_handle_data - a handle passed to/from the kernel
 * @handle:     a handle
 */
struct ion_handle_data {
        struct ion_handle *handle;
};

/**
 * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
 * @cmd:        the custom ioctl function to call
 * @arg:        additional data to pass to the custom ioctl, typically a user
 *              pointer to a predefined structure
 *
 * This works just like the regular cmd and arg fields of an ioctl.
 */
struct ion_custom_data {
        unsigned int cmd;
        unsigned long arg;
};

#define ION_IOC_MAGIC           'I'

/**
 * DOC: ION_IOC_ALLOC - allocate memory
 *
 * Takes an ion_allocation_data struct and returns it with the handle field
 * populated with the opaque handle for the allocation.
 */
#define ION_IOC_ALLOC           _IOWR(ION_IOC_MAGIC, 0, \
                                      struct ion_allocation_data)

/**
 * DOC: ION_IOC_FREE - free memory
 *
 * Takes an ion_handle_data struct and frees the handle.
 */
#define ION_IOC_FREE            _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)

/**
 * DOC: ION_IOC_MAP - get a file descriptor to mmap
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle. Returns the struct with the fd field set to a file
 * descriptor open in the current address space. This file descriptor
 * can then be used as an argument to mmap.
 */
#define ION_IOC_MAP             _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)

/**
 * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle. Returns the struct with the fd field set to a file
 * descriptor open in the current address space. This file descriptor
 * can then be passed to another process. The corresponding opaque handle can
 * be retrieved via ION_IOC_IMPORT.
 */
#define ION_IOC_SHARE           _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)

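/*
 * Example (illustrative sketch): passing a buffer to another process from
 * userspace.  The exporting process turns its handle into an fd with
 * ION_IOC_SHARE and sends that fd over a unix domain socket (SCM_RIGHTS,
 * not shown); the receiving process turns the fd back into a handle with
 * ION_IOC_IMPORT.  send_fd_to_peer() and received_fd are placeholders for
 * the caller's own fd-passing mechanism, and handle is the value previously
 * returned by ION_IOC_ALLOC.
 *
 *      struct ion_fd_data share_data = { .handle = handle };
 *
 *      ioctl(ion_fd, ION_IOC_SHARE, &share_data);
 *      send_fd_to_peer(share_data.fd);
 *
 * and on the receiving side:
 *
 *      struct ion_fd_data import_data = { .fd = received_fd };
 *
 *      ioctl(ion_fd, ION_IOC_IMPORT, &import_data);
 */
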
/**
 * DOC: ION_IOC_IMPORT - imports a shared file descriptor
 *
 * Takes an ion_fd_data struct with the fd field populated with a valid file
 * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
 * field set to the corresponding opaque handle.
 */
#define ION_IOC_IMPORT          _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)

/**
 * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
 *
 * Takes the argument of the architecture specific ioctl to call and
 * passes appropriate userdata for that ioctl
 */
#define ION_IOC_CUSTOM          _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
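
/*
 * Example (illustrative sketch): userspace allocation and mapping.  Error
 * handling is trimmed and the heap mask, flags and sizes are arbitrary.
 *
 *      struct ion_allocation_data alloc_data = {
 *              .len = 4096,
 *              .align = 4096,
 *              .heap_mask = ION_HEAP_SYSTEM_MASK,
 *              .flags = ION_FLAG_CACHED,
 *      };
 *      struct ion_fd_data fd_data;
 *      struct ion_handle_data handle_data;
 *      void *buf;
 *      int ion_fd = open("/dev/ion", O_RDWR);
 *
 *      ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *
 *      fd_data.handle = alloc_data.handle;
 *      ioctl(ion_fd, ION_IOC_MAP, &fd_data);
 *      buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd_data.fd, 0);
 *
 *      ...
 *
 *      munmap(buf, 4096);
 *      close(fd_data.fd);
 *      handle_data.handle = alloc_data.handle;
 *      ioctl(ion_fd, ION_IOC_FREE, &handle_data);
 *      close(ion_fd);
 */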

#endif /* _LINUX_ION_H */