blob: 6b1d6f4ce539b3b2951e3e51e52a20b1ed0b2712 [file] [log] [blame]
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001/*
2 * include/linux/ion.h
3 *
4 * Copyright (C) 2011 Google, Inc.
Duy Truonge833aca2013-02-12 13:35:08 -08005 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07006 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#ifndef _LINUX_ION_H
19#define _LINUX_ION_H
20
Laura Abbottabcb6f72011-10-04 16:26:49 -070021#include <linux/ioctl.h>
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070022#include <linux/types.h>
23
struct ion_handle;
/**
 * enum ion_heap_type - list of all possible types of heaps
 * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
 * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
 *				 carveout heap, allocations are physically
 *				 contiguous
 * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
 * @ION_HEAP_TYPE_CUSTOM:	 first id reserved for device specific heaps
 *				 (e.g. the IOMMU and content-protection heap
 *				 types declared in <mach/ion.h>)
 * @ION_NUM_HEAPS:		 helper for iterating over heaps
 */
enum ion_heap_type {
	ION_HEAP_TYPE_SYSTEM,
	ION_HEAP_TYPE_SYSTEM_CONTIG,
	ION_HEAP_TYPE_CARVEOUT,
	ION_HEAP_TYPE_DMA,
	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
				 are at the end of this enum */
	ION_NUM_HEAPS,
};
48
Iliyan Malchevf22301562011-07-06 16:53:21 -070049#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
50#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
51#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
Benjamin Gaignard792b32d2012-08-15 10:55:10 -070052#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070053
Mitchel Humpherys97e21232012-09-11 15:59:11 -070054/**
55 * heap flags - the lower 16 bits are used by core ion, the upper 16
56 * bits are reserved for use by the heaps themselves.
57 */
58#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
59 cached, ion will do cache
60 maintenance when the buffer is
61 mapped for dma */
Laura Abbotta2e93632011-08-19 13:36:32 -070062
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070063#ifdef __KERNEL__
Laura Abbott65576962011-10-31 12:13:25 -070064#include <linux/err.h>
Laura Abbottcffdff52011-09-23 10:40:19 -070065#include <mach/ion.h>
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070066struct ion_device;
67struct ion_heap;
68struct ion_mapper;
69struct ion_client;
70struct ion_buffer;
71
/* This should be removed some day when phys_addr_t's are fully
   plumbed in the kernel, and all instances of ion_phys_addr_t should
   be converted to phys_addr_t.  For the time being many kernel interfaces
   do not accept phys_addr_t's, so the conversion cannot happen yet. */
#define ion_phys_addr_t unsigned long
#define ion_virt_addr_t unsigned long
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070078
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070079/**
80 * struct ion_platform_heap - defines a heap in the given platform
81 * @type: type of the heap from ion_heap_type enum
Olav Hauganee0f7802011-12-19 13:28:57 -080082 * @id: unique identifier for heap. When allocating (lower numbers
Olav Hauganb5be7992011-11-18 14:29:02 -080083 * will be allocated from first)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070084 * @name: used for debug purposes
85 * @base: base address of heap in physical memory if applicable
86 * @size: size of the heap in bytes if applicable
Laura Abbottcaafeea2011-12-13 11:43:10 -080087 * @memory_type:Memory type used for the heap
Olav Haugan85c95402012-05-30 17:32:37 -070088 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
Laura Abbottcaafeea2011-12-13 11:43:10 -080089 * @extra_data: Extra data specific to each heap type
Benjamin Gaignard0085c1a2012-06-25 15:30:18 -070090 * @priv: heap private data
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070091 */
92struct ion_platform_heap {
93 enum ion_heap_type type;
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -070094 unsigned int id;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -070095 const char *name;
96 ion_phys_addr_t base;
97 size_t size;
Laura Abbotta2e93632011-08-19 13:36:32 -070098 enum ion_memory_types memory_type;
Olav Haugan85c95402012-05-30 17:32:37 -070099 unsigned int has_outer_cache;
Olav Haugan0703dbf2011-12-19 17:53:38 -0800100 void *extra_data;
Benjamin Gaignard0085c1a2012-06-25 15:30:18 -0700101 void *priv;
Olav Haugan0703dbf2011-12-19 17:53:38 -0800102};
103
Laura Abbottcaafeea2011-12-13 11:43:10 -0800104/**
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700105 * struct ion_platform_data - array of platform heaps passed from board file
Olav Haugan85c95402012-05-30 17:32:37 -0700106 * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise.
Alex Bird27ca6612011-11-01 14:40:06 -0700107 * @nr: number of structures in the array
108 * @request_region: function to be called when the number of allocations goes
109 * from 0 -> 1
110 * @release_region: function to be called when the number of allocations goes
111 * from 1 -> 0
112 * @setup_region: function to be called upon ion registration
113 * @heaps: array of platform_heap structions
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700114 *
115 * Provided by the board file in the form of platform data to a platform device.
116 */
117struct ion_platform_data {
Olav Haugan85c95402012-05-30 17:32:37 -0700118 unsigned int has_outer_cache;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700119 int nr;
Olav Hauganee0f7802011-12-19 13:28:57 -0800120 int (*request_region)(void *);
121 int (*release_region)(void *);
Alex Bird27ca6612011-11-01 14:40:06 -0700122 void *(*setup_region)(void);
Benjamin Gaignardb2d367c2012-06-25 15:27:30 -0700123 struct ion_platform_heap *heaps;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700124};
125
Jordan Crouse8cd48322011-10-12 17:05:19 -0600126#ifdef CONFIG_ION
127
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700128/**
Laura Abbottb14ed962012-01-30 14:18:08 -0800129 * ion_reserve() - reserve memory for ion heaps if applicable
130 * @data: platform data specifying starting physical address and
131 * size
132 *
133 * Calls memblock reserve to set aside memory for heaps that are
134 * located at specific memory addresses or of specfic sizes not
135 * managed by the kernel
136 */
137void ion_reserve(struct ion_platform_data *data);
138
139/**
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700140 * ion_client_create() - allocate a client and returns it
141 * @dev: the global ion device
142 * @heap_mask: mask of heaps this client can allocate from
143 * @name: used for debugging
144 */
145struct ion_client *ion_client_create(struct ion_device *dev,
146 unsigned int heap_mask, const char *name);
147
/**
 * msm_ion_client_create - allocate a client using the ion_device specified in
 * drivers/gpu/ion/msm/msm_ion.c
 *
 * heap_mask and name are the same as ion_client_create, return values
 * are the same as ion_client_create.
 */

struct ion_client *msm_ion_client_create(unsigned int heap_mask,
					const char *name);

/**
 * ion_client_destroy() - frees a client and all its handles
 * @client:	the client
 *
 * Free the provided client and all its resources including
 * any handles it is holding.
 */
void ion_client_destroy(struct ion_client *client);
167
/**
 * ion_alloc - allocate ion memory
 * @client:	the client
 * @len:	size of the allocation
 * @align:	requested allocation alignment, lots of hardware blocks have
 *		alignment requirements of some kind
 * @heap_mask:	mask of heaps to allocate from, if multiple bits are set
 *		heaps will be tried in order from lowest to highest order bit
 * @flags:	heap flags, the low 16 bits are consumed by ion, the high 16
 *		bits are passed on to the respective heap and can be heap
 *		custom
 *
 * Allocate memory in one of the heaps provided in heap mask and return
 * an opaque handle to it.
 */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags);

/**
 * ion_free - free a handle
 * @client:	the client
 * @handle:	the handle to free
 *
 * Free the provided handle.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_phys - returns the physical address and len of a handle
 * @client:	the client
 * @handle:	the handle
 * @addr:	a pointer to put the address in
 * @len:	a pointer to put the length in
 *
 * This function queries the heap for a particular handle to get the
 * handle's physical address.  Its output is only correct if
 * a heap returns physically contiguous memory -- in other cases
 * this api should not be implemented -- ion_sg_table should be used
 * instead.  Returns -EINVAL if the handle is invalid.  This has
 * no implications on the reference counting of the handle --
 * the returned value may not be valid if the caller is not
 * holding a reference.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len);

/**
 * ion_sg_table - return an sg_table describing a handle
 * @client:	the client
 * @handle:	the handle
 *
 * This function returns the sg_table describing
 * a particular ion handle.
 */
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle);
225
/**
 * ion_map_kernel - create mapping for the given handle
 * @client:	the client
 * @handle:	handle to map
 *
 * Map the given handle into the kernel and return a kernel address that
 * can be used to access this address.  The mapping returned is non-secure
 * and uncached.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_unmap_kernel() - destroy a kernel mapping for a handle
 * @client:	the client
 * @handle:	handle to unmap
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_share_dma_buf() - given an ion client, create a dma-buf fd
 * @client:	the client
 * @handle:	the handle
 */
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_import_dma_buf() - given a dma-buf fd from the ion exporter get handle
 * @client:	the client
 * @fd:		the dma-buf fd
 *
 * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
 * import that fd and return a handle representing it.  If a dma-buf from
 * another exporter is passed in this function will return ERR_PTR(-EINVAL)
 */
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
Laura Abbott273dd8e2011-10-12 14:26:33 -0700262
Laura Abbott273dd8e2011-10-12 14:26:33 -0700263/**
264 * ion_handle_get_flags - get the flags for a given handle
265 *
266 * @client - client who allocated the handle
267 * @handle - handle to get the flags
268 * @flags - pointer to store the flags
269 *
270 * Gets the current flags for a handle. These flags indicate various options
271 * of the buffer (caching, security, etc.)
272 */
273int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
274 unsigned long *flags);
275
Laura Abbott8c017362011-09-22 20:59:12 -0700276
277/**
278 * ion_map_iommu - map the given handle into an iommu
279 *
280 * @client - client who allocated the handle
281 * @handle - handle to map
282 * @domain_num - domain number to map to
283 * @partition_num - partition number to allocate iova from
284 * @align - alignment for the iova
285 * @iova_length - length of iova to map. If the iova length is
286 * greater than the handle length, the remaining
287 * address space will be mapped to a dummy buffer.
288 * @iova - pointer to store the iova address
289 * @buffer_size - pointer to store the size of the buffer
Mitchel Humpherysdc4d01d2012-09-13 10:53:22 -0700290 * @flags - flags for options to map
Olav Hauganb3676592012-03-02 15:02:25 -0800291 * @iommu_flags - flags specific to the iommu.
Laura Abbott8c017362011-09-22 20:59:12 -0700292 *
293 * Maps the handle into the iova space specified via domain number. Iova
294 * will be allocated from the partition specified via partition_num.
295 * Returns 0 on success, negative value on error.
296 */
297int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
298 int domain_num, int partition_num, unsigned long align,
299 unsigned long iova_length, unsigned long *iova,
300 unsigned long *buffer_size,
Olav Hauganb3676592012-03-02 15:02:25 -0800301 unsigned long flags, unsigned long iommu_flags);
Laura Abbott8c017362011-09-22 20:59:12 -0700302
Sultanxdace256952014-11-30 18:07:09 -0800303/**
304 * ion_map_iommu_by_force - map the given handle into an iommu by force
305 *
306 * Does the same thing as ion_map_iommu(), but if the handle that is
307 * attempting to be mapped is already mapped then it will be unmapped first,
308 * ensuring success. This should be used in areas where hard-to-debug
309 * memory leaks are expected and where the success of ion_map_iommu() is crucial.
310 */
311int ion_map_iommu_by_force(struct ion_client *client, struct ion_handle *handle,
312 int domain_num, int partition_num, unsigned long align,
313 unsigned long iova_length, unsigned long *iova,
314 unsigned long *buffer_size,
315 unsigned long flags, unsigned long iommu_flags);
316
Laura Abbott8c017362011-09-22 20:59:12 -0700317
/**
 * ion_handle_get_size - get the allocated size of a given handle
 *
 * @client - client who allocated the handle
 * @handle - handle to get the size
 * @size - pointer to store the size
 *
 * Gives the allocated size of a handle.  Returns 0 on success, negative
 * value on error.
 *
 * NOTE: This is intended to be used only to get a size to pass to map_iommu.
 * You should *NOT* rely on this for any other usage.
 */

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size);

/**
 * ion_unmap_iommu - unmap the handle from an iommu
 *
 * @client - client who allocated the handle
 * @handle - handle to unmap
 * @domain_num - domain to unmap from
 * @partition_num - partition to unmap from
 *
 * Decrement the reference count on the iommu mapping.  If the count is
 * 0, the mapping will be removed from the iommu.
 */
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
		     int domain_num, int partition_num);
348
349
Olav Haugan0a852512012-01-09 10:20:55 -0800350/**
351 * ion_secure_heap - secure a heap
352 *
353 * @client - a client that has allocated from the heap heap_id
354 * @heap_id - heap id to secure.
Laura Abbott7e446482012-06-13 15:59:39 -0700355 * @version - version of content protection
356 * @data - extra data needed for protection
Olav Haugan0a852512012-01-09 10:20:55 -0800357 *
358 * Secure a heap
359 * Returns 0 on success
360 */
Laura Abbott7e446482012-06-13 15:59:39 -0700361int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
362 void *data);
Olav Haugan0a852512012-01-09 10:20:55 -0800363
364/**
365 * ion_unsecure_heap - un-secure a heap
366 *
367 * @client - a client that has allocated from the heap heap_id
368 * @heap_id - heap id to un-secure.
Laura Abbott7e446482012-06-13 15:59:39 -0700369 * @version - version of content protection
370 * @data - extra data needed for protection
Olav Haugan0a852512012-01-09 10:20:55 -0800371 *
372 * Un-secure a heap
373 * Returns 0 on success
374 */
Laura Abbott7e446482012-06-13 15:59:39 -0700375int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
376 void *data);
Olav Haugan0a852512012-01-09 10:20:55 -0800377
/**
 * msm_ion_do_cache_op - do cache operations.
 *
 * @client - pointer to ION client.
 * @handle - pointer to buffer handle.
 * @vaddr - virtual address to operate on.
 * @len - Length of data to do cache operation on.
 * @cmd - Cache operation to perform:
 *		ION_IOC_CLEAN_CACHES
 *		ION_IOC_INV_CACHES
 *		ION_IOC_CLEAN_INV_CACHES
 *
 * Returns 0 on success
 */
int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *vaddr, unsigned long len, unsigned int cmd);
394
Jordan Crouse8cd48322011-10-12 17:05:19 -0600395#else
Laura Abbottb14ed962012-01-30 14:18:08 -0800396static inline void ion_reserve(struct ion_platform_data *data)
397{
398
399}
400
Jordan Crouse8cd48322011-10-12 17:05:19 -0600401static inline struct ion_client *ion_client_create(struct ion_device *dev,
402 unsigned int heap_mask, const char *name)
403{
404 return ERR_PTR(-ENODEV);
405}
Laura Abbott273dd8e2011-10-12 14:26:33 -0700406
Jordan Crouse8cd48322011-10-12 17:05:19 -0600407static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
408 const char *name)
409{
410 return ERR_PTR(-ENODEV);
411}
412
413static inline void ion_client_destroy(struct ion_client *client) { }
414
415static inline struct ion_handle *ion_alloc(struct ion_client *client,
Hanumant Singh2ac41c92012-08-29 18:39:44 -0700416 size_t len, size_t align,
417 unsigned int heap_mask,
418 unsigned int flags)
Jordan Crouse8cd48322011-10-12 17:05:19 -0600419{
420 return ERR_PTR(-ENODEV);
421}
422
423static inline void ion_free(struct ion_client *client,
424 struct ion_handle *handle) { }
425
426
427static inline int ion_phys(struct ion_client *client,
428 struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len)
429{
430 return -ENODEV;
431}
432
Laura Abbottb14ed962012-01-30 14:18:08 -0800433static inline struct sg_table *ion_sg_table(struct ion_client *client,
434 struct ion_handle *handle)
435{
436 return ERR_PTR(-ENODEV);
437}
438
Jordan Crouse8cd48322011-10-12 17:05:19 -0600439static inline void *ion_map_kernel(struct ion_client *client,
440 struct ion_handle *handle, unsigned long flags)
441{
442 return ERR_PTR(-ENODEV);
443}
444
/* !CONFIG_ION stubs: unmap is a no-op; share/import/flag queries fail. */
static inline void ion_unmap_kernel(struct ion_client *client,
	struct ion_handle *handle) { }

static inline int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	return -ENODEV;
}

static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	return ERR_PTR(-ENODEV);
}

static inline int ion_handle_get_flags(struct ion_client *client,
	struct ion_handle *handle, unsigned long *flags)
{
	return -ENODEV;
}
Laura Abbott8c017362011-09-22 20:59:12 -0700463
464static inline int ion_map_iommu(struct ion_client *client,
465 struct ion_handle *handle, int domain_num,
466 int partition_num, unsigned long align,
467 unsigned long iova_length, unsigned long *iova,
Olav Haugan9a27d4c2012-02-23 09:35:16 -0800468 unsigned long *buffer_size,
Olav Hauganb3676592012-03-02 15:02:25 -0800469 unsigned long flags,
470 unsigned long iommu_flags)
Laura Abbott8c017362011-09-22 20:59:12 -0700471{
472 return -ENODEV;
473}
474
475static inline void ion_unmap_iommu(struct ion_client *client,
476 struct ion_handle *handle, int domain_num,
477 int partition_num)
478{
479 return;
480}
481
Laura Abbott7e446482012-06-13 15:59:39 -0700482static inline int ion_secure_heap(struct ion_device *dev, int heap_id,
483 int version, void *data)
Olav Haugan0a852512012-01-09 10:20:55 -0800484{
485 return -ENODEV;
Laura Abbott8c017362011-09-22 20:59:12 -0700486
Olav Haugan0a852512012-01-09 10:20:55 -0800487}
488
Laura Abbott7e446482012-06-13 15:59:39 -0700489static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id,
490 int version, void *data)
Olav Haugan0a852512012-01-09 10:20:55 -0800491{
492 return -ENODEV;
493}
494
Olav Haugan41f85792012-02-08 15:28:05 -0800495static inline int msm_ion_do_cache_op(struct ion_client *client,
496 struct ion_handle *handle, void *vaddr,
497 unsigned long len, unsigned int cmd)
498{
499 return -ENODEV;
500}
501
Jordan Crouse8cd48322011-10-12 17:05:19 -0600502#endif /* CONFIG_ION */
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700503#endif /* __KERNEL__ */
504
505/**
506 * DOC: Ion Userspace API
507 *
508 * create a client by opening /dev/ion
509 * most operations handled via following ioctls
510 *
511 */
512
/**
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len:	size of the allocation
 * @align:	required alignment of the allocation
 * @heap_mask:	mask of heaps to allocate from
 * @flags:	flags passed to heap
 * @handle:	pointer that will be populated with a cookie to use to refer
 *		to this allocation
 *
 * Provided by userspace as an argument to the ioctl
 */
struct ion_allocation_data {
	size_t len;
	size_t align;
	unsigned int heap_mask;
	unsigned int flags;
	struct ion_handle *handle;
};


/*
 * Layout of struct ion_allocation_data without the heap_mask field,
 * used by ION_IOC_ALLOC_COMPAT — presumably kept for compatibility
 * with older userspace; confirm against the ioctl handler.
 */
struct ion_allocation_data_compat {
	size_t len;
	size_t align;
	unsigned int flags;
	struct ion_handle *handle;
};
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700539/**
540 * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
541 * @handle: a handle
542 * @fd: a file descriptor representing that handle
543 *
544 * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
545 * the handle returned from ion alloc, and the kernel returns the file
546 * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
547 * provides the file descriptor and the kernel returns the handle.
548 */
549struct ion_fd_data {
550 struct ion_handle *handle;
551 int fd;
552};
553
554/**
555 * struct ion_handle_data - a handle passed to/from the kernel
556 * @handle: a handle
557 */
558struct ion_handle_data {
559 struct ion_handle *handle;
560};
561
Rebecca Schultz Zavine6ee1242011-06-30 12:19:55 -0700562/**
563 * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
564 * @cmd: the custom ioctl function to call
565 * @arg: additional data to pass to the custom ioctl, typically a user
566 * pointer to a predefined structure
567 *
568 * This works just like the regular cmd and arg fields of an ioctl.
569 */
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700570struct ion_custom_data {
571 unsigned int cmd;
572 unsigned long arg;
573};
detulea3929042013-05-05 13:56:50 +0200574
575struct ion_flush_data {
576 struct ion_handle *handle;
577 int fd;
578 void *vaddr;
579 unsigned int offset;
580 unsigned int length;
581};
582struct ion_flag_data {
583 struct ion_handle *handle;
584 unsigned long flags;
585};
586
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700587#define ION_IOC_MAGIC 'I'
588
589/**
590 * DOC: ION_IOC_ALLOC - allocate memory
591 *
592 * Takes an ion_allocation_data struct and returns it with the handle field
593 * populated with the opaque handle for the allocation.
594 */
595#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
596 struct ion_allocation_data)
597
detulea3929042013-05-05 13:56:50 +0200598
599#define ION_IOC_ALLOC_COMPAT _IOWR(ION_IOC_MAGIC, 0, \
600 struct ion_allocation_data_compat)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700601/**
602 * DOC: ION_IOC_FREE - free memory
603 *
604 * Takes an ion_handle_data struct and frees the handle.
605 */
606#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
607
608/**
609 * DOC: ION_IOC_MAP - get a file descriptor to mmap
610 *
611 * Takes an ion_fd_data struct with the handle field populated with a valid
612 * opaque handle. Returns the struct with the fd field set to a file
613 * descriptor open in the current address space. This file descriptor
614 * can then be used as an argument to mmap.
615 */
616#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
617
618/**
619 * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
620 *
621 * Takes an ion_fd_data struct with the handle field populated with a valid
622 * opaque handle. Returns the struct with the fd field set to a file
623 * descriptor open in the current address space. This file descriptor
624 * can then be passed to another process. The corresponding opaque handle can
625 * be retrieved via ION_IOC_IMPORT.
626 */
627#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
628
629/**
630 * DOC: ION_IOC_IMPORT - imports a shared file descriptor
631 *
632 * Takes an ion_fd_data struct with the fd field populated with a valid file
633 * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
634 * filed set to the corresponding opaque handle.
635 */
Ajay Dudanif572d262012-08-29 18:02:11 -0700636#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
detulea3929042013-05-05 13:56:50 +0200637#define ION_IOC_IMPORT_COMPAT _IOWR(ION_IOC_MAGIC, 5, int)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700638
639/**
640 * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
641 *
642 * Takes the argument of the architecture specific ioctl to call and
643 * passes appropriate userdata for that ioctl
644 */
645#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
646
detulea3929042013-05-05 13:56:50 +0200647#define ION_IOC_CLEAN_CACHES_COMPAT _IOWR(ION_IOC_MAGIC, 7, \
648 struct ion_flush_data)
649#define ION_IOC_INV_CACHES_COMPAT _IOWR(ION_IOC_MAGIC, 8, \
650 struct ion_flush_data)
651#define ION_IOC_CLEAN_INV_CACHES_COMPAT _IOWR(ION_IOC_MAGIC, 9, \
652 struct ion_flush_data)
653#define ION_IOC_GET_FLAGS_COMPAT _IOWR(ION_IOC_MAGIC, 10, \
654 struct ion_flag_data)
Laura Abbottabcb6f72011-10-04 16:26:49 -0700655
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700656#endif /* _LINUX_ION_H */