/*
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef _LINUX_MSM_ION_H
#define _LINUX_MSM_ION_H

#include <linux/ion.h>

enum msm_ion_heap_types {
        ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
        ION_HEAP_TYPE_IOMMU = ION_HEAP_TYPE_MSM_START,
        ION_HEAP_TYPE_CP,
};

/**
 * These are the only ids that should be used for Ion heaps.
 * When multiple heap ids are specified, allocation is attempted in the
 * order listed below. Do not swap the order of heap ids unless you know
 * what you are doing!
 * Ids are spaced out on purpose so that new ids can be inserted in between
 * (for possible fallbacks).
 */

enum ion_heap_ids {
        INVALID_HEAP_ID = -1,
        ION_CP_MM_HEAP_ID = 8,
        ION_CP_MFC_HEAP_ID = 12,
        ION_CP_WB_HEAP_ID = 16, /* 8660 only */
        ION_CAMERA_HEAP_ID = 20, /* 8660 only */
        ION_ADSP_HEAP_ID = 22,
        ION_SF_HEAP_ID = 24,
        ION_IOMMU_HEAP_ID = 25,
        ION_QSECOM_HEAP_ID = 27,
        ION_AUDIO_HEAP_ID = 28,

        ION_MM_FIRMWARE_HEAP_ID = 29,
        ION_SYSTEM_HEAP_ID = 30,

        ION_HEAP_ID_RESERVED = 31 /* Bit reserved for ION_SECURE flag */
};

enum ion_fixed_position {
        NOT_FIXED,
        FIXED_LOW,
        FIXED_MIDDLE,
        FIXED_HIGH,
};

enum cp_mem_usage {
        VIDEO_BITSTREAM = 0x1,
        VIDEO_PIXEL = 0x2,
        VIDEO_NONPIXEL = 0x3,
        MAX_USAGE = 0x4,
        UNKNOWN = 0x7FFFFFFF,
};

#define ION_HEAP_CP_MASK (1 << ION_HEAP_TYPE_CP)

/**
 * Flag to use when allocating to indicate that a heap is secure.
 */
#define ION_FLAG_SECURE (1 << ION_HEAP_ID_RESERVED)

/**
 * Flag for clients to force contiguous memory allocation.
 *
 * Use of this flag is carefully monitored!
 */
#define ION_FLAG_FORCE_CONTIGUOUS (1 << 30)

/**
 * Deprecated! Please use the corresponding ION_FLAG_*
 */
#define ION_SECURE ION_FLAG_SECURE
#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS

/**
 * This macro should be used with the ion_heap_ids defined above to build
 * a heap mask.
 */
#define ION_HEAP(bit) (1 << (bit))
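
/*
 * Example (illustrative only, not part of this header): composing a heap
 * mask from the heap ids above and allocating with it from kernel space.
 * This is a minimal sketch; msm_ion_client_create() and ion_alloc() are
 * provided by the core Ion code and their exact signatures vary between
 * kernel versions, so treat the calls below as assumptions.
 *
 *      struct ion_client *client = msm_ion_client_create(-1, "example");
 *      unsigned int heap_mask = ION_HEAP(ION_SF_HEAP_ID) |
 *                               ION_HEAP(ION_SYSTEM_HEAP_ID);
 *      struct ion_handle *handle;
 *
 *      handle = ion_alloc(client, SZ_1M, SZ_4K, heap_mask, ION_FLAG_CACHED);
 *      if (IS_ERR_OR_NULL(handle))
 *              pr_err("example: ion_alloc failed\n");
 *
 * Because ION_SF_HEAP_ID is listed before ION_SYSTEM_HEAP_ID, the SF heap
 * is tried first and the system heap acts as a fallback. For content
 * protection heaps, ION_FLAG_SECURE and ION_FLAG_FORCE_CONTIGUOUS can be
 * OR'd into the flags argument in the same way.
 */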

#define ION_ADSP_HEAP_NAME "adsp"
#define ION_VMALLOC_HEAP_NAME "vmalloc"
#define ION_AUDIO_HEAP_NAME "audio"
#define ION_SF_HEAP_NAME "sf"
#define ION_MM_HEAP_NAME "mm"
#define ION_CAMERA_HEAP_NAME "camera_preview"
#define ION_IOMMU_HEAP_NAME "iommu"
#define ION_MFC_HEAP_NAME "mfc"
#define ION_WB_HEAP_NAME "wb"
#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw"
#define ION_QSECOM_HEAP_NAME "qsecom"
#define ION_FMEM_HEAP_NAME "fmem"

#define ION_SET_CACHED(__cache) (__cache | ION_FLAG_CACHED)
#define ION_SET_UNCACHED(__cache) (__cache & ~ION_FLAG_CACHED)

#define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED)
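
/*
 * Example (illustrative only): the helpers above simply set, clear and test
 * ION_FLAG_CACHED (from <linux/ion.h>) in an allocation flags word:
 *
 *      unsigned int flags = 0;
 *
 *      flags = ION_SET_CACHED(flags);          // request a cached mapping
 *      if (ION_IS_CACHED(flags))
 *              flags = ION_SET_UNCACHED(flags); // and drop it again
 */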

#ifdef __KERNEL__

/*
 * This flag allows clients, when mapping into the IOMMU, to specify that
 * un-mapping from the IOMMU should be deferred until the buffer memory
 * is freed.
 */
#define ION_IOMMU_UNMAP_DELAYED 1
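
/*
 * Example (illustrative only): ION_IOMMU_UNMAP_DELAYED is passed in the
 * iommu_flags argument of ion_map_iommu() so the IOMMU mapping is torn
 * down only when the buffer itself is freed. ion_map_iommu() lives in the
 * MSM Ion core; its signature and the VIDEO_DOMAIN/VIDEO_MAIN_POOL
 * constants below are assumptions for this kernel generation and may
 * differ on other versions.
 *
 *      unsigned long iova, buffer_size;
 *      int ret;
 *
 *      ret = ion_map_iommu(client, handle, VIDEO_DOMAIN, VIDEO_MAIN_POOL,
 *                          SZ_4K, 0, &iova, &buffer_size, 0,
 *                          ION_IOMMU_UNMAP_DELAYED);
 */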

/**
 * struct ion_cp_heap_pdata - defines a content protection heap in the given
 * platform
 * @permission_type: Memory ID used to identify the memory to TZ
 * @align: Alignment requirement for the memory
 * @secure_base: Base address for securing the heap.
 *               Note: This might be different from the actual base address
 *               of this heap in the case of a shared heap.
 * @secure_size: Memory size for securing the heap.
 *               Note: This might be different from the actual size
 *               of this heap in the case of a shared heap.
 * @reusable: Flag indicating whether this heap is reusable or not.
 *            (see FMEM)
 * @mem_is_fmem: Flag indicating whether this memory is coming from fmem
 *               or not.
 * @is_cma: Flag indicating whether this heap is backed by CMA.
 * @fixed_position: If nonzero, position in the fixed area.
 * @virt_addr: Virtual address used when using fmem.
 * @iommu_map_all: Indicates whether we should map the whole heap into the
 *                 IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @request_region: function to be called when the number of allocations
 *                  goes from 0 -> 1
 * @release_region: function to be called when the number of allocations
 *                  goes from 1 -> 0
 * @setup_region: function to be called upon ion registration
 * @memory_type: Memory type used for the heap
 * @no_nonsecure_alloc: don't allow non-secure allocations from this heap
 *
 */
struct ion_cp_heap_pdata {
        enum ion_permission_type permission_type;
        unsigned int align;
        ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
        size_t secure_size; /* Size used for securing heap when heap is shared */
        int reusable;
        int mem_is_fmem;
        int is_cma;
        enum ion_fixed_position fixed_position;
        int iommu_map_all;
        int iommu_2x_map_domain;
        ion_virt_addr_t *virt_addr;
        int (*request_region)(void *);
        int (*release_region)(void *);
        void *(*setup_region)(void);
        enum ion_memory_types memory_type;
        int no_nonsecure_alloc;
};
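
/*
 * Example (illustrative only): board files typically instantiate this
 * structure and attach it to an Ion heap through the platform heap's
 * extra_data pointer. IPT_TYPE_MM_CARVEOUT and ION_EBI_TYPE come from the
 * core Ion headers and are assumptions here; a minimal sketch:
 *
 *      static struct ion_cp_heap_pdata cp_mm_example_pdata = {
 *              .permission_type = IPT_TYPE_MM_CARVEOUT,
 *              .align = SZ_64K,
 *              .reusable = 0,
 *              .mem_is_fmem = 0,
 *              .is_cma = 1,
 *              .fixed_position = FIXED_MIDDLE,
 *              .memory_type = ION_EBI_TYPE,
 *              .no_nonsecure_alloc = 1,
 *      };
 */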

/**
 * struct ion_co_heap_pdata - defines a carveout heap in the given platform
 * @adjacent_mem_id: Id of heap that this heap must be adjacent to.
 * @align: Alignment requirement for the memory
 * @mem_is_fmem: Flag indicating whether this memory is coming from fmem
 *               or not.
 * @fixed_position: If nonzero, position in the fixed area.
 * @request_region: function to be called when the number of allocations
 *                  goes from 0 -> 1
 * @release_region: function to be called when the number of allocations
 *                  goes from 1 -> 0
 * @setup_region: function to be called upon ion registration
 * @memory_type: Memory type used for the heap
 *
 */
struct ion_co_heap_pdata {
        int adjacent_mem_id;
        unsigned int align;
        int mem_is_fmem;
        enum ion_fixed_position fixed_position;
        int (*request_region)(void *);
        int (*release_region)(void *);
        void *(*setup_region)(void);
        enum ion_memory_types memory_type;
};
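
/*
 * Example (illustrative only): a carveout heap description for a board
 * file, again normally hooked up via the platform heap's extra_data
 * pointer. ION_EBI_TYPE is assumed from the core Ion headers.
 *
 *      static struct ion_co_heap_pdata co_audio_example_pdata = {
 *              .adjacent_mem_id = INVALID_HEAP_ID,
 *              .align = PAGE_SIZE,
 *              .mem_is_fmem = 0,
 *              .fixed_position = NOT_FIXED,
 *              .memory_type = ION_EBI_TYPE,
 *      };
 */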

#ifdef CONFIG_ION
/**
 * msm_ion_secure_heap - secure a heap. Wrapper around ion_secure_heap.
 *
 * @heap_id - heap id to secure.
 *
 * Secure a heap
 * Returns 0 on success
 */
int msm_ion_secure_heap(int heap_id);

/**
 * msm_ion_unsecure_heap - unsecure a heap. Wrapper around ion_unsecure_heap.
 *
 * @heap_id - heap id to unsecure.
 *
 * Un-secure a heap
 * Returns 0 on success
 */
int msm_ion_unsecure_heap(int heap_id);

/**
 * msm_ion_secure_heap_2_0 - secure a heap using 2.0 APIs
 * Wrapper around ion_secure_heap.
 *
 * @heap_id - heap id to secure.
 * @usage - usage hint to TZ
 *
 * Secure a heap
 * Returns 0 on success
 */
int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage);

/**
 * msm_ion_unsecure_heap_2_0 - unsecure a heap secured with 2.0 APIs.
 * Wrapper around ion_unsecure_heap.
 *
 * @heap_id - heap id to unsecure.
 * @usage - usage hint to TZ
 *
 * Un-secure a heap
 * Returns 0 on success
 */
int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage);
#else
static inline int msm_ion_secure_heap(int heap_id)
{
        return -ENODEV;
}

static inline int msm_ion_unsecure_heap(int heap_id)
{
        return -ENODEV;
}

static inline int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage)
{
        return -ENODEV;
}

static inline int msm_ion_unsecure_heap_2_0(int heap_id,
                        enum cp_mem_usage usage)
{
        return -ENODEV;
}
#endif /* CONFIG_ION */
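
/*
 * Example (illustrative only): a kernel driver that wants a content
 * protection heap locked down before handing buffers to the hardware can
 * bracket its session as sketched below; error handling and the actual
 * buffer allocation are elided. Only identifiers declared in this header
 * are used.
 *
 *      int ret = msm_ion_secure_heap_2_0(ION_CP_MM_HEAP_ID, VIDEO_PIXEL);
 *      if (ret)
 *              return ret;
 *
 *      // ... allocate and use ION_FLAG_SECURE buffers from the heap ...
 *
 *      msm_ion_unsecure_heap_2_0(ION_CP_MM_HEAP_ID, VIDEO_PIXEL);
 */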

#endif /* __KERNEL__ */

/* struct ion_flush_data - data passed to ion for flushing caches
 *
 * @handle: handle with data to flush
 * @fd: fd to flush
 * @vaddr: userspace virtual address mapped with mmap
 * @offset: offset into the handle to flush
 * @length: length of handle to flush
 *
 * Performs cache operations on the handle. If p is the start address
 * of the handle, p + offset through p + offset + length will have
 * the cache operations performed
 */
struct ion_flush_data {
        struct ion_handle *handle;
        int fd;
        void *vaddr;
        unsigned int offset;
        unsigned int length;
};

/* struct ion_flag_data - information about flags for this buffer
 *
 * @handle: handle to get flags from
 * @flags: flags of this handle
 *
 * Takes the handle as an input and returns that handle's flags in the
 * @flags field.
 */
struct ion_flag_data {
        struct ion_handle *handle;
        unsigned long flags;
};

#define ION_IOC_MSM_MAGIC 'M'

/**
 * DOC: ION_IOC_CLEAN_CACHES - clean the caches
 *
 * Clean the caches of the handle specified.
 */
#define ION_IOC_CLEAN_CACHES    _IOWR(ION_IOC_MSM_MAGIC, 0, \
                                        struct ion_flush_data)
/**
 * DOC: ION_IOC_INV_CACHES - invalidate the caches
 *
 * Invalidate the caches of the handle specified.
 */
#define ION_IOC_INV_CACHES      _IOWR(ION_IOC_MSM_MAGIC, 1, \
                                        struct ion_flush_data)
/**
 * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
 *
 * Clean and invalidate the caches of the handle specified.
 */
#define ION_IOC_CLEAN_INV_CACHES        _IOWR(ION_IOC_MSM_MAGIC, 2, \
                                        struct ion_flush_data)
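
/*
 * Example (illustrative only): a userspace client that already holds an
 * Ion handle, the shared buffer fd and an mmap'd address can clean the CPU
 * caches for a sub-range of the buffer before handing it to a device. This
 * is a minimal sketch; ion_fd is assumed to be an open /dev/ion client and
 * handle/buf_fd/vaddr/buf_len to come from earlier alloc/share/mmap steps.
 *
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *
 *      struct ion_flush_data flush;
 *
 *      memset(&flush, 0, sizeof(flush));
 *      flush.handle = handle;
 *      flush.fd = buf_fd;
 *      flush.vaddr = vaddr;
 *      flush.offset = 0;
 *      flush.length = buf_len;
 *
 *      if (ioctl(ion_fd, ION_IOC_CLEAN_CACHES, &flush) < 0)
 *              perror("ION_IOC_CLEAN_CACHES");
 */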

/**
 * DOC: ION_IOC_GET_FLAGS - get the flags of the handle
 *
 * Gets the flags of the current handle, which indicate cacheability,
 * secure state, etc.
 */
#define ION_IOC_GET_FLAGS       _IOWR(ION_IOC_MSM_MAGIC, 3, \
                                        struct ion_flag_data)
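
/*
 * Example (illustrative only): querying a buffer's flags from userspace to
 * decide whether cache maintenance is needed. Assumes ion_fd is an open
 * /dev/ion client and handle came from a prior ION_IOC_ALLOC.
 *
 *      struct ion_flag_data flag_data;
 *
 *      memset(&flag_data, 0, sizeof(flag_data));
 *      flag_data.handle = handle;
 *
 *      if (ioctl(ion_fd, ION_IOC_GET_FLAGS, &flag_data) == 0 &&
 *          ION_IS_CACHED(flag_data.flags))
 *              ; // buffer is cached: flush before device access
 */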
#endif /* _LINUX_MSM_ION_H */