/*
 * drivers/gpu/ion/ion_priv.h
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#ifndef _ION_PRIV_H
#define _ION_PRIV_H

#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/ion.h>
#include <linux/iommu.h>

struct ion_mapping;

struct ion_dma_mapping {
	struct kref ref;
	struct scatterlist *sglist;
};

struct ion_kernel_mapping {
	struct kref ref;
	void *vaddr;
};

/**
 * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
 * @iova_addr: iommu virtual address
 * @node: rb node to exist in the buffer's tree of iommu mappings
 * @domain_info: contains the partition number and domain number
 *		domain_info[1] = domain number
 *		domain_info[0] = partition number
 * @buffer: back pointer to the ion buffer this mapping belongs to
 * @ref: for reference counting this mapping
 * @mapped_size: size of the iova space mapped
 *		(may not be the same as the buffer size)
 *
 * Represents a mapping of one ion buffer to a particular iommu domain
 * and address range. There may exist other mappings of this buffer in
 * different domains or address ranges. All mappings will have the same
 * cacheability and security.
 */
struct ion_iommu_map {
	unsigned long iova_addr;
	struct rb_node node;
	union {
		int domain_info[2];
		uint64_t key;
	};
	struct ion_buffer *buffer;
	struct kref ref;
	int mapped_size;
};
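
/*
 * Illustrative sketch (not part of the driver API; domain_num and
 * partition_num are hypothetical locals): because domain_info and key
 * share a union, a single 64-bit rb tree key can be built by filling
 * in the two ints and reading key back, e.g.
 *
 *	struct ion_iommu_map m;
 *
 *	m.domain_info[1] = domain_num;
 *	m.domain_info[0] = partition_num;
 *
 * after which m.key uniquely identifies the (domain, partition) pair
 * when searching a buffer's tree of iommu mappings.
 */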

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);

/**
 * struct ion_buffer - metadata for a particular buffer
 * @ref: reference count
 * @node: node in the ion_device buffers tree
 * @dev: back pointer to the ion_device
 * @heap: back pointer to the heap the buffer came from
 * @flags: buffer specific flags
 * @size: size of the buffer
 * @priv_virt: private data to the buffer representable as
 *	       a void *
 * @priv_phys: private data to the buffer representable as
 *	       an ion_phys_addr_t (and someday a phys_addr_t)
 * @lock: protects the buffer's cnt fields
 * @kmap_cnt: number of times the buffer is mapped to the kernel
 * @vaddr: the kernel mapping if kmap_cnt is not zero
 * @dmap_cnt: number of times the buffer is mapped for dma
 * @sglist: the scatterlist for the buffer if dmap_cnt is not zero
 * @umap_cnt: number of times the buffer is mapped to userspace
 * @iommu_map_cnt: number of times the buffer is mapped to an iommu
 * @iommu_maps: rb tree of the buffer's iommu mappings, keyed by
 *		domain and partition (see struct ion_iommu_map above)
 * @marked: debug aid used when checking for leaked buffers
 */
struct ion_buffer {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
	size_t size;
	union {
		void *priv_virt;
		ion_phys_addr_t priv_phys;
	};
	struct mutex lock;
	int kmap_cnt;
	void *vaddr;
	int dmap_cnt;
	struct scatterlist *sglist;
	int umap_cnt;
	unsigned int iommu_map_cnt;
	struct rb_root iommu_maps;
	int marked;
};
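
/*
 * Lifetime sketch (illustrative; the release callback name below is
 * assumed, the real one lives in ion.c): buffer lifetime is managed
 * through the embedded kref, so holders take and drop references with
 *
 *	kref_get(&buffer->ref);
 *	...
 *	kref_put(&buffer->ref, ion_buffer_release);
 *
 * while the *_cnt fields (kmap_cnt, dmap_cnt, umap_cnt) must only be
 * modified with buffer->lock held.
 */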

/**
 * struct ion_heap_ops - ops to operate on a given heap
 * @allocate: allocate memory
 * @free: free memory
 * @phys: get physical address of a buffer (only defined on
 *	  physically contiguous heaps)
 * @map_dma: map the memory for dma to a scatterlist
 * @unmap_dma: unmap the memory for dma
 * @map_kernel: map memory to the kernel
 * @unmap_kernel: unmap memory from the kernel
 * @map_user: map memory to userspace
 * @unmap_user: unmap memory from userspace
 * @cache_op: perform a cache maintenance operation on a region
 *	      of the buffer
 * @get_allocated: return the number of bytes currently allocated
 *		   from the heap
 * @get_total: return the total size of the heap
 * @map_iommu: map the buffer into an iommu domain and partition
 * @unmap_iommu: undo a previous map_iommu
 */
struct ion_heap_ops {
	int (*allocate) (struct ion_heap *heap,
			 struct ion_buffer *buffer, unsigned long len,
			 unsigned long align, unsigned long flags);
	void (*free) (struct ion_buffer *buffer);
	int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer,
		     ion_phys_addr_t *addr, size_t *len);
	struct scatterlist *(*map_dma) (struct ion_heap *heap,
					struct ion_buffer *buffer);
	void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer);
	void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer,
			      unsigned long flags);
	void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
	int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
			 struct vm_area_struct *vma, unsigned long flags);
	void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer);
	int (*cache_op)(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset,
			unsigned int length, unsigned int cmd);
	unsigned long (*get_allocated)(struct ion_heap *heap);
	unsigned long (*get_total)(struct ion_heap *heap);
	int (*map_iommu)(struct ion_buffer *buffer,
			 struct ion_iommu_map *map_data,
			 unsigned int domain_num,
			 unsigned int partition_num,
			 unsigned long align,
			 unsigned long iova_length,
			 unsigned long flags);
	void (*unmap_iommu)(struct ion_iommu_map *data);
};
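
/*
 * Example ops table (minimal sketch; the my_heap_* callbacks are
 * hypothetical): a heap implementation fills in the operations it
 * supports and leaves the rest NULL, e.g.
 *
 *	static struct ion_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *		.free = my_heap_free,
 *		.map_dma = my_heap_map_dma,
 *		.unmap_dma = my_heap_unmap_dma,
 *		.map_kernel = my_heap_map_kernel,
 *		.unmap_kernel = my_heap_unmap_kernel,
 *		.map_user = my_heap_map_user,
 *	};
 */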

/**
 * struct ion_heap - represents a heap in the system
 * @node: rb node to put the heap on the device's tree of heaps
 * @dev: back pointer to the ion_device
 * @type: type of heap
 * @ops: ops struct as above
 * @id: id of heap, also indicates priority of this heap when
 *	allocating. These are specified by platform data and
 *	MUST be unique
 * @name: used for debugging
 *
 * Represents a pool of memory from which buffers can be made. In some
 * systems the only heap is regular system memory allocated via vmalloc.
 * On others, some blocks might require large physically contiguous buffers
 * that are allocated from a specially reserved heap.
 */
struct ion_heap {
	struct rb_node node;
	struct ion_device *dev;
	enum ion_heap_type type;
	struct ion_heap_ops *ops;
	int id;
	const char *name;
};

#define iommu_map_domain(__m)		((__m)->domain_info[1])
#define iommu_map_partition(__m)	((__m)->domain_info[0])
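
/*
 * Usage sketch (illustrative): in paths where only the ion_iommu_map
 * is available, such as a heap's unmap_iommu() callback, the macros
 * above recover the target domain and partition:
 *
 *	int domain_num = iommu_map_domain(data);
 *	int partition_num = iommu_map_partition(data);
 */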

/**
 * ion_device_create - allocates and returns an ion device
 * @custom_ioctl: arch specific ioctl function if applicable
 *
 * returns a valid device or an ERR_PTR() encoded error on failure
 */
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg));

/**
 * ion_device_destroy - frees a device and its resources
 * @dev: the device
 */
void ion_device_destroy(struct ion_device *dev);

/**
 * ion_device_add_heap - adds a heap to the ion device
 * @dev: the device
 * @heap: the heap to add
 */
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
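
/*
 * Setup sketch (illustrative; error handling elided, and idev, heaps
 * and nr_heaps are hypothetical platform data): a typical probe path
 * creates the device once, here with no custom ioctl handler, and
 * then registers each heap with it:
 *
 *	idev = ion_device_create(NULL);
 *	for (i = 0; i < nr_heaps; i++)
 *		ion_device_add_heap(idev, heaps[i]);
 */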

/**
 * functions for creating and destroying the built-in ion heaps.
 * architectures can add their own custom architecture specific
 * heaps as appropriate.
 */

struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);

struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);

struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
void ion_iommu_heap_destroy(struct ion_heap *);

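/*
 * Dispatch sketch (illustrative): ion_heap_create() selects one of the
 * per-type constructors above based on the platform heap's type field,
 * so board code normally only needs something like
 *
 *	struct ion_heap *heap = ion_heap_create(heap_data);
 *	if (IS_ERR_OR_NULL(heap))
 *		return PTR_ERR(heap);
 */
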
/**
 * kernel api to allocate/free from carveout -- used when carveout is
 * used to back an architecture specific custom heap
 */
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
				      unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size);
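
/*
 * Allocation sketch (illustrative): because the carveout heap hands
 * back physical addresses, failure is signalled with
 * ION_CARVEOUT_ALLOCATE_FAIL (defined below) rather than 0 or NULL:
 *
 *	ion_phys_addr_t paddr = ion_carveout_allocate(heap, size, align);
 *
 *	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
 *		return -ENOMEM;
 *	...
 *	ion_carveout_free(heap, paddr, size);
 */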

struct ion_heap *msm_get_contiguous_heap(void);

/**
 * The carveout heap returns physical addresses; since 0 may be a valid
 * physical address, this value is used to indicate that allocation
 * failed.
 */
#define ION_CARVEOUT_ALLOCATE_FAIL -1

#endif /* _ION_PRIV_H */