/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:	the actual misc device
 * @buffers:	an rb tree of all the existing buffers
 * @lock:	lock protecting the buffers & heaps trees
 * @heaps:	list of all the heaps in the system
 * @clients:	an rb tree of all the clients, including those created
 *		from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:	node in the tree of all clients
 * @dev:	backpointer to ion device
 * @handles:	an rb tree of all the handles in this client
 * @lock:	lock protecting the tree of handles
 * @heap_mask:	mask of all supported heaps
 * @name:	used for debugging
 * @task:	used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped through an iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

static void ion_iommu_release(struct kref *kref);

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d and partition %d\n",
				__func__, buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);
}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
					      unsigned int domain_no,
					      unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;

	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}
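
/*
 * Illustrative sketch (not part of the driver): the iommu_maps rbtree above
 * is keyed by packing the domain and partition numbers into one 64-bit
 * value, domain in the high 32 bits and partition in the low 32 bits.
 * Assuming both values fit in 32 bits, the key round-trips like this:
 *
 *	uint64_t key = ((uint64_t)domain_no << 32) | partition_no;
 *	unsigned int domain    = key >> 32;
 *	unsigned int partition = key & 0xffffffff;
 */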

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation &&
		    (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left =
				MAX_DBG_STR_LEN - dbg_str_idx - 1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer)) {
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: 0x%x) from heap(s) %sfor client %s with heap mask 0x%x\n",
			 len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
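
/*
 * Illustrative usage sketch (not part of the driver): a kernel client that
 * already holds a struct ion_client allocating and freeing one page.
 * ION_HEAP() and ION_SYSTEM_HEAP_ID are assumed from the msm_ion headers
 * of this era; check the headers for the heap IDs available on a target.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, SZ_4K, SZ_4K,
 *			   ION_HEAP(ION_SYSTEM_HEAP_ID), 0);
 *	if (IS_ERR_OR_NULL(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */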

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size %x\n",
			 __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
			 buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
			 iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						 ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
			domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_iommu);
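
/*
 * Illustrative usage sketch (not part of the driver): mapping a buffer into
 * an SMMU domain and unmapping it again. domain_num and partition_num are
 * assumptions for the sake of the example; real clients obtain them from
 * the msm iommu_domains API. Passing ION_IOMMU_UNMAP_DELAYED as iommu_flags
 * instead of 0 would defer the actual unmap until the buffer is freed.
 *
 *	unsigned long iova, size;
 *	int ret;
 *
 *	ret = ion_map_iommu(client, handle, domain_num, partition_num,
 *			    SZ_4K, 0, &iova, &size, 0, 0);
 *	if (!ret) {
 *		... device can now DMA to [iova, iova + size) ...
 *		ion_unmap_iommu(client, handle, domain_num, partition_num);
 *	}
 */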

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
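
/*
 * Illustrative usage sketch (not part of the driver): a kernel client that
 * needs a CPU mapping of a previously allocated handle. buffer_len stands
 * in for the length used at allocation time.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (!IS_ERR_OR_NULL(vaddr)) {
 *		memset(vaddr, 0, buffer_len);
 *		ion_unmap_kernel(client, handle);
 *	}
 */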

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_do_cache_op);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == (enum ion_heap_type) ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
					imap->domain_info[DI_DOMAIN_NUM],
					imap->domain_info[DI_PARTITION_NUM],
					imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
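
/*
 * Illustrative usage sketch (not part of the driver): a kernel driver
 * typically creates one client per logical user against the global ion
 * device and destroys it when it is done. "my_ion_device" is a stand-in
 * for the struct ion_device pointer exposed by the platform's ion setup.
 *
 *	struct ion_client *client;
 *
 *	client = ion_client_create(my_ion_device, -1, "my-driver");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *	...
 *	ion_client_destroy(client);
 */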

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt++;
	mutex_unlock(&buffer->lock);
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
			__func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);

	if (ret) {
		mutex_unlock(&buffer->lock);
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
	} else {
		buffer->umap_cnt++;
		mutex_unlock(&buffer->lock);

		vma->vm_ops = &ion_vm_ops;
		/*
		 * move the buffer into the vm_private_data so we can access it
		 * from vma_open/close
		 */
		vma->vm_private_data = buffer;
	}
	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

static int ion_share_set_flags(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags)
{
	struct ion_buffer *buffer;
	bool valid_handle;
	unsigned long ion_flags = 0;

	if (flags & O_DSYNC)
		ion_flags = ION_SET_UNCACHED(ion_flags);
	else
		ion_flags = ION_SET_CACHED(ion_flags);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;

	return 0;
}

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
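
/*
 * Illustrative usage sketch (not part of the driver): exporting a handle as
 * a dma-buf fd from one client and importing it into another, so that both
 * handles reference the same underlying ion_buffer. Passing the fd between
 * processes (e.g. over binder or a unix socket) is outside this sketch.
 *
 *	int fd = ion_share_dma_buf(client_a, handle_a);
 *
 *	if (fd >= 0) {
 *		struct ion_handle *handle_b =
 *			ion_import_dma_buf(client_b, fd);
 *		...
 *	}
 */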

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_ALLOC_COMPAT:
	{
		struct ion_allocation_data_compat data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     ion_full_heap_mask, 0);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		int ret;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
		if (ret)
			return ret;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	case ION_IOC_IMPORT_COMPAT:
	{
		struct ion_fd_data data;
		int ret = 0;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_CLEAN_CACHES_COMPAT:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_CACHES, arg);
	case ION_IOC_INV_CACHES:
	case ION_IOC_INV_CACHES_COMPAT:
		return client->dev->custom_ioctl(client,
						ION_IOC_INV_CACHES, arg);
	case ION_IOC_CLEAN_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES_COMPAT:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_INV_CACHES, arg);
	case ION_IOC_GET_FLAGS:
	case ION_IOC_GET_FLAGS_COMPAT:
		return client->dev->custom_ioctl(client,
						ION_IOC_GET_FLAGS, arg);
	default:
		return -ENOTTY;
	}
	return 0;
}
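
/*
 * Illustrative userspace sketch (not part of the driver): the typical
 * allocate/share/mmap sequence driven through the ioctls handled above.
 * Field and constant names are assumed to follow the ion/msm_ion UAPI
 * headers that match this kernel; error handling is omitted for brevity.
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096,
 *		.heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID), .flags = 0,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 share.fd, 0);
 *	...
 *	munmap(buf, 4096);
 *	close(share.fd);
 *	free_data.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
 *	close(ion_fd);
 */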
1431
1432static int ion_release(struct inode *inode, struct file *file)
1433{
1434 struct ion_client *client = file->private_data;
1435
1436 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001437 ion_client_destroy(client);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001438 return 0;
1439}
1440
1441static int ion_open(struct inode *inode, struct file *file)
1442{
1443 struct miscdevice *miscdev = file->private_data;
1444 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1445 struct ion_client *client;
Laura Abbotteed86032011-12-05 15:32:36 -08001446 char debug_name[64];
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001447
1448 pr_debug("%s: %d\n", __func__, __LINE__);
Laura Abbotteed86032011-12-05 15:32:36 -08001449 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1450 client = ion_client_create(dev, -1, debug_name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001451 if (IS_ERR_OR_NULL(client))
1452 return PTR_ERR(client);
1453 file->private_data = client;
1454
1455 return 0;
1456}
1457
1458static const struct file_operations ion_fops = {
1459 .owner = THIS_MODULE,
1460 .open = ion_open,
1461 .release = ion_release,
1462 .unlocked_ioctl = ion_ioctl,
1463};
1464
1465static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001466 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001467{
1468 size_t size = 0;
1469 struct rb_node *n;
1470
1471 mutex_lock(&client->lock);
1472 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1473 struct ion_handle *handle = rb_entry(n,
1474 struct ion_handle,
1475 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001476 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001477 size += handle->buffer->size;
1478 }
1479 mutex_unlock(&client->lock);
1480 return size;
1481}
1482
Olav Haugan0671b9a2012-05-25 11:58:56 -07001483/**
 1484 * Searches through a client's handles to determine whether the buffer
 1485 * is owned by this client. Used for debug output.
1486 * @param client pointer to candidate owner of buffer
1487 * @param buf pointer to buffer that we are trying to find the owner of
1488 * @return 1 if found, 0 otherwise
1489 */
1490static int ion_debug_find_buffer_owner(const struct ion_client *client,
1491 const struct ion_buffer *buf)
1492{
1493 struct rb_node *n;
1494
1495 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1496 const struct ion_handle *handle = rb_entry(n,
1497 const struct ion_handle,
1498 node);
1499 if (handle->buffer == buf)
1500 return 1;
1501 }
1502 return 0;
1503}
1504
1505/**
 1506 * Adds a mem_map_data entry to the mem_map tree.
1507 * Used for debug output.
1508 * @param mem_map The mem_map tree
1509 * @param data The new data to add to the tree
1510 */
1511static void ion_debug_mem_map_add(struct rb_root *mem_map,
1512 struct mem_map_data *data)
1513{
1514 struct rb_node **p = &mem_map->rb_node;
1515 struct rb_node *parent = NULL;
1516 struct mem_map_data *entry;
1517
1518 while (*p) {
1519 parent = *p;
1520 entry = rb_entry(parent, struct mem_map_data, node);
1521
1522 if (data->addr < entry->addr) {
1523 p = &(*p)->rb_left;
1524 } else if (data->addr > entry->addr) {
1525 p = &(*p)->rb_right;
1526 } else {
 1527			pr_err("%s: mem_map_data already found.\n", __func__);
1528 BUG();
1529 }
1530 }
1531 rb_link_node(&data->node, parent, p);
1532 rb_insert_color(&data->node, mem_map);
1533}
1534
1535/**
1536 * Search for an owner of a buffer by iterating over all ION clients.
1537 * @param dev ion device containing pointers to all the clients.
1538 * @param buffer pointer to buffer we are trying to find the owner of.
 1539 * @return name of the owning client, or NULL if no owner is found.
1540 */
1541const char *ion_debug_locate_owner(const struct ion_device *dev,
1542 const struct ion_buffer *buffer)
1543{
1544 struct rb_node *j;
1545 const char *client_name = NULL;
1546
Laura Abbottb14ed962012-01-30 14:18:08 -08001547 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001548 j = rb_next(j)) {
1549 struct ion_client *client = rb_entry(j, struct ion_client,
1550 node);
1551 if (ion_debug_find_buffer_owner(client, buffer))
1552 client_name = client->name;
1553 }
1554 return client_name;
1555}
1556
1557/**
1558 * Create a mem_map of the heap.
 1559 * @param s seq_file to log error messages to.
1560 * @param heap The heap to create mem_map for.
1561 * @param mem_map The mem map to be created.
1562 */
1563void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1564 struct rb_root *mem_map)
1565{
1566 struct ion_device *dev = heap->dev;
1567 struct rb_node *n;
1568
1569 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1570 struct ion_buffer *buffer =
1571 rb_entry(n, struct ion_buffer, node);
1572 if (buffer->heap->id == heap->id) {
1573 struct mem_map_data *data =
1574 kzalloc(sizeof(*data), GFP_KERNEL);
1575 if (!data) {
 1576				seq_printf(s,
 1577				"ERROR: out of memory. Part of memory map will not be logged\n");
1578 break;
1579 }
1580 data->addr = buffer->priv_phys;
1581 data->addr_end = buffer->priv_phys + buffer->size-1;
1582 data->size = buffer->size;
1583 data->client_name = ion_debug_locate_owner(dev, buffer);
1584 ion_debug_mem_map_add(mem_map, data);
1585 }
1586 }
1587}
1588
1589/**
1590 * Free the memory allocated by ion_debug_mem_map_create
1591 * @param mem_map The mem map to free.
1592 */
1593static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1594{
1595 if (mem_map) {
1596 struct rb_node *n;
 1597		while ((n = rb_first(mem_map)) != NULL) {
1598 struct mem_map_data *data =
1599 rb_entry(n, struct mem_map_data, node);
1600 rb_erase(&data->node, mem_map);
1601 kfree(data);
1602 }
1603 }
1604}
1605
1606/**
1607 * Print heap debug information.
1608 * @param s seq_file to log message to.
1609 * @param heap pointer to heap that we will print debug information for.
1610 */
1611static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1612{
1613 if (heap->ops->print_debug) {
1614 struct rb_root mem_map = RB_ROOT;
1615 ion_debug_mem_map_create(s, heap, &mem_map);
1616 heap->ops->print_debug(heap, s, &mem_map);
1617 ion_debug_mem_map_destroy(&mem_map);
1618 }
1619}
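
/*
 * Hedged sketch of what a heap-side print_debug callback might look like
 * when consuming the mem_map built above. "example_heap_print_debug" is
 * hypothetical and is not wired into any ion_heap_ops here; the exact op
 * signature and the mem_map_data field types should be checked against
 * ion_priv.h before reuse.
 */
#if 0
static int example_heap_print_debug(struct ion_heap *heap, struct seq_file *s,
				    struct rb_root *mem_map)
{
	struct rb_node *n;

	seq_printf(s, "%16s %14s %14s %14s\n",
		   "client", "start", "end", "size");
	for (n = rb_first(mem_map); n; n = rb_next(n)) {
		struct mem_map_data *data =
				rb_entry(n, struct mem_map_data, node);
		const char *client = data->client_name ? : "(unknown)";

		seq_printf(s, "%16s %14lx %14lx %14lu\n", client,
			   (unsigned long)data->addr,
			   (unsigned long)data->addr_end,
			   (unsigned long)data->size);
	}
	return 0;
}
#endif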
1620
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001621static int ion_debug_heap_show(struct seq_file *s, void *unused)
1622{
1623 struct ion_heap *heap = s->private;
1624 struct ion_device *dev = heap->dev;
1625 struct rb_node *n;
1626
Olav Haugane4900b52012-05-25 11:58:03 -07001627 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001628	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001629
Laura Abbottb14ed962012-01-30 14:18:08 -08001630 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001631 struct ion_client *client = rb_entry(n, struct ion_client,
1632 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001633 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001634 if (!size)
1635 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001636 if (client->task) {
1637 char task_comm[TASK_COMM_LEN];
1638
1639 get_task_comm(task_comm, client->task);
 1640			seq_printf(s, "%16s %16u %16zu\n", task_comm,
 1641				   client->pid, size);
 1642		} else {
 1643			seq_printf(s, "%16s %16u %16zu\n", client->name,
 1644				   client->pid, size);
1645 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001646 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001647 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001648 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001649 return 0;
1650}
1651
1652static int ion_debug_heap_open(struct inode *inode, struct file *file)
1653{
1654 return single_open(file, ion_debug_heap_show, inode->i_private);
1655}
1656
1657static const struct file_operations debug_heap_fops = {
1658 .open = ion_debug_heap_open,
1659 .read = seq_read,
1660 .llseek = seq_lseek,
1661 .release = single_release,
1662};
1663
1664void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1665{
1666 struct rb_node **p = &dev->heaps.rb_node;
1667 struct rb_node *parent = NULL;
1668 struct ion_heap *entry;
1669
Laura Abbottb14ed962012-01-30 14:18:08 -08001670 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1671 !heap->ops->unmap_dma)
 1672		pr_err("%s: cannot add heap with invalid ops struct.\n",
1673 __func__);
1674
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001675 heap->dev = dev;
1676 mutex_lock(&dev->lock);
1677 while (*p) {
1678 parent = *p;
1679 entry = rb_entry(parent, struct ion_heap, node);
1680
1681 if (heap->id < entry->id) {
1682 p = &(*p)->rb_left;
 1683		} else if (heap->id > entry->id) {
1684 p = &(*p)->rb_right;
1685 } else {
 1686			pr_err("%s: cannot insert multiple heaps with id %d\n",
 1687			       __func__, heap->id);
1688 goto end;
1689 }
1690 }
1691
1692 rb_link_node(&heap->node, parent, p);
1693 rb_insert_color(&heap->node, &dev->heaps);
1694 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1695 &debug_heap_fops);
1696end:
1697 mutex_unlock(&dev->lock);
1698}
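
/*
 * Hedged sketch: how a platform driver might walk its ion_platform_data and
 * register each heap with the core. It assumes the ion_heap_create() factory
 * from ion_priv.h is available; "example_register_heaps" itself is
 * hypothetical.
 */
#if 0
static int example_register_heaps(struct ion_device *idev,
				  struct ion_platform_data *pdata)
{
	int i;

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (IS_ERR_OR_NULL(heap))
			return heap ? PTR_ERR(heap) : -EINVAL;
		ion_device_add_heap(idev, heap);
	}
	return 0;
}
#endif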
1699
Laura Abbott7e446482012-06-13 15:59:39 -07001700int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1701 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001702{
1703 struct rb_node *n;
1704 int ret_val = 0;
1705
1706 /*
1707 * traverse the list of heaps available in this system
1708 * and find the heap that is specified.
1709 */
1710 mutex_lock(&dev->lock);
1711 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1712 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Mitchel Humpherysdc4d01d2012-09-13 10:53:22 -07001713 if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
Olav Haugan0a852512012-01-09 10:20:55 -08001714 continue;
1715 if (ION_HEAP(heap->id) != heap_id)
1716 continue;
1717 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001718 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001719 else
1720 ret_val = -EINVAL;
1721 break;
1722 }
1723 mutex_unlock(&dev->lock);
1724 return ret_val;
1725}
Olav Hauganbd453a92012-07-05 14:21:34 -07001726EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001727
Laura Abbott7e446482012-06-13 15:59:39 -07001728int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1729 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001730{
1731 struct rb_node *n;
1732 int ret_val = 0;
1733
1734 /*
1735 * traverse the list of heaps available in this system
1736 * and find the heap that is specified.
1737 */
1738 mutex_lock(&dev->lock);
1739 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1740 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
Mitchel Humpherysdc4d01d2012-09-13 10:53:22 -07001741 if (heap->type != (enum ion_heap_type) ION_HEAP_TYPE_CP)
Olav Haugan0a852512012-01-09 10:20:55 -08001742 continue;
1743 if (ION_HEAP(heap->id) != heap_id)
1744 continue;
 1745		if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001746 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001747 else
1748 ret_val = -EINVAL;
1749 break;
1750 }
1751 mutex_unlock(&dev->lock);
1752 return ret_val;
1753}
Olav Hauganbd453a92012-07-05 14:21:34 -07001754EXPORT_SYMBOL(ion_unsecure_heap);
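
/*
 * Hedged sketch of a caller pairing the two helpers above to protect and
 * then release a content-protection heap. "example_protect_mm_heap" is
 * hypothetical; heap_id must be passed as a mask bit (ION_HEAP(id)), and
 * version/data are forwarded untouched to the heap's secure/unsecure ops.
 */
#if 0
static int example_protect_mm_heap(struct ion_device *dev, int version,
				   void *data)
{
	int heap_id = ION_HEAP(ION_CP_MM_HEAP_ID);
	int ret;

	ret = ion_secure_heap(dev, heap_id, version, data);
	if (ret)
		return ret;
	/* ... hand the now-secured region to the protected environment ... */
	return ion_unsecure_heap(dev, heap_id, version, data);
}
#endif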
Olav Haugan0a852512012-01-09 10:20:55 -08001755
Laura Abbott404f8242011-10-31 14:22:53 -07001756static int ion_debug_leak_show(struct seq_file *s, void *unused)
1757{
1758 struct ion_device *dev = s->private;
1759 struct rb_node *n;
1760 struct rb_node *n2;
1761
1762 /* mark all buffers as 1 */
 1763	seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1764 "ref cnt");
1765 mutex_lock(&dev->lock);
1766 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1767 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1768 node);
1769
1770 buf->marked = 1;
1771 }
1772
1773 /* now see which buffers we can access */
Laura Abbottb14ed962012-01-30 14:18:08 -08001774 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Laura Abbott404f8242011-10-31 14:22:53 -07001775 struct ion_client *client = rb_entry(n, struct ion_client,
1776 node);
1777
1778 mutex_lock(&client->lock);
1779 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1780 struct ion_handle *handle = rb_entry(n2,
1781 struct ion_handle, node);
1782
1783 handle->buffer->marked = 0;
1784
1785 }
1786 mutex_unlock(&client->lock);
1787
1788 }
1789
Laura Abbott404f8242011-10-31 14:22:53 -07001790 /* And anyone still marked as a 1 means a leaked handle somewhere */
1791 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1792 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1793 node);
1794
1795 if (buf->marked == 1)
 1796			seq_printf(s, "%16x %16s %16zx %16d\n",
 1797				   (unsigned int)buf, buf->heap->name, buf->size,
1798 atomic_read(&buf->ref.refcount));
1799 }
1800 mutex_unlock(&dev->lock);
1801 return 0;
1802}
1803
1804static int ion_debug_leak_open(struct inode *inode, struct file *file)
1805{
1806 return single_open(file, ion_debug_leak_show, inode->i_private);
1807}
1808
1809static const struct file_operations debug_leak_fops = {
1810 .open = ion_debug_leak_open,
1811 .read = seq_read,
1812 .llseek = seq_lseek,
1813 .release = single_release,
1814};
 1815
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001818struct ion_device *ion_device_create(long (*custom_ioctl)
1819 (struct ion_client *client,
1820 unsigned int cmd,
1821 unsigned long arg))
1822{
1823 struct ion_device *idev;
1824 int ret;
1825
1826 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1827 if (!idev)
1828 return ERR_PTR(-ENOMEM);
1829
1830 idev->dev.minor = MISC_DYNAMIC_MINOR;
1831 idev->dev.name = "ion";
1832 idev->dev.fops = &ion_fops;
1833 idev->dev.parent = NULL;
1834 ret = misc_register(&idev->dev);
1835 if (ret) {
1836 pr_err("ion: failed to register misc device.\n");
 1837		kfree(idev);
 1838		return ERR_PTR(ret);
1838 }
1839
1840 idev->debug_root = debugfs_create_dir("ion", NULL);
1841 if (IS_ERR_OR_NULL(idev->debug_root))
1842 pr_err("ion: failed to create debug files.\n");
1843
1844 idev->custom_ioctl = custom_ioctl;
1845 idev->buffers = RB_ROOT;
1846 mutex_init(&idev->lock);
1847 idev->heaps = RB_ROOT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001848 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001849 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1850 &debug_leak_fops);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001851 return idev;
1852}
1853
1854void ion_device_destroy(struct ion_device *dev)
1855{
1856 misc_deregister(&dev->dev);
1857 /* XXX need to free the heaps and clients ? */
1858 kfree(dev);
1859}
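
/*
 * Hedged sketch of bringing the core up from an SoC-level probe. A
 * custom_ioctl callback is effectively mandatory here because the cache
 * maintenance ioctls in ion_ioctl() call it without a NULL check;
 * "example_custom_ioctl" is a hypothetical stand-in for the platform's
 * real handler.
 */
#if 0
static long example_custom_ioctl(struct ion_client *client, unsigned int cmd,
				 unsigned long arg)
{
	/* no platform-specific commands in this sketch */
	return -ENOTTY;
}

static struct ion_device *example_probe(struct ion_platform_data *pdata)
{
	struct ion_device *idev = ion_device_create(example_custom_ioctl);

	if (IS_ERR_OR_NULL(idev))
		return idev;
	/* heaps from pdata would be created and added here */
	return idev;
}
#endif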
Laura Abbottb14ed962012-01-30 14:18:08 -08001860
1861void __init ion_reserve(struct ion_platform_data *data)
1862{
1863 int i, ret;
1864
1865 for (i = 0; i < data->nr; i++) {
1866 if (data->heaps[i].size == 0)
1867 continue;
1868 ret = memblock_reserve(data->heaps[i].base,
1869 data->heaps[i].size);
1870 if (ret)
 1871			pr_err("memblock reserve of %zx@%lx failed\n",
1872 data->heaps[i].size,
1873 data->heaps[i].base);
1874 }
1875}
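
/*
 * Hedged sketch: a board file's memory-reservation hook handing a static
 * ion_platform_data table to ion_reserve() so the carveout base/size pairs
 * are claimed from memblock before the page allocator takes over. The heap
 * table below (id, type, base, size) is made up for illustration, and a real
 * CP heap would also carry msm-specific extra_data, omitted here.
 */
#if 0
static struct ion_platform_heap example_heaps[] = {
	{
		.id	= ION_CP_MM_HEAP_ID,
		.type	= ION_HEAP_TYPE_CP,
		.name	= "mm",
		.base	= 0x80000000,
		.size	= SZ_64M,
	},
};

static struct ion_platform_data example_ion_pdata = {
	.nr	= ARRAY_SIZE(example_heaps),
	.heaps	= example_heaps,
};

static void __init example_board_reserve(void)
{
	ion_reserve(&example_ion_pdata);
}
#endif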