blob: 4fe1f019334d69a4dcb219b38b232ff18503d0b5 [file] [log] [blame]
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001/*
2 * drivers/gpu/ion/ion.c
3 *
4 * Copyright (C) 2011 Google, Inc.
Olav Haugan0a852512012-01-09 10:20:55 -08005 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07006 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
Steve Mucklef132c6c2012-06-06 18:30:57 -070018#include <linux/module.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070019#include <linux/device.h>
20#include <linux/file.h>
21#include <linux/fs.h>
22#include <linux/anon_inodes.h>
23#include <linux/ion.h>
24#include <linux/list.h>
Laura Abbottb14ed962012-01-30 14:18:08 -080025#include <linux/memblock.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070026#include <linux/miscdevice.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070027#include <linux/mm.h>
28#include <linux/mm_types.h>
29#include <linux/rbtree.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/seq_file.h>
33#include <linux/uaccess.h>
34#include <linux/debugfs.h>
Laura Abbottb14ed962012-01-30 14:18:08 -080035#include <linux/dma-buf.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070036
Laura Abbott8c017362011-09-22 20:59:12 -070037#include <mach/iommu_domains.h>
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -070038#include "ion_priv.h"
39#define DEBUG
40
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		rb tree of all the heaps in the system
 * @custom_ioctl:	hook for device-specific ioctls (may be NULL)
 * @clients:		rb tree of all clients, both userspace and kernel
 * @debug_root:		debugfs root directory for this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};
59
/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the device's tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the creating task, used for debugging
 * @debug_root:		this client's entry under the device debugfs root
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
85
/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped for iommu
 *
 * Modifications to node, map counts or mappings should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};
106
Olav Hauganb3676592012-03-02 15:02:25 -0800107static void ion_iommu_release(struct kref *kref);
108
/*
 * Check that a mapping request's cache flags agree with how the buffer is
 * already mapped.  If the buffer has any outstanding kernel, dma, userspace
 * or iommu mappings, the requested flags must match the flags of the first
 * mapping; otherwise the request's flags become the buffer's flags.
 *
 * Returns 1 on a flags mismatch (caller must fail the map), 0 otherwise.
 * Caller is expected to hold the locks protecting the map counts.
 */
static int ion_validate_buffer_flags(struct ion_buffer *buffer,
					unsigned long flags)
{
	if (buffer->kmap_cnt || buffer->dmap_cnt || buffer->umap_cnt ||
		buffer->iommu_map_cnt) {
		if (buffer->flags != flags) {
			pr_err("%s: buffer was already mapped with flags %lx,"
				" cannot map with flags %lx\n", __func__,
				buffer->flags, flags);
			return 1;
		}

	} else {
		buffer->flags = flags;
	}
	return 0;
}
126
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	/*
	 * The tree is keyed by buffer pointer value.  Finding the same
	 * pointer twice means the buffer was already inserted, which is a
	 * fatal accounting error, hence BUG().
	 */
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
152
/*
 * Insert an iommu mapping into the buffer's iommu_maps tree, keyed by the
 * mapping's 64-bit key (domain number in the high word, partition number in
 * the low word — see ion_iommu_lookup()).  Duplicate (domain, partition)
 * mappings are a fatal error.  Caller must hold buffer->lock.
 */
static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d"
				" and partition %d\n", __func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);

}
182
183static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
184 unsigned int domain_no,
185 unsigned int partition_no)
186{
187 struct rb_node **p = &buffer->iommu_maps.rb_node;
188 struct rb_node *parent = NULL;
189 struct ion_iommu_map *entry;
190 uint64_t key = domain_no;
191 key = key << 32 | partition_no;
192
193 while (*p) {
194 parent = *p;
195 entry = rb_entry(parent, struct ion_iommu_map, node);
196
197 if (key < entry->key)
198 p = &(*p)->rb_left;
199 else if (key > entry->key)
200 p = &(*p)->rb_right;
201 else
202 return entry;
203 }
204
205 return NULL;
206}
207
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700208/* this function should only be called while dev->lock is held */
209static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
210 struct ion_device *dev,
211 unsigned long len,
212 unsigned long align,
213 unsigned long flags)
214{
215 struct ion_buffer *buffer;
Laura Abbottb14ed962012-01-30 14:18:08 -0800216 struct sg_table *table;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700217 int ret;
218
219 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
220 if (!buffer)
221 return ERR_PTR(-ENOMEM);
222
223 buffer->heap = heap;
224 kref_init(&buffer->ref);
225
226 ret = heap->ops->allocate(heap, buffer, len, align, flags);
227 if (ret) {
228 kfree(buffer);
229 return ERR_PTR(ret);
230 }
Laura Abbottb14ed962012-01-30 14:18:08 -0800231
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700232 buffer->dev = dev;
233 buffer->size = len;
Laura Abbottb14ed962012-01-30 14:18:08 -0800234
235 table = buffer->heap->ops->map_dma(buffer->heap, buffer);
236 if (IS_ERR_OR_NULL(table)) {
237 heap->ops->free(buffer);
238 kfree(buffer);
239 return ERR_PTR(PTR_ERR(table));
240 }
241 buffer->sg_table = table;
242
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700243 mutex_init(&buffer->lock);
244 ion_buffer_add(dev, buffer);
245 return buffer;
246}
247
/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 *
 * Called at buffer teardown: every mapping still in the tree is forcibly
 * released.  A mapping is only *expected* to still be here when it was made
 * with ION_IOMMU_UNMAP_DELAYED and holds exactly its one deferred reference;
 * anything else is a leak and is logged before being torn down.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	/* ion_iommu_release() erases the node, so rb_first() advances. */
	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}
279
/*
 * kref release callback for an ion_buffer: undo any leftover kernel
 * mapping, tear down the dma and iommu mappings, return the memory to the
 * heap, and unlink the buffer from the device tree.
 */
static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	/* a non-zero kmap_cnt here means a client leaked a kernel mapping */
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}
297
/* Take an additional reference on @buffer. */
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}
302
/*
 * Drop a reference on @buffer, destroying it when the last reference goes.
 * Returns 1 if the buffer was destroyed, 0 otherwise (kref_put semantics).
 */
static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}
307
/*
 * Allocate a new handle (ref count 1) referencing @buffer on behalf of
 * @client.  Takes a buffer reference; the handle is NOT yet inserted into
 * the client's handle tree (see ion_handle_add()).
 * Returns the handle or ERR_PTR(-ENOMEM).
 */
static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	/* mark the node empty so destroy can tell if it was ever added */
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}
324
Laura Abbottb14ed962012-01-30 14:18:08 -0800325static void ion_handle_kmap_put(struct ion_handle *);
326
/*
 * kref release callback for an ion_handle: release any kernel mappings the
 * handle still holds, unlink it from the client's tree, and drop its buffer
 * reference.  Lock order is client->lock then buffer->lock, matching the
 * rest of this file.
 */
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&client->lock);

	mutex_lock(&buffer->lock);
	/* drop every kernel mapping this handle still accounts for */
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);
	mutex_unlock(&client->lock);

	ion_buffer_put(buffer);
	kfree(handle);
}
347
/* Return the buffer a handle refers to (no reference is taken). */
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}
352
/* Take an additional reference on @handle. */
static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}
357
/*
 * Drop a reference on @handle, destroying it when the last reference goes.
 * Returns 1 if the handle was destroyed, 0 otherwise (kref_put semantics).
 */
static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}
362
363static struct ion_handle *ion_handle_lookup(struct ion_client *client,
364 struct ion_buffer *buffer)
365{
366 struct rb_node *n;
367
368 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
369 struct ion_handle *handle = rb_entry(n, struct ion_handle,
370 node);
371 if (handle->buffer == buffer)
372 return handle;
373 }
374 return NULL;
375}
376
377static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
378{
379 struct rb_node *n = client->handles.rb_node;
380
381 while (n) {
382 struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
383 node);
384 if (handle < handle_node)
385 n = n->rb_left;
386 else if (handle > handle_node)
387 n = n->rb_right;
388 else
389 return true;
390 }
391 return false;
392}
393
394static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
395{
396 struct rb_node **p = &client->handles.rb_node;
397 struct rb_node *parent = NULL;
398 struct ion_handle *entry;
399
400 while (*p) {
401 parent = *p;
402 entry = rb_entry(parent, struct ion_handle, node);
403
404 if (handle < entry)
405 p = &(*p)->rb_left;
406 else if (handle > entry)
407 p = &(*p)->rb_right;
408 else
409 WARN(1, "%s: buffer already found.", __func__);
410 }
411
412 rb_link_node(&handle->node, parent, p);
413 rb_insert_color(&handle->node, &client->handles);
414}
415
/**
 * ion_alloc - allocate a buffer and return a client-local handle to it
 * @client:	the client performing the allocation
 * @len:	requested size in bytes (page-aligned internally)
 * @align:	required alignment of the allocation
 * @flags:	heap-id mask selecting candidate heaps, plus cache/security
 *		bits (e.g. ION_SECURE)
 *
 * Walks the device's heaps in priority order and allocates from the first
 * heap that the client supports, the caller requested, and that satisfies
 * the request.  Returns a handle already inserted into the client's handle
 * tree, or an ERR_PTR on failure (-EINVAL for zero length, -ENODEV when no
 * heap matched).
 */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	/* accumulates the names of heaps that were tried but failed */
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & flags))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
		/* record this heap's name for the failure message below */
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			/*
			 * NOTE(review): ret_value is compared against the
			 * unsigned len_left, so a negative snprintf return
			 * would be promoted and take the "overflow" branch;
			 * the final "error" branch looks unreachable —
			 * confirm before relying on it.
			 */
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	/* NULL here means no heap matched the request at all */
	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer)) {
		/*
		 * NOTE(review): %x with size_t len/align assumes a 32-bit
		 * target; 64-bit builds would need %zx — confirm if ported.
		 */
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}


	return handle;
}
EXPORT_SYMBOL(ion_alloc);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700503
/**
 * ion_free - release a client's handle
 * @client:	the client the handle belongs to
 * @handle:	the handle to free
 *
 * Validates the handle against the client's tree, then drops the handle
 * reference; the buffer itself is freed once all handles and kernel users
 * are gone.  An invalid handle triggers a WARN and is ignored.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		/* unlock before WARN to keep the lock region minimal */
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700521
/**
 * ion_phys - query the physical address and length of a buffer
 * @client:	the client owning @handle
 * @handle:	handle to the buffer
 * @addr:	out: physical address
 * @len:	out: length of the buffer
 *
 * Delegates to the heap's phys op.  Returns -EINVAL for an invalid handle,
 * -ENODEV if the heap does not implement phys, otherwise the op's result.
 *
 * NOTE(review): the heap op is invoked after client->lock is dropped and
 * without buffer->lock held — presumably safe because the caller still
 * holds the handle; confirm against the heap implementations.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700547
/*
 * Return a kernel virtual address for @buffer, mapping it on first use and
 * bumping the buffer-wide kmap count.  Caller must hold buffer->lock.
 * Returns the mapping's ERR_PTR/NULL unchanged if map_kernel fails.
 */
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		/* already mapped: just take another reference on it */
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}
Laura Abbottb14ed962012-01-30 14:18:08 -0800563
/*
 * Per-handle wrapper around ion_buffer_kmap_get(): tracks how many kernel
 * mappings this particular handle holds so they can be reclaimed when the
 * handle dies.  Caller must hold buffer->lock.
 */
static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		/* this handle already holds a mapping; reuse it */
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}
579
/*
 * Drop one buffer-wide kernel-mapping reference; unmap when it reaches
 * zero.  Caller must hold buffer->lock.
 */
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}
588
/*
 * Drop one of this handle's kernel-mapping references, releasing the
 * buffer-wide reference when the handle's count hits zero.
 * Caller must hold buffer->lock.
 */
static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700597
/*
 * Create a new iommu mapping of @buffer into (domain_num, partition_num)
 * via the heap's map_iommu op, register it in the buffer's iommu_maps tree
 * (ref count 1), and return it; *iova receives the mapped address.
 * Returns an ERR_PTR on allocation or mapping failure.
 * Caller must hold buffer->lock (hence the GFP_ATOMIC allocation).
 */
static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}
636
637int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
638 int domain_num, int partition_num, unsigned long align,
639 unsigned long iova_length, unsigned long *iova,
640 unsigned long *buffer_size,
Olav Hauganb3676592012-03-02 15:02:25 -0800641 unsigned long flags, unsigned long iommu_flags)
Laura Abbott8c017362011-09-22 20:59:12 -0700642{
643 struct ion_buffer *buffer;
644 struct ion_iommu_map *iommu_map;
645 int ret = 0;
646
Olav Haugan79e9ffa2012-02-24 13:11:10 -0800647 if (ION_IS_CACHED(flags)) {
648 pr_err("%s: Cannot map iommu as cached.\n", __func__);
649 return -EINVAL;
650 }
651
Laura Abbott8c017362011-09-22 20:59:12 -0700652 mutex_lock(&client->lock);
653 if (!ion_handle_validate(client, handle)) {
654 pr_err("%s: invalid handle passed to map_kernel.\n",
655 __func__);
656 mutex_unlock(&client->lock);
657 return -EINVAL;
658 }
659
660 buffer = handle->buffer;
661 mutex_lock(&buffer->lock);
662
663 if (!handle->buffer->heap->ops->map_iommu) {
664 pr_err("%s: map_iommu is not implemented by this heap.\n",
665 __func__);
666 ret = -ENODEV;
667 goto out;
668 }
669
Laura Abbott8c017362011-09-22 20:59:12 -0700670 /*
671 * If clients don't want a custom iova length, just use whatever
672 * the buffer size is
673 */
674 if (!iova_length)
675 iova_length = buffer->size;
676
677 if (buffer->size > iova_length) {
678 pr_debug("%s: iova length %lx is not at least buffer size"
679 " %x\n", __func__, iova_length, buffer->size);
680 ret = -EINVAL;
681 goto out;
682 }
683
684 if (buffer->size & ~PAGE_MASK) {
685 pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
686 buffer->size, PAGE_SIZE);
687 ret = -EINVAL;
688 goto out;
689 }
690
691 if (iova_length & ~PAGE_MASK) {
692 pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
693 iova_length, PAGE_SIZE);
694 ret = -EINVAL;
695 goto out;
696 }
697
698 iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
Olav Hauganb3676592012-03-02 15:02:25 -0800699 if (!iommu_map) {
700 iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
701 align, iova_length, flags, iova);
Laura Abbottb14ed962012-01-30 14:18:08 -0800702 if (!IS_ERR_OR_NULL(iommu_map)) {
Olav Hauganb3676592012-03-02 15:02:25 -0800703 iommu_map->flags = iommu_flags;
704
705 if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
706 kref_get(&iommu_map->ref);
707 }
Laura Abbott8c017362011-09-22 20:59:12 -0700708 } else {
Olav Hauganb3676592012-03-02 15:02:25 -0800709 if (iommu_map->flags != iommu_flags) {
710 pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
711 __func__, handle,
712 iommu_map->flags, iommu_flags);
Olav Hauganb3676592012-03-02 15:02:25 -0800713 ret = -EINVAL;
714 } else if (iommu_map->mapped_size != iova_length) {
Laura Abbott8c017362011-09-22 20:59:12 -0700715 pr_err("%s: handle %p is already mapped with length"
Olav Hauganb3676592012-03-02 15:02:25 -0800716 " %x, trying to map with length %lx\n",
Laura Abbott8c017362011-09-22 20:59:12 -0700717 __func__, handle, iommu_map->mapped_size,
718 iova_length);
Laura Abbott8c017362011-09-22 20:59:12 -0700719 ret = -EINVAL;
720 } else {
721 kref_get(&iommu_map->ref);
722 *iova = iommu_map->iova_addr;
723 }
724 }
Laura Abbottb14ed962012-01-30 14:18:08 -0800725 if (!ret)
726 buffer->iommu_map_cnt++;
Laura Abbott8c017362011-09-22 20:59:12 -0700727 *buffer_size = buffer->size;
728out:
729 mutex_unlock(&buffer->lock);
730 mutex_unlock(&client->lock);
731 return ret;
732}
733EXPORT_SYMBOL(ion_map_iommu);
734
/*
 * kref release callback for an iommu mapping: unlink it from the buffer's
 * tree, tear down the hardware mapping via the heap, and free it.
 * Called with buffer->lock held (via kref_put from the unmap paths).
 */
static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}
745
/**
 * ion_unmap_iommu - drop one reference on an iommu mapping
 * @client:		client owning @handle
 * @handle:		handle to the mapped buffer
 * @domain_num:		domain the buffer was mapped into
 * @partition_num:	partition within the domain
 *
 * The hardware mapping is torn down when the last reference goes (unless
 * ION_IOMMU_UNMAP_DELAYED holds it until buffer teardown).  Unmapping a
 * never-mapped (domain, partition) only WARNs.
 *
 * NOTE(review): unlike ion_map_iommu(), @handle is dereferenced without
 * ion_handle_validate() — confirm callers cannot pass a stale handle here.
 */
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
				domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);

}
EXPORT_SYMBOL(ion_unmap_iommu);
775
/**
 * ion_map_kernel - map a buffer into the kernel address space
 * @client:	client owning @handle
 * @handle:	handle to the buffer to map
 * @flags:	cache flags; must match any existing mappings of the buffer
 *
 * Returns the kernel virtual address, or an ERR_PTR: -EINVAL for a bad
 * handle, -ENODEV if the heap has no map_kernel op, -EEXIST when @flags
 * conflict with how the buffer is already mapped.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
			unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		mutex_unlock(&client->lock);
		return ERR_PTR(-EEXIST);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700811
/**
 * ion_unmap_kernel - undo one ion_map_kernel() on @handle
 * @client:	client owning @handle
 * @handle:	handle whose kernel mapping to drop
 *
 * The buffer is actually unmapped once every handle's kernel-map count
 * reaches zero.
 *
 * NOTE(review): @handle is not validated here, unlike the map path —
 * presumably callers guarantee a live handle; confirm.
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700824
/*
 * Check that the user-space range [start, end) lies entirely inside a
 * single VMA of the current address space.  Returns 0 when the range is
 * valid, non-zero otherwise (including when end < start or no VMA covers
 * the start address).
 */
static int check_vaddr_bounds(unsigned long start, unsigned long end)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	int ret = 1;

	if (end < start)
		goto out;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	/* find_vma returns the first vma ending above start; verify it
	 * actually contains the whole range */
	if (vma && vma->vm_start < end) {
		if (start < vma->vm_start)
			goto out_up;
		if (end > vma->vm_end)
			goto out_up;
		ret = 0;
	}

out_up:
	up_read(&mm->mmap_sem);
out:
	return ret;
}
849
/**
 * ion_do_cache_op - perform a cache maintenance operation on a buffer
 * @client:	client owning @handle
 * @handle:	handle to the buffer
 * @uaddr:	user virtual address of the region to operate on
 * @offset:	offset into the buffer
 * @len:	length of the region
 * @cmd:	the cache command, passed through to the heap's cache_op
 *
 * A no-op (returns 0) for uncached buffers.  Returns -EINVAL for a bad
 * handle, -ENODEV if the heap has no cache_op, otherwise the op's result.
 */
int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	/* nothing to maintain for uncached buffers */
	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}


	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;

}
EXPORT_SYMBOL(ion_do_cache_op);
Laura Abbottabcb6f72011-10-04 16:26:49 -0700890
/*
 * debugfs seq_file show routine: print one line per handle held by the
 * client — heap name, size, handle refcount, buffer pointer, physical
 * address (for physically-contiguous heap types), and every iommu mapping
 * as [domain,partition] - iova.
 */
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
				handle->buffer->heap->name,
				handle->buffer->size,
				atomic_read(&handle->ref.refcount),
				handle->buffer);

		/* only these heap types carry a meaningful priv_phys */
		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
					imap->domain_info[DI_DOMAIN_NUM],
					imap->domain_info[DI_PARTITION_NUM],
					imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}
935
/* debugfs open: route to ion_debug_client_show() via single_open(). */
static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}
940
/* File operations for the per-client debugfs entry. */
static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
947
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700948struct ion_client *ion_client_create(struct ion_device *dev,
949 unsigned int heap_mask,
950 const char *name)
951{
952 struct ion_client *client;
953 struct task_struct *task;
954 struct rb_node **p;
955 struct rb_node *parent = NULL;
956 struct ion_client *entry;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700957 pid_t pid;
Olav Haugane8a31972012-05-16 13:11:41 -0700958 unsigned int name_len;
959
960 if (!name) {
961 pr_err("%s: Name cannot be null\n", __func__);
962 return ERR_PTR(-EINVAL);
963 }
964 name_len = strnlen(name, 64);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700965
966 get_task_struct(current->group_leader);
967 task_lock(current->group_leader);
968 pid = task_pid_nr(current->group_leader);
969 /* don't bother to store task struct for kernel threads,
970 they can't be killed anyway */
971 if (current->group_leader->flags & PF_KTHREAD) {
972 put_task_struct(current->group_leader);
973 task = NULL;
974 } else {
975 task = current->group_leader;
976 }
977 task_unlock(current->group_leader);
978
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700979 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
980 if (!client) {
Laura Abbottb14ed962012-01-30 14:18:08 -0800981 if (task)
982 put_task_struct(current->group_leader);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700983 return ERR_PTR(-ENOMEM);
984 }
985
986 client->dev = dev;
987 client->handles = RB_ROOT;
988 mutex_init(&client->lock);
Olav Haugan63e5f3b2012-01-11 16:42:37 -0800989
Olav Haugan6625c7d2012-01-24 13:50:43 -0800990 client->name = kzalloc(name_len+1, GFP_KERNEL);
Olav Haugan63e5f3b2012-01-11 16:42:37 -0800991 if (!client->name) {
992 put_task_struct(current->group_leader);
993 kfree(client);
994 return ERR_PTR(-ENOMEM);
995 } else {
Olav Haugan6625c7d2012-01-24 13:50:43 -0800996 strlcpy(client->name, name, name_len+1);
Olav Haugan63e5f3b2012-01-11 16:42:37 -0800997 }
998
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700999 client->heap_mask = heap_mask;
1000 client->task = task;
1001 client->pid = pid;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001002
1003 mutex_lock(&dev->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -08001004 p = &dev->clients.rb_node;
1005 while (*p) {
1006 parent = *p;
1007 entry = rb_entry(parent, struct ion_client, node);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001008
Laura Abbottb14ed962012-01-30 14:18:08 -08001009 if (client < entry)
1010 p = &(*p)->rb_left;
1011 else if (client > entry)
1012 p = &(*p)->rb_right;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001013 }
Laura Abbottb14ed962012-01-30 14:18:08 -08001014 rb_link_node(&client->node, parent, p);
1015 rb_insert_color(&client->node, &dev->clients);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001016
Laura Abbotteed86032011-12-05 15:32:36 -08001017
1018 client->debug_root = debugfs_create_file(name, 0664,
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001019 dev->debug_root, client,
1020 &debug_client_fops);
1021 mutex_unlock(&dev->lock);
1022
1023 return client;
1024}
1025
Laura Abbottb14ed962012-01-30 14:18:08 -08001026void ion_client_destroy(struct ion_client *client)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001027{
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001028 struct ion_device *dev = client->dev;
1029 struct rb_node *n;
1030
1031 pr_debug("%s: %d\n", __func__, __LINE__);
1032 while ((n = rb_first(&client->handles))) {
1033 struct ion_handle *handle = rb_entry(n, struct ion_handle,
1034 node);
1035 ion_handle_destroy(&handle->ref);
1036 }
1037 mutex_lock(&dev->lock);
Laura Abbottb14ed962012-01-30 14:18:08 -08001038 if (client->task)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001039 put_task_struct(client->task);
Laura Abbottb14ed962012-01-30 14:18:08 -08001040 rb_erase(&client->node, &dev->clients);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001041 debugfs_remove_recursive(client->debug_root);
1042 mutex_unlock(&dev->lock);
1043
Olav Haugan63e5f3b2012-01-11 16:42:37 -08001044 kfree(client->name);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001045 kfree(client);
1046}
Olav Hauganbd453a92012-07-05 14:21:34 -07001047EXPORT_SYMBOL(ion_client_destroy);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001048
Laura Abbott273dd8e2011-10-12 14:26:33 -07001049int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
1050 unsigned long *flags)
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001051{
1052 struct ion_buffer *buffer;
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001053
1054 mutex_lock(&client->lock);
1055 if (!ion_handle_validate(client, handle)) {
Laura Abbott273dd8e2011-10-12 14:26:33 -07001056 pr_err("%s: invalid handle passed to %s.\n",
1057 __func__, __func__);
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001058 mutex_unlock(&client->lock);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001059 return -EINVAL;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001060 }
Laura Abbott273dd8e2011-10-12 14:26:33 -07001061 buffer = handle->buffer;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001062 mutex_lock(&buffer->lock);
Laura Abbott273dd8e2011-10-12 14:26:33 -07001063 *flags = buffer->flags;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001064 mutex_unlock(&buffer->lock);
Laura Abbott273dd8e2011-10-12 14:26:33 -07001065 mutex_unlock(&client->lock);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001066
Laura Abbott273dd8e2011-10-12 14:26:33 -07001067 return 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001068}
Laura Abbott273dd8e2011-10-12 14:26:33 -07001069EXPORT_SYMBOL(ion_handle_get_flags);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001070
Laura Abbott8c017362011-09-22 20:59:12 -07001071int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
1072 unsigned long *size)
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001073{
Laura Abbott8c017362011-09-22 20:59:12 -07001074 struct ion_buffer *buffer;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001075
Laura Abbott8c017362011-09-22 20:59:12 -07001076 mutex_lock(&client->lock);
1077 if (!ion_handle_validate(client, handle)) {
1078 pr_err("%s: invalid handle passed to %s.\n",
1079 __func__, __func__);
1080 mutex_unlock(&client->lock);
1081 return -EINVAL;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001082 }
Laura Abbott8c017362011-09-22 20:59:12 -07001083 buffer = handle->buffer;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001084 mutex_lock(&buffer->lock);
Laura Abbott8c017362011-09-22 20:59:12 -07001085 *size = buffer->size;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001086 mutex_unlock(&buffer->lock);
Laura Abbott8c017362011-09-22 20:59:12 -07001087 mutex_unlock(&client->lock);
1088
1089 return 0;
1090}
1091EXPORT_SYMBOL(ion_handle_get_size);
1092
Laura Abbottb14ed962012-01-30 14:18:08 -08001093struct sg_table *ion_sg_table(struct ion_client *client,
1094 struct ion_handle *handle)
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001095{
Laura Abbottb14ed962012-01-30 14:18:08 -08001096 struct ion_buffer *buffer;
1097 struct sg_table *table;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001098
Laura Abbottb14ed962012-01-30 14:18:08 -08001099 mutex_lock(&client->lock);
1100 if (!ion_handle_validate(client, handle)) {
1101 pr_err("%s: invalid handle passed to map_dma.\n",
1102 __func__);
1103 mutex_unlock(&client->lock);
1104 return ERR_PTR(-EINVAL);
1105 }
1106 buffer = handle->buffer;
1107 table = buffer->sg_table;
1108 mutex_unlock(&client->lock);
1109 return table;
1110}
Olav Hauganbd453a92012-07-05 14:21:34 -07001111EXPORT_SYMBOL(ion_sg_table);
Laura Abbottb14ed962012-01-30 14:18:08 -08001112
1113static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
1114 enum dma_data_direction direction)
1115{
1116 struct dma_buf *dmabuf = attachment->dmabuf;
1117 struct ion_buffer *buffer = dmabuf->priv;
1118
1119 return buffer->sg_table;
1120}
1121
/* Nothing to do: the sg_table is owned by the buffer, not the attachment. */
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}
1127
/* A userspace vma of the buffer was opened (mmap/fork/split): bump the
 * buffer's userspace-mapping count under its lock. */
static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt++;
	mutex_unlock(&buffer->lock);
}
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001138
/* A userspace vma of the buffer was closed: drop the mapping count and
 * give the heap a chance to tear down its userspace mapping state. */
static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);

	/* NOTE(review): unmap_user is invoked on every vma close, not only
	 * when umap_cnt reaches zero - presumably the heap callback
	 * tolerates that; confirm against the heap implementations. */
	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}
1152
/* vma callbacks used to keep buffer->umap_cnt in sync with userspace
 * mappings (installed by ion_mmap()). */
static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};
1157
/*
 * dma-buf mmap callback: delegate the actual mapping to the heap's
 * map_user op, and on success install ion_vm_ops so open/close of the
 * vma keep the buffer's userspace-mapping count accurate.
 */
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);

	if (ret) {
		mutex_unlock(&buffer->lock);
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
	} else {
		/* count the new mapping before dropping the lock */
		buffer->umap_cnt++;
		mutex_unlock(&buffer->lock);

		vma->vm_ops = &ion_vm_ops;
		/*
		 * move the buffer into the vm_private_data so we can access it
		 * from vma_open/close
		 */
		vma->vm_private_data = buffer;
	}
	return ret;
}
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001190
/* Last dma-buf reference gone: drop the buffer reference that was taken
 * when the buffer was exported in ion_share_dma_buf(). */
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}
1196
1197static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1198{
1199 struct ion_buffer *buffer = dmabuf->priv;
1200 return buffer->vaddr + offset;
1201}
1202
/* Nothing to unmap: kmap addresses come from the buffer's persistent
 * kernel mapping, which is refcounted by begin/end_cpu_access. */
static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}
1208
/*
 * CPU access begins: make sure a kernel mapping of the buffer exists so
 * the kmap callbacks can return addresses.  The mapping is refcounted
 * via ion_buffer_kmap_get()/put() under buffer->lock.
 */
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	/* map_kernel may report failure as an ERR_PTR or as NULL */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}
1231
/* CPU access finished: drop the kernel-mapping reference taken in
 * ion_dma_buf_begin_cpu_access(). */
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}
1242
/*
 * dma-buf exporter ops for ion buffers.  Also used as an identity check
 * in ion_import_dma_buf() to reject fds from other exporters.
 * NOTE(review): non-static global with a very generic name - consider
 * making it static; confirm no other file references it first.
 */
struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
1255
Laura Abbottb14ed962012-01-30 14:18:08 -08001256static int ion_share_set_flags(struct ion_client *client,
1257 struct ion_handle *handle,
1258 unsigned long flags)
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001259{
Laura Abbottb14ed962012-01-30 14:18:08 -08001260 struct ion_buffer *buffer;
1261 bool valid_handle;
1262 unsigned long ion_flags = ION_SET_CACHE(CACHED);
1263 if (flags & O_DSYNC)
1264 ion_flags = ION_SET_CACHE(UNCACHED);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001265
Laura Abbottb14ed962012-01-30 14:18:08 -08001266 mutex_lock(&client->lock);
1267 valid_handle = ion_handle_validate(client, handle);
1268 mutex_unlock(&client->lock);
1269 if (!valid_handle) {
1270 WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
1271 return -EINVAL;
1272 }
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001273
Laura Abbottb14ed962012-01-30 14:18:08 -08001274 buffer = handle->buffer;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001275
Laura Abbottb14ed962012-01-30 14:18:08 -08001276 mutex_lock(&buffer->lock);
1277 if (ion_validate_buffer_flags(buffer, ion_flags)) {
1278 mutex_unlock(&buffer->lock);
1279 return -EEXIST;
1280 }
1281 mutex_unlock(&buffer->lock);
1282 return 0;
1283}
Laura Abbott4b5d0482011-09-27 18:35:14 -07001284
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001285
Laura Abbottb14ed962012-01-30 14:18:08 -08001286int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
1287{
1288 struct ion_buffer *buffer;
1289 struct dma_buf *dmabuf;
1290 bool valid_handle;
1291 int fd;
1292
1293 mutex_lock(&client->lock);
1294 valid_handle = ion_handle_validate(client, handle);
1295 mutex_unlock(&client->lock);
1296 if (!valid_handle) {
Olav Haugan0df59942012-07-05 14:27:30 -07001297 WARN(1, "%s: invalid handle passed to share.\n", __func__);
Laura Abbottb14ed962012-01-30 14:18:08 -08001298 return -EINVAL;
1299 }
1300
1301 buffer = handle->buffer;
1302 ion_buffer_get(buffer);
1303 dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
1304 if (IS_ERR(dmabuf)) {
1305 ion_buffer_put(buffer);
1306 return PTR_ERR(dmabuf);
1307 }
1308 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
Ajay Dudani173f6132012-08-01 18:06:18 -07001309 if (fd < 0)
Laura Abbottb14ed962012-01-30 14:18:08 -08001310 dma_buf_put(dmabuf);
Ajay Dudani173f6132012-08-01 18:06:18 -07001311
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001312 return fd;
Laura Abbottb14ed962012-01-30 14:18:08 -08001313}
Olav Hauganbd453a92012-07-05 14:21:34 -07001314EXPORT_SYMBOL(ion_share_dma_buf);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001315
/*
 * ion_import_dma_buf() - turn a dma-buf fd back into an ion handle.
 *
 * Only accepts fds that were exported by ion itself (recognised by the
 * dma_buf_ops identity check).  If the client already holds a handle to
 * the underlying buffer, that handle's refcount is bumped instead of
 * creating a duplicate.  Returns the handle or an ERR_PTR.
 */
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	/* the handle (if any) references the buffer; the fd's dma-buf
	 * reference is no longer needed */
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001352
1353static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1354{
1355 struct ion_client *client = filp->private_data;
1356
1357 switch (cmd) {
1358 case ION_IOC_ALLOC:
1359 {
1360 struct ion_allocation_data data;
1361
1362 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1363 return -EFAULT;
1364 data.handle = ion_alloc(client, data.len, data.align,
1365 data.flags);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001366
Laura Abbottb14ed962012-01-30 14:18:08 -08001367 if (IS_ERR(data.handle))
1368 return PTR_ERR(data.handle);
KyongHo Cho9ae7e012011-09-07 11:27:07 +09001369
Laura Abbottb14ed962012-01-30 14:18:08 -08001370 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
1371 ion_free(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001372 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001373 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001374 break;
1375 }
1376 case ION_IOC_FREE:
1377 {
1378 struct ion_handle_data data;
1379 bool valid;
1380
1381 if (copy_from_user(&data, (void __user *)arg,
1382 sizeof(struct ion_handle_data)))
1383 return -EFAULT;
1384 mutex_lock(&client->lock);
1385 valid = ion_handle_validate(client, data.handle);
1386 mutex_unlock(&client->lock);
1387 if (!valid)
1388 return -EINVAL;
1389 ion_free(client, data.handle);
1390 break;
1391 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001392 case ION_IOC_MAP:
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001393 case ION_IOC_SHARE:
1394 {
1395 struct ion_fd_data data;
Laura Abbottb14ed962012-01-30 14:18:08 -08001396 int ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001397 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
1398 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001399
1400 ret = ion_share_set_flags(client, data.handle, filp->f_flags);
1401 if (ret)
1402 return ret;
1403
1404 data.fd = ion_share_dma_buf(client, data.handle);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001405 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
1406 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001407 if (data.fd < 0)
1408 return data.fd;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001409 break;
1410 }
1411 case ION_IOC_IMPORT:
1412 {
1413 struct ion_fd_data data;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001414 int ret = 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001415 if (copy_from_user(&data, (void __user *)arg,
1416 sizeof(struct ion_fd_data)))
1417 return -EFAULT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001418 data.handle = ion_import_dma_buf(client, data.fd);
1419 if (IS_ERR(data.handle))
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001420 data.handle = NULL;
1421 if (copy_to_user((void __user *)arg, &data,
1422 sizeof(struct ion_fd_data)))
1423 return -EFAULT;
Olav Hauganc2d2cf52012-05-15 14:40:11 -07001424 if (ret < 0)
1425 return ret;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001426 break;
1427 }
1428 case ION_IOC_CUSTOM:
1429 {
1430 struct ion_device *dev = client->dev;
1431 struct ion_custom_data data;
1432
1433 if (!dev->custom_ioctl)
1434 return -ENOTTY;
1435 if (copy_from_user(&data, (void __user *)arg,
1436 sizeof(struct ion_custom_data)))
1437 return -EFAULT;
1438 return dev->custom_ioctl(client, data.cmd, data.arg);
1439 }
Laura Abbottabcb6f72011-10-04 16:26:49 -07001440 case ION_IOC_CLEAN_CACHES:
1441 case ION_IOC_INV_CACHES:
1442 case ION_IOC_CLEAN_INV_CACHES:
1443 {
1444 struct ion_flush_data data;
Laura Abbott9fa29e82011-11-14 09:42:53 -08001445 unsigned long start, end;
Laura Abbotte80ea012011-11-18 18:36:47 -08001446 struct ion_handle *handle = NULL;
1447 int ret;
Laura Abbottabcb6f72011-10-04 16:26:49 -07001448
1449 if (copy_from_user(&data, (void __user *)arg,
1450 sizeof(struct ion_flush_data)))
1451 return -EFAULT;
1452
Laura Abbott9fa29e82011-11-14 09:42:53 -08001453 start = (unsigned long) data.vaddr;
1454 end = (unsigned long) data.vaddr + data.length;
1455
1456 if (check_vaddr_bounds(start, end)) {
1457 pr_err("%s: virtual address %p is out of bounds\n",
1458 __func__, data.vaddr);
1459 return -EINVAL;
1460 }
1461
Laura Abbotte80ea012011-11-18 18:36:47 -08001462 if (!data.handle) {
Laura Abbottb14ed962012-01-30 14:18:08 -08001463 handle = ion_import_dma_buf(client, data.fd);
1464 if (IS_ERR(handle)) {
Laura Abbotte80ea012011-11-18 18:36:47 -08001465 pr_info("%s: Could not import handle: %d\n",
1466 __func__, (int)handle);
1467 return -EINVAL;
1468 }
1469 }
1470
1471 ret = ion_do_cache_op(client,
1472 data.handle ? data.handle : handle,
1473 data.vaddr, data.offset, data.length,
1474 cmd);
1475
1476 if (!data.handle)
1477 ion_free(client, handle);
1478
Olav Haugand7baec02012-05-15 14:38:09 -07001479 if (ret < 0)
1480 return ret;
Laura Abbotte80ea012011-11-18 18:36:47 -08001481 break;
Laura Abbottabcb6f72011-10-04 16:26:49 -07001482
1483 }
Laura Abbott273dd8e2011-10-12 14:26:33 -07001484 case ION_IOC_GET_FLAGS:
1485 {
1486 struct ion_flag_data data;
1487 int ret;
1488 if (copy_from_user(&data, (void __user *)arg,
1489 sizeof(struct ion_flag_data)))
1490 return -EFAULT;
1491
1492 ret = ion_handle_get_flags(client, data.handle, &data.flags);
1493 if (ret < 0)
1494 return ret;
1495 if (copy_to_user((void __user *)arg, &data,
1496 sizeof(struct ion_flag_data)))
1497 return -EFAULT;
1498 break;
1499 }
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001500 default:
1501 return -ENOTTY;
1502 }
1503 return 0;
1504}
1505
/* /dev/ion close: destroy the client created in ion_open() together
 * with all of its handles. */
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}
1514
/* /dev/ion open: create an ion client named after the caller's tgid,
 * allowed to allocate from any heap (mask -1). */
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, -1, debug_name);
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}
1531
/* File operations for the /dev/ion misc device. */
static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};
1538
1539static size_t ion_debug_heap_total(struct ion_client *client,
Laura Abbott3647ac32011-10-31 14:09:53 -07001540 enum ion_heap_ids id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001541{
1542 size_t size = 0;
1543 struct rb_node *n;
1544
1545 mutex_lock(&client->lock);
1546 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1547 struct ion_handle *handle = rb_entry(n,
1548 struct ion_handle,
1549 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001550 if (handle->buffer->heap->id == id)
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001551 size += handle->buffer->size;
1552 }
1553 mutex_unlock(&client->lock);
1554 return size;
1555}
1556
Olav Haugan0671b9a2012-05-25 11:58:56 -07001557/**
1558 * Searches through a clients handles to find if the buffer is owned
1559 * by this client. Used for debug output.
1560 * @param client pointer to candidate owner of buffer
1561 * @param buf pointer to buffer that we are trying to find the owner of
1562 * @return 1 if found, 0 otherwise
1563 */
1564static int ion_debug_find_buffer_owner(const struct ion_client *client,
1565 const struct ion_buffer *buf)
1566{
1567 struct rb_node *n;
1568
1569 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1570 const struct ion_handle *handle = rb_entry(n,
1571 const struct ion_handle,
1572 node);
1573 if (handle->buffer == buf)
1574 return 1;
1575 }
1576 return 0;
1577}
1578
1579/**
1580 * Adds mem_map_data pointer to the tree of mem_map
1581 * Used for debug output.
1582 * @param mem_map The mem_map tree
1583 * @param data The new data to add to the tree
1584 */
1585static void ion_debug_mem_map_add(struct rb_root *mem_map,
1586 struct mem_map_data *data)
1587{
1588 struct rb_node **p = &mem_map->rb_node;
1589 struct rb_node *parent = NULL;
1590 struct mem_map_data *entry;
1591
1592 while (*p) {
1593 parent = *p;
1594 entry = rb_entry(parent, struct mem_map_data, node);
1595
1596 if (data->addr < entry->addr) {
1597 p = &(*p)->rb_left;
1598 } else if (data->addr > entry->addr) {
1599 p = &(*p)->rb_right;
1600 } else {
1601 pr_err("%s: mem_map_data already found.", __func__);
1602 BUG();
1603 }
1604 }
1605 rb_link_node(&data->node, parent, p);
1606 rb_insert_color(&data->node, mem_map);
1607}
1608
1609/**
1610 * Search for an owner of a buffer by iterating over all ION clients.
1611 * @param dev ion device containing pointers to all the clients.
1612 * @param buffer pointer to buffer we are trying to find the owner of.
1613 * @return name of owner.
1614 */
1615const char *ion_debug_locate_owner(const struct ion_device *dev,
1616 const struct ion_buffer *buffer)
1617{
1618 struct rb_node *j;
1619 const char *client_name = NULL;
1620
Laura Abbottb14ed962012-01-30 14:18:08 -08001621 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001622 j = rb_next(j)) {
1623 struct ion_client *client = rb_entry(j, struct ion_client,
1624 node);
1625 if (ion_debug_find_buffer_owner(client, buffer))
1626 client_name = client->name;
1627 }
1628 return client_name;
1629}
1630
/**
 * Create a mem_map of the heap.
 * @param s seq_file to log error message to.
 * @param heap The heap to create mem_map for.
 * @param mem_map The mem map to be created.
 *
 * Walks every buffer on the device and, for each buffer belonging to
 * @heap, adds an entry recording physical start/end, size and owning
 * client name.  On allocation failure the map is emitted partially.
 * The caller must free the tree with ion_debug_mem_map_destroy().
 */
void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
			      struct rb_root *mem_map)
{
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer =
				rb_entry(n, struct ion_buffer, node);
		if (buffer->heap->id == heap->id) {
			struct mem_map_data *data =
					kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				/* best effort: log and emit a partial map */
				seq_printf(s, "ERROR: out of memory. "
					   "Part of memory map will not be logged\n");
				break;
			}
			data->addr = buffer->priv_phys;
			data->addr_end = buffer->priv_phys + buffer->size-1;
			data->size = buffer->size;
			data->client_name = ion_debug_locate_owner(dev, buffer);
			ion_debug_mem_map_add(mem_map, data);
		}
	}
}
1662
1663/**
1664 * Free the memory allocated by ion_debug_mem_map_create
1665 * @param mem_map The mem map to free.
1666 */
1667static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1668{
1669 if (mem_map) {
1670 struct rb_node *n;
1671 while ((n = rb_first(mem_map)) != 0) {
1672 struct mem_map_data *data =
1673 rb_entry(n, struct mem_map_data, node);
1674 rb_erase(&data->node, mem_map);
1675 kfree(data);
1676 }
1677 }
1678}
1679
1680/**
1681 * Print heap debug information.
1682 * @param s seq_file to log message to.
1683 * @param heap pointer to heap that we will print debug information for.
1684 */
1685static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1686{
1687 if (heap->ops->print_debug) {
1688 struct rb_root mem_map = RB_ROOT;
1689 ion_debug_mem_map_create(s, heap, &mem_map);
1690 heap->ops->print_debug(heap, s, &mem_map);
1691 ion_debug_mem_map_destroy(&mem_map);
1692 }
1693}
1694
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001695static int ion_debug_heap_show(struct seq_file *s, void *unused)
1696{
1697 struct ion_heap *heap = s->private;
1698 struct ion_device *dev = heap->dev;
1699 struct rb_node *n;
1700
Olav Haugane4900b52012-05-25 11:58:03 -07001701 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001702 seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001703
Laura Abbottb14ed962012-01-30 14:18:08 -08001704 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001705 struct ion_client *client = rb_entry(n, struct ion_client,
1706 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001707 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001708 if (!size)
1709 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001710 if (client->task) {
1711 char task_comm[TASK_COMM_LEN];
1712
1713 get_task_comm(task_comm, client->task);
1714 seq_printf(s, "%16.s %16u %16u\n", task_comm,
1715 client->pid, size);
1716 } else {
1717 seq_printf(s, "%16.s %16u %16u\n", client->name,
1718 client->pid, size);
1719 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001720 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001721 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001722 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001723 return 0;
1724}
1725
1726static int ion_debug_heap_open(struct inode *inode, struct file *file)
1727{
1728 return single_open(file, ion_debug_heap_show, inode->i_private);
1729}
1730
/* File operations for the per-heap debugfs file created in
 * ion_device_add_heap(); standard seq_file single_open plumbing. */
static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1737
1738void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1739{
1740 struct rb_node **p = &dev->heaps.rb_node;
1741 struct rb_node *parent = NULL;
1742 struct ion_heap *entry;
1743
Laura Abbottb14ed962012-01-30 14:18:08 -08001744 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1745 !heap->ops->unmap_dma)
1746 pr_err("%s: can not add heap with invalid ops struct.\n",
1747 __func__);
1748
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001749 heap->dev = dev;
1750 mutex_lock(&dev->lock);
1751 while (*p) {
1752 parent = *p;
1753 entry = rb_entry(parent, struct ion_heap, node);
1754
1755 if (heap->id < entry->id) {
1756 p = &(*p)->rb_left;
1757 } else if (heap->id > entry->id ) {
1758 p = &(*p)->rb_right;
1759 } else {
1760 pr_err("%s: can not insert multiple heaps with "
1761 "id %d\n", __func__, heap->id);
1762 goto end;
1763 }
1764 }
1765
1766 rb_link_node(&heap->node, parent, p);
1767 rb_insert_color(&heap->node, &dev->heaps);
1768 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1769 &debug_heap_fops);
1770end:
1771 mutex_unlock(&dev->lock);
1772}
1773
Laura Abbott7e446482012-06-13 15:59:39 -07001774int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1775 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001776{
1777 struct rb_node *n;
1778 int ret_val = 0;
1779
1780 /*
1781 * traverse the list of heaps available in this system
1782 * and find the heap that is specified.
1783 */
1784 mutex_lock(&dev->lock);
1785 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1786 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1787 if (heap->type != ION_HEAP_TYPE_CP)
1788 continue;
1789 if (ION_HEAP(heap->id) != heap_id)
1790 continue;
1791 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001792 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001793 else
1794 ret_val = -EINVAL;
1795 break;
1796 }
1797 mutex_unlock(&dev->lock);
1798 return ret_val;
1799}
Olav Hauganbd453a92012-07-05 14:21:34 -07001800EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001801
Laura Abbott7e446482012-06-13 15:59:39 -07001802int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1803 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001804{
1805 struct rb_node *n;
1806 int ret_val = 0;
1807
1808 /*
1809 * traverse the list of heaps available in this system
1810 * and find the heap that is specified.
1811 */
1812 mutex_lock(&dev->lock);
1813 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1814 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1815 if (heap->type != ION_HEAP_TYPE_CP)
1816 continue;
1817 if (ION_HEAP(heap->id) != heap_id)
1818 continue;
1819 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001820 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001821 else
1822 ret_val = -EINVAL;
1823 break;
1824 }
1825 mutex_unlock(&dev->lock);
1826 return ret_val;
1827}
Olav Hauganbd453a92012-07-05 14:21:34 -07001828EXPORT_SYMBOL(ion_unsecure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001829
Laura Abbott404f8242011-10-31 14:22:53 -07001830static int ion_debug_leak_show(struct seq_file *s, void *unused)
1831{
1832 struct ion_device *dev = s->private;
1833 struct rb_node *n;
1834 struct rb_node *n2;
1835
1836 /* mark all buffers as 1 */
1837 seq_printf(s, "%16.s %16.s %16.s %16.s\n", "buffer", "heap", "size",
1838 "ref cnt");
1839 mutex_lock(&dev->lock);
1840 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1841 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1842 node);
1843
1844 buf->marked = 1;
1845 }
1846
1847 /* now see which buffers we can access */
Laura Abbottb14ed962012-01-30 14:18:08 -08001848 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Laura Abbott404f8242011-10-31 14:22:53 -07001849 struct ion_client *client = rb_entry(n, struct ion_client,
1850 node);
1851
1852 mutex_lock(&client->lock);
1853 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1854 struct ion_handle *handle = rb_entry(n2,
1855 struct ion_handle, node);
1856
1857 handle->buffer->marked = 0;
1858
1859 }
1860 mutex_unlock(&client->lock);
1861
1862 }
1863
Laura Abbott404f8242011-10-31 14:22:53 -07001864 /* And anyone still marked as a 1 means a leaked handle somewhere */
1865 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1866 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1867 node);
1868
1869 if (buf->marked == 1)
1870 seq_printf(s, "%16.x %16.s %16.x %16.d\n",
1871 (int)buf, buf->heap->name, buf->size,
1872 atomic_read(&buf->ref.refcount));
1873 }
1874 mutex_unlock(&dev->lock);
1875 return 0;
1876}
1877
1878static int ion_debug_leak_open(struct inode *inode, struct file *file)
1879{
1880 return single_open(file, ion_debug_leak_show, inode->i_private);
1881}
1882
/* File operations for the "check_leaked_fds" debugfs file created in
 * ion_device_create(); standard seq_file single_open plumbing. */
static const struct file_operations debug_leak_fops = {
	.open = ion_debug_leak_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1889
1890
1891
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001892struct ion_device *ion_device_create(long (*custom_ioctl)
1893 (struct ion_client *client,
1894 unsigned int cmd,
1895 unsigned long arg))
1896{
1897 struct ion_device *idev;
1898 int ret;
1899
1900 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1901 if (!idev)
1902 return ERR_PTR(-ENOMEM);
1903
1904 idev->dev.minor = MISC_DYNAMIC_MINOR;
1905 idev->dev.name = "ion";
1906 idev->dev.fops = &ion_fops;
1907 idev->dev.parent = NULL;
1908 ret = misc_register(&idev->dev);
1909 if (ret) {
1910 pr_err("ion: failed to register misc device.\n");
1911 return ERR_PTR(ret);
1912 }
1913
1914 idev->debug_root = debugfs_create_dir("ion", NULL);
1915 if (IS_ERR_OR_NULL(idev->debug_root))
1916 pr_err("ion: failed to create debug files.\n");
1917
1918 idev->custom_ioctl = custom_ioctl;
1919 idev->buffers = RB_ROOT;
1920 mutex_init(&idev->lock);
1921 idev->heaps = RB_ROOT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001922 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001923 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1924 &debug_leak_fops);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001925 return idev;
1926}
1927
/* Tear down the ion device created by ion_device_create().
 * NOTE(review): heaps and clients registered with this device are not
 * freed here (see the XXX below) -- callers are expected to have removed
 * them first; confirm before relying on this in an unload path. */
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
Laura Abbottb14ed962012-01-30 14:18:08 -08001934
1935void __init ion_reserve(struct ion_platform_data *data)
1936{
1937 int i, ret;
1938
1939 for (i = 0; i < data->nr; i++) {
1940 if (data->heaps[i].size == 0)
1941 continue;
1942 ret = memblock_reserve(data->heaps[i].base,
1943 data->heaps[i].size);
1944 if (ret)
1945 pr_err("memblock reserve of %x@%lx failed\n",
1946 data->heaps[i].size,
1947 data->heaps[i].base);
1948 }
1949}