/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	arch-specific ioctl hook, may be NULL
 * @clients:		an rb tree of all the clients in the system
 * @debug_root:		root dentry of the ion debugfs directory
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 * @pid:		pid of the client, used for debugging
 * @debug_root:		debugfs entry for this client
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @iommu_map_cnt:	count of times this client has mapped to an iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

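/*
 * Illustrative sketch (compiled out, not part of the driver) of how the
 * three objects above relate: a client is created against the device, a
 * handle is the client-local reference returned by ion_alloc, and kernel
 * mappings are refcounted through the handle's kmap_cnt.  The heap mask
 * ION_HEAP_SYSTEM_MASK is an assumed example value from the ion headers
 * this file pairs with.
 */
#if 0
static int ion_object_graph_sketch(struct ion_device *dev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	/* -1 lets this client use every heap type, as ion_open() does */
	client = ion_client_create(dev, -1, "sketch");
	if (IS_ERR_OR_NULL(client))
		return -ENOMEM;

	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
			   ION_HEAP_SYSTEM_MASK, 0);
	if (IS_ERR_OR_NULL(handle)) {
		ion_client_destroy(client);
		return -ENOMEM;
	}

	/* each map_kernel/unmap_kernel pair moves handle->kmap_cnt */
	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR_OR_NULL(vaddr))
		ion_unmap_kernel(client, handle);

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}
#endif
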
static void ion_iommu_release(struct kref *kref);

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d and partition %d\n",
				__func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);
}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
					      unsigned int domain_no,
					      unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;

	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

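/*
 * Sketch of the rb tree key used above (illustrative, compiled out): the
 * domain number fills the high 32 bits and the partition number the low
 * 32 bits, so each (domain, partition) pair owns one node in iommu_maps.
 */
#if 0
static uint64_t ion_iommu_map_key_sketch(unsigned int domain_no,
					 unsigned int partition_no)
{
	uint64_t key = domain_no;

	return key << 32 | partition_no;
}
#endif
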
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_mask))
			continue;
		/* do not allow un-secure heap if secure is specified */
		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer)) {
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: 0x%x) from heap(s) %sfor client %s with heap mask 0x%x\n",
			len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
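
/*
 * Illustrative sketch (compiled out): ion_phys only works on handles whose
 * heap is physically contiguous and implements ->phys; assume the handle
 * came from such a heap.  It also assumes ion_phys_addr_t is the unsigned
 * long typedef from the ion headers this file pairs with.
 */
#if 0
static void ion_phys_sketch(struct ion_client *client,
			    struct ion_handle *handle)
{
	ion_phys_addr_t paddr;
	size_t size;

	if (!ion_phys(client, handle, &paddr, &size))
		pr_info("buffer at 0x%lx, %zu bytes\n", paddr, size);
}
#endif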

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags)
{
	struct ion_buffer *buffer;
	struct ion_iommu_map *iommu_map;
	int ret = 0;

	if (ION_IS_CACHED(flags)) {
		pr_err("%s: Cannot map iommu as cached.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_iommu.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_iommu) {
		pr_err("%s: map_iommu is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	/*
	 * If clients don't want a custom iova length, just use whatever
	 * the buffer size is
	 */
	if (!iova_length)
		iova_length = buffer->size;

	if (buffer->size > iova_length) {
		pr_debug("%s: iova length %lx is not at least buffer size %x\n",
			 __func__, iova_length, buffer->size);
		ret = -EINVAL;
		goto out;
	}

	if (buffer->size & ~PAGE_MASK) {
		pr_debug("%s: buffer size %x is not aligned to %lx\n",
			 __func__, buffer->size, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	if (iova_length & ~PAGE_MASK) {
		pr_debug("%s: iova_length %lx is not aligned to %lx\n",
			 __func__, iova_length, PAGE_SIZE);
		ret = -EINVAL;
		goto out;
	}

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
	if (!iommu_map) {
		iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
					    align, iova_length, flags, iova);
		if (!IS_ERR_OR_NULL(iommu_map)) {
			iommu_map->flags = iommu_flags;

			if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
				kref_get(&iommu_map->ref);
		}
	} else {
		if (iommu_map->flags != iommu_flags) {
			pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
				__func__, handle,
				iommu_map->flags, iommu_flags);
			ret = -EINVAL;
		} else if (iommu_map->mapped_size != iova_length) {
			pr_err("%s: handle %p is already mapped with length %x, trying to map with length %lx\n",
				__func__, handle, iommu_map->mapped_size,
				iova_length);
			ret = -EINVAL;
		} else {
			kref_get(&iommu_map->ref);
			*iova = iommu_map->iova_addr;
		}
	}
	if (!ret)
		buffer->iommu_map_cnt++;
	*buffer_size = buffer->size;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_map_iommu);
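
/*
 * Illustrative sketch (compiled out): mapping a buffer into an IOMMU
 * domain and tearing it down.  The domain and partition numbers are
 * made-up example values; passing 0 for iova_length makes the code above
 * fall back to the buffer size, and ION_IOMMU_UNMAP_DELAYED keeps the
 * mapping alive until the buffer itself is destroyed.
 */
#if 0
static int ion_iommu_sketch(struct ion_client *client,
			    struct ion_handle *handle)
{
	unsigned long iova, buffer_size;
	int ret;

	ret = ion_map_iommu(client, handle, 1 /* domain */, 0 /* partition */,
			    PAGE_SIZE, 0, &iova, &buffer_size,
			    0, ION_IOMMU_UNMAP_DELAYED);
	if (ret)
		return ret;

	/* ... hand iova to the hardware block ... */

	ion_unmap_iommu(client, handle, 1, 0);
	return 0;
}
#endif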

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
		     domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_iommu);

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}

	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
					  offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}
EXPORT_SYMBOL(ion_do_cache_op);
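
/*
 * Illustrative sketch (compiled out): flushing a cached buffer around a
 * device access.  The assumption that cmd takes the ION_IOC_*_CACHES
 * values comes from the ioctl plumbing at the bottom of this file; vaddr
 * is a kernel or user mapping of the same buffer.
 */
#if 0
static int ion_cache_sketch(struct ion_client *client,
			    struct ion_handle *handle, void *vaddr,
			    unsigned long len)
{
	return ion_do_cache_op(client, handle, vaddr, 0, len,
			       ION_IOC_CLEAN_INV_CACHES);
}
#endif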

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
			"heap_name", "size_in_bytes", "handle refcount",
			"buffer", "physical", "[domain,partition] - virt");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		seq_printf(s, "%16.16s: %16x : %16d : %12p",
			   handle->buffer->heap->name,
			   handle->buffer->size,
			   atomic_read(&handle->ref.refcount),
			   handle->buffer);

		if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
			type == ION_HEAP_TYPE_CARVEOUT ||
			type == ION_HEAP_TYPE_CP)
			seq_printf(s, " : %12lx", handle->buffer->priv_phys);
		else
			seq_printf(s, " : %12s", "N/A");

		for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
			n2 = rb_next(n2)) {
			struct ion_iommu_map *imap =
				rb_entry(n2, struct ion_iommu_map, node);
			seq_printf(s, " : [%d,%d] - %8lx",
				imap->domain_info[DI_DOMAIN_NUM],
				imap->domain_info[DI_PARTITION_NUM],
				imap->iova_addr);
		}
		seq_printf(s, "\n");
	}
	mutex_unlock(&client->lock);

	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		/* the task reference was already dropped for kthreads */
		if (task)
			put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
			 unsigned long *flags)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*flags = buffer->flags;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_flags);

int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to %s.\n",
		       __func__, __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	*size = buffer->size;
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	return 0;
}
EXPORT_SYMBOL(ion_handle_get_size);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt++;
	mutex_unlock(&buffer->lock);
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
			__func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);

	if (ret) {
		mutex_unlock(&buffer->lock);
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
	} else {
		buffer->umap_cnt++;
		mutex_unlock(&buffer->lock);

		vma->vm_ops = &ion_vm_ops;
		/*
		 * move the buffer into the vm_private_data so we can access it
		 * from vma_open/close
		 */
		vma->vm_private_data = buffer;
	}
	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

static int ion_share_set_flags(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags)
{
	struct ion_buffer *buffer;
	bool valid_handle;
	unsigned long ion_flags = 0;

	if (flags & O_DSYNC)
		ion_flags = ION_SET_UNCACHED(ion_flags);
	else
		ion_flags = ION_SET_CACHED(ion_flags);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;

	return 0;
}

int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);
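
/*
 * Illustrative sketch (compiled out): the share/import round trip.  A
 * handle becomes a dma-buf fd that can cross process boundaries; importing
 * that fd into a client yields a handle to the same underlying buffer.
 * In real use the fd lives in a task's fd table and is closed from
 * userspace, so no kernel-side close is shown here.
 */
#if 0
static void ion_share_sketch(struct ion_client *client,
			     struct ion_handle *handle)
{
	struct ion_handle *imported;
	int fd;

	fd = ion_share_dma_buf(client, handle);
	if (fd < 0)
		return;

	/* typically done by a different client that received the fd */
	imported = ion_import_dma_buf(client, fd);
	if (!IS_ERR_OR_NULL(imported))
		ion_free(client, imported);
}
#endif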

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
		if (ret)
			return ret;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_CACHES, arg);
	case ION_IOC_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_INV_CACHES, arg);
	case ION_IOC_CLEAN_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_INV_CACHES, arg);
	case ION_IOC_GET_FLAGS:
		return client->dev->custom_ioctl(client,
						ION_IOC_GET_FLAGS, arg);
	default:
		return -ENOTTY;
	}
	return 0;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, -1, debug_name);
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_ids id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

Olav Haugan0671b9a2012-05-25 11:58:56 -07001459/**
1460 * Searches through a clients handles to find if the buffer is owned
1461 * by this client. Used for debug output.
1462 * @param client pointer to candidate owner of buffer
1463 * @param buf pointer to buffer that we are trying to find the owner of
1464 * @return 1 if found, 0 otherwise
1465 */
1466static int ion_debug_find_buffer_owner(const struct ion_client *client,
1467 const struct ion_buffer *buf)
1468{
1469 struct rb_node *n;
1470
1471 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1472 const struct ion_handle *handle = rb_entry(n,
1473 const struct ion_handle,
1474 node);
1475 if (handle->buffer == buf)
1476 return 1;
1477 }
1478 return 0;
1479}

/**
 * Adds a mem_map_data pointer to the mem_map tree.
 * Used for debug output.
 * @param mem_map The mem_map tree
 * @param data The new data to add to the tree
 */
static void ion_debug_mem_map_add(struct rb_root *mem_map,
				  struct mem_map_data *data)
{
	struct rb_node **p = &mem_map->rb_node;
	struct rb_node *parent = NULL;
	struct mem_map_data *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct mem_map_data, node);

		if (data->addr < entry->addr) {
			p = &(*p)->rb_left;
		} else if (data->addr > entry->addr) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: mem_map_data already found.\n", __func__);
			BUG();
		}
	}
	rb_link_node(&data->node, parent, p);
	rb_insert_color(&data->node, mem_map);
}
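
/*
 * Consumer sketch (illustrative): heap print_debug implementations walk
 * the resulting tree in address order, e.g. (assuming the addr fields
 * are unsigned long, as in this tree's ion_priv.h):
 *
 *	struct rb_node *n;
 *	for (n = rb_first(mem_map); n; n = rb_next(n)) {
 *		struct mem_map_data *d =
 *			rb_entry(n, struct mem_map_data, node);
 *		seq_printf(s, "%lx-%lx %s\n", d->addr, d->addr_end,
 *			   d->client_name ? d->client_name : "(null)");
 *	}
 */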

/**
 * Search for an owner of a buffer by iterating over all ION clients.
 * @param dev ion device containing pointers to all the clients.
 * @param buffer pointer to buffer we are trying to find the owner of.
 * @return name of owner.
 */
const char *ion_debug_locate_owner(const struct ion_device *dev,
				   const struct ion_buffer *buffer)
{
	struct rb_node *j;
	const char *client_name = NULL;

	for (j = rb_first(&dev->clients); j && !client_name;
	     j = rb_next(j)) {
		struct ion_client *client = rb_entry(j, struct ion_client,
						     node);
		if (ion_debug_find_buffer_owner(client, buffer))
			client_name = client->name;
	}
	return client_name;
}

/**
 * Create a mem_map of the heap.
 * @param s seq_file to log error message to.
 * @param heap The heap to create mem_map for.
 * @param mem_map The mem map to be created.
 */
void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
			      struct rb_root *mem_map)
{
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer =
				rb_entry(n, struct ion_buffer, node);
		if (buffer->heap->id == heap->id) {
			struct mem_map_data *data =
					kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				seq_printf(s, "ERROR: out of memory. Part of memory map will not be logged\n");
				break;
			}
			data->addr = buffer->priv_phys;
			data->addr_end = buffer->priv_phys + buffer->size - 1;
			data->size = buffer->size;
			data->client_name = ion_debug_locate_owner(dev, buffer);
			ion_debug_mem_map_add(mem_map, data);
		}
	}
}

/**
 * Free the memory allocated by ion_debug_mem_map_create
 * @param mem_map The mem map to free.
 */
static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
{
	if (mem_map) {
		struct rb_node *n;

		while ((n = rb_first(mem_map)) != NULL) {
			struct mem_map_data *data =
					rb_entry(n, struct mem_map_data, node);
			rb_erase(&data->node, mem_map);
			kfree(data);
		}
	}
}

/**
 * Print heap debug information.
 * @param s seq_file to log message to.
 * @param heap pointer to heap that we will print debug information for.
 */
static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
{
	if (heap->ops->print_debug) {
		struct rb_root mem_map = RB_ROOT;

		ion_debug_mem_map_create(s, heap, &mem_map);
		heap->ops->print_debug(heap, s, &mem_map);
		ion_debug_mem_map_destroy(&mem_map);
	}
}
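
/*
 * Heap-side sketch (illustrative; real implementations live in the
 * individual heap drivers, and the signature is assumed from the call
 * above and this tree's ion_heap_ops):
 *
 *	static int example_heap_print_debug(struct ion_heap *heap,
 *					    struct seq_file *s,
 *					    const struct rb_root *mem_map)
 *	{
 *		... walk mem_map with rb_first()/rb_next() and
 *		    seq_printf() each allocated range and free hole ...
 *		return 0;
 *	}
 */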

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	mutex_lock(&dev->lock);
	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	ion_heap_print_debug(s, heap);
	mutex_unlock(&dev->lock);
	return 0;
}
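
/*
 * Illustrative output of the per-heap debugfs file above (values are
 * made up; clients created via ion_open() show their pid as the name):
 *
 *	          client              pid             size
 *	     mediaserver              142          1048576
 *	             311              311           262144
 */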

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: cannot add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: cannot insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}
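
/*
 * Registration sketch (illustrative): board code typically creates each
 * heap from its platform data and hands it to the device; error
 * handling trimmed, and ion_heap_create() is assumed from ion_heap.c in
 * this tree:
 *
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */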

int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
		    void *data)
{
	struct rb_node *n;
	int ret_val = 0;

	/*
	 * traverse the list of heaps available in this system
	 * and find the heap that is specified.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);

		if (heap->type != ION_HEAP_TYPE_CP)
			continue;
		if (ION_HEAP(heap->id) != heap_id)
			continue;
		if (heap->ops->secure_heap)
			ret_val = heap->ops->secure_heap(heap, version, data);
		else
			ret_val = -EINVAL;
		break;
	}
	mutex_unlock(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL(ion_secure_heap);

int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
		      void *data)
{
	struct rb_node *n;
	int ret_val = 0;

	/*
	 * traverse the list of heaps available in this system
	 * and find the heap that is specified.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);

		if (heap->type != ION_HEAP_TYPE_CP)
			continue;
		if (ION_HEAP(heap->id) != heap_id)
			continue;
		if (heap->ops->unsecure_heap)
			ret_val = heap->ops->unsecure_heap(heap, version, data);
		else
			ret_val = -EINVAL;
		break;
	}
	mutex_unlock(&dev->lock);
	return ret_val;
}
EXPORT_SYMBOL(ion_unsecure_heap);
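
/*
 * Usage sketch (illustrative): callers identify the content-protection
 * heap by its mask bit, so the id is wrapped in ION_HEAP(). The heap id
 * and bracketing pattern below are assumptions based on the msm
 * content-protection flow:
 *
 *	ret = ion_secure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID), 0, NULL);
 *	if (!ret) {
 *		... use the secured heap ...
 *		ion_unsecure_heap(idev, ION_HEAP(ION_CP_MM_HEAP_ID), 0, NULL);
 *	}
 */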

static int ion_debug_leak_show(struct seq_file *s, void *unused)
{
	struct ion_device *dev = s->private;
	struct rb_node *n;
	struct rb_node *n2;

	/* mark all buffers as 1 */
	seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
		   "ref cnt");
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		buf->marked = 1;
	}

	/* now see which buffers we can access */
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);

		mutex_lock(&client->lock);
		for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
			struct ion_handle *handle = rb_entry(n2,
						struct ion_handle, node);

			handle->buffer->marked = 0;
		}
		mutex_unlock(&client->lock);
	}

	/* And anyone still marked as a 1 means a leaked handle somewhere */
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
						  node);

		if (buf->marked == 1)
			seq_printf(s, "%16p %16s %16zu %16d\n",
				   buf, buf->heap->name, buf->size,
				   atomic_read(&buf->ref.refcount));
	}
	mutex_unlock(&dev->lock);
	return 0;
}

static int ion_debug_leak_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_leak_show, inode->i_private);
}

static const struct file_operations debug_leak_fops = {
	.open = ion_debug_leak_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
			    &debug_leak_fops);
	return idev;
}
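
/*
 * Creation sketch (illustrative): a platform driver probe creates the
 * device once, passing its custom ioctl handler; the handler name here
 * is hypothetical:
 *
 *	idev = ion_device_create(msm_ion_custom_ioctl);
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 */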

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}
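
/*
 * Example (illustrative, hypothetical values): with platform data like
 * the following, early boot code calls ion_reserve(&pdata) before the
 * page allocator initializes, so the carveout range is withheld from
 * the buddy allocator:
 *
 *	static struct ion_platform_heap heaps[] = {
 *		{
 *			.id	= 1,
 *			.type	= ION_HEAP_TYPE_CARVEOUT,
 *			.name	= "carveout",
 *			.base	= 0x40000000,
 *			.size	= SZ_8M,
 *		},
 *	};
 *	static struct ion_platform_data pdata = {
 *		.nr	= ARRAY_SIZE(heaps),
 *		.heaps	= heaps,
 *	};
 */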