/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/msm_ion.h>

#include <mach/iommu_domains.h>
#include "ion_priv.h"
#define DEBUG

/**
 * struct ion_device - the metadata of the ion device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @lock: lock protecting the buffers & heaps trees
 * @heaps: list of all the heaps in the system
 * @clients: an rb tree of all the clients created against this device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node: node in the tree of all clients
 * @dev: backpointer to ion device
 * @handles: an rb tree of all the handles in this client
 * @lock: lock protecting the tree of handles
 * @heap_mask: mask of all supported heaps
 * @name: used for debugging
 * @task: used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref: reference count
 * @client: back pointer to the client the buffer resides in
 * @buffer: pointer to the buffer
 * @node: node in the client's handle rbtree
 * @kmap_cnt: count of times this client has mapped to kernel
 * @iommu_map_cnt: count of times this handle has been mapped through an iommu
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int iommu_map_cnt;
};

static void ion_iommu_release(struct kref *kref);

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

static void ion_iommu_add(struct ion_buffer *buffer,
			  struct ion_iommu_map *iommu)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (iommu->key < entry->key) {
			p = &(*p)->rb_left;
		} else if (iommu->key > entry->key) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer %p already has mapping for domain %d"
				" and partition %d\n", __func__,
				buffer,
				iommu_map_domain(iommu),
				iommu_map_partition(iommu));
			BUG();
		}
	}

	rb_link_node(&iommu->node, parent, p);
	rb_insert_color(&iommu->node, &buffer->iommu_maps);

}

static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
						unsigned int domain_no,
						unsigned int partition_no)
{
	struct rb_node **p = &buffer->iommu_maps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_iommu_map *entry;
	uint64_t key = domain_no;
	key = key << 32 | partition_no;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_iommu_map, node);

		if (key < entry->key)
			p = &(*p)->rb_left;
		else if (key > entry->key)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	return NULL;
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;

	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;

	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

/**
 * Check for delayed IOMMU unmapping. Also unmap any outstanding
 * mappings which would otherwise have been leaked.
 */
static void ion_iommu_delayed_unmap(struct ion_buffer *buffer)
{
	struct ion_iommu_map *iommu_map;
	struct rb_node *node;
	const struct rb_root *rb = &(buffer->iommu_maps);
	unsigned long ref_count;
	unsigned int delayed_unmap;

	mutex_lock(&buffer->lock);

	while ((node = rb_first(rb)) != 0) {
		iommu_map = rb_entry(node, struct ion_iommu_map, node);
		ref_count = atomic_read(&iommu_map->ref.refcount);
		delayed_unmap = iommu_map->flags & ION_IOMMU_UNMAP_DELAYED;

		if ((delayed_unmap && ref_count > 1) || !delayed_unmap) {
			pr_err("%s: Virtual memory address leak in domain %u, partition %u\n",
				__func__, iommu_map->domain_info[DI_DOMAIN_NUM],
				iommu_map->domain_info[DI_PARTITION_NUM]);
		}
		/* set ref count to 1 to force release */
		kref_init(&iommu_map->ref);
		kref_put(&iommu_map->ref, ion_iommu_release);
	}

	mutex_unlock(&buffer->lock);
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);

	ion_iommu_delayed_unmap(buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	unsigned long secure_allocation = flags & ION_SECURE;
	const unsigned int MAX_DBG_STR_LEN = 64;
	char dbg_str[MAX_DBG_STR_LEN];
	unsigned int dbg_str_idx = 0;

	dbg_str[0] = '\0';

	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & flags))
			continue;
		/* Do not allow un-secure heap if secure is specified */
		if (secure_allocation && (heap->type != ION_HEAP_TYPE_CP))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
		if (dbg_str_idx < MAX_DBG_STR_LEN) {
			unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1;
			int ret_value = snprintf(&dbg_str[dbg_str_idx],
						len_left, "%s ", heap->name);
			if (ret_value >= len_left) {
				/* overflow */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
				dbg_str_idx = MAX_DBG_STR_LEN;
			} else if (ret_value >= 0) {
				dbg_str_idx += ret_value;
			} else {
				/* error */
				dbg_str[MAX_DBG_STR_LEN-1] = '\0';
			}
		}
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer)) {
		pr_debug("ION is unable to allocate 0x%x bytes (alignment: "
			 "0x%x) from heap(s) %sfor client %s with heap "
			 "mask 0x%x\n",
			len, align, dbg_str, client->name, client->heap_mask);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}


	return handle;
}
EXPORT_SYMBOL(ion_alloc);
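
/*
 * Illustrative sketch (not part of the driver): how an in-kernel user might
 * allocate and release a buffer through this interface.  The device pointer
 * "idev" and the heap id "heap_id" are placeholders for this example; the
 * flags word selects heaps by id, exactly as the loop above checks
 * (1 << heap->id) & flags.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, -1, "example-client");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *
 *	handle = ion_alloc(client, SZ_1M, SZ_4K, 1 << heap_id);
 *	if (IS_ERR_OR_NULL(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *
 *	... use the buffer ...
 *
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */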

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		mutex_unlock(&client->lock);
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);
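
/*
 * Illustrative sketch (not part of the driver): looking up the physical
 * address of a buffer that came from a physically contiguous heap.  The
 * "client"/"handle" pair is assumed to come from ion_client_create() and
 * ion_alloc() above; heaps without a phys op make this return -ENODEV,
 * so callers must be prepared for that.
 *
 *	ion_phys_addr_t pa;
 *	size_t size;
 *	int ret;
 *
 *	ret = ion_phys(client, handle, &pa, &size);
 *	if (ret)
 *		return ret;
 */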

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

static struct ion_iommu_map *__ion_iommu_map(struct ion_buffer *buffer,
		int domain_num, int partition_num, unsigned long align,
		unsigned long iova_length, unsigned long flags,
		unsigned long *iova)
{
	struct ion_iommu_map *data;
	int ret;

	data = kmalloc(sizeof(*data), GFP_ATOMIC);

	if (!data)
		return ERR_PTR(-ENOMEM);

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	ret = buffer->heap->ops->map_iommu(buffer, data,
						domain_num,
						partition_num,
						align,
						iova_length,
						flags);

	if (ret)
		goto out;

	kref_init(&data->ref);
	*iova = data->iova_addr;

	ion_iommu_add(buffer, data);

	return data;

out:
	kfree(data);
	return ERR_PTR(ret);
}

617int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
618 int domain_num, int partition_num, unsigned long align,
619 unsigned long iova_length, unsigned long *iova,
620 unsigned long *buffer_size,
Olav Hauganb3676592012-03-02 15:02:25 -0800621 unsigned long flags, unsigned long iommu_flags)
Laura Abbott8c017362011-09-22 20:59:12 -0700622{
623 struct ion_buffer *buffer;
624 struct ion_iommu_map *iommu_map;
625 int ret = 0;
626
Olav Haugan79e9ffa2012-02-24 13:11:10 -0800627 if (ION_IS_CACHED(flags)) {
628 pr_err("%s: Cannot map iommu as cached.\n", __func__);
629 return -EINVAL;
630 }
631
Laura Abbott8c017362011-09-22 20:59:12 -0700632 mutex_lock(&client->lock);
633 if (!ion_handle_validate(client, handle)) {
634 pr_err("%s: invalid handle passed to map_kernel.\n",
635 __func__);
636 mutex_unlock(&client->lock);
637 return -EINVAL;
638 }
639
640 buffer = handle->buffer;
641 mutex_lock(&buffer->lock);
642
643 if (!handle->buffer->heap->ops->map_iommu) {
644 pr_err("%s: map_iommu is not implemented by this heap.\n",
645 __func__);
646 ret = -ENODEV;
647 goto out;
648 }
649
Laura Abbott8c017362011-09-22 20:59:12 -0700650 /*
651 * If clients don't want a custom iova length, just use whatever
652 * the buffer size is
653 */
654 if (!iova_length)
655 iova_length = buffer->size;
656
657 if (buffer->size > iova_length) {
658 pr_debug("%s: iova length %lx is not at least buffer size"
659 " %x\n", __func__, iova_length, buffer->size);
660 ret = -EINVAL;
661 goto out;
662 }
663
664 if (buffer->size & ~PAGE_MASK) {
665 pr_debug("%s: buffer size %x is not aligned to %lx", __func__,
666 buffer->size, PAGE_SIZE);
667 ret = -EINVAL;
668 goto out;
669 }
670
671 if (iova_length & ~PAGE_MASK) {
672 pr_debug("%s: iova_length %lx is not aligned to %lx", __func__,
673 iova_length, PAGE_SIZE);
674 ret = -EINVAL;
675 goto out;
676 }
677
678 iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);
Olav Hauganb3676592012-03-02 15:02:25 -0800679 if (!iommu_map) {
680 iommu_map = __ion_iommu_map(buffer, domain_num, partition_num,
681 align, iova_length, flags, iova);
Laura Abbottb14ed962012-01-30 14:18:08 -0800682 if (!IS_ERR_OR_NULL(iommu_map)) {
Olav Hauganb3676592012-03-02 15:02:25 -0800683 iommu_map->flags = iommu_flags;
684
685 if (iommu_map->flags & ION_IOMMU_UNMAP_DELAYED)
686 kref_get(&iommu_map->ref);
687 }
Laura Abbott8c017362011-09-22 20:59:12 -0700688 } else {
Olav Hauganb3676592012-03-02 15:02:25 -0800689 if (iommu_map->flags != iommu_flags) {
690 pr_err("%s: handle %p is already mapped with iommu flags %lx, trying to map with flags %lx\n",
691 __func__, handle,
692 iommu_map->flags, iommu_flags);
Olav Hauganb3676592012-03-02 15:02:25 -0800693 ret = -EINVAL;
694 } else if (iommu_map->mapped_size != iova_length) {
Laura Abbott8c017362011-09-22 20:59:12 -0700695 pr_err("%s: handle %p is already mapped with length"
Olav Hauganb3676592012-03-02 15:02:25 -0800696 " %x, trying to map with length %lx\n",
Laura Abbott8c017362011-09-22 20:59:12 -0700697 __func__, handle, iommu_map->mapped_size,
698 iova_length);
Laura Abbott8c017362011-09-22 20:59:12 -0700699 ret = -EINVAL;
700 } else {
701 kref_get(&iommu_map->ref);
702 *iova = iommu_map->iova_addr;
703 }
704 }
Laura Abbottb14ed962012-01-30 14:18:08 -0800705 if (!ret)
706 buffer->iommu_map_cnt++;
Laura Abbott8c017362011-09-22 20:59:12 -0700707 *buffer_size = buffer->size;
708out:
709 mutex_unlock(&buffer->lock);
710 mutex_unlock(&client->lock);
711 return ret;
712}
713EXPORT_SYMBOL(ion_map_iommu);
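
/*
 * Illustrative sketch (not part of the driver): mapping a handle into an
 * SMMU domain and tearing the mapping down again.  The domain and partition
 * numbers are placeholders obtained elsewhere (e.g. from the platform's
 * iommu domain layer); passing 0 for iova_length falls back to the buffer
 * size, and ION_IOMMU_UNMAP_DELAYED keeps the mapping alive until the buffer
 * itself is freed, as handled in ion_iommu_delayed_unmap() above.
 *
 *	unsigned long iova, buffer_size;
 *	int ret;
 *
 *	ret = ion_map_iommu(client, handle, domain_num, partition_num,
 *			    SZ_4K, 0, &iova, &buffer_size,
 *			    0, ION_IOMMU_UNMAP_DELAYED);
 *	if (ret)
 *		return ret;
 *
 *	... program the device with iova ...
 *
 *	ion_unmap_iommu(client, handle, domain_num, partition_num);
 */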

static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map = container_of(kref, struct ion_iommu_map,
						ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num)
{
	struct ion_iommu_map *iommu_map;
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;

	mutex_lock(&buffer->lock);

	iommu_map = ion_iommu_lookup(buffer, domain_num, partition_num);

	if (!iommu_map) {
		WARN(1, "%s: (%d,%d) was never mapped for %p\n", __func__,
				domain_num, partition_num, buffer);
		goto out;
	}

	kref_put(&iommu_map->ref, ion_iommu_release);

	buffer->iommu_map_cnt--;
out:
	mutex_unlock(&buffer->lock);

	mutex_unlock(&client->lock);

}
EXPORT_SYMBOL(ion_unmap_iommu);

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
			unsigned long flags)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (ion_validate_buffer_flags(buffer, flags)) {
		mutex_unlock(&client->lock);
		return ERR_PTR(-EEXIST);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
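
/*
 * Illustrative sketch (not part of the driver): obtaining a kernel virtual
 * address for a handle.  "cache_flags" and "buffer_len" are placeholders;
 * the flags must agree with the cached/uncached attribute the buffer was
 * allocated with, or ion_validate_buffer_flags() above rejects the mapping
 * with -EEXIST.
 *
 *	void *vaddr;
 *
 *	vaddr = ion_map_kernel(client, handle, cache_flags);
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, buffer_len);
 *
 *	ion_unmap_kernel(client, handle);
 */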

int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *uaddr, unsigned long offset, unsigned long len,
			unsigned int cmd)
{
	struct ion_buffer *buffer;
	int ret = -EINVAL;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to do_cache_op.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!ION_IS_CACHED(buffer->flags)) {
		ret = 0;
		goto out;
	}

	if (!handle->buffer->heap->ops->cache_op) {
		pr_err("%s: cache_op is not implemented by this heap.\n",
		       __func__);
		ret = -ENODEV;
		goto out;
	}


	ret = buffer->heap->ops->cache_op(buffer->heap, buffer, uaddr,
						offset, len, cmd);

out:
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;

}
EXPORT_SYMBOL(ion_do_cache_op);
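
/*
 * Illustrative sketch (not part of the driver): flushing a cached buffer
 * before handing it to hardware.  The command value is assumed to be one of
 * the ION_IOC_*_CACHES codes that the ioctl path below forwards to the
 * platform's custom handler; uncached buffers simply return 0 here.
 *
 *	ret = ion_do_cache_op(client, handle, vaddr, 0, buffer_len,
 *			      ION_IOC_CLEAN_INV_CACHES);
 */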
Laura Abbottabcb6f72011-10-04 16:26:49 -0700845
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700846static int ion_debug_client_show(struct seq_file *s, void *unused)
847{
848 struct ion_client *client = s->private;
849 struct rb_node *n;
Olav Haugan854c9e12012-05-16 16:34:28 -0700850 struct rb_node *n2;
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -0700851
Olav Haugan854c9e12012-05-16 16:34:28 -0700852 seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s : %12.12s : %s\n",
853 "heap_name", "size_in_bytes", "handle refcount",
854 "buffer", "physical", "[domain,partition] - virt");
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700855
856 mutex_lock(&client->lock);
857 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
858 struct ion_handle *handle = rb_entry(n, struct ion_handle,
859 node);
860 enum ion_heap_type type = handle->buffer->heap->type;
861
Olav Haugan854c9e12012-05-16 16:34:28 -0700862 seq_printf(s, "%16.16s: %16x : %16d : %12p",
Laura Abbott68c80642011-10-21 17:32:27 -0700863 handle->buffer->heap->name,
864 handle->buffer->size,
865 atomic_read(&handle->ref.refcount),
866 handle->buffer);
Olav Haugan854c9e12012-05-16 16:34:28 -0700867
868 if (type == ION_HEAP_TYPE_SYSTEM_CONTIG ||
869 type == ION_HEAP_TYPE_CARVEOUT ||
870 type == ION_HEAP_TYPE_CP)
871 seq_printf(s, " : %12lx", handle->buffer->priv_phys);
872 else
873 seq_printf(s, " : %12s", "N/A");
874
875 for (n2 = rb_first(&handle->buffer->iommu_maps); n2;
876 n2 = rb_next(n2)) {
877 struct ion_iommu_map *imap =
878 rb_entry(n2, struct ion_iommu_map, node);
879 seq_printf(s, " : [%d,%d] - %8lx",
880 imap->domain_info[DI_DOMAIN_NUM],
881 imap->domain_info[DI_PARTITION_NUM],
882 imap->iova_addr);
883 }
884 seq_printf(s, "\n");
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700885 }
886 mutex_unlock(&client->lock);
887
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -0700888 return 0;
889}
890
891static int ion_debug_client_open(struct inode *inode, struct file *file)
892{
893 return single_open(file, ion_debug_client_show, inode->i_private);
894}
895
896static const struct file_operations debug_client_fops = {
897 .open = ion_debug_client_open,
898 .read = seq_read,
899 .llseek = seq_lseek,
900 .release = single_release,
901};
902
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;
	unsigned int name_len;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	name_len = strnlen(name, 64);

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);

	client->name = kzalloc(name_len+1, GFP_KERNEL);
	if (!client->name) {
		put_task_struct(current->group_leader);
		kfree(client);
		return ERR_PTR(-ENOMEM);
	} else {
		strlcpy(client->name, name, name_len+1);
	}

	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);


	client->debug_root = debugfs_create_file(name, 0664,
				dev->debug_root, client,
				&debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001003
Laura Abbott273dd8e2011-10-12 14:26:33 -07001004int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
1005 unsigned long *flags)
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001006{
1007 struct ion_buffer *buffer;
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001008
1009 mutex_lock(&client->lock);
1010 if (!ion_handle_validate(client, handle)) {
Laura Abbott273dd8e2011-10-12 14:26:33 -07001011 pr_err("%s: invalid handle passed to %s.\n",
1012 __func__, __func__);
Rebecca Schultz Zavin46d71332012-05-07 16:06:32 -07001013 mutex_unlock(&client->lock);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001014 return -EINVAL;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001015 }
Laura Abbott273dd8e2011-10-12 14:26:33 -07001016 buffer = handle->buffer;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001017 mutex_lock(&buffer->lock);
Laura Abbott273dd8e2011-10-12 14:26:33 -07001018 *flags = buffer->flags;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001019 mutex_unlock(&buffer->lock);
Laura Abbott273dd8e2011-10-12 14:26:33 -07001020 mutex_unlock(&client->lock);
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001021
Laura Abbott273dd8e2011-10-12 14:26:33 -07001022 return 0;
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001023}
Laura Abbott273dd8e2011-10-12 14:26:33 -07001024EXPORT_SYMBOL(ion_handle_get_flags);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001025
Laura Abbott8c017362011-09-22 20:59:12 -07001026int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
1027 unsigned long *size)
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001028{
Laura Abbott8c017362011-09-22 20:59:12 -07001029 struct ion_buffer *buffer;
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001030
Laura Abbott8c017362011-09-22 20:59:12 -07001031 mutex_lock(&client->lock);
1032 if (!ion_handle_validate(client, handle)) {
1033 pr_err("%s: invalid handle passed to %s.\n",
1034 __func__, __func__);
1035 mutex_unlock(&client->lock);
1036 return -EINVAL;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001037 }
Laura Abbott8c017362011-09-22 20:59:12 -07001038 buffer = handle->buffer;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001039 mutex_lock(&buffer->lock);
Laura Abbott8c017362011-09-22 20:59:12 -07001040 *size = buffer->size;
Rebecca Schultz Zavinbe4a1ee2012-04-26 20:44:10 -07001041 mutex_unlock(&buffer->lock);
Laura Abbott8c017362011-09-22 20:59:12 -07001042 mutex_unlock(&client->lock);
1043
1044 return 0;
1045}
1046EXPORT_SYMBOL(ion_handle_get_size);
1047
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt++;
	mutex_unlock(&buffer->lock);
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);

	mutex_lock(&buffer->lock);
	buffer->umap_cnt--;
	mutex_unlock(&buffer->lock);

	if (buffer->heap->ops->unmap_user)
		buffer->heap->ops->unmap_user(buffer->heap, buffer);
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
			"to userspace\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);

	if (ret) {
		mutex_unlock(&buffer->lock);
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
	} else {
		buffer->umap_cnt++;
		mutex_unlock(&buffer->lock);

		vma->vm_ops = &ion_vm_ops;
		/*
		 * move the buffer into the vm_private_data so we can access it
		 * from vma_open/close
		 */
		vma->vm_private_data = buffer;
	}
	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;
	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;
	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	if (!vaddr)
		return -ENOMEM;
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

static int ion_share_set_flags(struct ion_client *client,
				struct ion_handle *handle,
				unsigned long flags)
{
	struct ion_buffer *buffer;
	bool valid_handle;
	unsigned long ion_flags = 0;
	if (flags & O_DSYNC)
		ion_flags = ION_SET_UNCACHED(ion_flags);
	else
		ion_flags = ION_SET_CACHED(ion_flags);


	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to set_flags.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;

	return 0;
}


int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
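
/*
 * Illustrative sketch (not part of the driver): handing a buffer from one
 * kernel client to another through the dma-buf fd exported above.
 * "client_a"/"handle_a" and "client_b" are placeholders; both resulting
 * handles reference the same ion_buffer, which is only released once every
 * handle and fd referencing it is gone.
 *
 *	int fd;
 *	struct ion_handle *handle_b;
 *
 *	fd = ion_share_dma_buf(client_a, handle_a);
 *	if (fd < 0)
 *		return fd;
 *
 *	handle_b = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR_OR_NULL(handle_b))
 *		return PTR_ERR(handle_b);
 */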

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
		if (ret)
			return ret;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_CACHES, arg);
	case ION_IOC_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_INV_CACHES, arg);
	case ION_IOC_CLEAN_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_INV_CACHES, arg);
	case ION_IOC_GET_FLAGS:
		return client->dev->custom_ioctl(client,
						ION_IOC_GET_FLAGS, arg);
	default:
		return -ENOTTY;
	}
	return 0;
}
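
/*
 * Illustrative sketch (not part of the driver): the equivalent flow from
 * userspace through /dev/ion.  The structures match the ioctl handler above
 * (ion_allocation_data, ion_fd_data, ion_handle_data); the heap id is a
 * placeholder for whatever the platform advertises.
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc_data = {
 *		.len = 1024 * 1024,
 *		.align = 4096,
 *		.flags = 1 << heap_id,
 *	};
 *	struct ion_fd_data fd_data;
 *	struct ion_handle_data handle_data;
 *	void *ptr;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *	fd_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
 *	ptr = mmap(NULL, alloc_data.len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd_data.fd, 0);
 *
 *	... use ptr ...
 *
 *	munmap(ptr, alloc_data.len);
 *	close(fd_data.fd);
 *	handle_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &handle_data);
 *	close(ion_fd);
 */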

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, -1, debug_name);
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_ids id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

/**
 * Searches through a client's handles to find if the buffer is owned
 * by this client. Used for debug output.
 * @param client pointer to candidate owner of buffer
 * @param buf pointer to buffer that we are trying to find the owner of
 * @return 1 if found, 0 otherwise
 */
static int ion_debug_find_buffer_owner(const struct ion_client *client,
					const struct ion_buffer *buf)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		const struct ion_handle *handle = rb_entry(n,
						     const struct ion_handle,
						     node);
		if (handle->buffer == buf)
			return 1;
	}
	return 0;
}
1484
1485/**
1486 * Adds a mem_map_data entry to the mem_map tree, keyed by buffer
1487 * start address. Used for debug output.
1488 * @param mem_map The mem_map tree
1489 * @param data The new data to add to the tree
1490 */
1491static void ion_debug_mem_map_add(struct rb_root *mem_map,
1492 struct mem_map_data *data)
1493{
1494 struct rb_node **p = &mem_map->rb_node;
1495 struct rb_node *parent = NULL;
1496 struct mem_map_data *entry;
1497
1498 while (*p) {
1499 parent = *p;
1500 entry = rb_entry(parent, struct mem_map_data, node);
1501
1502 if (data->addr < entry->addr) {
1503 p = &(*p)->rb_left;
1504 } else if (data->addr > entry->addr) {
1505 p = &(*p)->rb_right;
1506 } else {
1507			pr_err("%s: mem_map_data already found.\n", __func__);
1508 BUG();
1509 }
1510 }
1511 rb_link_node(&data->node, parent, p);
1512 rb_insert_color(&data->node, mem_map);
1513}
1514
1515/**
1516 * Search for an owner of a buffer by iterating over all ION clients.
1517 * @param dev ion device containing pointers to all the clients.
1518 * @param buffer pointer to buffer we are trying to find the owner of.
1519 * @return name of the owning client, or NULL if no owner is found.
1520 */
1521const char *ion_debug_locate_owner(const struct ion_device *dev,
1522 const struct ion_buffer *buffer)
1523{
1524 struct rb_node *j;
1525 const char *client_name = NULL;
1526
Laura Abbottb14ed962012-01-30 14:18:08 -08001527 for (j = rb_first(&dev->clients); j && !client_name;
Olav Haugan0671b9a2012-05-25 11:58:56 -07001528 j = rb_next(j)) {
1529 struct ion_client *client = rb_entry(j, struct ion_client,
1530 node);
1531 if (ion_debug_find_buffer_owner(client, buffer))
1532 client_name = client->name;
1533 }
1534 return client_name;
1535}
1536
1537/**
1538 * Create a mem_map of the heap.
1539 * @param s seq_file to log error messages to.
1540 * @param heap The heap to create mem_map for.
1541 * @param mem_map The mem map to be created.
1542 */
1543void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
1544 struct rb_root *mem_map)
1545{
1546 struct ion_device *dev = heap->dev;
1547 struct rb_node *n;
1548
1549 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1550 struct ion_buffer *buffer =
1551 rb_entry(n, struct ion_buffer, node);
1552 if (buffer->heap->id == heap->id) {
1553 struct mem_map_data *data =
1554 kzalloc(sizeof(*data), GFP_KERNEL);
1555 if (!data) {
1556 seq_printf(s, "ERROR: out of memory. "
1557 "Part of memory map will not be logged\n");
1558 break;
1559 }
1560 data->addr = buffer->priv_phys;
1561 data->addr_end = buffer->priv_phys + buffer->size-1;
1562 data->size = buffer->size;
1563 data->client_name = ion_debug_locate_owner(dev, buffer);
1564 ion_debug_mem_map_add(mem_map, data);
1565 }
1566 }
1567}
1568
1569/**
1570 * Free the memory allocated by ion_debug_mem_map_create
1571 * @param mem_map The mem map to free.
1572 */
1573static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
1574{
1575 if (mem_map) {
1576 struct rb_node *n;
1577 while ((n = rb_first(mem_map)) != 0) {
1578 struct mem_map_data *data =
1579 rb_entry(n, struct mem_map_data, node);
1580 rb_erase(&data->node, mem_map);
1581 kfree(data);
1582 }
1583 }
1584}
1585
1586/**
1587 * Print heap debug information.
1588 * @param s seq_file to log message to.
1589 * @param heap pointer to heap that we will print debug information for.
1590 */
1591static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
1592{
1593 if (heap->ops->print_debug) {
1594 struct rb_root mem_map = RB_ROOT;
1595 ion_debug_mem_map_create(s, heap, &mem_map);
1596 heap->ops->print_debug(heap, s, &mem_map);
1597 ion_debug_mem_map_destroy(&mem_map);
1598 }
1599}
1600
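/**
 * ion_debug_heap_show() - debugfs seq_file handler for a heap
 *
 * Prints one line per client holding buffers in this heap (task comm or
 * client name, pid, bytes held), then asks the heap to print its own
 * debug state via ion_heap_print_debug().
 */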
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001601static int ion_debug_heap_show(struct seq_file *s, void *unused)
1602{
1603 struct ion_heap *heap = s->private;
1604 struct ion_device *dev = heap->dev;
1605 struct rb_node *n;
1606
Olav Haugane4900b52012-05-25 11:58:03 -07001607 mutex_lock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001608	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
Rebecca Schultz Zavin043a6142012-02-01 11:09:46 -08001609
Laura Abbottb14ed962012-01-30 14:18:08 -08001610 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001611 struct ion_client *client = rb_entry(n, struct ion_client,
1612 node);
Laura Abbott3647ac32011-10-31 14:09:53 -07001613 size_t size = ion_debug_heap_total(client, heap->id);
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001614 if (!size)
1615 continue;
Laura Abbottb14ed962012-01-30 14:18:08 -08001616 if (client->task) {
1617 char task_comm[TASK_COMM_LEN];
1618
1619 get_task_comm(task_comm, client->task);
1620			seq_printf(s, "%16s %16u %16u\n", task_comm,
1621 client->pid, size);
1622 } else {
1623			seq_printf(s, "%16s %16u %16u\n", client->name,
1624 client->pid, size);
1625 }
Rebecca Schultz Zavinc80005a2011-06-29 19:44:29 -07001626 }
Olav Haugan0671b9a2012-05-25 11:58:56 -07001627 ion_heap_print_debug(s, heap);
Olav Haugane4900b52012-05-25 11:58:03 -07001628 mutex_unlock(&dev->lock);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001629 return 0;
1630}
1631
1632static int ion_debug_heap_open(struct inode *inode, struct file *file)
1633{
1634 return single_open(file, ion_debug_heap_show, inode->i_private);
1635}
1636
1637static const struct file_operations debug_heap_fops = {
1638 .open = ion_debug_heap_open,
1639 .read = seq_read,
1640 .llseek = seq_lseek,
1641 .release = single_release,
1642};
1643
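/**
 * ion_device_add_heap() - register a heap with an ion device
 *
 * Inserts the heap into the device's rb-tree, keyed by heap id, and
 * creates a debugfs file named after the heap.  Heaps missing the
 * mandatory allocate/free/map_dma/unmap_dma ops are reported but, as
 * written, are still inserted; duplicate heap ids are rejected.
 */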
1644void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1645{
1646 struct rb_node **p = &dev->heaps.rb_node;
1647 struct rb_node *parent = NULL;
1648 struct ion_heap *entry;
1649
Laura Abbottb14ed962012-01-30 14:18:08 -08001650 if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1651 !heap->ops->unmap_dma)
1652		pr_err("%s: cannot add heap with invalid ops struct.\n",
1653 __func__);
1654
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001655 heap->dev = dev;
1656 mutex_lock(&dev->lock);
1657 while (*p) {
1658 parent = *p;
1659 entry = rb_entry(parent, struct ion_heap, node);
1660
1661 if (heap->id < entry->id) {
1662 p = &(*p)->rb_left;
1663		} else if (heap->id > entry->id) {
1664 p = &(*p)->rb_right;
1665 } else {
1666			pr_err("%s: cannot insert multiple heaps with "
1667 "id %d\n", __func__, heap->id);
1668 goto end;
1669 }
1670 }
1671
1672 rb_link_node(&heap->node, parent, p);
1673 rb_insert_color(&heap->node, &dev->heaps);
1674 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1675 &debug_heap_fops);
1676end:
1677 mutex_unlock(&dev->lock);
1678}
1679
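/**
 * ion_secure_heap() - ask a content-protection heap to secure itself
 * @dev:	ion device whose heaps are searched
 * @heap_id:	id of the heap to secure
 * @version:	content-protection version, passed through to the heap
 * @data:	opaque data, passed through to the heap
 *
 * Only heaps of type ION_HEAP_TYPE_CP are considered.  Returns the heap's
 * result, -EINVAL if the matching heap has no secure_heap op, or 0 if no
 * matching heap was found.
 */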
Laura Abbott7e446482012-06-13 15:59:39 -07001680int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
1681 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001682{
1683 struct rb_node *n;
1684 int ret_val = 0;
1685
1686 /*
1687 * traverse the list of heaps available in this system
1688 * and find the heap that is specified.
1689 */
1690 mutex_lock(&dev->lock);
1691 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1692 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1693 if (heap->type != ION_HEAP_TYPE_CP)
1694 continue;
1695 if (ION_HEAP(heap->id) != heap_id)
1696 continue;
1697 if (heap->ops->secure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001698 ret_val = heap->ops->secure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001699 else
1700 ret_val = -EINVAL;
1701 break;
1702 }
1703 mutex_unlock(&dev->lock);
1704 return ret_val;
1705}
Olav Hauganbd453a92012-07-05 14:21:34 -07001706EXPORT_SYMBOL(ion_secure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001707
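/**
 * ion_unsecure_heap() - ask a content-protection heap to unsecure itself
 * @dev:	ion device whose heaps are searched
 * @heap_id:	id of the heap to unsecure
 * @version:	content-protection version, passed through to the heap
 * @data:	opaque data, passed through to the heap
 *
 * Counterpart of ion_secure_heap(); only ION_HEAP_TYPE_CP heaps are
 * considered and the heap's unsecure_heap op is invoked if present.
 */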
Laura Abbott7e446482012-06-13 15:59:39 -07001708int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
1709 void *data)
Olav Haugan0a852512012-01-09 10:20:55 -08001710{
1711 struct rb_node *n;
1712 int ret_val = 0;
1713
1714 /*
1715 * traverse the list of heaps available in this system
1716 * and find the heap that is specified.
1717 */
1718 mutex_lock(&dev->lock);
1719 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
1720 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
1721 if (heap->type != ION_HEAP_TYPE_CP)
1722 continue;
1723 if (ION_HEAP(heap->id) != heap_id)
1724 continue;
1725		if (heap->ops->unsecure_heap)
Laura Abbott7e446482012-06-13 15:59:39 -07001726 ret_val = heap->ops->unsecure_heap(heap, version, data);
Olav Haugan0a852512012-01-09 10:20:55 -08001727 else
1728 ret_val = -EINVAL;
1729 break;
1730 }
1731 mutex_unlock(&dev->lock);
1732 return ret_val;
1733}
Olav Hauganbd453a92012-07-05 14:21:34 -07001734EXPORT_SYMBOL(ion_unsecure_heap);
Olav Haugan0a852512012-01-09 10:20:55 -08001735
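/**
 * ion_debug_leak_show() - debugfs handler for check_leaked_fds
 *
 * Marks every buffer on the device, clears the mark for every buffer
 * reachable through some client's handle, then prints the buffers that
 * remain marked: those are held only by fds or leaked references rather
 * than by a live client handle.
 */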
Laura Abbott404f8242011-10-31 14:22:53 -07001736static int ion_debug_leak_show(struct seq_file *s, void *unused)
1737{
1738 struct ion_device *dev = s->private;
1739 struct rb_node *n;
1740 struct rb_node *n2;
1741
1742 /* mark all buffers as 1 */
1743	seq_printf(s, "%16s %16s %16s %16s\n", "buffer", "heap", "size",
1744 "ref cnt");
1745 mutex_lock(&dev->lock);
1746 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1747 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1748 node);
1749
1750 buf->marked = 1;
1751 }
1752
1753 /* now see which buffers we can access */
Laura Abbottb14ed962012-01-30 14:18:08 -08001754 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
Laura Abbott404f8242011-10-31 14:22:53 -07001755 struct ion_client *client = rb_entry(n, struct ion_client,
1756 node);
1757
1758 mutex_lock(&client->lock);
1759 for (n2 = rb_first(&client->handles); n2; n2 = rb_next(n2)) {
1760 struct ion_handle *handle = rb_entry(n2,
1761 struct ion_handle, node);
1762
1763 handle->buffer->marked = 0;
1764
1765 }
1766 mutex_unlock(&client->lock);
1767
1768 }
1769
Laura Abbott404f8242011-10-31 14:22:53 -07001770 /* And anyone still marked as a 1 means a leaked handle somewhere */
1771 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1772 struct ion_buffer *buf = rb_entry(n, struct ion_buffer,
1773 node);
1774
1775 if (buf->marked == 1)
1776			seq_printf(s, "%16x %16s %16x %16d\n",
1777 (int)buf, buf->heap->name, buf->size,
1778 atomic_read(&buf->ref.refcount));
1779 }
1780 mutex_unlock(&dev->lock);
1781 return 0;
1782}
1783
1784static int ion_debug_leak_open(struct inode *inode, struct file *file)
1785{
1786 return single_open(file, ion_debug_leak_show, inode->i_private);
1787}
1788
1789static const struct file_operations debug_leak_fops = {
1790 .open = ion_debug_leak_open,
1791 .read = seq_read,
1792 .llseek = seq_lseek,
1793 .release = single_release,
1794};
1795
1796
1797
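/**
 * ion_device_create() - allocate and register an ion device
 * @custom_ioctl:	platform hook used for ioctls this core does not
 *			handle itself (cache maintenance, flag queries)
 *
 * Registers the "ion" misc device and creates the "ion" debugfs
 * directory.  Returns the new device or an ERR_PTR() on failure.
 */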
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001798struct ion_device *ion_device_create(long (*custom_ioctl)
1799 (struct ion_client *client,
1800 unsigned int cmd,
1801 unsigned long arg))
1802{
1803 struct ion_device *idev;
1804 int ret;
1805
1806 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1807 if (!idev)
1808 return ERR_PTR(-ENOMEM);
1809
1810 idev->dev.minor = MISC_DYNAMIC_MINOR;
1811 idev->dev.name = "ion";
1812 idev->dev.fops = &ion_fops;
1813 idev->dev.parent = NULL;
1814 ret = misc_register(&idev->dev);
1815	if (ret) {
1816		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
1817		return ERR_PTR(ret);
1818	}
1819
1820 idev->debug_root = debugfs_create_dir("ion", NULL);
1821 if (IS_ERR_OR_NULL(idev->debug_root))
1822 pr_err("ion: failed to create debug files.\n");
1823
1824 idev->custom_ioctl = custom_ioctl;
1825 idev->buffers = RB_ROOT;
1826 mutex_init(&idev->lock);
1827 idev->heaps = RB_ROOT;
Laura Abbottb14ed962012-01-30 14:18:08 -08001828 idev->clients = RB_ROOT;
Laura Abbott404f8242011-10-31 14:22:53 -07001829 debugfs_create_file("check_leaked_fds", 0664, idev->debug_root, idev,
1830 &debug_leak_fops);
Rebecca Schultz Zavin0c38bfd2011-06-29 19:44:29 -07001831 return idev;
1832}
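/*
 * Minimal usage sketch (illustrative only): a board file or platform
 * driver normally creates the device once and then registers each heap
 * described by its platform data.  my_custom_ioctl, pdata and
 * my_heap_create() are placeholders, not symbols defined in this file:
 *
 *	idev = ion_device_create(my_custom_ioctl);
 *	if (IS_ERR_OR_NULL(idev))
 *		return PTR_ERR(idev);
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = my_heap_create(&pdata->heaps[i]);
 *		if (!IS_ERR_OR_NULL(heap))
 *			ion_device_add_heap(idev, heap);
 *	}
 */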
1833
1834void ion_device_destroy(struct ion_device *dev)
1835{
1836 misc_deregister(&dev->dev);
1837 /* XXX need to free the heaps and clients ? */
1838 kfree(dev);
1839}
Laura Abbottb14ed962012-01-30 14:18:08 -08001840
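/**
 * ion_reserve() - reserve carveout memory for statically placed heaps
 * @data:	platform data describing the heaps
 *
 * Called early during boot, before the page allocator owns the memory,
 * to memblock_reserve() the base/size of every heap whose size is
 * non-zero.  Failures are logged but are not fatal.
 */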
1841void __init ion_reserve(struct ion_platform_data *data)
1842{
1843 int i, ret;
1844
1845 for (i = 0; i < data->nr; i++) {
1846 if (data->heaps[i].size == 0)
1847 continue;
1848 ret = memblock_reserve(data->heaps[i].base,
1849 data->heaps[i].size);
1850 if (ret)
1851 pr_err("memblock reserve of %x@%lx failed\n",
1852 data->heaps[i].size,
1853 data->heaps[i].base);
1854 }
1855}