/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* define DEBUG before the printk headers are pulled in so that
   pr_debug() is compiled in */
#define DEBUG

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include "ion_priv.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev: the actual misc device
 * @buffers: an rb tree of all the existing buffers
 * @lock: lock protecting the buffers & heaps trees
 * @heaps: an rb tree of all the heaps in the system, sorted by id
 * @custom_ioctl: hook for device-specific ioctls
 * @user_clients: an rb tree of all the clients created from userspace
 * @kernel_clients: an rb tree of all the clients created from the kernel
 * @debug_root: root dentry of this device's debugfs directory
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root user_clients;
	struct rb_root kernel_clients;
	struct dentry *debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @ref: for reference counting the client
 * @node: node in the tree of all clients
 * @dev: backpointer to ion device
 * @handles: an rb tree of all the handles in this client
 * @lock: lock protecting the tree of handles
 * @heap_mask: mask of all supported heaps
 * @name: used for debugging
 * @task: used for debugging
 * @pid: pid of the client, used for debugging
 * @debug_root: dentry of this client's debugfs file
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct kref ref;
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref: reference count
 * @client: back pointer to the client the buffer resides in
 * @buffer: pointer to the buffer
 * @node: node in the client's handle rbtree
 * @kmap_cnt: count of times this client has mapped to kernel
 * @dmap_cnt: count of times this client has mapped for dma
 * @usermap_cnt: count of times this client has mapped for userspace
 *
 * Modifications to node or any of the map counts should be protected
 * by the lock in the client.  Other fields are never changed after
 * initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
	unsigned int usermap_cnt;
};

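/*
 * A handle holds one reference on its buffer for its whole lifetime:
 * ion_handle_create() takes the reference and ion_handle_destroy()
 * drops it, so a buffer cannot go away while any client still has a
 * handle to it.
 */
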
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches
	 * the request of the caller, allocate from it.  Repeat until
	 * allocate has succeeded or all heaps have been tried.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}
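
/*
 * Example (a sketch, not part of the driver API): a kernel-side client
 * allocating one page and mapping it into the kernel.  The 0xf masks
 * are illustrative; real values depend on the heap types and ids
 * registered with this device.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, 0xf, "example");
 *	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 0xf);
 *	if (!IS_ERR_OR_NULL(handle)) {
 *		vaddr = ion_map_kernel(client, handle);
 *		...
 *		ion_unmap_kernel(client, handle);
 *		ion_free(client, handle);
 *	}
 *	ion_client_destroy(client);
 */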

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}

static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);

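/*
 * Helpers for the per-handle and per-buffer map counts.  _ion_map()
 * increments the handle count, and on a handle's first mapping also
 * increments the buffer count; it returns true only when the buffer
 * has no mapping yet, i.e. the caller must perform the actual map
 * operation rather than reuse the existing one.
 */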
bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	if (*buffer_cnt)
		map = false;
	else
		map = true;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

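/*
 * _ion_unmap() reverses _ion_map(): it decrements the handle count,
 * and when the handle's last mapping goes away also decrements the
 * buffer count; it returns true when the buffer's last mapping is
 * gone and the caller must perform the actual unmap operation.
 */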
bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("%s: map_dma is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}
	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sglist = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

struct ion_buffer *ion_share(struct ion_client *client,
			     struct ion_handle *handle)
{
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* do not take an extra reference here, the burden is on the caller
	 * to make sure the buffer doesn't go away while it's passing it
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}

static const struct file_operations ion_share_fops;

struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("%s: imported fd not found in file table.\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("%s: imported file is not a shared ion file.\n",
		       __func__);
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}

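/*
 * Sharing sketch (illustrative): client A hands a buffer to client B
 * in-kernel, without going through a file descriptor:
 *
 *	struct ion_buffer *buffer = ion_share(client_a, handle_a);
 *	struct ion_handle *handle_b = ion_import(client_b, buffer);
 *
 * Per the comment in ion_share(), client A must not free handle_a
 * until ion_import() has taken its own reference on the buffer.
 */
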
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu %d\n", names[i], sizes[i],
			   atomic_read(&client->ref.refcount));
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	snprintf(debug_name, 64, "%d", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

static void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}

static int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}

void ion_client_destroy(struct ion_client *client)
{
	ion_client_put(client);
}

static int ion_share_release(struct inode *inode, struct file *file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;

	pr_debug("%s: %d\n", __func__, __LINE__);
	/* make sure the client still exists, it's possible for the client to
	   have gone away but the map/share fd still to be around, take
	   a reference to it so it can't go away while this mapping exists */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		pr_err("%s: trying to mmap an ion handle in a process with no "
		       "ion client\n", __func__);
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				      buffer->size)) {
		pr_err("%s: trying to map larger area than handle has available"
		       "\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);
	if (ret) {
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
		goto err1;
	}

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 __func__, __LINE__,
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err1:
	/* drop the reference to the handle */
	ion_handle_put(handle);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner = THIS_MODULE,
	.release = ion_share_release,
	.mmap = ion_share_mmap,
};

static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return -ENFILE;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;
	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			pr_err("%s: invalid handle passed to share ioctl.\n",
			       __func__);
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}

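/*
 * Userspace sketch (illustrative, assuming the uapi structs from
 * linux/ion.h): allocate a buffer and turn its handle into a
 * shareable fd that another process can import with ION_IOC_IMPORT:
 *
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096, .align = 4096, .flags = heap_mask,
 *	};
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *
 *	struct ion_fd_data fd_data = { .handle = alloc_data.handle };
 *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
 *
 * fd_data.fd can then be passed to another process (e.g. over a unix
 * socket) or mmap()ed directly.
 */
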
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_put(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16.16s %16.16s %16.16s\n", "client", "pid", "size");
	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		char task_comm[TASK_COMM_LEN];
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;

		get_task_comm(task_comm, client->task);
		seq_printf(s, "%16.16s %16u %16zu\n", task_comm, client->pid,
			   size);
	}

	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		seq_printf(s, "%16.16s %16u %16zu\n", client->name,
			   client->pid, size);
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with "
			       "id %d\n", __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

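/*
 * Bring-up sketch (illustrative; in a real system the heap
 * descriptions come from board files or platform data, and heap
 * construction lives behind ion_priv.h):
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	struct ion_heap *heap = ion_heap_create(&pdata->heaps[0]);
 *	ion_device_add_heap(idev, heap);
 *
 * pdata here is assumed platform data describing the available heaps.
 */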
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->user_clients = RB_ROOT;
	idev->kernel_clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}