/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
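
/*
 * Usage sketch (illustrative only; my_timeline and my_timeline_ops are
 * hypothetical driver-side names, not defined in this file): a driver
 * embeds struct sync_timeline at the start of its own timeline type and
 * passes the total allocation size to sync_timeline_create().
 *
 *	struct my_timeline {
 *		struct sync_timeline	obj;
 *		u32			value;
 *	};
 *
 *	struct my_timeline *tl = (struct my_timeline *)
 *		sync_timeline_create(&my_timeline_ops,
 *				     sizeof(struct my_timeline), "my_tl");
 */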

static void sync_timeline_free(struct sync_timeline *obj)
{
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	unsigned long flags;
	bool needs_freeing;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	obj->destroyed = true;
	needs_freeing = list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
	else
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	bool needs_freeing = false;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list)) {
		list_del_init(&pt->child_list);
		needs_freeing = obj->destroyed &&
			list_empty(&obj->child_list_head);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
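
/*
 * Usage sketch (illustrative only; my_timeline_irq_handler and the value
 * bookkeeping are hypothetical): a driver typically advances its timeline
 * from its completion interrupt and then calls sync_timeline_signal() so
 * that any newly-satisfied sync_pts fire.
 *
 *	static irqreturn_t my_timeline_irq_handler(int irq, void *data)
 *	{
 *		struct my_timeline *tl = data;
 *
 *		tl->value++;
 *		sync_timeline_signal(&tl->obj);
 *		return IRQ_HANDLED;
 *	}
 */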

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
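
/*
 * Usage sketch (illustrative only; my_pt is a hypothetical driver type,
 * and tl is the hypothetical timeline from the sketch above): like
 * timelines, driver sync_pts embed struct sync_pt first, so the size
 * check above holds and container_of() works in the driver's ops.
 *
 *	struct my_pt {
 *		struct sync_pt	pt;
 *		u32		value;
 *	};
 *
 *	struct my_pt *p = (struct my_pt *)
 *		sync_pt_create(&tl->obj, sizeof(struct my_pt));
 */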

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (fence->file == NULL)
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
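
/*
 * Usage sketch (illustrative only, error handling elided; my_pt is the
 * hypothetical type from the sketch above): a driver that hands a fence
 * to userspace wraps a fresh sync_pt in a fence and installs it on an fd,
 * much as sync_fence_ioctl_merge() does below.
 *
 *	struct sync_pt *pt = sync_pt_create(&tl->obj, sizeof(struct my_pt));
 *	struct sync_fence *fence = sync_fence_create("my_fence", pt);
 *	int fd = get_unused_fd();
 *
 *	sync_fence_install(fence, fd);
 */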

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);
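
/*
 * Usage sketch (illustrative only): sync_fence_fdget() takes a reference
 * on the underlying file, so every successful call must be balanced with
 * sync_fence_put() once the caller is done with the fence.
 *
 *	struct sync_fence *fence = sync_fence_fdget(fd);
 *
 *	if (fence == NULL)
 *		return -EINVAL;
 *	... use the fence ...
 *	sync_fence_put(fence);
 */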

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	fence->status = sync_fence_get_status(fence);

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
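
/*
 * Merge semantics sketch (illustrative values): merging a fence that
 * signals at value 5 on timeline T with one that signals at value 3 on
 * the same T yields a single sync_pt at value 5, because
 * sync_fence_merge_pts() collapses same-timeline points to the later
 * one.  Points on distinct timelines are simply accumulated, so the
 * merged fence signals only once every timeline has caught up.
 *
 *	struct sync_fence *ab = sync_fence_merge("a+b", a, b);
 */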

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
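
/*
 * Usage sketch (illustrative only; my_fence_cb and my_waiter are
 * hypothetical, and the waiter-init helper is assumed to come from
 * linux/sync.h): callers embed a struct sync_fence_waiter and must
 * handle a nonzero return, which means the fence has already signaled
 * (or errored) and the callback will never be invoked.
 *
 *	static void my_fence_cb(struct sync_fence *fence,
 *				struct sync_fence_waiter *waiter)
 *	{
 *		... fence signaled; kick off the deferred work ...
 *	}
 *
 *	sync_fence_waiter_init(&my_waiter, my_fence_cb);
 *	err = sync_fence_wait_async(fence, &my_waiter);
 *	if (err != 0)
 *		... handle the already-signaled case inline ...
 */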

int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	if (fence->status == 0)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
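
/*
 * Usage sketch (illustrative only): timeout is in milliseconds, and a
 * timeout of 0 waits indefinitely.  The common pattern is a bounded wait
 * with explicit handling of the -ETIME case.
 *
 *	err = sync_fence_wait(fence, 1000);
 *	if (err == -ETIME)
 *		... fence did not signal within 1s ...
 *	else if (err < 0)
 *		... interrupted, or the fence signaled in error ...
 */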

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__u32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}
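
/*
 * Userspace sketch (illustrative only, error handling elided): the wait
 * ioctl takes a timeout in milliseconds by pointer; 0 waits forever.
 *
 *	__u32 timeout_ms = 1000;
 *
 *	ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);
 */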

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
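
/*
 * Userspace sketch (illustrative only, error handling elided): merging
 * two fence fds via SYNC_IOC_MERGE returns the new fence's fd in
 * data.fence.
 *
 *	struct sync_merge_data data;
 *
 *	strcpy(data.name, "merged");
 *	data.fd2 = fd_b;
 *	ioctl(fd_a, SYNC_IOC_MERGE, &data);
 *	... data.fence now holds the merged fence's fd ...
 */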

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);

		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

late_initcall(sync_debugfs_init);

#endif