/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

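/*
 * Typical driver usage, as a rough sketch.  The my_* names below are
 * hypothetical driver types; only the sync_* calls are defined by this
 * file.  A driver backs a timeline with a sync_timeline_ops
 * implementation, creates sync_pts on it, wraps them in fences handed
 * to userspace as fds, and signals the timeline as the hardware makes
 * progress:
 *
 *	struct sync_timeline *tl;
 *	struct sync_pt *pt;
 *	struct sync_fence *fence;
 *	int fd = get_unused_fd();
 *
 *	tl = sync_timeline_create(&my_timeline_ops,
 *				  sizeof(struct my_timeline), "my_tl");
 *	pt = sync_pt_create(tl, sizeof(struct my_pt));
 *	fence = sync_fence_create("my_fence", pt);
 *	sync_fence_install(fence, fd);		(hand fd to userspace)
 *
 *	(later, when the hardware advances the timeline:)
 *	sync_timeline_signal(tl);
 */
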
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

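/*
 * sync_timeline_destroy() marks the timeline destroyed and drops the
 * creator's reference.  If sync_pts still hold references, the timeline
 * stays alive and its remaining active points are signaled so they can
 * resolve (with -ENOENT) rather than wait forever on a dead timeline;
 * the final kref_put() then frees it.
 */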
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */
	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

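/*
 * sync_timeline_signal() is called by the driver when the timeline
 * advances.  Under the active list lock it collects every sync_pt whose
 * has_signaled() now reports completion, taking a fence reference for
 * each so the fence cannot be freed underneath us; it then signals the
 * fences outside the lock and drops those references.
 */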
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

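/*
 * sync_pt_create() allocates a driver-sized sync_pt (size must be at
 * least sizeof(struct sync_pt)) and attaches it to the parent timeline,
 * taking a timeline reference that sync_pt_free() releases.
 */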
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

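/*
 * sync_fence_alloc() builds an empty fence: it allocates the struct,
 * backs it with an anonymous inode file (which is what userspace later
 * holds as an fd), and links it onto the global fence list used by the
 * debugfs dump below.
 */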
static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (fence->file == NULL)
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

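/*
 * sync_fence_merge_pts() folds src's points into dst.  If dst already
 * holds a point on the same timeline, the two collapse into a single
 * duplicate of whichever signals later (per the timeline's compare op);
 * points on timelines dst has not seen are simply duplicated in.
 */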
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						== -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

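/*
 * sync_fence_get_status() reduces the fence's points to one value:
 * 1 if every point has signaled, 0 if any is still active, or the
 * first error found.  An error on any point fails the whole fence.
 */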
static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	fence->status = sync_fence_get_status(fence);

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

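/*
 * sync_fence_signal_pt() runs when one of the fence's points signals.
 * It recomputes the fence status and, on the single false -> true (or
 * error) transition, moves the async waiters off the fence under the
 * waiter lock, then invokes their callbacks and wakes sleepers in
 * sync_fence_wait() outside of it.
 */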
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

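/*
 * sync_fence_cancel_async() removes a waiter queued with
 * sync_fence_wait_async() before its callback fires.  Returns 0 if the
 * waiter was found and unlinked, or -ENOENT if the fence has already
 * signaled and the callback is no longer on the list.
 */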
int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

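/*
 * sync_fence_wait() blocks until the fence signals.  A non-zero timeout
 * is in milliseconds; a timeout of 0 waits indefinitely.  Returns 0 on
 * success, -ETIME if the timeout expired, the fence's error status if
 * it signaled in error, or -ERESTARTSYS if interrupted by a signal.
 */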
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	if (fence->status == 0)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__u32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

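/*
 * SYNC_IOC_MERGE: merge the fence backing this fd with the fence named
 * by data.fd2 into a new fence, returning an fd for it in data.fence.
 * The two input fences stay valid and must still be closed by the
 * caller.
 */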
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

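/*
 * sync_fill_pt_info() serializes one sync_pt into the variable-length
 * sync_pt_info records returned by SYNC_IOC_FENCE_INFO, appending any
 * driver-specific payload via the timeline's fill_driver_data op.
 * Returns the number of bytes written, or -ENOMEM if size is too small.
 */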
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, " %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

late_initcall(sync_debugfs_init);

#endif