/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

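/*
 * Overview of the objects managed here:
 *
 * sync_timeline: a monotonically increasing counter owned by a driver.
 * Every outstanding sync_pt lives on its child_list; the ones that
 * have not yet signaled also live on its active_list.
 *
 * sync_pt: a single point on a timeline.  status is 0 while active,
 * 1 once signaled, and negative on error.
 *
 * sync_fence: a collection of sync_pts backed by an anonymous-inode
 * file, so userspace can wait on, poll, and merge fences through the
 * fd-based ioctl interface below.
 */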
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

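/*
 * Marks the timeline destroyed and drops the creator's reference.  If
 * outstanding pts still hold references, a final signal pass is made
 * so they can resolve; once destroyed, any still-active pt reports
 * -ENOENT from _sync_pt_has_signaled().
 */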
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

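/*
 * Called by the timeline owner when the timeline advances.  Signaled
 * pts are moved off the active list under the lock and their fences
 * signaled outside it, with a fence kref held across the second pass
 * so the fence cannot be released mid-signal.
 */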
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

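/*
 * The size argument lets a driver embed struct sync_pt at the start of
 * a larger driver-private struct (pt must be the first member so the
 * cast below is valid).  A minimal, purely illustrative sketch; the
 * names are hypothetical and not part of this API:
 *
 *	struct my_pt {
 *		struct sync_pt pt;
 *		u32 value;
 *	};
 *
 *	struct sync_pt *my_pt_create(struct sync_timeline *tl, u32 value)
 *	{
 *		struct my_pt *p;
 *
 *		p = (struct my_pt *)sync_pt_create(tl, sizeof(*p));
 *		if (!p)
 *			return NULL;
 *		p->value = value;
 *		return &p->pt;
 *	}
 *
 * The new pt takes a reference on its parent timeline, which is
 * dropped in sync_pt_free().
 */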
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0) {
		spin_unlock_irqrestore(&obj->active_list_lock, flags);
		sync_fence_signal_pt(pt);
		return;
	}

	list_add_tail(&pt->active_list, &obj->active_list_head);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (fence->file == NULL)
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

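/*
 * Like sync_fence_copy_pts(), except that when src and dst both carry
 * a pt on the same timeline, the pair is collapsed (per ops->compare)
 * into a single pt that signals at the later of the two, so a merged
 * fence never carries redundant pts for one timeline.
 */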
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt,
								 src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

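/*
 * Creates a new fence, backed by its own file, that signals once every
 * pt duplicated from a and b has signaled.  The caller owns the sole
 * reference to the returned fence; a and b are left untouched.
 */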
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	fence->status = sync_fence_get_status(fence);

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

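/*
 * timeout > 0: wait up to timeout ms, returning -ETIME on expiry.
 * timeout < 0: wait indefinitely.
 * timeout == 0: poll, returning -ETIME if the fence has not signaled.
 * Returns 0 once the fence signals, or a negative error, including the
 * fence's own error status if it signaled in error.
 */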
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	if (fence->status == 0) {
		pr_info("fence timeout on [%p] after %dms\n", fence,
			jiffies_to_msecs(timeout));
		sync_dump();
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

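/*
 * SYNC_IOC_WAIT passes a __s32 timeout in milliseconds by pointer,
 * with the same semantics as sync_fence_wait() above.
 */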
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

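/*
 * Serializes one pt into the blob returned by SYNC_IOC_FENCE_INFO: a
 * fixed struct sync_pt_info header, then optional driver data, with
 * the record's total length stored in info->len so userspace can walk
 * the variable-length records.
 */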
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

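/*
 * On a fence timeout, dump the same state as the debugfs file to the
 * kernel log.  The buffer is emitted in DUMP_CHUNK-sized pieces,
 * presumably to keep each pr_cont() within printk's per-call limits.
 */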
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif