blob: 468d675637f26f1e4b67d0dd107b2904cc5a8174 [file] [log] [blame]
Erik Gilling010accf2012-03-13 15:34:34 -07001/*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
Erik Gilling981c8a92012-03-14 19:49:15 -070017#include <linux/debugfs.h>
Erik Gilling4fb837a2012-05-16 13:09:22 -070018#include <linux/export.h>
Erik Gilling010accf2012-03-13 15:34:34 -070019#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/kernel.h>
Erik Gillinga1eeaca2012-03-19 17:28:32 -070022#include <linux/poll.h>
Erik Gilling010accf2012-03-13 15:34:34 -070023#include <linux/sched.h>
Erik Gilling981c8a92012-03-14 19:49:15 -070024#include <linux/seq_file.h>
Erik Gilling010accf2012-03-13 15:34:34 -070025#include <linux/slab.h>
26#include <linux/sync.h>
27#include <linux/uaccess.h>
28
29#include <linux/anon_inodes.h>
30
31static void sync_fence_signal_pt(struct sync_pt *pt);
32static int _sync_pt_has_signaled(struct sync_pt *pt);
Ajay Dudanic4af2662012-07-23 16:43:05 -070033static void sync_fence_free(struct kref *kref);
Ajay Dudani442fff42012-08-24 13:48:57 -070034static void sync_dump(void);
Erik Gilling010accf2012-03-13 15:34:34 -070035
Erik Gilling981c8a92012-03-14 19:49:15 -070036static LIST_HEAD(sync_timeline_list_head);
37static DEFINE_SPINLOCK(sync_timeline_list_lock);
38
39static LIST_HEAD(sync_fence_list_head);
40static DEFINE_SPINLOCK(sync_fence_list_lock);
41
Erik Gilling010accf2012-03-13 15:34:34 -070042struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
43 int size, const char *name)
44{
45 struct sync_timeline *obj;
Erik Gilling981c8a92012-03-14 19:49:15 -070046 unsigned long flags;
Erik Gilling010accf2012-03-13 15:34:34 -070047
48 if (size < sizeof(struct sync_timeline))
49 return NULL;
50
51 obj = kzalloc(size, GFP_KERNEL);
52 if (obj == NULL)
53 return NULL;
54
Ajay Dudani741cdde2012-08-02 17:26:45 -070055 kref_init(&obj->kref);
Erik Gilling010accf2012-03-13 15:34:34 -070056 obj->ops = ops;
57 strlcpy(obj->name, name, sizeof(obj->name));
58
59 INIT_LIST_HEAD(&obj->child_list_head);
60 spin_lock_init(&obj->child_list_lock);
61
62 INIT_LIST_HEAD(&obj->active_list_head);
63 spin_lock_init(&obj->active_list_lock);
64
Erik Gilling981c8a92012-03-14 19:49:15 -070065 spin_lock_irqsave(&sync_timeline_list_lock, flags);
66 list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
67 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
68
Erik Gilling010accf2012-03-13 15:34:34 -070069 return obj;
70}
Erik Gilling4fb837a2012-05-16 13:09:22 -070071EXPORT_SYMBOL(sync_timeline_create);
Erik Gilling010accf2012-03-13 15:34:34 -070072
Ajay Dudani741cdde2012-08-02 17:26:45 -070073static void sync_timeline_free(struct kref *kref)
Erik Gilling981c8a92012-03-14 19:49:15 -070074{
Ajay Dudani741cdde2012-08-02 17:26:45 -070075 struct sync_timeline *obj =
76 container_of(kref, struct sync_timeline, kref);
Erik Gilling981c8a92012-03-14 19:49:15 -070077 unsigned long flags;
78
79 if (obj->ops->release_obj)
80 obj->ops->release_obj(obj);
81
82 spin_lock_irqsave(&sync_timeline_list_lock, flags);
83 list_del(&obj->sync_timeline_list);
84 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
85
86 kfree(obj);
87}
88
Erik Gilling010accf2012-03-13 15:34:34 -070089void sync_timeline_destroy(struct sync_timeline *obj)
90{
Erik Gilling010accf2012-03-13 15:34:34 -070091 obj->destroyed = true;
Erik Gilling010accf2012-03-13 15:34:34 -070092
Ajay Dudani741cdde2012-08-02 17:26:45 -070093 /*
94 * If this is not the last reference, signal any children
95 * that their parent is going away.
96 */
97
98 if (!kref_put(&obj->kref, sync_timeline_free))
Erik Gilling010accf2012-03-13 15:34:34 -070099 sync_timeline_signal(obj);
100}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700101EXPORT_SYMBOL(sync_timeline_destroy);
Erik Gilling010accf2012-03-13 15:34:34 -0700102
103static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
104{
105 unsigned long flags;
106
107 pt->parent = obj;
108
109 spin_lock_irqsave(&obj->child_list_lock, flags);
110 list_add_tail(&pt->child_list, &obj->child_list_head);
111 spin_unlock_irqrestore(&obj->child_list_lock, flags);
112}
113
114static void sync_timeline_remove_pt(struct sync_pt *pt)
115{
116 struct sync_timeline *obj = pt->parent;
117 unsigned long flags;
Erik Gilling010accf2012-03-13 15:34:34 -0700118
119 spin_lock_irqsave(&obj->active_list_lock, flags);
120 if (!list_empty(&pt->active_list))
121 list_del_init(&pt->active_list);
122 spin_unlock_irqrestore(&obj->active_list_lock, flags);
123
124 spin_lock_irqsave(&obj->child_list_lock, flags);
Ajay Dudanic4af2662012-07-23 16:43:05 -0700125 if (!list_empty(&pt->child_list)) {
126 list_del_init(&pt->child_list);
Ajay Dudanic4af2662012-07-23 16:43:05 -0700127 }
Erik Gilling010accf2012-03-13 15:34:34 -0700128 spin_unlock_irqrestore(&obj->child_list_lock, flags);
Erik Gilling010accf2012-03-13 15:34:34 -0700129}
130
131void sync_timeline_signal(struct sync_timeline *obj)
132{
133 unsigned long flags;
134 LIST_HEAD(signaled_pts);
135 struct list_head *pos, *n;
136
137 spin_lock_irqsave(&obj->active_list_lock, flags);
138
139 list_for_each_safe(pos, n, &obj->active_list_head) {
140 struct sync_pt *pt =
141 container_of(pos, struct sync_pt, active_list);
142
Ajay Dudanic4af2662012-07-23 16:43:05 -0700143 if (_sync_pt_has_signaled(pt)) {
144 list_del_init(pos);
145 list_add(&pt->signaled_list, &signaled_pts);
146 kref_get(&pt->fence->kref);
147 }
Erik Gilling010accf2012-03-13 15:34:34 -0700148 }
149
150 spin_unlock_irqrestore(&obj->active_list_lock, flags);
151
152 list_for_each_safe(pos, n, &signaled_pts) {
153 struct sync_pt *pt =
Ajay Dudanic4af2662012-07-23 16:43:05 -0700154 container_of(pos, struct sync_pt, signaled_list);
Erik Gilling010accf2012-03-13 15:34:34 -0700155
156 list_del_init(pos);
157 sync_fence_signal_pt(pt);
Ajay Dudanic4af2662012-07-23 16:43:05 -0700158 kref_put(&pt->fence->kref, sync_fence_free);
Erik Gilling010accf2012-03-13 15:34:34 -0700159 }
160}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700161EXPORT_SYMBOL(sync_timeline_signal);
Erik Gilling010accf2012-03-13 15:34:34 -0700162
163struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
164{
165 struct sync_pt *pt;
166
167 if (size < sizeof(struct sync_pt))
168 return NULL;
169
170 pt = kzalloc(size, GFP_KERNEL);
171 if (pt == NULL)
172 return NULL;
173
174 INIT_LIST_HEAD(&pt->active_list);
Ajay Dudani741cdde2012-08-02 17:26:45 -0700175 kref_get(&parent->kref);
Erik Gilling010accf2012-03-13 15:34:34 -0700176 sync_timeline_add_pt(parent, pt);
177
178 return pt;
179}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700180EXPORT_SYMBOL(sync_pt_create);
Erik Gilling010accf2012-03-13 15:34:34 -0700181
182void sync_pt_free(struct sync_pt *pt)
183{
184 if (pt->parent->ops->free_pt)
185 pt->parent->ops->free_pt(pt);
186
187 sync_timeline_remove_pt(pt);
188
Ajay Dudani741cdde2012-08-02 17:26:45 -0700189 kref_put(&pt->parent->kref, sync_timeline_free);
190
Erik Gilling010accf2012-03-13 15:34:34 -0700191 kfree(pt);
192}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700193EXPORT_SYMBOL(sync_pt_free);
Erik Gilling010accf2012-03-13 15:34:34 -0700194
195/* call with pt->parent->active_list_lock held */
196static int _sync_pt_has_signaled(struct sync_pt *pt)
197{
Erik Gillingad433ba2012-03-15 14:59:33 -0700198 int old_status = pt->status;
199
Erik Gilling010accf2012-03-13 15:34:34 -0700200 if (!pt->status)
201 pt->status = pt->parent->ops->has_signaled(pt);
202
203 if (!pt->status && pt->parent->destroyed)
204 pt->status = -ENOENT;
205
Erik Gillingad433ba2012-03-15 14:59:33 -0700206 if (pt->status != old_status)
207 pt->timestamp = ktime_get();
208
Erik Gilling010accf2012-03-13 15:34:34 -0700209 return pt->status;
210}
211
212static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
213{
214 return pt->parent->ops->dup(pt);
215}
216
217/* Adds a sync pt to the active queue. Called when added to a fence */
218static void sync_pt_activate(struct sync_pt *pt)
219{
220 struct sync_timeline *obj = pt->parent;
221 unsigned long flags;
222 int err;
223
224 spin_lock_irqsave(&obj->active_list_lock, flags);
225
226 err = _sync_pt_has_signaled(pt);
227 if (err != 0)
228 goto out;
229
230 list_add_tail(&pt->active_list, &obj->active_list_head);
231
232out:
233 spin_unlock_irqrestore(&obj->active_list_lock, flags);
234}
235
236static int sync_fence_release(struct inode *inode, struct file *file);
Erik Gillinga1eeaca2012-03-19 17:28:32 -0700237static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
Erik Gilling010accf2012-03-13 15:34:34 -0700238static long sync_fence_ioctl(struct file *file, unsigned int cmd,
239 unsigned long arg);
240
241
242static const struct file_operations sync_fence_fops = {
243 .release = sync_fence_release,
Erik Gillinga1eeaca2012-03-19 17:28:32 -0700244 .poll = sync_fence_poll,
Erik Gilling010accf2012-03-13 15:34:34 -0700245 .unlocked_ioctl = sync_fence_ioctl,
246};
247
248static struct sync_fence *sync_fence_alloc(const char *name)
249{
250 struct sync_fence *fence;
Erik Gilling981c8a92012-03-14 19:49:15 -0700251 unsigned long flags;
Erik Gilling010accf2012-03-13 15:34:34 -0700252
253 fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
254 if (fence == NULL)
255 return NULL;
256
257 fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
258 fence, 0);
259 if (fence->file == NULL)
260 goto err;
261
Ajay Dudanic4af2662012-07-23 16:43:05 -0700262 kref_init(&fence->kref);
Erik Gilling010accf2012-03-13 15:34:34 -0700263 strlcpy(fence->name, name, sizeof(fence->name));
264
265 INIT_LIST_HEAD(&fence->pt_list_head);
266 INIT_LIST_HEAD(&fence->waiter_list_head);
267 spin_lock_init(&fence->waiter_list_lock);
268
269 init_waitqueue_head(&fence->wq);
Erik Gilling981c8a92012-03-14 19:49:15 -0700270
271 spin_lock_irqsave(&sync_fence_list_lock, flags);
272 list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
273 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
274
Erik Gilling010accf2012-03-13 15:34:34 -0700275 return fence;
276
277err:
278 kfree(fence);
279 return NULL;
280}
281
282/* TODO: implement a create which takes more that one sync_pt */
283struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
284{
285 struct sync_fence *fence;
286
287 if (pt->fence)
288 return NULL;
289
290 fence = sync_fence_alloc(name);
291 if (fence == NULL)
292 return NULL;
293
294 pt->fence = fence;
295 list_add(&pt->pt_list, &fence->pt_list_head);
296 sync_pt_activate(pt);
297
298 return fence;
299}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700300EXPORT_SYMBOL(sync_fence_create);
Erik Gilling010accf2012-03-13 15:34:34 -0700301
302static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
303{
304 struct list_head *pos;
305
306 list_for_each(pos, &src->pt_list_head) {
307 struct sync_pt *orig_pt =
308 container_of(pos, struct sync_pt, pt_list);
309 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
310
311 if (new_pt == NULL)
312 return -ENOMEM;
313
314 new_pt->fence = dst;
315 list_add(&new_pt->pt_list, &dst->pt_list_head);
316 sync_pt_activate(new_pt);
317 }
318
319 return 0;
320}
321
Ajay Dudani99343192012-07-11 17:13:50 -0700322static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
323{
324 struct list_head *src_pos, *dst_pos, *n;
325
326 list_for_each(src_pos, &src->pt_list_head) {
327 struct sync_pt *src_pt =
328 container_of(src_pos, struct sync_pt, pt_list);
329 bool collapsed = false;
330
331 list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
332 struct sync_pt *dst_pt =
333 container_of(dst_pos, struct sync_pt, pt_list);
334 /* collapse two sync_pts on the same timeline
335 * to a single sync_pt that will signal at
336 * the later of the two
337 */
338 if (dst_pt->parent == src_pt->parent) {
339 if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
340 struct sync_pt *new_pt =
341 sync_pt_dup(src_pt);
342 if (new_pt == NULL)
343 return -ENOMEM;
344
345 new_pt->fence = dst;
346 list_replace(&dst_pt->pt_list,
347 &new_pt->pt_list);
348 sync_pt_activate(new_pt);
349 sync_pt_free(dst_pt);
350 }
351 collapsed = true;
352 break;
353 }
354 }
355
356 if (!collapsed) {
357 struct sync_pt *new_pt = sync_pt_dup(src_pt);
358
359 if (new_pt == NULL)
360 return -ENOMEM;
361
362 new_pt->fence = dst;
363 list_add(&new_pt->pt_list, &dst->pt_list_head);
364 sync_pt_activate(new_pt);
365 }
366 }
367
368 return 0;
369}
370
Ajay Dudanic4af2662012-07-23 16:43:05 -0700371static void sync_fence_detach_pts(struct sync_fence *fence)
372{
373 struct list_head *pos, *n;
374
375 list_for_each_safe(pos, n, &fence->pt_list_head) {
376 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
377 sync_timeline_remove_pt(pt);
378 }
379}
380
Erik Gilling010accf2012-03-13 15:34:34 -0700381static void sync_fence_free_pts(struct sync_fence *fence)
382{
383 struct list_head *pos, *n;
384
385 list_for_each_safe(pos, n, &fence->pt_list_head) {
386 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
387 sync_pt_free(pt);
388 }
389}
390
391struct sync_fence *sync_fence_fdget(int fd)
392{
393 struct file *file = fget(fd);
394
395 if (file == NULL)
396 return NULL;
397
398 if (file->f_op != &sync_fence_fops)
399 goto err;
400
401 return file->private_data;
402
403err:
404 fput(file);
405 return NULL;
406}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700407EXPORT_SYMBOL(sync_fence_fdget);
Erik Gilling010accf2012-03-13 15:34:34 -0700408
409void sync_fence_put(struct sync_fence *fence)
410{
411 fput(fence->file);
412}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700413EXPORT_SYMBOL(sync_fence_put);
Erik Gilling010accf2012-03-13 15:34:34 -0700414
415void sync_fence_install(struct sync_fence *fence, int fd)
416{
417 fd_install(fd, fence->file);
418}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700419EXPORT_SYMBOL(sync_fence_install);
Erik Gilling010accf2012-03-13 15:34:34 -0700420
421static int sync_fence_get_status(struct sync_fence *fence)
422{
423 struct list_head *pos;
424 int status = 1;
425
426 list_for_each(pos, &fence->pt_list_head) {
427 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
428 int pt_status = pt->status;
429
430 if (pt_status < 0) {
431 status = pt_status;
432 break;
433 } else if (status == 1) {
434 status = pt_status;
435 }
436 }
437
438 return status;
439}
440
441struct sync_fence *sync_fence_merge(const char *name,
442 struct sync_fence *a, struct sync_fence *b)
443{
444 struct sync_fence *fence;
445 int err;
446
447 fence = sync_fence_alloc(name);
448 if (fence == NULL)
449 return NULL;
450
451 err = sync_fence_copy_pts(fence, a);
452 if (err < 0)
453 goto err;
454
Ajay Dudani99343192012-07-11 17:13:50 -0700455 err = sync_fence_merge_pts(fence, b);
Erik Gilling010accf2012-03-13 15:34:34 -0700456 if (err < 0)
457 goto err;
458
459 fence->status = sync_fence_get_status(fence);
460
461 return fence;
462err:
463 sync_fence_free_pts(fence);
464 kfree(fence);
465 return NULL;
466}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700467EXPORT_SYMBOL(sync_fence_merge);
Erik Gilling010accf2012-03-13 15:34:34 -0700468
469static void sync_fence_signal_pt(struct sync_pt *pt)
470{
471 LIST_HEAD(signaled_waiters);
472 struct sync_fence *fence = pt->fence;
473 struct list_head *pos;
474 struct list_head *n;
475 unsigned long flags;
476 int status;
477
478 status = sync_fence_get_status(fence);
479
480 spin_lock_irqsave(&fence->waiter_list_lock, flags);
481 /*
482 * this should protect against two threads racing on the signaled
483 * false -> true transition
484 */
485 if (status && !fence->status) {
486 list_for_each_safe(pos, n, &fence->waiter_list_head)
487 list_move(pos, &signaled_waiters);
488
489 fence->status = status;
490 } else {
491 status = 0;
492 }
493 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
494
495 if (status) {
496 list_for_each_safe(pos, n, &signaled_waiters) {
497 struct sync_fence_waiter *waiter =
498 container_of(pos, struct sync_fence_waiter,
499 waiter_list);
500
Erik Gilling010accf2012-03-13 15:34:34 -0700501 list_del(pos);
Erik Gillingc80114f2012-05-15 16:23:26 -0700502 waiter->callback(fence, waiter);
Erik Gilling010accf2012-03-13 15:34:34 -0700503 }
504 wake_up(&fence->wq);
505 }
506}
507
508int sync_fence_wait_async(struct sync_fence *fence,
Erik Gillingc80114f2012-05-15 16:23:26 -0700509 struct sync_fence_waiter *waiter)
Erik Gilling010accf2012-03-13 15:34:34 -0700510{
Erik Gilling010accf2012-03-13 15:34:34 -0700511 unsigned long flags;
512 int err = 0;
513
Erik Gilling010accf2012-03-13 15:34:34 -0700514 spin_lock_irqsave(&fence->waiter_list_lock, flags);
515
516 if (fence->status) {
Erik Gilling010accf2012-03-13 15:34:34 -0700517 err = fence->status;
518 goto out;
519 }
520
521 list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
522out:
523 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
524
525 return err;
526}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700527EXPORT_SYMBOL(sync_fence_wait_async);
Erik Gilling010accf2012-03-13 15:34:34 -0700528
Erik Gillingc80114f2012-05-15 16:23:26 -0700529int sync_fence_cancel_async(struct sync_fence *fence,
530 struct sync_fence_waiter *waiter)
531{
532 struct list_head *pos;
533 struct list_head *n;
534 unsigned long flags;
535 int ret = -ENOENT;
536
537 spin_lock_irqsave(&fence->waiter_list_lock, flags);
538 /*
539 * Make sure waiter is still in waiter_list because it is possible for
540 * the waiter to be removed from the list while the callback is still
541 * pending.
542 */
543 list_for_each_safe(pos, n, &fence->waiter_list_head) {
544 struct sync_fence_waiter *list_waiter =
545 container_of(pos, struct sync_fence_waiter,
546 waiter_list);
547 if (list_waiter == waiter) {
548 list_del(pos);
549 ret = 0;
550 break;
551 }
552 }
553 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
554 return ret;
555}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700556EXPORT_SYMBOL(sync_fence_cancel_async);
Erik Gillingc80114f2012-05-15 16:23:26 -0700557
Erik Gilling010accf2012-03-13 15:34:34 -0700558int sync_fence_wait(struct sync_fence *fence, long timeout)
559{
Ajay Dudanidf53a2ca2012-08-21 17:57:19 -0700560 int err = 0;
Erik Gilling010accf2012-03-13 15:34:34 -0700561
Ajay Dudanidf53a2ca2012-08-21 17:57:19 -0700562 if (timeout > 0) {
Erik Gilling010accf2012-03-13 15:34:34 -0700563 timeout = msecs_to_jiffies(timeout);
564 err = wait_event_interruptible_timeout(fence->wq,
565 fence->status != 0,
566 timeout);
Ajay Dudani131ed182012-08-21 18:43:21 -0700567 } else if (timeout < 0) {
Erik Gilling010accf2012-03-13 15:34:34 -0700568 err = wait_event_interruptible(fence->wq, fence->status != 0);
569 }
570
571 if (err < 0)
572 return err;
573
574 if (fence->status < 0)
575 return fence->status;
576
Ajay Dudani442fff42012-08-24 13:48:57 -0700577 if (fence->status == 0) {
Ajay Dudani31802ec2012-09-04 15:29:09 -0700578 pr_info("fence timeout on [%p] after %dms\n", fence,
579 jiffies_to_msecs(timeout));
Ajay Dudani442fff42012-08-24 13:48:57 -0700580 sync_dump();
Erik Gilling010accf2012-03-13 15:34:34 -0700581 return -ETIME;
Ajay Dudani442fff42012-08-24 13:48:57 -0700582 }
Erik Gilling010accf2012-03-13 15:34:34 -0700583
584 return 0;
585}
Erik Gilling4fb837a2012-05-16 13:09:22 -0700586EXPORT_SYMBOL(sync_fence_wait);
Erik Gilling010accf2012-03-13 15:34:34 -0700587
Ajay Dudanic4af2662012-07-23 16:43:05 -0700588static void sync_fence_free(struct kref *kref)
589{
590 struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
591
592 sync_fence_free_pts(fence);
593
594 kfree(fence);
595}
596
Erik Gilling010accf2012-03-13 15:34:34 -0700597static int sync_fence_release(struct inode *inode, struct file *file)
598{
599 struct sync_fence *fence = file->private_data;
Erik Gilling981c8a92012-03-14 19:49:15 -0700600 unsigned long flags;
Erik Gilling010accf2012-03-13 15:34:34 -0700601
Ajay Dudanic4af2662012-07-23 16:43:05 -0700602 /*
603 * We need to remove all ways to access this fence before droping
604 * our ref.
605 *
606 * start with its membership in the global fence list
607 */
Erik Gilling981c8a92012-03-14 19:49:15 -0700608 spin_lock_irqsave(&sync_fence_list_lock, flags);
609 list_del(&fence->sync_fence_list);
610 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
611
Ajay Dudanic4af2662012-07-23 16:43:05 -0700612 /*
613 * remove its pts from their parents so that sync_timeline_signal()
614 * can't reference the fence.
615 */
616 sync_fence_detach_pts(fence);
Ajay Dudani1ee76852012-07-11 17:07:39 -0700617
Ajay Dudanic4af2662012-07-23 16:43:05 -0700618 kref_put(&fence->kref, sync_fence_free);
Erik Gilling010accf2012-03-13 15:34:34 -0700619
620 return 0;
621}
622
Erik Gillinga1eeaca2012-03-19 17:28:32 -0700623static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
624{
625 struct sync_fence *fence = file->private_data;
626
627 poll_wait(file, &fence->wq, wait);
628
629 if (fence->status == 1)
630 return POLLIN;
631 else if (fence->status < 0)
632 return POLLERR;
633 else
634 return 0;
635}
636
Erik Gilling010accf2012-03-13 15:34:34 -0700637static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
638{
Ajay Dudani93b10c92012-09-04 15:28:52 -0700639 __s32 value;
Erik Gilling010accf2012-03-13 15:34:34 -0700640
641 if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
642 return -EFAULT;
643
644 return sync_fence_wait(fence, value);
645}
646
647static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
648{
649 int fd = get_unused_fd();
650 int err;
651 struct sync_fence *fence2, *fence3;
652 struct sync_merge_data data;
653
654 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
655 return -EFAULT;
656
657 fence2 = sync_fence_fdget(data.fd2);
658 if (fence2 == NULL) {
659 err = -ENOENT;
660 goto err_put_fd;
661 }
662
663 data.name[sizeof(data.name) - 1] = '\0';
664 fence3 = sync_fence_merge(data.name, fence, fence2);
665 if (fence3 == NULL) {
666 err = -ENOMEM;
667 goto err_put_fence2;
668 }
669
670 data.fence = fd;
671 if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
672 err = -EFAULT;
673 goto err_put_fence3;
674 }
675
676 sync_fence_install(fence3, fd);
677 sync_fence_put(fence2);
678 return 0;
679
680err_put_fence3:
681 sync_fence_put(fence3);
682
683err_put_fence2:
684 sync_fence_put(fence2);
685
686err_put_fd:
687 put_unused_fd(fd);
688 return err;
689}
690
Erik Gilling3913bff2012-03-15 17:45:50 -0700691static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
692{
693 struct sync_pt_info *info = data;
694 int ret;
695
696 if (size < sizeof(struct sync_pt_info))
697 return -ENOMEM;
698
699 info->len = sizeof(struct sync_pt_info);
700
701 if (pt->parent->ops->fill_driver_data) {
702 ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
703 size - sizeof(*info));
704 if (ret < 0)
705 return ret;
706
707 info->len += ret;
708 }
709
710 strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
711 strlcpy(info->driver_name, pt->parent->ops->driver_name,
712 sizeof(info->driver_name));
713 info->status = pt->status;
714 info->timestamp_ns = ktime_to_ns(pt->timestamp);
715
716 return info->len;
717}
718
719static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
720 unsigned long arg)
721{
722 struct sync_fence_info_data *data;
723 struct list_head *pos;
724 __u32 size;
725 __u32 len = 0;
726 int ret;
727
728 if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
729 return -EFAULT;
730
731 if (size < sizeof(struct sync_fence_info_data))
732 return -EINVAL;
733
734 if (size > 4096)
735 size = 4096;
736
737 data = kzalloc(size, GFP_KERNEL);
738 if (data == NULL)
739 return -ENOMEM;
740
741 strlcpy(data->name, fence->name, sizeof(data->name));
742 data->status = fence->status;
743 len = sizeof(struct sync_fence_info_data);
744
745 list_for_each(pos, &fence->pt_list_head) {
746 struct sync_pt *pt =
747 container_of(pos, struct sync_pt, pt_list);
748
749 ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
750
751 if (ret < 0)
752 goto out;
753
754 len += ret;
755 }
756
757 data->len = len;
758
759 if (copy_to_user((void __user *)arg, data, len))
760 ret = -EFAULT;
761 else
762 ret = 0;
763
764out:
765 kfree(data);
766
767 return ret;
768}
Erik Gilling010accf2012-03-13 15:34:34 -0700769
770static long sync_fence_ioctl(struct file *file, unsigned int cmd,
771 unsigned long arg)
772{
773 struct sync_fence *fence = file->private_data;
774 switch (cmd) {
775 case SYNC_IOC_WAIT:
776 return sync_fence_ioctl_wait(fence, arg);
777
778 case SYNC_IOC_MERGE:
779 return sync_fence_ioctl_merge(fence, arg);
Erik Gilling981c8a92012-03-14 19:49:15 -0700780
Erik Gilling3913bff2012-03-15 17:45:50 -0700781 case SYNC_IOC_FENCE_INFO:
782 return sync_fence_ioctl_fence_info(fence, arg);
783
Erik Gilling010accf2012-03-13 15:34:34 -0700784 default:
785 return -ENOTTY;
786 }
787}
788
Erik Gilling981c8a92012-03-14 19:49:15 -0700789#ifdef CONFIG_DEBUG_FS
/* Map a pt/fence status code to a human-readable label. */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	if (status == 0)
		return "active";
	return "error";
}
799
800static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
801{
802 int status = pt->status;
803 seq_printf(s, " %s%spt %s",
804 fence ? pt->parent->name : "",
805 fence ? "_" : "",
806 sync_status_str(status));
807 if (pt->status) {
808 struct timeval tv = ktime_to_timeval(pt->timestamp);
809 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
810 }
811
812 if (pt->parent->ops->print_pt) {
813 seq_printf(s, ": ");
814 pt->parent->ops->print_pt(s, pt);
815 }
816
817 seq_printf(s, "\n");
818}
819
820static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
821{
822 struct list_head *pos;
823 unsigned long flags;
824
825 seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
826
827 if (obj->ops->print_obj) {
828 seq_printf(s, ": ");
829 obj->ops->print_obj(s, obj);
830 }
831
832 seq_printf(s, "\n");
833
834 spin_lock_irqsave(&obj->child_list_lock, flags);
835 list_for_each(pos, &obj->child_list_head) {
836 struct sync_pt *pt =
837 container_of(pos, struct sync_pt, child_list);
838 sync_print_pt(s, pt, false);
839 }
840 spin_unlock_irqrestore(&obj->child_list_lock, flags);
841}
842
843static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
844{
845 struct list_head *pos;
846 unsigned long flags;
847
Ajay Dudani31802ec2012-09-04 15:29:09 -0700848 seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
849 sync_status_str(fence->status));
Erik Gilling981c8a92012-03-14 19:49:15 -0700850
851 list_for_each(pos, &fence->pt_list_head) {
852 struct sync_pt *pt =
853 container_of(pos, struct sync_pt, pt_list);
854 sync_print_pt(s, pt, true);
855 }
856
857 spin_lock_irqsave(&fence->waiter_list_lock, flags);
858 list_for_each(pos, &fence->waiter_list_head) {
859 struct sync_fence_waiter *waiter =
860 container_of(pos, struct sync_fence_waiter,
861 waiter_list);
862
Erik Gillingc80114f2012-05-15 16:23:26 -0700863 seq_printf(s, "waiter %pF\n", waiter->callback);
Erik Gilling981c8a92012-03-14 19:49:15 -0700864 }
865 spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
866}
867
868static int sync_debugfs_show(struct seq_file *s, void *unused)
869{
870 unsigned long flags;
871 struct list_head *pos;
872
873 seq_printf(s, "objs:\n--------------\n");
874
875 spin_lock_irqsave(&sync_timeline_list_lock, flags);
876 list_for_each(pos, &sync_timeline_list_head) {
877 struct sync_timeline *obj =
878 container_of(pos, struct sync_timeline,
879 sync_timeline_list);
880
881 sync_print_obj(s, obj);
882 seq_printf(s, "\n");
883 }
884 spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
885
886 seq_printf(s, "fences:\n--------------\n");
887
888 spin_lock_irqsave(&sync_fence_list_lock, flags);
889 list_for_each(pos, &sync_fence_list_head) {
890 struct sync_fence *fence =
891 container_of(pos, struct sync_fence, sync_fence_list);
892
893 sync_print_fence(s, fence);
894 seq_printf(s, "\n");
895 }
896 spin_unlock_irqrestore(&sync_fence_list_lock, flags);
897 return 0;
898}
899
900static int sync_debugfs_open(struct inode *inode, struct file *file)
901{
902 return single_open(file, sync_debugfs_show, inode->i_private);
903}
904
905static const struct file_operations sync_debugfs_fops = {
906 .open = sync_debugfs_open,
907 .read = seq_read,
908 .llseek = seq_lseek,
909 .release = single_release,
910};
911
912static __init int sync_debugfs_init(void)
913{
914 debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
915 return 0;
916}
Erik Gilling981c8a92012-03-14 19:49:15 -0700917late_initcall(sync_debugfs_init);
918
Ajay Dudani442fff42012-08-24 13:48:57 -0700919#define DUMP_CHUNK 256
920static char sync_dump_buf[64 * 1024];
921void sync_dump(void)
922{
923 struct seq_file s = {
924 .buf = sync_dump_buf,
925 .size = sizeof(sync_dump_buf) - 1,
926 };
927 int i;
928
929 sync_debugfs_show(&s, NULL);
930
931 for (i = 0; i < s.count; i += DUMP_CHUNK) {
932 if ((s.count - i) > DUMP_CHUNK) {
933 char c = s.buf[i + DUMP_CHUNK];
934 s.buf[i + DUMP_CHUNK] = 0;
935 pr_cont("%s", s.buf + i);
936 s.buf[i + DUMP_CHUNK] = c;
937 } else {
938 s.buf[s.count] = 0;
939 pr_cont("%s", s.buf + i);
940 }
941 }
942}
943#else
944static void sync_dump(void)
945{
946}
Erik Gilling981c8a92012-03-14 19:49:15 -0700947#endif