/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

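/*
 * Typical usage (illustrative sketch only; my_timeline and my_pt_create are
 * hypothetical driver helpers, not part of this file): a driver creates one
 * sync_timeline per hardware context with sync_timeline_create(), allocates
 * a sync_pt per submitted operation with sync_pt_create(), wraps the pt in a
 * fence with sync_fence_create() and hands the fence to userspace via
 * sync_fence_install().  When the hardware completes work, the driver calls
 * sync_timeline_signal() so any pts that now report signaled resolve their
 * fences.
 */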
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;
        unsigned long flags;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        spin_lock_init(&obj->child_list_lock);

        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->active_list_lock);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        kfree(obj);
}

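/*
 * Marks the timeline destroyed and drops the creator's reference.  If
 * sync_pts still hold references, the timeline is signaled once more so
 * that any remaining active pts resolve to -ENOENT in
 * _sync_pt_has_signaled().
 */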
void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;

        /*
         * If this is not the last reference, signal any children
         * that their parent is going away.
         */

        if (!kref_put(&obj->kref, sync_timeline_free))
                sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
        unsigned long flags;

        pt->parent = obj;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;

        spin_lock_irqsave(&obj->active_list_lock, flags);
        if (!list_empty(&pt->active_list))
                list_del_init(&pt->active_list);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        spin_lock_irqsave(&obj->child_list_lock, flags);
        if (!list_empty(&pt->child_list))
                list_del_init(&pt->child_list);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

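/*
 * Walks the timeline's active list and collects every pt that now reports
 * signaled onto a local list, taking a temporary reference on each pt's
 * fence so it cannot be freed once the active_list_lock is dropped.  The
 * affected fences are then signaled outside the lock.
 */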
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt)) {
                        list_del_init(pos);
                        list_add(&pt->signaled_list, &signaled_pts);
                        kref_get(&pt->fence->kref);
                }
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, signaled_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
                kref_put(&pt->fence->kref, sync_fence_free);
        }
}
EXPORT_SYMBOL(sync_timeline_signal);

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (pt == NULL)
                return NULL;

        INIT_LIST_HEAD(&pt->active_list);
        kref_get(&parent->kref);
        sync_timeline_add_pt(parent, pt);

        return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        kref_put(&pt->parent->kref, sync_timeline_free);

        kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
        return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        err = _sync_pt_has_signaled(pt);
        if (err != 0) {
                spin_unlock_irqrestore(&obj->active_list_lock, flags);
                sync_fence_signal_pt(pt);
                return;
        }

        list_add_tail(&pt->active_list, &obj->active_list_head);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);

static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
};

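/*
 * Allocates a fence, backs it with an anonymous inode file (so it can be
 * handed to userspace as a file descriptor) and links it into the global
 * fence list used by the debugfs dump.
 */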
static struct sync_fence *sync_fence_alloc(const char *name)
{
        struct sync_fence *fence;
        unsigned long flags;

        fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (fence->file == NULL)
                goto err;

        kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));

        INIT_LIST_HEAD(&fence->pt_list_head);
        INIT_LIST_HEAD(&fence->waiter_list_head);
        spin_lock_init(&fence->waiter_list_lock);

        init_waitqueue_head(&fence->wq);

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        return fence;

err:
        kfree(fence);
        return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        sync_pt_activate(pt);

        /*
         * signal the fence in case pt had already signaled before
         * sync_pt_activate(pt) was called
         */
        sync_fence_signal_pt(pt);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);

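/*
 * Helpers for sync_fence_merge(): sync_fence_copy_pts() duplicates every pt
 * of src onto dst, while sync_fence_merge_pts() adds src's pts to dst,
 * collapsing any pair that shares a timeline into the single pt that
 * signals later.
 */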
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *pos;

        list_for_each(pos, &src->pt_list_head) {
                struct sync_pt *orig_pt =
                        container_of(pos, struct sync_pt, pt_list);
                struct sync_pt *new_pt = sync_pt_dup(orig_pt);

                if (new_pt == NULL)
                        return -ENOMEM;

                new_pt->fence = dst;
                list_add(&new_pt->pt_list, &dst->pt_list_head);
                sync_pt_activate(new_pt);
        }

        return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *src_pos, *dst_pos, *n;

        list_for_each(src_pos, &src->pt_list_head) {
                struct sync_pt *src_pt =
                        container_of(src_pos, struct sync_pt, pt_list);
                bool collapsed = false;

                list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
                        struct sync_pt *dst_pt =
                                container_of(dst_pos, struct sync_pt, pt_list);
                        /* collapse two sync_pts on the same timeline
                         * to a single sync_pt that will signal at
                         * the later of the two
                         */
                        if (dst_pt->parent == src_pt->parent) {
                                if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
                                        struct sync_pt *new_pt =
                                                sync_pt_dup(src_pt);
                                        if (new_pt == NULL)
                                                return -ENOMEM;

                                        new_pt->fence = dst;
                                        list_replace(&dst_pt->pt_list,
                                                     &new_pt->pt_list);
                                        sync_pt_activate(new_pt);
                                        sync_pt_free(dst_pt);
                                }
                                collapsed = true;
                                break;
                        }
                }

                if (!collapsed) {
                        struct sync_pt *new_pt = sync_pt_dup(src_pt);

                        if (new_pt == NULL)
                                return -ENOMEM;

                        new_pt->fence = dst;
                        list_add(&new_pt->pt_list, &dst->pt_list_head);
                        sync_pt_activate(new_pt);
                }
        }

        return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                sync_timeline_remove_pt(pt);
        }
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                sync_pt_free(pt);
        }
}

struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (file == NULL)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

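/*
 * Computes the fence's aggregate status from its pts: 1 if every pt has
 * signaled, 0 if any pt is still active, or the first negative error code
 * encountered.
 */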
static int sync_fence_get_status(struct sync_fence *fence)
{
        struct list_head *pos;
        int status = 1;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                int pt_status = pt->status;

                if (pt_status < 0) {
                        status = pt_status;
                        break;
                } else if (status == 1) {
                        status = pt_status;
                }
        }

        return status;
}

struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        struct sync_fence *fence;
        int err;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        err = sync_fence_copy_pts(fence, a);
        if (err < 0)
                goto err;

        err = sync_fence_merge_pts(fence, b);
        if (err < 0)
                goto err;

        /*
         * signal the fence in case any of its pts had already signaled
         * before they were added to this merged fence
         */
        sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
                                              struct sync_pt,
                                              pt_list));

        return fence;
err:
        sync_fence_free_pts(fence);
        kfree(fence);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

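/*
 * Called when a pt belonging to the fence signals.  Recomputes the fence's
 * aggregate status and, on the 0 -> signaled/error transition (guarded by
 * waiter_list_lock), runs the queued async waiter callbacks and wakes any
 * synchronous waiters.
 */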
static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        list_del(pos);
                        waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
}

int sync_fence_wait_async(struct sync_fence *fence,
                          struct sync_fence_waiter *waiter)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);

        if (fence->status) {
                err = fence->status;
                goto out;
        }

        list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
                            struct sync_fence_waiter *waiter)
{
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int ret = -ENOENT;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * Make sure waiter is still in waiter_list because it is possible for
         * the waiter to be removed from the list while the callback is still
         * pending.
         */
        list_for_each_safe(pos, n, &fence->waiter_list_head) {
                struct sync_fence_waiter *list_waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);
                if (list_waiter == waiter) {
                        list_del(pos);
                        ret = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
        return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();
        return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err = 0;

        if (timeout > 0) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       sync_fence_check(fence),
                                                       timeout);
        } else if (timeout < 0) {
                err = wait_event_interruptible(fence->wq, fence->status != 0);
        }

        if (err < 0)
                return err;

        if (fence->status < 0) {
                pr_info("fence error %d on [%p]\n", fence->status, fence);
                sync_dump();
                return fence->status;
        }

        if (fence->status == 0) {
                pr_info("fence timeout on [%p] after %dms\n", fence,
                        jiffies_to_msecs(timeout));
                sync_dump();
                return -ETIME;
        }

        return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

        sync_fence_free_pts(fence);

        kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;
        unsigned long flags;

        /*
         * We need to remove all ways to access this fence before dropping
         * our ref.
         *
         * start with its membership in the global fence list
         */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        /*
         * remove its pts from their parents so that sync_timeline_signal()
         * can't reference the fence.
         */
        sync_fence_detach_pts(fence);

        kref_put(&fence->kref, sync_fence_free);

        return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
        struct sync_fence *fence = file->private_data;

        poll_wait(file, &fence->wq, wait);

        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();

        if (fence->status == 1)
                return POLLIN;
        else if (fence->status < 0)
                return POLLERR;
        else
                return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __s32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd();
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}

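/*
 * Fills one sync_pt_info record for the SYNC_IOC_FENCE_INFO ioctl,
 * including any driver-specific payload appended via
 * ops->fill_driver_data().  Returns the number of bytes written or a
 * negative error if the remaining buffer is too small.
 */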
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (pt->parent->ops->fill_driver_data) {
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;

        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
        if (status > 0)
                return "signaled";
        else if (status == 0)
                return "active";
        else
                return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;

        seq_printf(s, "  %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));
        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);
                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->print_pt) {
                seq_printf(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->print_obj) {
                seq_printf(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_printf(s, "\n");

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);
                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
                   sync_status_str(fence->status));

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);
                sync_print_pt(s, pt, true);
        }

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_printf(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_printf(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
        .open           = sync_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}
late_initcall(sync_debugfs_init);

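/*
 * Dumps the same state as the debugfs "sync" file to the kernel log in
 * DUMP_CHUNK-sized pieces so each pr_cont() call stays within printk's
 * limits.  Called from sync_fence_wait() when a fence times out or reports
 * an error.
 */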
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
        struct seq_file s = {
                .buf = sync_dump_buf,
                .size = sizeof(sync_dump_buf) - 1,
        };
        int i;

        sync_debugfs_show(&s, NULL);

        for (i = 0; i < s.count; i += DUMP_CHUNK) {
                if ((s.count - i) > DUMP_CHUNK) {
                        char c = s.buf[i + DUMP_CHUNK];

                        s.buf[i + DUMP_CHUNK] = 0;
                        pr_cont("%s", s.buf + i);
                        s.buf[i + DUMP_CHUNK] = c;
                } else {
                        s.buf[s.count] = 0;
                        pr_cont("%s", s.buf + i);
                }
        }
}
#else
static void sync_dump(void)
{
}
#endif