/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/sync.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

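/*
 * sync_timeline_create() - creates a new sync timeline
 *
 * @size is the driver's full object size, so a sync_timeline can be
 * embedded at the start of a larger driver-specific struct; it must be
 * at least sizeof(struct sync_timeline).  The new timeline holds one
 * kref and is linked onto the global timeline list used by the debugfs
 * dump.
 */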
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;
        unsigned long flags;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        spin_lock_init(&obj->child_list_lock);

        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->active_list_lock);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

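/*
 * Final kref release: gives the driver a chance to clean up via
 * ops->release_obj, unlinks the timeline from the global list, and
 * frees it.
 */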
static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;

        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        kfree(obj);
}

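/*
 * Marks the timeline as destroyed and drops the creator's reference.
 * Pending pts on a destroyed timeline report -ENOENT the next time they
 * are checked (see _sync_pt_has_signaled()).
 */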
void sync_timeline_destroy(struct sync_timeline *obj)
{
        obj->destroyed = true;

        /*
         * If this is not the last reference, signal any children
         * that their parent is going away.
         */

        if (!kref_put(&obj->kref, sync_timeline_free))
                sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
        unsigned long flags;

        pt->parent = obj;

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_add_tail(&pt->child_list, &obj->child_list_head);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;

        spin_lock_irqsave(&obj->active_list_lock, flags);
        if (!list_empty(&pt->active_list))
                list_del_init(&pt->active_list);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        spin_lock_irqsave(&obj->child_list_lock, flags);
        if (!list_empty(&pt->child_list))
                list_del_init(&pt->child_list);
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

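/*
 * Called by the timeline driver when the timeline's value has advanced.
 * Walks the active list, moves every pt that now reports signaled onto a
 * local list (taking a fence ref so the fence can't be freed under us),
 * then signals each fence outside the active-list lock.
 */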
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        trace_sync_timeline(obj);

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt)) {
                        list_del_init(pos);
                        list_add(&pt->signaled_list, &signaled_pts);
                        kref_get(&pt->fence->kref);
                }
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, signaled_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
                kref_put(&pt->fence->kref, sync_fence_free);
        }
}
EXPORT_SYMBOL(sync_timeline_signal);

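/*
 * Allocates a new sync_pt of @size bytes (drivers embed sync_pt the same
 * way timelines are embedded) and attaches it to @parent's child list,
 * taking a timeline ref that is dropped again in sync_pt_free().
 */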
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
        struct sync_pt *pt;

        if (size < sizeof(struct sync_pt))
                return NULL;

        pt = kzalloc(size, GFP_KERNEL);
        if (pt == NULL)
                return NULL;

        INIT_LIST_HEAD(&pt->active_list);
        kref_get(&parent->kref);
        sync_timeline_add_pt(parent, pt);

        return pt;
}
EXPORT_SYMBOL(sync_pt_create);

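/*
 * Releases the driver's per-pt state, detaches the pt from its timeline,
 * and drops the timeline ref taken in sync_pt_create().
 */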
void sync_pt_free(struct sync_pt *pt)
{
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        kref_put(&pt->parent->kref, sync_timeline_free);

        kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
        return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue. Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        err = _sync_pt_has_signaled(pt);
        if (err != 0) {
                spin_unlock_irqrestore(&obj->active_list_lock, flags);
                sync_fence_signal_pt(pt);
                return;
        }

        list_add_tail(&pt->active_list, &obj->active_list_head);
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);

static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
};

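/*
 * Allocates a fence and backs it with an anonymous inode file so it can
 * be handed to userspace as a file descriptor; the fence is also added
 * to the global fence list used by the debugfs dump.
 */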
static struct sync_fence *sync_fence_alloc(const char *name)
{
        struct sync_fence *fence;
        unsigned long flags;

        fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        /* anon_inode_getfile() returns an ERR_PTR(), not NULL, on failure */
        fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
                                         fence, 0);
        if (IS_ERR(fence->file))
                goto err;

        kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));

        INIT_LIST_HEAD(&fence->pt_list_head);
        INIT_LIST_HEAD(&fence->waiter_list_head);
        spin_lock_init(&fence->waiter_list_lock);

        init_waitqueue_head(&fence->wq);

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        return fence;

err:
        kfree(fence);
        return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        sync_pt_activate(pt);

        /*
         * signal the fence in case pt was signaled on its timeline
         * before sync_pt_activate(pt) was called
         */
        sync_fence_signal_pt(pt);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);

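/*
 * Duplicates every pt in @src into @dst.  Used by sync_fence_merge() to
 * seed the merged fence with fence @a's pts before @b's are merged in.
 */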
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *pos;

        list_for_each(pos, &src->pt_list_head) {
                struct sync_pt *orig_pt =
                        container_of(pos, struct sync_pt, pt_list);
                struct sync_pt *new_pt = sync_pt_dup(orig_pt);

                if (new_pt == NULL)
                        return -ENOMEM;

                new_pt->fence = dst;
                list_add(&new_pt->pt_list, &dst->pt_list_head);
                sync_pt_activate(new_pt);
        }

        return 0;
}

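/*
 * Merges @src's pts into @dst, collapsing pts that share a timeline into
 * the one that signals later (as decided by ops->compare); pts on
 * timelines not yet present in @dst are duplicated and added.
 */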
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *src_pos, *dst_pos, *n;

        list_for_each(src_pos, &src->pt_list_head) {
                struct sync_pt *src_pt =
                        container_of(src_pos, struct sync_pt, pt_list);
                bool collapsed = false;

                list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
                        struct sync_pt *dst_pt =
                                container_of(dst_pos, struct sync_pt, pt_list);
                        /*
                         * collapse two sync_pts on the same timeline
                         * to a single sync_pt that will signal at
                         * the later of the two
                         */
                        if (dst_pt->parent == src_pt->parent) {
                                if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
                                        struct sync_pt *new_pt =
                                                sync_pt_dup(src_pt);
                                        if (new_pt == NULL)
                                                return -ENOMEM;

                                        new_pt->fence = dst;
                                        list_replace(&dst_pt->pt_list,
                                                     &new_pt->pt_list);
                                        sync_pt_activate(new_pt);
                                        sync_pt_free(dst_pt);
                                }
                                collapsed = true;
                                break;
                        }
                }

                if (!collapsed) {
                        struct sync_pt *new_pt = sync_pt_dup(src_pt);

                        if (new_pt == NULL)
                                return -ENOMEM;

                        new_pt->fence = dst;
                        list_add(&new_pt->pt_list, &dst->pt_list_head);
                        sync_pt_activate(new_pt);
                }
        }

        return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                sync_timeline_remove_pt(pt);
        }
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                sync_pt_free(pt);
        }
}

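/*
 * Translates a userspace fd into a sync_fence, verifying that the file
 * really is a sync fence.  Takes a file reference that the caller must
 * release with sync_fence_put().
 */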
struct sync_fence *sync_fence_fdget(int fd)
{
        struct file *file = fget(fd);

        if (file == NULL)
                return NULL;

        if (file->f_op != &sync_fence_fops)
                goto err;

        return file->private_data;

err:
        fput(file);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

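/*
 * Computes the fence-wide status from its pts: 1 (signaled) only if
 * every pt has signaled, a negative error if any pt reports one, else 0
 * (active).
 */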
static int sync_fence_get_status(struct sync_fence *fence)
{
        struct list_head *pos;
        int status = 1;

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
                int pt_status = pt->status;

                if (pt_status < 0) {
                        status = pt_status;
                        break;
                } else if (status == 1) {
                        status = pt_status;
                }
        }

        return status;
}

struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        struct sync_fence *fence;
        int err;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        err = sync_fence_copy_pts(fence, a);
        if (err < 0)
                goto err;

        err = sync_fence_merge_pts(fence, b);
        if (err < 0)
                goto err;

        /*
         * signal the fence in case one of its pts was signaled
         * before it was activated
         */
        sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
                                              struct sync_pt,
                                              pt_list));

        return fence;
err:
        sync_fence_free_pts(fence);
        kfree(fence);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

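/*
 * Re-evaluates @pt's fence after the pt's status may have changed.  On
 * the fence's 0 -> signaled/error transition, runs all registered async
 * waiter callbacks and wakes up any sleepers in sync_fence_wait().
 */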
static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        list_del(pos);
                        waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
}

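/*
 * Registers an async waiter whose callback runs when the fence signals.
 * Returns 0 if the waiter was queued; if the fence has already signaled,
 * returns its current status and the callback is never called.
 */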
int sync_fence_wait_async(struct sync_fence *fence,
                          struct sync_fence_waiter *waiter)
{
        unsigned long flags;
        int err = 0;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);

        if (fence->status) {
                err = fence->status;
                goto out;
        }

        list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
                            struct sync_fence_waiter *waiter)
{
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int ret = -ENOENT;

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * Make sure waiter is still in waiter_list because it is possible for
         * the waiter to be removed from the list while the callback is still
         * pending.
         */
        list_for_each_safe(pos, n, &fence->waiter_list_head) {
                struct sync_fence_waiter *list_waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);
                if (list_waiter == waiter) {
                        list_del(pos);
                        ret = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
        return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();
        return fence->status != 0;
}

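/*
 * Waits up to @timeout ms for the fence to signal; a negative @timeout
 * waits forever and a zero @timeout just polls the current status.
 * Returns 0 on success, -ETIME on timeout, or the fence's error status.
 */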
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err = 0;
        struct sync_pt *pt;

        trace_sync_wait(fence, 1);
        list_for_each_entry(pt, &fence->pt_list_head, pt_list)
                trace_sync_pt(pt);

        if (timeout > 0) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       sync_fence_check(fence),
                                                       timeout);
        } else if (timeout < 0) {
                err = wait_event_interruptible(fence->wq,
                                               sync_fence_check(fence));
        }
        trace_sync_wait(fence, 0);

        if (err < 0)
                return err;

        if (fence->status < 0) {
                pr_info("fence error %d on [%p]\n", fence->status, fence);
                sync_dump();
                return fence->status;
        }

        if (fence->status == 0) {
                pr_info("fence timeout on [%p] after %dms\n", fence,
                        jiffies_to_msecs(timeout));
                sync_dump();
                return -ETIME;
        }

        return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

        sync_fence_free_pts(fence);

        kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;
        unsigned long flags;

        /*
         * We need to remove all ways to access this fence before dropping
         * our ref.
         *
         * start with its membership in the global fence list
         */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        /*
         * remove its pts from their parents so that sync_timeline_signal()
         * can't reference the fence.
         */
        sync_fence_detach_pts(fence);

        kref_put(&fence->kref, sync_fence_free);

        return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
        struct sync_fence *fence = file->private_data;

        poll_wait(file, &fence->wq, wait);

        /*
         * Make sure that reads to fence->status are ordered with the
         * wait queue event triggering
         */
        smp_rmb();

        if (fence->status == 1)
                return POLLIN;
        else if (fence->status < 0)
                return POLLERR;
        else
                return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
        __s32 value;

        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;

        return sync_fence_wait(fence, value);
}

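/*
 * SYNC_IOC_MERGE: merges this fence with the fence in data.fd2 and
 * returns the new fence to userspace as a freshly installed fd in
 * data.fence.  Both source fences remain valid and owned by the caller.
 */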
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd();
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                /* don't leak the reserved fd on error */
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (pt->parent->ops->fill_driver_data) {
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}

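/*
 * SYNC_IOC_FENCE_INFO: fills the user-supplied buffer (capped at 4096
 * bytes) with a sync_fence_info_data header followed by one
 * sync_pt_info record per pt, including any driver-specific payload.
 */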
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct sync_fence *fence = file->private_data;

        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);

        case SYNC_IOC_MERGE:
                return sync_fence_ioctl_merge(fence, arg);

        case SYNC_IOC_FENCE_INFO:
                return sync_fence_ioctl_fence_info(fence, arg);

        default:
                return -ENOTTY;
        }
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
        if (status > 0)
                return "signaled";
        else if (status == 0)
                return "active";
        else
                return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;

        seq_printf(s, " %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));
        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);
                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];
                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                seq_printf(s, ": %s", value);
                if (fence) {
                        pt->parent->ops->timeline_value_str(pt->parent, value,
                                                            sizeof(value));
                        seq_printf(s, " / %s", value);
                }
        } else if (pt->parent->ops->print_pt) {
                seq_printf(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->timeline_value_str) {
                char value[64];
                obj->ops->timeline_value_str(obj, value, sizeof(value));
                seq_printf(s, ": %s", value);
        } else if (obj->ops->print_obj) {
                seq_printf(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_printf(s, "\n");

        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);
                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
                   sync_status_str(fence->status));

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);
                sync_print_pt(s, pt, true);
        }

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_printf(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_printf(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
        .open = sync_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}
late_initcall(sync_debugfs_init);

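/*
 * Dumps the same state as the debugfs "sync" file to the kernel log in
 * DUMP_CHUNK-sized pr_cont() pieces; called when a fence errors out or
 * times out in sync_fence_wait().
 */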
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
void sync_dump(void)
{
        struct seq_file s = {
                .buf = sync_dump_buf,
                .size = sizeof(sync_dump_buf) - 1,
        };
        int i;

        sync_debugfs_show(&s, NULL);

        for (i = 0; i < s.count; i += DUMP_CHUNK) {
                if ((s.count - i) > DUMP_CHUNK) {
                        char c = s.buf[i + DUMP_CHUNK];
                        s.buf[i + DUMP_CHUNK] = 0;
                        pr_cont("%s", s.buf + i);
                        s.buf[i + DUMP_CHUNK] = c;
                } else {
                        s.buf[s.count] = 0;
                        pr_cont("%s", s.buf + i);
                }
        }
}
#else
static void sync_dump(void)
{
}
#endif