/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/sync.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
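
/*
 * Illustrative only: a driver normally embeds struct sync_timeline at the
 * start of its own timeline object and passes the enclosing size here.
 * "my_timeline" and "my_timeline_ops" are hypothetical names:
 *
 *	struct my_timeline {
 *		struct sync_timeline obj;
 *		u32 value;
 *	};
 *
 *	struct my_timeline *tl = (struct my_timeline *)
 *		sync_timeline_create(&my_timeline_ops,
 *				     sizeof(struct my_timeline), "my_tl");
 */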

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
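
/*
 * Illustrative only: a driver's completion path (here a hypothetical
 * interrupt handler) advances its private counter and then calls
 * sync_timeline_signal() so that pts whose ops->has_signaled() now
 * returns true are taken off the active list and their fences signaled:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_timeline *tl = data;
 *
 *		tl->value++;
 *		sync_timeline_signal(&tl->obj);
 *		return IRQ_HANDLED;
 *	}
 */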

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0) {
		spin_unlock_irqrestore(&obj->active_list_lock, flags);
		sync_fence_signal_pt(pt);
		return;
	}

	list_add_tail(&pt->active_list, &obj->active_list_head);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (fence->file == NULL)
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt signaled before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
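
/*
 * Illustrative only: a typical driver flow for handing a fence to
 * userspace.  The timeline, sizes, and error handling are hypothetical:
 *
 *	struct sync_pt *pt = sync_pt_create(&tl->obj, sizeof(*pt));
 *	struct sync_fence *fence = sync_fence_create("my_fence", pt);
 *	int fd = get_unused_fd();
 *
 *	sync_fence_install(fence, fd);
 *
 * The installed fd owns the file reference, so this path does not need a
 * matching sync_fence_put().
 */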

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	/*
	 * signal the fence in case one of its pts signaled before
	 * it was activated
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
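
/*
 * Illustrative only: merging two fences received from userspace as fds.
 * Error handling is omitted and the fd variables are hypothetical:
 *
 *	struct sync_fence *a = sync_fence_fdget(fd_a);
 *	struct sync_fence *b = sync_fence_fdget(fd_b);
 *	struct sync_fence *merged = sync_fence_merge("merged", a, b);
 *
 *	sync_fence_put(a);
 *	sync_fence_put(b);
 *
 * The merged fence signals once every pt copied from both inputs has
 * signaled; pts on the same timeline collapse to the later of the two.
 */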

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
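
/*
 * Illustrative only: registering an async waiter.  The callback runs in
 * the signaling context, so it must not sleep.  "my_fence_cb" and
 * "my_waiter" are hypothetical, and sync_fence_waiter_init() is assumed
 * to be provided by <linux/sync.h>:
 *
 *	static void my_fence_cb(struct sync_fence *fence,
 *				struct sync_fence_waiter *waiter)
 *	{
 *		// schedule deferred work here
 *	}
 *
 *	sync_fence_waiter_init(&my_waiter, my_fence_cb);
 *	err = sync_fence_wait_async(fence, &my_waiter);
 *	// err != 0: fence already signaled or errored, no callback
 *	// err == 0: callback will run (or use sync_fence_cancel_async())
 */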

int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		pr_info("fence timeout on [%p] after %dms\n", fence,
			jiffies_to_msecs(timeout));
		sync_dump();
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
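
/*
 * Timeout semantics for sync_fence_wait(), illustrated: a positive
 * timeout is in milliseconds, a negative timeout waits indefinitely, and
 * zero just polls the current status:
 *
 *	err = sync_fence_wait(fence, 1000);	// wait up to 1s
 *	err = sync_fence_wait(fence, -1);	// wait forever
 *	err = sync_fence_wait(fence, 0);	// poll; -ETIME if unsignaled
 */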

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;
	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
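
/*
 * Illustrative only: how userspace typically drives these ioctls (in
 * practice via a wrapper library such as libsync; exact userspace code
 * is outside this file):
 *
 *	__s32 timeout_ms = 1000;
 *	int err = ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);
 *
 *	struct sync_merge_data data = { .fd2 = other_fd, .name = "merged" };
 *	err = ioctl(fence_fd, SYNC_IOC_MERGE, &data);
 *	// on success, data.fence holds the new merged fence's fd
 */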

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];
		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];
		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);
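
/*
 * With debugfs mounted at the conventional location, the state printed by
 * sync_debugfs_show() can be read from /sys/kernel/debug/sync.
 */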

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif