/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/sync.h>

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

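/*
 * Allocates a timeline of at least sizeof(struct sync_timeline) bytes
 * (drivers may ask for more to hold private state), takes the initial
 * kref, and adds the timeline to the global list used by the debugfs
 * dump below.
 */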
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

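/*
 * kref release callback: gives the driver a chance to release its
 * state, unlinks the timeline from the global list, and frees it.
 */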
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

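/*
 * Marking the timeline destroyed makes _sync_pt_has_signaled() report
 * -ENOENT for any pt that has not signaled yet, so the final
 * sync_timeline_signal() below flushes all remaining waiters with an
 * error status.
 */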
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

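/*
 * Signaling runs in two phases: signaled pts are first collected onto
 * a private list under active_list_lock, then their fences are
 * signaled outside the lock.  A fence kref is taken for each pt so the
 * fence cannot be freed while it sits on the private list.
 */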
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

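/*
 * Allocates a fence and backs it with an anonymous inode file.  The
 * fence holds a single kref that is dropped on file release;
 * sync_timeline_signal() takes a temporary extra reference while
 * signaling.
 */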
static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (fence->file == NULL)
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt had already signaled before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

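/* duplicate each pt in src and attach the duplicates to dst */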
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
	}

	return 0;
}

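/*
 * Merge src's pts into dst.  A pt that shares a timeline with an
 * existing dst pt is collapsed into whichever of the two signals
 * later, as decided by the timeline's compare() op.
 */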
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

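/*
 * Looks up the fence for a file descriptor, taking a file reference
 * that the caller must drop with sync_fence_put().  Returns NULL if
 * fd does not refer to a sync fence.
 */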
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

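/*
 * Computes the fence status from its pts: an error status wins,
 * otherwise the fence counts as signaled (1) only if every pt has
 * signaled.
 */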
static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

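/*
 * Creates a third fence containing copies of the pts in both a and b,
 * collapsed per timeline by sync_fence_merge_pts().  The caller owns
 * the new fence; neither input fence is consumed.
 */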
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	struct list_head *pos;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_pt_activate(pt);
	}

	/*
	 * signal the fence in case one of its pts had already signaled
	 * before it was activated above
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

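/*
 * Called whenever pt's status changes.  Re-evaluates the fence status
 * and, on the false -> true transition, fires the async waiter
 * callbacks and wakes any threads blocked in sync_fence_wait().
 */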
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

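/*
 * Registers an async waiter whose callback runs when the fence
 * signals.  Returns 0 on success, or the current fence status if it
 * has already signaled, in which case the callback is not registered.
 */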
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
			    struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

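/*
 * Waits up to timeout ms for the fence to signal.  A negative timeout
 * waits indefinitely; a timeout of zero just polls the current status.
 */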
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		pr_info("fence timeout on [%p] after %dms\n", fence,
			jiffies_to_msecs(timeout));
		sync_dump();
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;
	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];
		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
							    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];
		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

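/*
 * sync_dump() mirrors the debugfs "sync" state into the kernel log
 * when a fence errors out or times out.  The buffer is printed in
 * DUMP_CHUNK-sized pieces, temporarily NUL-terminating each chunk for
 * pr_cont().
 */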
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif