Copied caf 2.5.1 video/gpu genlock and rotator [WIP]
diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c
index 653e1fe..8b7259a 100644
--- a/drivers/base/genlock.c
+++ b/drivers/base/genlock.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,9 @@
 #include <linux/miscdevice.h>
 #include <linux/genlock.h>
 
+/* Lock states - a lock can either be unlocked, held as an exclusive write
+ * lock, or held as a shared read lock
+ */
 
 #define _UNLOCKED 0
 #define _RDLOCK  GENLOCK_RDLOCK
@@ -31,27 +34,35 @@
 #define GENLOCK_LOG_ERR(fmt, args...) \
 pr_err("genlock: %s: " fmt, __func__, ##args)
 
+/* The genlock magic stored in the kernel private data is used to protect
+ * against the possibility of user space passing a valid fd that refers
+ * to a non-genlock file to genlock_attach_lock()
+ */
 #define GENLOCK_MAGIC_OK  0xD2EAD10C
 #define GENLOCK_MAGIC_BAD 0xD2EADBAD
 
 struct genlock {
-	unsigned int magic;       
-	struct list_head active;  
-	spinlock_t lock;          
-	wait_queue_head_t queue;  
-	struct file *file;        
-	int state;                
+	unsigned int magic;       /* Magic for attach verification */
+	struct list_head active;  /* List of handles holding lock */
+	spinlock_t lock;          /* Spinlock to protect the lock internals */
+	wait_queue_head_t queue;  /* Holding pen for processes pending lock */
+	struct file *file;        /* File structure for exported lock */
+	int state;                /* Current state of the lock */
 	struct kref refcount;
 };
 
 struct genlock_handle {
-	struct genlock *lock;     
-	struct list_head entry;   
-	struct file *file;        
-	int active;		  
-	struct genlock_info info;
+	struct genlock *lock;     /* Lock currently attached to the handle */
+	struct list_head entry;   /* List node for attaching to a lock */
+	struct file *file;        /* File structure associated with handle */
+	int active;		  /* Number of times the active lock has been
+				     taken */
 };
 
+/*
+ * Create a spinlock to protect against a race condition when a lock gets
+ * released while another process tries to attach it
+ */
 
 static DEFINE_SPINLOCK(genlock_ref_lock);
 
@@ -60,6 +71,10 @@
 	struct genlock *lock = container_of(kref, struct genlock,
 			refcount);
 
+	/*
+	 * Clear the private data for the file descriptor in case the fd is
+	 * still active after the lock gets released
+	 */
 
 	if (lock->file)
 		lock->file->private_data = NULL;
@@ -68,10 +83,18 @@
 	kfree(lock);
 }
 
+/*
+ * Release the genlock object. Called when all the references to
+ * the genlock file descriptor are released
+ */
 
 static int genlock_release(struct inode *inodep, struct file *file)
 {
 	struct genlock *lock = file->private_data;
+	/*
+	 * Clear the reference back to this file structure so the lock
+	 * cannot somehow be reused after the file has been destroyed
+	 */
 
 	if (lock)
 		lock->file = NULL;
@@ -83,10 +106,17 @@
 	.release = genlock_release,
 };
 
+/**
+ * genlock_create_lock - Create a new lock
+ * @handle - genlock handle to attach the lock to
+ *
+ * Returns: a pointer to the genlock
+ */
 
 struct genlock *genlock_create_lock(struct genlock_handle *handle)
 {
 	struct genlock *lock;
+	void *ret;
 
 	if (IS_ERR_OR_NULL(handle)) {
 		GENLOCK_LOG_ERR("Invalid handle\n");
@@ -111,11 +141,20 @@
 	lock->magic = GENLOCK_MAGIC_OK;
 	lock->state = _UNLOCKED;
 
+	/*
+	 * Create an anonymous inode for the object so that it can be
+	 * exported to other processes
+	 */
 
-	lock->file = anon_inode_getfile("genlock", &genlock_fops,
-		lock, O_RDWR);
+	ret = anon_inode_getfile("genlock", &genlock_fops, lock, O_RDWR);
+	if (IS_ERR_OR_NULL(ret)) {
+		GENLOCK_LOG_ERR("Unable to create lock inode\n");
+		kfree(lock);
+		return ret;
+	}
+	lock->file = ret;
 
-	
+	/* Attach the new lock to the handle */
 	handle->lock = lock;
 	kref_init(&lock->refcount);
 
@@ -123,6 +162,10 @@
 }
 EXPORT_SYMBOL(genlock_create_lock);
 
+/*
+ * Get a file descriptor reference to a lock suitable for sharing with
+ * other processes
+ */
 
 static int genlock_get_fd(struct genlock *lock)
 {
@@ -140,6 +183,13 @@
 	return ret;
 }
 
+/**
+ * genlock_attach_lock - Attach an existing lock to a handle
+ * @handle - Pointer to a genlock handle to attach the lock to
+ * @fd - file descriptor for the exported lock
+ *
+ * Returns: A pointer to the attached lock structure
+ */
 
 struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd)
 {
@@ -162,6 +212,10 @@
 		return ERR_PTR(-EBADF);
 	}
 
+	/*
+	 * take the genlock_ref_lock spinlock to guard against the race
+	 * where the lock is released while it is being attached
+	 */
 
 	spin_lock(&genlock_ref_lock);
 	lock = file->private_data;
@@ -190,6 +244,7 @@
 }
 EXPORT_SYMBOL(genlock_attach_lock);
 
+/* Helper function that returns 1 if the specified handle holds the lock */
 
 static int handle_has_lock(struct genlock *lock, struct genlock_handle *handle)
 {
@@ -203,17 +258,19 @@
 	return 0;
 }
 
+/* If the lock just became available, signal the next entity waiting for it */
 
 static void _genlock_signal(struct genlock *lock)
 {
 	if (list_empty(&lock->active)) {
-		
+		/* If the list is empty, then the lock is free */
 		lock->state = _UNLOCKED;
-		
+		/* Wake up the first process sitting in the queue */
 		wake_up(&lock->queue);
 	}
 }
 
+/* Attempt to release the handle's ownership of the lock */
 
 static int _genlock_unlock(struct genlock *lock, struct genlock_handle *handle)
 {
@@ -227,11 +284,13 @@
 		goto done;
 	}
 
-	
+	/* Make sure this handle is an owner of the lock */
 	if (!handle_has_lock(lock, handle)) {
 		GENLOCK_LOG_ERR("handle does not have lock attached to it\n");
 		goto done;
 	}
+	/* If the handle holds no more references to the lock, release
+	   its ownership and signal the next waiter if the lock is free */
 
 	if (--handle->active == 0) {
 		list_del(&handle->entry);
@@ -245,6 +304,7 @@
 	return ret;
 }
 
+/* Attempt to acquire the lock for the handle */
 
 static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle,
 	int op, int flags, uint32_t timeout)
@@ -255,23 +315,42 @@
 
 	spin_lock_irqsave(&lock->lock, irqflags);
 
+	/* Sanity check - no blocking locks in an interrupt context. Even if
+	 * the lock would not actually block, the attempt is too dangerous
+	 */
 
 	if (in_interrupt() && !(flags & GENLOCK_NOBLOCK))
 		BUG();
 
-	
+	/* Fast path - the lock is unlocked, so take it straight away */
 
 	if (lock->state == _UNLOCKED)
 		goto dolock;
 
 	if (handle_has_lock(lock, handle)) {
 
+		/*
+		 * If the handle already holds the lock and the lock type is
+		 * a read lock then just increment the active pointer. This
+		 * allows the handle to do recursive read locks. Recursive
+		 * write locks are not allowed in order to support
+		 * synchronization within a process using a single gralloc
+		 * handle.
+		 */
 
 		if (lock->state == _RDLOCK && op == _RDLOCK) {
 			handle->active++;
 			goto done;
 		}
 
+		/*
+		 * If the handle holds a write lock then the owner can switch
+		 * to a read lock if they want. Do the transition atomically
+		 * then wake up any pending waiters in case they want a read
+		 * lock too. In order to support synchronization within a
+	 * process the caller must explicitly request to convert the
+		 * lock type with the GENLOCK_WRITE_TO_READ flag.
+		 */
 
 		if (flags & GENLOCK_WRITE_TO_READ) {
 			if (lock->state == _WRLOCK && op == _RDLOCK) {
@@ -287,6 +366,10 @@
 		}
 	} else {
 
+		/*
+		 * Check to ensure the caller has not attempted to convert a
+		 * write to a read without holding the lock.
+		 */
 
 		if (flags & GENLOCK_WRITE_TO_READ) {
 			GENLOCK_LOG_ERR("Handle must have lock to convert"
@@ -295,17 +378,32 @@
 			goto done;
 		}
 
+		/*
+		 * If we request a read and the lock is held by a read, then go
+		 * ahead and share the lock
+		 */
 
 		if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK)
 			goto dolock;
 	}
 
+	/* Treat timeout 0 just like a NOBLOCK flag and return if the
+	   lock cannot be acquired without blocking */
 
 	if (flags & GENLOCK_NOBLOCK || timeout == 0) {
 		ret = -EAGAIN;
 		goto done;
 	}
 
+	/*
+	 * Wait while the lock remains in an incompatible state
+	 * state    op    wait
+	 * -------------------
+	 * unlocked n/a   no
+	 * read     read  no
+	 * read     write yes
+	 * write    n/a   yes
+	 */
 
 	while ((lock->state == _RDLOCK && op == _WRLOCK) ||
 			lock->state == _WRLOCK) {
@@ -321,15 +419,6 @@
 		spin_lock_irqsave(&lock->lock, irqflags);
 
 		if (elapsed <= 0) {
-			if(list_empty(&lock->active)) {
-				pr_info("[genlock] lock failed, but list_empty\n");
-			} else {
-				struct genlock_handle *h;
-				pr_info("[genlock] lock failed %d, the follows hold lock %d\n", op, lock->state);
-				list_for_each_entry(h, &lock->active, entry) {
-					printk("[genlock] handle %p pid %d\n", h, h->info.pid);
-				}
-			}
 			ret = (elapsed < 0) ? elapsed : -ETIMEDOUT;
 			goto done;
 		}
@@ -338,7 +427,7 @@
 	}
 
 dolock:
-	
+	/* We can now get the lock, add ourselves to the list of owners */
 
 	list_add_tail(&handle->entry, &lock->active);
 	lock->state = op;
@@ -350,6 +439,15 @@
 
 }
 
+/**
+ * genlock_lock - Acquire or release a lock (deprecated)
+ * @handle - pointer to the genlock handle that is requesting the lock
+ * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK)
+ * @flags - flags to control the operation
+ * @timeout - optional timeout to wait for the lock to come free
+ *
+ * Returns: 0 on success or error code on failure
+ */
 
 int genlock_lock(struct genlock_handle *handle, int op, int flags,
 	uint32_t timeout)
@@ -378,11 +476,11 @@
 	case GENLOCK_RDLOCK:
 		spin_lock_irqsave(&lock->lock, irqflags);
 		if (handle_has_lock(lock, handle)) {
-			
+			/* request the WRITE_TO_READ flag for compatibility */
 			flags |= GENLOCK_WRITE_TO_READ;
 		}
 		spin_unlock_irqrestore(&lock->lock, irqflags);
-		
+		/* fall through to take lock */
 	case GENLOCK_WRLOCK:
 		ret = _genlock_lock(lock, handle, op, flags, timeout);
 		break;
@@ -396,6 +494,15 @@
 }
 EXPORT_SYMBOL(genlock_lock);
 
+/**
+ * genlock_dreadlock - Acquire or release a lock
+ * @handle - pointer to the genlock handle that is requesting the lock
+ * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK)
+ * @flags - flags to control the operation
+ * @timeout - optional timeout to wait for the lock to come free
+ *
+ * Returns: 0 on success or error code on failure
+ */
 
 int genlock_dreadlock(struct genlock_handle *handle, int op, int flags,
 	uint32_t timeout)
@@ -434,6 +541,11 @@
 }
 EXPORT_SYMBOL(genlock_dreadlock);
 
+/**
+ * genlock_wait - Wait for the lock to be released
+ * @handle - pointer to the genlock handle that is waiting for the lock
+ * @timeout - optional timeout to wait for the lock to get released
+ */
 
 int genlock_wait(struct genlock_handle *handle, uint32_t timeout)
 {
@@ -456,6 +568,10 @@
 
 	spin_lock_irqsave(&lock->lock, irqflags);
 
+	/*
+	 * if timeout is 0, return success if the lock is already
+	 * unlocked, otherwise return -EAGAIN
+	 */
 
 	if (timeout == 0) {
 		ret = (lock->state == _UNLOCKED) ? 0 : -EAGAIN;
@@ -494,7 +610,7 @@
 
 	spin_lock_irqsave(&handle->lock->lock, flags);
 
-	
+	/* If the handle is holding the lock, then force it closed */
 
 	if (handle_has_lock(handle->lock, handle)) {
 		list_del(&handle->entry);
@@ -509,6 +625,9 @@
 	handle->active = 0;
 }
 
+/*
+ * Release function called when all references to a handle are released
+ */
 
 static int genlock_handle_release(struct inode *inodep, struct file *file)
 {
@@ -524,6 +643,9 @@
 	.release = genlock_handle_release
 };
 
+/*
+ * Allocate a new genlock handle
+ */
 
 static struct genlock_handle *_genlock_get_handle(void)
 {
@@ -536,20 +658,36 @@
 	return handle;
 }
 
+/**
+ * genlock_get_handle - Create a new genlock handle
+ *
+ * Returns: A pointer to a new genlock handle
+ */
 
 struct genlock_handle *genlock_get_handle(void)
 {
+	void *ret;
 	struct genlock_handle *handle = _genlock_get_handle();
 	if (IS_ERR(handle))
 		return handle;
 
-	handle->file = anon_inode_getfile("genlock-handle",
+	ret = anon_inode_getfile("genlock-handle",
 		&genlock_handle_fops, handle, O_RDWR);
+	if (IS_ERR_OR_NULL(ret)) {
+		GENLOCK_LOG_ERR("Unable to create handle inode\n");
+		kfree(handle);
+		return ret;
+	}
+	handle->file = ret;
 
 	return handle;
 }
 EXPORT_SYMBOL(genlock_get_handle);
 
+/**
+ * genlock_put_handle - release a reference to a genlock handle
+ * @handle - A pointer to the handle to release
+ */
 
 void genlock_put_handle(struct genlock_handle *handle)
 {
@@ -558,6 +696,10 @@
 }
 EXPORT_SYMBOL(genlock_put_handle);
 
+/**
+ * genlock_get_handle_fd - Get a handle reference from a file descriptor
+ * @fd - The file descriptor for a genlock handle
+ */
 
 struct genlock_handle *genlock_get_handle_fd(int fd)
 {
@@ -645,16 +787,14 @@
 		return genlock_wait(handle, param.timeout);
 	}
 	case GENLOCK_IOC_RELEASE: {
+		/*
+		 * Return error - this ioctl has been deprecated.
+		 * Locks should only be released when the handle is
+		 * destroyed
+		 */
 		GENLOCK_LOG_ERR("Deprecated RELEASE ioctl called\n");
 		return -EINVAL;
 	}
-	case GENLOCK_IOC_SETINFO: {
-		if (copy_from_user(&(handle->info), (void __user *) arg, sizeof(handle->info))) {
-			GENLOCK_LOG_ERR("invalid param size");
-			return -EINVAL;
-		}
-		return 0;
-	}
 	default:
 		GENLOCK_LOG_ERR("Invalid ioctl\n");
 		return -EINVAL;
@@ -665,16 +805,6 @@
 {
 	struct genlock_handle *handle = file->private_data;
 
-	if (handle->info.fd) {
-		char task_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
-		get_task_comm(task_name, current);
-
-		pr_warn("[GENLOCK] Unexpected! genlock fd was closed before detached. "
-		    "owner={fd=%d, pid:%d, res=[0x%x, 0x%x]} current thread = %d (%s)\n",
-			handle->info.fd, handle->info.pid,
-			handle->info.rsvd[0], handle->info.rsvd[1], current->pid, task_name);
-	}
-
 	genlock_release_lock(handle);
 	kfree(handle);
 
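Usage sketch for reviewers (not part of the patch): a minimal in-kernel
caller driving the genlock API exported above. example_lock_buffer() is a
hypothetical function; it assumes the GENLOCK_WRLOCK/GENLOCK_UNLOCK op
values and the exported genlock_* entry points declared in
<linux/genlock.h>.

#include <linux/err.h>
#include <linux/genlock.h>

/* Hypothetical caller: take a write lock around a shared buffer */
static int example_lock_buffer(void)
{
	struct genlock_handle *handle;
	struct genlock *lock;
	int ret;

	handle = genlock_get_handle();
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	lock = genlock_create_lock(handle);
	if (IS_ERR_OR_NULL(lock)) {
		genlock_put_handle(handle);
		return lock ? PTR_ERR(lock) : -EINVAL;
	}

	/* Block for up to 100 ms waiting for exclusive ownership */
	ret = genlock_dreadlock(handle, GENLOCK_WRLOCK, 0, 100);
	if (!ret) {
		/* ... touch the buffer the lock protects ... */
		genlock_dreadlock(handle, GENLOCK_UNLOCK, 0, 0);
	}

	/* Dropping the handle also drops its reference to the lock */
	genlock_put_handle(handle);
	return ret;
}

To share the lock across processes, the owner would export an fd for it
(genlock_get_fd() above) and the peer would attach that fd to its own
handle with genlock_attach_lock().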
diff --git a/drivers/base/sw_sync.c b/drivers/base/sw_sync.c
new file mode 100644
index 0000000..14d3f39
--- /dev/null
+++ b/drivers/base/sw_sync.c
@@ -0,0 +1,264 @@
+/*
+ * drivers/base/sw_sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/sw_sync.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+static int sw_sync_cmp(u32 a, u32 b)
+{
+	if (a == b)
+		return 0;
+
+	return ((s32)a - (s32)b) < 0 ? -1 : 1;
+}
+
+struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+{
+	struct sw_sync_pt *pt;
+
+	pt = (struct sw_sync_pt *)
+		sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
+	if (pt == NULL)
+		return NULL;
+
+	pt->value = value;
+
+	return (struct sync_pt *)pt;
+}
+EXPORT_SYMBOL(sw_sync_pt_create);
+
+static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
+{
+	struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
+	struct sw_sync_timeline *obj =
+		(struct sw_sync_timeline *)sync_pt->parent;
+
+	return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
+}
+
+static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
+{
+	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+	struct sw_sync_timeline *obj =
+		(struct sw_sync_timeline *)sync_pt->parent;
+
+	return sw_sync_cmp(obj->value, pt->value) >= 0;
+}
+
+static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b)
+{
+	struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a;
+	struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b;
+
+	return sw_sync_cmp(pt_a->value, pt_b->value);
+}
+
+static int sw_sync_fill_driver_data(struct sync_pt *sync_pt,
+				    void *data, int size)
+{
+	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+
+	if (size < sizeof(pt->value))
+		return -ENOMEM;
+
+	memcpy(data, &pt->value, sizeof(pt->value));
+
+	return sizeof(pt->value);
+}
+
+static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
+				       char *str, int size)
+{
+	struct sw_sync_timeline *timeline =
+		(struct sw_sync_timeline *)sync_timeline;
+	snprintf(str, size, "%d", timeline->value);
+}
+
+static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
+				       char *str, int size)
+{
+	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+	snprintf(str, size, "%d", pt->value);
+}
+
+struct sync_timeline_ops sw_sync_timeline_ops = {
+	.driver_name = "sw_sync",
+	.dup = sw_sync_pt_dup,
+	.has_signaled = sw_sync_pt_has_signaled,
+	.compare = sw_sync_pt_compare,
+	.fill_driver_data = sw_sync_fill_driver_data,
+	.timeline_value_str = sw_sync_timeline_value_str,
+	.pt_value_str = sw_sync_pt_value_str,
+};
+
+
+struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+	struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
+		sync_timeline_create(&sw_sync_timeline_ops,
+				     sizeof(struct sw_sync_timeline),
+				     name);
+
+	return obj;
+}
+EXPORT_SYMBOL(sw_sync_timeline_create);
+
+void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+	obj->value += inc;
+
+	sync_timeline_signal(&obj->obj);
+}
+EXPORT_SYMBOL(sw_sync_timeline_inc);
+
+#ifdef CONFIG_SW_SYNC_USER
+/* *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+int sw_sync_open(struct inode *inode, struct file *file)
+{
+	struct sw_sync_timeline *obj;
+	char task_comm[TASK_COMM_LEN];
+
+	get_task_comm(task_comm, current);
+
+	obj = sw_sync_timeline_create(task_comm);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	file->private_data = obj;
+
+	return 0;
+}
+
+int sw_sync_release(struct inode *inode, struct file *file)
+{
+	struct sw_sync_timeline *obj = file->private_data;
+	sync_timeline_destroy(&obj->obj);
+	return 0;
+}
+
+long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, unsigned long arg)
+{
+	int fd = get_unused_fd();
+	int err;
+	struct sync_pt *pt;
+	struct sync_fence *fence;
+	struct sw_sync_create_fence_data data;
+
+	if (fd < 0)
+		return fd;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+		err = -EFAULT;
+		goto err;
+	}
+
+	pt = sw_sync_pt_create(obj, data.value);
+	if (pt == NULL) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	data.name[sizeof(data.name) - 1] = '\0';
+	fence = sync_fence_create(data.name, pt);
+	if (fence == NULL) {
+		sync_pt_free(pt);
+		err = -ENOMEM;
+		goto err;
+	}
+
+	data.fence = fd;
+	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+		sync_fence_put(fence);
+		err = -EFAULT;
+		goto err;
+	}
+
+	sync_fence_install(fence, fd);
+
+	return 0;
+
+err:
+	put_unused_fd(fd);
+	return err;
+}
+
+long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
+{
+	u32 value;
+
+	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+		return -EFAULT;
+
+	sw_sync_timeline_inc(obj, value);
+
+	return 0;
+}
+
+long sw_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct sw_sync_timeline *obj = file->private_data;
+
+	switch (cmd) {
+	case SW_SYNC_IOC_CREATE_FENCE:
+		return sw_sync_ioctl_create_fence(obj, arg);
+
+	case SW_SYNC_IOC_INC:
+		return sw_sync_ioctl_inc(obj, arg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static const struct file_operations sw_sync_fops = {
+	.owner = THIS_MODULE,
+	.open = sw_sync_open,
+	.release = sw_sync_release,
+	.unlocked_ioctl = sw_sync_ioctl,
+};
+
+static struct miscdevice sw_sync_dev = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "sw_sync",
+	.fops	= &sw_sync_fops,
+};
+
+int __init sw_sync_device_init(void)
+{
+	return misc_register(&sw_sync_dev);
+}
+
+void __exit sw_sync_device_remove(void)
+{
+	misc_deregister(&sw_sync_dev);
+}
+
+module_init(sw_sync_device_init);
+module_exit(sw_sync_device_remove);
+
+#endif /* CONFIG_SW_SYNC_USER */
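Usage sketch for the CONFIG_SW_SYNC_USER interface above (not part of the
patch): open the misc device, create a fence that signals at timeline
value 1, then step the timeline to fire it. It assumes the device node
shows up as /dev/sw_sync and that the sw_sync_create_fence_data struct
and SW_SYNC_IOC_* ioctls used above are exported to userspace via
<linux/sw_sync.h>.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/sw_sync.h>

int main(void)
{
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;
	int timeline;

	timeline = open("/dev/sw_sync", O_RDWR);	/* assumed node */
	if (timeline < 0)
		return 1;

	/* Fence that signals once the timeline value reaches 1 */
	memset(&data, 0, sizeof(data));
	data.value = 1;
	strncpy(data.name, "example-fence", sizeof(data.name) - 1);
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	/* Advance the timeline by 1; the fence above now signals */
	ioctl(timeline, SW_SYNC_IOC_INC, &inc);

	close(data.fence);	/* data.fence holds the new fence fd */
	close(timeline);
	return 0;
}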
diff --git a/drivers/base/sync.c b/drivers/base/sync.c
new file mode 100644
index 0000000..a97a503
--- /dev/null
+++ b/drivers/base/sync.c
@@ -0,0 +1,1013 @@
+/*
+ * drivers/base/sync.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/sync.h>
+#include <linux/uaccess.h>
+
+#include <linux/anon_inodes.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sync.h>
+
+static void sync_fence_signal_pt(struct sync_pt *pt);
+static int _sync_pt_has_signaled(struct sync_pt *pt);
+static void sync_fence_free(struct kref *kref);
+static void sync_dump(void);
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+
+static LIST_HEAD(sync_fence_list_head);
+static DEFINE_SPINLOCK(sync_fence_list_lock);
+
+struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
+					   int size, const char *name)
+{
+	struct sync_timeline *obj;
+	unsigned long flags;
+
+	if (size < sizeof(struct sync_timeline))
+		return NULL;
+
+	obj = kzalloc(size, GFP_KERNEL);
+	if (obj == NULL)
+		return NULL;
+
+	kref_init(&obj->kref);
+	obj->ops = ops;
+	strlcpy(obj->name, name, sizeof(obj->name));
+
+	INIT_LIST_HEAD(&obj->child_list_head);
+	spin_lock_init(&obj->child_list_lock);
+
+	INIT_LIST_HEAD(&obj->active_list_head);
+	spin_lock_init(&obj->active_list_lock);
+
+	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+	return obj;
+}
+EXPORT_SYMBOL(sync_timeline_create);
+
+static void sync_timeline_free(struct kref *kref)
+{
+	struct sync_timeline *obj =
+		container_of(kref, struct sync_timeline, kref);
+	unsigned long flags;
+
+	if (obj->ops->release_obj)
+		obj->ops->release_obj(obj);
+
+	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	list_del(&obj->sync_timeline_list);
+	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+	kfree(obj);
+}
+
+void sync_timeline_destroy(struct sync_timeline *obj)
+{
+	obj->destroyed = true;
+
+	/*
+	 * If this is not the last reference, signal any children
+	 * that their parent is going away.
+	 */
+
+	if (!kref_put(&obj->kref, sync_timeline_free))
+		sync_timeline_signal(obj);
+}
+EXPORT_SYMBOL(sync_timeline_destroy);
+
+static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
+{
+	unsigned long flags;
+
+	pt->parent = obj;
+
+	spin_lock_irqsave(&obj->child_list_lock, flags);
+	list_add_tail(&pt->child_list, &obj->child_list_head);
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_timeline_remove_pt(struct sync_pt *pt)
+{
+	struct sync_timeline *obj = pt->parent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&obj->active_list_lock, flags);
+	if (!list_empty(&pt->active_list))
+		list_del_init(&pt->active_list);
+	spin_unlock_irqrestore(&obj->active_list_lock, flags);
+
+	spin_lock_irqsave(&obj->child_list_lock, flags);
+	if (!list_empty(&pt->child_list)) {
+		list_del_init(&pt->child_list);
+	}
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+void sync_timeline_signal(struct sync_timeline *obj)
+{
+	unsigned long flags;
+	LIST_HEAD(signaled_pts);
+	struct list_head *pos, *n;
+
+	trace_sync_timeline(obj);
+
+	spin_lock_irqsave(&obj->active_list_lock, flags);
+
+	list_for_each_safe(pos, n, &obj->active_list_head) {
+		struct sync_pt *pt =
+			container_of(pos, struct sync_pt, active_list);
+
+		if (_sync_pt_has_signaled(pt)) {
+			list_del_init(pos);
+			list_add(&pt->signaled_list, &signaled_pts);
+			kref_get(&pt->fence->kref);
+		}
+	}
+
+	spin_unlock_irqrestore(&obj->active_list_lock, flags);
+
+	list_for_each_safe(pos, n, &signaled_pts) {
+		struct sync_pt *pt =
+			container_of(pos, struct sync_pt, signaled_list);
+
+		list_del_init(pos);
+		sync_fence_signal_pt(pt);
+		kref_put(&pt->fence->kref, sync_fence_free);
+	}
+}
+EXPORT_SYMBOL(sync_timeline_signal);
+
+struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
+{
+	struct sync_pt *pt;
+
+	if (size < sizeof(struct sync_pt))
+		return NULL;
+
+	pt = kzalloc(size, GFP_KERNEL);
+	if (pt == NULL)
+		return NULL;
+
+	INIT_LIST_HEAD(&pt->active_list);
+	kref_get(&parent->kref);
+	sync_timeline_add_pt(parent, pt);
+
+	return pt;
+}
+EXPORT_SYMBOL(sync_pt_create);
+
+void sync_pt_free(struct sync_pt *pt)
+{
+	if (pt->parent->ops->free_pt)
+		pt->parent->ops->free_pt(pt);
+
+	sync_timeline_remove_pt(pt);
+
+	kref_put(&pt->parent->kref, sync_timeline_free);
+
+	kfree(pt);
+}
+EXPORT_SYMBOL(sync_pt_free);
+
+/* call with pt->parent->active_list_lock held */
+static int _sync_pt_has_signaled(struct sync_pt *pt)
+{
+	int old_status = pt->status;
+
+	if (!pt->status)
+		pt->status = pt->parent->ops->has_signaled(pt);
+
+	if (!pt->status && pt->parent->destroyed)
+		pt->status = -ENOENT;
+
+	if (pt->status != old_status)
+		pt->timestamp = ktime_get();
+
+	return pt->status;
+}
+
+static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
+{
+	return pt->parent->ops->dup(pt);
+}
+
+/* Adds a sync pt to the active queue.  Called when added to a fence */
+static void sync_pt_activate(struct sync_pt *pt)
+{
+	struct sync_timeline *obj = pt->parent;
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&obj->active_list_lock, flags);
+
+	err = _sync_pt_has_signaled(pt);
+	if (err != 0)
+		goto out;
+
+	list_add_tail(&pt->active_list, &obj->active_list_head);
+
+out:
+	spin_unlock_irqrestore(&obj->active_list_lock, flags);
+}
+
+static int sync_fence_release(struct inode *inode, struct file *file);
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg);
+
+
+static const struct file_operations sync_fence_fops = {
+	.release = sync_fence_release,
+	.poll = sync_fence_poll,
+	.unlocked_ioctl = sync_fence_ioctl,
+};
+
+static struct sync_fence *sync_fence_alloc(const char *name)
+{
+	struct sync_fence *fence;
+	unsigned long flags;
+
+	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
+	if (fence == NULL)
+		return NULL;
+
+	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
+					 fence, 0);
+	if (IS_ERR_OR_NULL(fence->file))
+		goto err;
+
+	kref_init(&fence->kref);
+	strlcpy(fence->name, name, sizeof(fence->name));
+
+	INIT_LIST_HEAD(&fence->pt_list_head);
+	INIT_LIST_HEAD(&fence->waiter_list_head);
+	spin_lock_init(&fence->waiter_list_lock);
+
+	init_waitqueue_head(&fence->wq);
+
+	spin_lock_irqsave(&sync_fence_list_lock, flags);
+	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
+	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
+	return fence;
+
+err:
+	kfree(fence);
+	return NULL;
+}
+
+/* TODO: implement a create which takes more than one sync_pt */
+struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
+{
+	struct sync_fence *fence;
+
+	if (pt->fence)
+		return NULL;
+
+	fence = sync_fence_alloc(name);
+	if (fence == NULL)
+		return NULL;
+
+	pt->fence = fence;
+	list_add(&pt->pt_list, &fence->pt_list_head);
+	sync_pt_activate(pt);
+
+	/*
+	 * signal the fence in case pt had already signaled by the time
+	 * sync_pt_activate(pt) was called
+	 */
+	sync_fence_signal_pt(pt);
+
+	return fence;
+}
+EXPORT_SYMBOL(sync_fence_create);
+
+static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
+{
+	struct list_head *pos;
+
+	list_for_each(pos, &src->pt_list_head) {
+		struct sync_pt *orig_pt =
+			container_of(pos, struct sync_pt, pt_list);
+		struct sync_pt *new_pt = sync_pt_dup(orig_pt);
+
+		if (new_pt == NULL)
+			return -ENOMEM;
+
+		new_pt->fence = dst;
+		list_add(&new_pt->pt_list, &dst->pt_list_head);
+	}
+
+	return 0;
+}
+
+static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
+{
+	struct list_head *src_pos, *dst_pos, *n;
+
+	list_for_each(src_pos, &src->pt_list_head) {
+		struct sync_pt *src_pt =
+			container_of(src_pos, struct sync_pt, pt_list);
+		bool collapsed = false;
+
+		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
+			struct sync_pt *dst_pt =
+				container_of(dst_pos, struct sync_pt, pt_list);
+			/* collapse two sync_pts on the same timeline
+			 * to a single sync_pt that will signal at
+			 * the later of the two
+			 */
+			if (dst_pt->parent == src_pt->parent) {
+				if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
+					struct sync_pt *new_pt =
+						sync_pt_dup(src_pt);
+					if (new_pt == NULL)
+						return -ENOMEM;
+
+					new_pt->fence = dst;
+					list_replace(&dst_pt->pt_list,
+						     &new_pt->pt_list);
+					sync_pt_free(dst_pt);
+				}
+				collapsed = true;
+				break;
+			}
+		}
+
+		if (!collapsed) {
+			struct sync_pt *new_pt = sync_pt_dup(src_pt);
+
+			if (new_pt == NULL)
+				return -ENOMEM;
+
+			new_pt->fence = dst;
+			list_add(&new_pt->pt_list, &dst->pt_list_head);
+		}
+	}
+
+	return 0;
+}
+
+static void sync_fence_detach_pts(struct sync_fence *fence)
+{
+	struct list_head *pos, *n;
+
+	list_for_each_safe(pos, n, &fence->pt_list_head) {
+		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+		sync_timeline_remove_pt(pt);
+	}
+}
+
+static void sync_fence_free_pts(struct sync_fence *fence)
+{
+	struct list_head *pos, *n;
+
+	list_for_each_safe(pos, n, &fence->pt_list_head) {
+		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+		sync_pt_free(pt);
+	}
+}
+
+struct sync_fence *sync_fence_fdget(int fd)
+{
+	struct file *file = fget(fd);
+
+	if (file == NULL)
+		return NULL;
+
+	if (file->f_op != &sync_fence_fops)
+		goto err;
+
+	return file->private_data;
+
+err:
+	fput(file);
+	return NULL;
+}
+EXPORT_SYMBOL(sync_fence_fdget);
+
+void sync_fence_put(struct sync_fence *fence)
+{
+	fput(fence->file);
+}
+EXPORT_SYMBOL(sync_fence_put);
+
+void sync_fence_install(struct sync_fence *fence, int fd)
+{
+	fd_install(fd, fence->file);
+}
+EXPORT_SYMBOL(sync_fence_install);
+
+static int sync_fence_get_status(struct sync_fence *fence)
+{
+	struct list_head *pos;
+	int status = 1;
+
+	list_for_each(pos, &fence->pt_list_head) {
+		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+		int pt_status = pt->status;
+
+		if (pt_status < 0) {
+			status = pt_status;
+			break;
+		} else if (status == 1) {
+			status = pt_status;
+		}
+	}
+
+	return status;
+}
+
+struct sync_fence *sync_fence_merge(const char *name,
+				    struct sync_fence *a, struct sync_fence *b)
+{
+	struct sync_fence *fence;
+	struct list_head *pos;
+	int err;
+
+	fence = sync_fence_alloc(name);
+	if (fence == NULL)
+		return NULL;
+
+	err = sync_fence_copy_pts(fence, a);
+	if (err < 0)
+		goto err;
+
+	err = sync_fence_merge_pts(fence, b);
+	if (err < 0)
+		goto err;
+
+	list_for_each(pos, &fence->pt_list_head) {
+		struct sync_pt *pt =
+			container_of(pos, struct sync_pt, pt_list);
+		sync_pt_activate(pt);
+	}
+
+	/*
+	 * signal the fence in case one of its pts had already signaled
+	 * before it was activated
+	 */
+	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
+					      struct sync_pt,
+					      pt_list));
+
+	return fence;
+err:
+	sync_fence_free_pts(fence);
+	kfree(fence);
+	return NULL;
+}
+EXPORT_SYMBOL(sync_fence_merge);
+
+static void sync_fence_signal_pt(struct sync_pt *pt)
+{
+	LIST_HEAD(signaled_waiters);
+	struct sync_fence *fence = pt->fence;
+	struct list_head *pos;
+	struct list_head *n;
+	unsigned long flags;
+	int status;
+
+	status = sync_fence_get_status(fence);
+
+	spin_lock_irqsave(&fence->waiter_list_lock, flags);
+	/*
+	 * this should protect against two threads racing on the signaled
+	 * false -> true transition
+	 */
+	if (status && !fence->status) {
+		list_for_each_safe(pos, n, &fence->waiter_list_head)
+			list_move(pos, &signaled_waiters);
+
+		fence->status = status;
+	} else {
+		status = 0;
+	}
+	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+
+	if (status) {
+		list_for_each_safe(pos, n, &signaled_waiters) {
+			struct sync_fence_waiter *waiter =
+				container_of(pos, struct sync_fence_waiter,
+					     waiter_list);
+
+			list_del(pos);
+			waiter->callback(fence, waiter);
+		}
+		wake_up(&fence->wq);
+	}
+}
+
+int sync_fence_wait_async(struct sync_fence *fence,
+			  struct sync_fence_waiter *waiter)
+{
+	unsigned long flags;
+	int err = 0;
+
+	spin_lock_irqsave(&fence->waiter_list_lock, flags);
+
+	if (fence->status) {
+		err = fence->status;
+		goto out;
+	}
+
+	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
+out:
+	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL(sync_fence_wait_async);
+
+int sync_fence_cancel_async(struct sync_fence *fence,
+			     struct sync_fence_waiter *waiter)
+{
+	struct list_head *pos;
+	struct list_head *n;
+	unsigned long flags;
+	int ret = -ENOENT;
+
+	spin_lock_irqsave(&fence->waiter_list_lock, flags);
+	/*
+	 * Make sure waiter is still in waiter_list because it is possible for
+	 * the waiter to be removed from the list while the callback is still
+	 * pending.
+	 */
+	list_for_each_safe(pos, n, &fence->waiter_list_head) {
+		struct sync_fence_waiter *list_waiter =
+			container_of(pos, struct sync_fence_waiter,
+				     waiter_list);
+		if (list_waiter == waiter) {
+			list_del(pos);
+			ret = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(sync_fence_cancel_async);
+
+static bool sync_fence_check(struct sync_fence *fence)
+{
+	/*
+	 * Make sure that reads to fence->status are ordered with the
+	 * wait queue event triggering
+	 */
+	smp_rmb();
+	return fence->status != 0;
+}
+
+int sync_fence_wait(struct sync_fence *fence, long timeout)
+{
+	int err = 0;
+	struct sync_pt *pt;
+
+	trace_sync_wait(fence, 1);
+	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
+		trace_sync_pt(pt);
+
+	if (timeout > 0) {
+		timeout = msecs_to_jiffies(timeout);
+		err = wait_event_interruptible_timeout(fence->wq,
+						       sync_fence_check(fence),
+						       timeout);
+	} else if (timeout < 0) {
+		err = wait_event_interruptible(fence->wq,
+					       sync_fence_check(fence));
+	}
+	trace_sync_wait(fence, 0);
+
+	if (err < 0)
+		return err;
+
+	if (fence->status < 0) {
+		pr_info("fence error %d on [%p]\n", fence->status, fence);
+		sync_dump();
+		return fence->status;
+	}
+
+	if (fence->status == 0) {
+		pr_info("fence timeout on [%p] after %dms\n", fence,
+			jiffies_to_msecs(timeout));
+		sync_dump();
+		return -ETIME;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(sync_fence_wait);
+
+static void sync_fence_free(struct kref *kref)
+{
+	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+
+	sync_fence_free_pts(fence);
+
+	kfree(fence);
+}
+
+static int sync_fence_release(struct inode *inode, struct file *file)
+{
+	struct sync_fence *fence = file->private_data;
+	unsigned long flags;
+
+	/*
+	 * We need to remove all ways to access this fence before dropping
+	 * our ref.
+	 *
+	 * start with its membership in the global fence list
+	 */
+	spin_lock_irqsave(&sync_fence_list_lock, flags);
+	list_del(&fence->sync_fence_list);
+	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+
+	/*
+	 * remove its pts from their parents so that sync_timeline_signal()
+	 * can't reference the fence.
+	 */
+	sync_fence_detach_pts(fence);
+
+	kref_put(&fence->kref, sync_fence_free);
+
+	return 0;
+}
+
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
+{
+	struct sync_fence *fence = file->private_data;
+
+	poll_wait(file, &fence->wq, wait);
+
+	/*
+	 * Make sure that reads to fence->status are ordered with the
+	 * wait queue event triggering
+	 */
+	smp_rmb();
+
+	if (fence->status == 1)
+		return POLLIN;
+	else if (fence->status < 0)
+		return POLLERR;
+	else
+		return 0;
+}
+
+static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
+{
+	__s32 value;
+
+	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+		return -EFAULT;
+
+	return sync_fence_wait(fence, value);
+}
+
+static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
+{
+	int fd = get_unused_fd();
+	int err;
+	struct sync_fence *fence2, *fence3;
+	struct sync_merge_data data;
+
+	if (fd < 0)
+		return fd;
+
+	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+		err = -EFAULT;
+		goto err_put_fd;
+	}
+
+	fence2 = sync_fence_fdget(data.fd2);
+	if (fence2 == NULL) {
+		err = -ENOENT;
+		goto err_put_fd;
+	}
+
+	data.name[sizeof(data.name) - 1] = '\0';
+	fence3 = sync_fence_merge(data.name, fence, fence2);
+	if (fence3 == NULL) {
+		err = -ENOMEM;
+		goto err_put_fence2;
+	}
+
+	data.fence = fd;
+	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+		err = -EFAULT;
+		goto err_put_fence3;
+	}
+
+	sync_fence_install(fence3, fd);
+	sync_fence_put(fence2);
+	return 0;
+
+err_put_fence3:
+	sync_fence_put(fence3);
+
+err_put_fence2:
+	sync_fence_put(fence2);
+
+err_put_fd:
+	put_unused_fd(fd);
+	return err;
+}
+
+static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
+{
+	struct sync_pt_info *info = data;
+	int ret;
+
+	if (size < sizeof(struct sync_pt_info))
+		return -ENOMEM;
+
+	info->len = sizeof(struct sync_pt_info);
+
+	if (pt->parent->ops->fill_driver_data) {
+		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
+							size - sizeof(*info));
+		if (ret < 0)
+			return ret;
+
+		info->len += ret;
+	}
+
+	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
+	strlcpy(info->driver_name, pt->parent->ops->driver_name,
+		sizeof(info->driver_name));
+	info->status = pt->status;
+	info->timestamp_ns = ktime_to_ns(pt->timestamp);
+
+	return info->len;
+}
+
+static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
+					unsigned long arg)
+{
+	struct sync_fence_info_data *data;
+	struct list_head *pos;
+	__u32 size;
+	__u32 len = 0;
+	int ret;
+
+	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
+		return -EFAULT;
+
+	if (size < sizeof(struct sync_fence_info_data))
+		return -EINVAL;
+
+	if (size > 4096)
+		size = 4096;
+
+	data = kzalloc(size, GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	strlcpy(data->name, fence->name, sizeof(data->name));
+	data->status = fence->status;
+	len = sizeof(struct sync_fence_info_data);
+
+	list_for_each(pos, &fence->pt_list_head) {
+		struct sync_pt *pt =
+			container_of(pos, struct sync_pt, pt_list);
+
+		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
+
+		if (ret < 0)
+			goto out;
+
+		len += ret;
+	}
+
+	data->len = len;
+
+	if (copy_to_user((void __user *)arg, data, len))
+		ret = -EFAULT;
+	else
+		ret = 0;
+
+out:
+	kfree(data);
+
+	return ret;
+}
+
+static long sync_fence_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	struct sync_fence *fence = file->private_data;
+	switch (cmd) {
+	case SYNC_IOC_WAIT:
+		return sync_fence_ioctl_wait(fence, arg);
+
+	case SYNC_IOC_MERGE:
+		return sync_fence_ioctl_merge(fence, arg);
+
+	case SYNC_IOC_FENCE_INFO:
+		return sync_fence_ioctl_fence_info(fence, arg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const char *sync_status_str(int status)
+{
+	if (status > 0)
+		return "signaled";
+	else if (status == 0)
+		return "active";
+	else
+		return "error";
+}
+
+static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
+{
+	int status = pt->status;
+	seq_printf(s, "  %s%spt %s",
+		   fence ? pt->parent->name : "",
+		   fence ? "_" : "",
+		   sync_status_str(status));
+	if (pt->status) {
+		struct timeval tv = ktime_to_timeval(pt->timestamp);
+		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
+	}
+
+	if (pt->parent->ops->timeline_value_str &&
+	    pt->parent->ops->pt_value_str) {
+		char value[64];
+		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
+		seq_printf(s, ": %s", value);
+		if (fence) {
+			pt->parent->ops->timeline_value_str(pt->parent, value,
+						    sizeof(value));
+			seq_printf(s, " / %s", value);
+		}
+	} else if (pt->parent->ops->print_pt) {
+		seq_printf(s, ": ");
+		pt->parent->ops->print_pt(s, pt);
+	}
+
+	seq_printf(s, "\n");
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+	struct list_head *pos;
+	unsigned long flags;
+
+	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
+
+	if (obj->ops->timeline_value_str) {
+		char value[64];
+		obj->ops->timeline_value_str(obj, value, sizeof(value));
+		seq_printf(s, ": %s", value);
+	} else if (obj->ops->print_obj) {
+		seq_printf(s, ": ");
+		obj->ops->print_obj(s, obj);
+	}
+
+	seq_printf(s, "\n");
+
+	spin_lock_irqsave(&obj->child_list_lock, flags);
+	list_for_each(pos, &obj->child_list_head) {
+		struct sync_pt *pt =
+			container_of(pos, struct sync_pt, child_list);
+		sync_print_pt(s, pt, false);
+	}
+	spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
+{
+	struct list_head *pos;
+	unsigned long flags;
+
+	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+		   sync_status_str(fence->status));
+
+	list_for_each(pos, &fence->pt_list_head) {
+		struct sync_pt *pt =
+			container_of(pos, struct sync_pt, pt_list);
+		sync_print_pt(s, pt, true);
+	}
+
+	spin_lock_irqsave(&fence->waiter_list_lock, flags);
+	list_for_each(pos, &fence->waiter_list_head) {
+		struct sync_fence_waiter *waiter =
+			container_of(pos, struct sync_fence_waiter,
+				     waiter_list);
+
+		seq_printf(s, "waiter %pF\n", waiter->callback);
+	}
+	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+}
+
+static int sync_debugfs_show(struct seq_file *s, void *unused)
+{
+	unsigned long flags;
+	struct list_head *pos;
+
+	seq_printf(s, "objs:\n--------------\n");
+
+	spin_lock_irqsave(&sync_timeline_list_lock, flags);
+	list_for_each(pos, &sync_timeline_list_head) {
+		struct sync_timeline *obj =
+			container_of(pos, struct sync_timeline,
+				     sync_timeline_list);
+
+		sync_print_obj(s, obj);
+		seq_printf(s, "\n");
+	}
+	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+
+	seq_printf(s, "fences:\n--------------\n");
+
+	spin_lock_irqsave(&sync_fence_list_lock, flags);
+	list_for_each(pos, &sync_fence_list_head) {
+		struct sync_fence *fence =
+			container_of(pos, struct sync_fence, sync_fence_list);
+
+		sync_print_fence(s, fence);
+		seq_printf(s, "\n");
+	}
+	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
+	return 0;
+}
+
+static int sync_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, sync_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations sync_debugfs_fops = {
+	.open           = sync_debugfs_open,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = single_release,
+};
+
+static __init int sync_debugfs_init(void)
+{
+	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
+	return 0;
+}
+late_initcall(sync_debugfs_init);
+
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
+static void sync_dump(void)
+{
+	struct seq_file s = {
+		.buf = sync_dump_buf,
+		.size = sizeof(sync_dump_buf) - 1,
+	};
+	int i;
+
+	sync_debugfs_show(&s, NULL);
+
+	for (i = 0; i < s.count; i += DUMP_CHUNK) {
+		if ((s.count - i) > DUMP_CHUNK) {
+			char c = s.buf[i + DUMP_CHUNK];
+			s.buf[i + DUMP_CHUNK] = 0;
+			pr_cont("%s", s.buf + i);
+			s.buf[i + DUMP_CHUNK] = c;
+		} else {
+			s.buf[s.count] = 0;
+			pr_cont("%s", s.buf + i);
+		}
+	}
+}
+#else
+static void sync_dump(void)
+{
+}
+#endif
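Finally, a hedged sketch of the fence fd interface implemented by
sync_fence_ioctl() above: merge two fences with SYNC_IOC_MERGE and block
on the result with SYNC_IOC_WAIT. The sync_merge_data fields and the
__s32 wait timeout are taken from the handlers above; fd_a and fd_b are
hypothetical fence fds (for instance obtained from the sw_sync device),
and <linux/sync.h> is assumed to carry the ioctl definitions to
userspace.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/sync.h>

/* Merge two fences and block until every point in the result signals */
static int merge_and_wait(int fd_a, int fd_b, int timeout_ms)
{
	struct sync_merge_data merge;
	__s32 timeout = timeout_ms;
	int err;

	memset(&merge, 0, sizeof(merge));
	merge.fd2 = fd_b;
	strncpy(merge.name, "merged", sizeof(merge.name) - 1);

	err = ioctl(fd_a, SYNC_IOC_MERGE, &merge);
	if (err < 0)
		return err;

	/* timeout is in ms: 0 polls, negative waits forever */
	return ioctl(merge.fence, SYNC_IOC_WAIT, &timeout);
}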