#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

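/**
 * task_work_add - ask @task to run a callback in its own context
 * @task: the task that should run the callback
 * @work: the callback; @work->func() will be invoked by @task itself,
 *        from task_work_run()
 * @notify: if true, also set TIF_NOTIFY_RESUME so the work runs on the
 *        next return to user mode instead of waiting for task exit
 *
 * Queues @work on @task->task_works with a lockless cmpxchg() loop.
 * Returns 0 on success, or -ESRCH if @task has already run its final
 * task_work_run() on exit and installed the work_exited marker, in
 * which case @work could never be executed.
 */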
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
        struct callback_head *head;

        do {
                head = ACCESS_ONCE(task->task_works);
                if (unlikely(head == &work_exited))
                        return -ESRCH;
                work->next = head;
        } while (cmpxchg(&task->task_works, head, work) != head);

        if (notify)
                set_notify_resume(task);
        return 0;
}

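/**
 * task_work_cancel - remove a queued work before it runs
 * @task: the task the work was queued on
 * @func: the callback to look for; the first pending work whose
 *        ->func matches is unlinked
 *
 * Walks @task->task_works under @task->pi_lock; the lock, paired with
 * raw_spin_unlock_wait() in task_work_run(), keeps the unlink from
 * racing with the list being consumed.
 *
 * Returns the cancelled callback_head, or NULL if nothing matched.
 */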
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
        struct callback_head **pprev = &task->task_works;
        struct callback_head *work = NULL;
        unsigned long flags;
        /*
         * If cmpxchg() fails we continue without updating pprev.
         * Either we raced with task_work_add() which added a new
         * entry before this work, in which case we will find it
         * again, or we raced with task_work_run() and *pprev is
         * NULL/exited.
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
        while ((work = ACCESS_ONCE(*pprev))) {
                read_barrier_depends();
                if (work->func != func)
                        pprev = &work->next;
                else if (cmpxchg(pprev, work, work->next) == work)
                        break;
        }
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        return work;
}

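/**
 * task_work_run - execute the works queued for the current task
 *
 * Atomically grabs the whole ->task_works list, reverses it into FIFO
 * order, and runs every callback; loops in case a callback queues more
 * work.  Once the list is empty and the task is exiting, the list head
 * is replaced with &work_exited so that any later task_work_add()
 * fails with -ESRCH.
 */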
void task_work_run(void)
{
        struct task_struct *task = current;
        struct callback_head *work, *head, *next;

        for (;;) {
                /*
                 * work->func() can do task_work_add(), do not set
                 * work_exited unless the list is empty.
                 */
                do {
                        work = ACCESS_ONCE(task->task_works);
                        head = !work && (task->flags & PF_EXITING) ?
                                &work_exited : NULL;
                } while (cmpxchg(&task->task_works, work, head) != work);

                if (!work)
                        break;
                /*
                 * Synchronize with task_work_cancel(): it can't remove
                 * the first entry (== work), since its cmpxchg() on
                 * task_works would fail, but it can play with *work
                 * and the other entries.
                 */
                raw_spin_unlock_wait(&task->pi_lock);
                smp_mb();

                /* Reverse the list to run the works in FIFO order. */
                head = NULL;
                do {
                        next = work->next;
                        work->next = head;
                        head = work;
                        work = next;
                } while (work);

                work = head;
                do {
                        next = work->next;
                        work->func(work);
                        work = next;
                        cond_resched();
                } while (work);
        }
}
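
/*
 * Illustrative usage sketch (not part of this file, kept compiled out):
 * a caller typically embeds a callback_head in its own object, queues
 * it with task_work_add(current, ..., true), and the callback then runs
 * from task_work_run() on the next return to user mode.  The names
 * my_state, my_twork_func and my_queue_flag below are hypothetical.
 */
#if 0
struct my_state {
        struct callback_head twork;
        bool done;
};

static void my_twork_func(struct callback_head *cb)
{
        struct my_state *s = container_of(cb, struct my_state, twork);

        /* Runs in the queuing task's own context, so it may sleep. */
        s->done = true;
}

static int my_queue_flag(struct my_state *s)
{
        init_task_work(&s->twork, my_twork_func);
        /* notify == true: set TIF_NOTIFY_RESUME for a prompt run. */
        return task_work_add(current, &s->twork, true);
}
#endif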