/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
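
/*
 * A worked pass through the flag values above: irq_work_claim() takes a
 * free entry (0) to claimed/pending (IRQ_WORK_FLAGS == 3), irq_work_run()
 * drops it to busy (IRQ_WORK_BUSY == 2) before invoking the callback, and
 * the final cmpxchg() returns it to free (0) unless it was re-claimed in
 * the meantime.
 */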

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

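	/*
	 * Try to set PENDING and BUSY in one atomic cmpxchg(); bail out
	 * if the work is already pending, i.e. someone queued it first.
	 */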
	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

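/*
 * Architectures with a cheap way to interrupt themselves (e.g. a self-IPI
 * vector) override this weak stub so that queued work runs promptly rather
 * than waiting for the next timer tick.
 */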
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* If the list was empty, raise a self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @work; returns true on success, false when
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

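/*
 * Illustrative usage sketch (not part of this file); assumes the
 * init_irq_work() helper from <linux/irq_work.h> and a caller-supplied
 * callback. irq_work_queue() is safe to call even from NMI context:
 *
 *	static void my_callback(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran\n");
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_callback);
 *	irq_work_queue(&my_work);
 */
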
/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

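	/*
	 * Atomically detach the entire list; work queued from here on
	 * finds an empty list and raises a fresh interrupt of its own.
	 */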
	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point @work can be
		 * re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @work: wait until it is no longer in
 * use, e.g. before freeing the memory that backs it.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);