/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 *
 * We use the lower two bits of the next pointer to keep the PENDING and
 * BUSY flags.
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
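
/*
 * For illustration: the whole state lives in entry->next, with the
 * successor pointer in the upper bits and the flags in the lowest two.
 * A pending entry with successor @n is thus stored as
 * (struct irq_work *)((unsigned long)n | IRQ_WORK_FLAGS), and a busy
 * entry as (struct irq_work *)IRQ_WORK_BUSY. This relies on irq_work
 * entries being at least 4-byte aligned, leaving the two low bits free.
 */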

static inline bool irq_work_is_set(struct irq_work *entry, int flags)
{
	return (unsigned long)entry->next & flags;
}

static inline struct irq_work *irq_work_next(struct irq_work *entry)
{
	unsigned long next = (unsigned long)entry->next;
	next &= ~IRQ_WORK_FLAGS;
	return (struct irq_work *)next;
}

static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
{
	unsigned long next = (unsigned long)entry;
	next |= flags;
	return (struct irq_work *)next;
}
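
/*
 * Note: next_flags() and irq_work_next() are the tag/untag pair:
 * irq_work_next(next_flags(e, IRQ_WORK_FLAGS)) == e for any (properly
 * aligned) entry @e.
 */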

static DEFINE_PER_CPU(struct irq_work *, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *entry)
{
	struct irq_work *next, *nflags;

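	/*
	 * Atomically move a free (flags 0) or busy (flags BUSY) entry to
	 * claimed (flags PENDING | BUSY); retry when the cmpxchg loses a
	 * race, fail when someone else already holds PENDING.
	 */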
	do {
		next = entry->next;
		if ((unsigned long)next & IRQ_WORK_PENDING)
			return false;
		nflags = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(&entry->next, next, nflags) != next);

	return true;
}


void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
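
/*
 * Architectures that can raise a self-interrupt override the weak stub
 * above (x86, for instance, sends itself an IPI); everyone else relies
 * on pending work being noticed from the timer tick.
 */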

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *entry)
{
	struct irq_work **head, *next;

	head = &get_cpu_var(irq_work_list);

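	/*
	 * Lock-free push onto the front of this cpu's list; the entry is
	 * already claimed, so its flag bits must stay set across the
	 * update.
	 */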
	do {
		next = *head;
		/* Can assign non-atomically because we keep the flags set. */
		entry->next = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(head, next, entry) != next);

	/* If the list was empty, raise a self-interrupt to start processing. */
	if (!irq_work_next(entry))
		arch_irq_work_raise();

	put_cpu_var(irq_work_list);
}

/*
 * Enqueue the irq_work @entry; returns true on success, false when
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *entry)
{
	if (!irq_work_claim(entry)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(entry);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
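
/*
 * Illustrative usage sketch (not part of this file); my_func/my_work
 * are hypothetical names. A user embeds or declares a struct irq_work,
 * sets its callback, and queues it from NMI or hardirq context:
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		// runs later, from the self-interrupt or timer tick
 *	}
 *
 *	static struct irq_work my_work = { .func = my_func };
 *
 *	irq_work_queue(&my_work);	// safe even from NMI context
 */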

/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *list, **head;

	head = &__get_cpu_var(irq_work_list);
	if (*head == NULL)
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

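	/*
	 * Atomically detach the whole list; entries queued from here on
	 * start a fresh list and will raise a new self-interrupt.
	 */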
	list = xchg(head, NULL);
	while (list != NULL) {
		struct irq_work *entry = list;

		list = irq_work_next(list);

		/*
		 * Clear the PENDING bit, after this point the @entry
		 * can be re-used.
		 */
		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
		entry->func(entry);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *entry)
{
	WARN_ON_ONCE(irqs_disabled());

	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
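
/*
 * Illustrative teardown sketch (my_work is the same hypothetical entry
 * as in the irq_work_queue() example above): before freeing the memory
 * that holds an irq_work, wait out any in-flight callback:
 *
 *	irq_work_sync(&my_work);
 *	// my_work's callback is no longer running; the memory may now
 *	// be freed or reused
 */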