/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 *
 * We use the lower two bits of the next pointer to keep PENDING and BUSY
 * flags.
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
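
/*
 * Illustrative encoding sketch (not in the original file): a struct
 * irq_work is at least pointer-aligned, so the low two bits of its
 * address are always zero and are free to carry the flags. For a
 * pending entry whose successor lives at address A:
 *
 *	entry->next == (struct irq_work *)(A | IRQ_WORK_FLAGS)
 *
 * irq_work_next() masks the low bits off again to recover A, and
 * irq_work_is_set() tests them.
 */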

/* Test whether any of @flags are set in @entry's tagged next pointer. */
static inline bool irq_work_is_set(struct irq_work *entry, int flags)
{
	return (unsigned long)entry->next & flags;
}

/* Strip the flag bits to recover the real next pointer. */
static inline struct irq_work *irq_work_next(struct irq_work *entry)
{
	unsigned long next = (unsigned long)entry->next;
	next &= ~IRQ_WORK_FLAGS;
	return (struct irq_work *)next;
}

/* Tag the pointer @entry with @flags, for storing in a next pointer. */
static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
{
	unsigned long next = (unsigned long)entry;
	next |= flags;
	return (struct irq_work *)next;
}

/* Per-cpu LIFO list of pending entries. */
static DEFINE_PER_CPU(struct irq_work *, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *entry)
{
	struct irq_work *next, *nflags;

	do {
		next = entry->next;
		/* Somebody else already claimed or queued it. */
		if ((unsigned long)next & IRQ_WORK_PENDING)
			return false;
		/*
		 * Set both flags: PENDING claims the entry, BUSY keeps
		 * irq_work_sync() waiting until it returns to free.
		 */
		nflags = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(&entry->next, next, nflags) != next);

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
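
/*
 * Sketch of an architecture override (not part of this file; simplified
 * from the x86 implementation of the same era, so details may differ):
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		if (!cpu_has_apic)
 *			return;
 *
 *		apic->send_IPI_self(IRQ_WORK_VECTOR);
 *		apic_wait_icr_idle();
 *	}
 *
 * The self-IPI lands in a handler that calls irq_work_run(); see the
 * sketch after irq_work_run() below.
 */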

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *entry)
{
	struct irq_work *next;

	preempt_disable();

	do {
		next = __this_cpu_read(irq_work_list);
		/* Can assign non-atomic because we keep the flags set. */
		entry->next = next_flags(next, IRQ_WORK_FLAGS);
	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);

	/* The list was empty, raise self-interrupt to start processing. */
	if (!irq_work_next(entry))
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry; returns true on success, false when
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *entry)
{
	if (!irq_work_claim(entry)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(entry);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
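
/*
 * Minimal usage sketch (illustrative, not part of this file; my_func
 * and my_work are hypothetical names):
 *
 *	static void my_func(struct irq_work *entry)
 *	{
 *		pr_info("ran from hardirq context\n");
 *	}
 *
 *	static struct irq_work my_work = {
 *		.func	= my_func,
 *	};
 *
 * From NMI or hardirq context, enqueueing is then simply:
 *
 *	irq_work_queue(&my_work);
 *
 * A false return means the entry was already pending; the callback
 * will still run once.
 */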

/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *list;

	if (this_cpu_read(irq_work_list) == NULL)
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	list = this_cpu_xchg(irq_work_list, NULL);

	while (list != NULL) {
		struct irq_work *entry = list;

		list = irq_work_next(list);

		/*
		 * Clear the PENDING bit; after this point the @entry
		 * can be re-used.
		 */
		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
		entry->func(entry);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&entry->next,
			      next_flags(NULL, IRQ_WORK_BUSY),
			      NULL);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

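/*
 * Sketch of how an architecture wires this up (illustrative; names
 * follow the x86 self-IPI handler of this era, details may differ):
 *
 *	void smp_irq_work_interrupt(struct pt_regs *regs)
 *	{
 *		irq_enter();
 *		ack_APIC_irq();
 *		inc_irq_stat(apic_irq_work_irqs);
 *		irq_work_run();
 *		irq_exit();
 *	}
 *
 * Architectures without a self-interrupt instead call irq_work_run()
 * from their timer tick.
 */
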
/*
 * Synchronize against the irq_work @entry: busy-wait until the entry
 * is no longer in use.
 */
void irq_work_sync(struct irq_work *entry)
{
	/* Busy-waiting with IRQs off could spin forever. */
	WARN_ON_ONCE(irqs_disabled());

	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
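
/*
 * Typical teardown pattern (illustrative; obj and its work member are
 * hypothetical):
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 *
 * Once irq_work_sync() returns, the entry is no longer in use and its
 * storage may be reused or released.
 */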