/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>
#include <trace/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all data is CPU-local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution, so we get a sort of weak CPU binding.  It is still not
     clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * for pending events, so we let the scheduler balance
 * the softirq load for us.
 */
void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

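/*
 * Usage sketch (illustrative only, not part of this file): code that
 * shares data with softirq context wraps the critical section in
 * local_bh_disable()/local_bh_enable().  Softirqs raised in between
 * are run from local_bh_enable() itself, via do_softirq():
 *
 *	local_bh_disable();
 *	... touch data that a softirq handler on this CPU also uses ...
 *	local_bh_enable();
 */
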
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

DEFINE_TRACE(softirq_entry);
DEFINE_TRACE(softirq_exit);

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			trace_softirq_entry(h, softirq_vec);
			h->action(h);
			trace_softirq_exit(h, softirq_vec);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

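/*
 * Usage sketch (illustrative only, not part of this file): a core
 * subsystem that owns one of the fixed NR_SOFTIRQS slots registers its
 * handler once at boot and then raises the softirq whenever work
 * arrives.  MY_SOFTIRQ and my_softirq_action are hypothetical names;
 * the real slots are the enum in <linux/interrupt.h> (HI_SOFTIRQ,
 * NET_RX_SOFTIRQ, ...):
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		... drain this CPU's private work queue ...
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *	...
 *	raise_softirq(MY_SOFTIRQ);
 */
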
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

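/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically defers work from its hard interrupt handler to a tasklet.
 * The names below are hypothetical; DECLARE_TASKLET(), tasklet_schedule()
 * and tasklet_kill() are the real interfaces from <linux/interrupt.h>:
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		... runs in softirq context, irqs on, must not sleep ...
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	From the hard irq handler:
 *		tasklet_schedule(&my_tasklet);
 *	On teardown, from process context:
 *		tasklet_kill(&my_tasklet);
 */
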
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty.  */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
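
/*
 * Usage sketch (illustrative only, not part of this file): a
 * hypothetical user embeds a call_single_data in its request and asks
 * for completion work to run in, say, the BLOCK_SOFTIRQ on the
 * submitting CPU.  The csd must stay valid until the softirq has run,
 * and the handler for that slot is expected to pull entries off its
 * per-cpu softirq_work_list:
 *
 *	struct my_req {
 *		struct call_single_data csd;
 *		...
 *	};
 *
 *	send_remote_softirq(&req->csd, req->submit_cpu, BLOCK_SOFTIRQ);
 */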

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void *__bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/*
			 * Preemption being disabled stops the CPU from
			 * going offline.  If it is already offline, we
			 * are on the wrong CPU: don't process.
			 */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
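
/*
 * Usage sketch (illustrative only, not part of this file): run a
 * handler on every online CPU and wait for all of them to finish.
 * The callback runs with interrupts disabled (in IPI context on
 * remote CPUs), so it must not sleep.  The names are hypothetical:
 *
 *	static void flush_local_state(void *unused)
 *	{
 *		... strictly per-cpu, non-sleeping work ...
 *	}
 *
 *	on_each_cpu(flush_local_state, NULL, 1);
 */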

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}