/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables; all data is CPU-local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks (see the sketch below).
   - Even if a softirq is serialized, only the local CPU is marked for
     execution. Hence, we get a sort of weak CPU binding. It is still
     not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */
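
/*
 * Illustrative sketch (not real code in this file) of the
 * self-serialization rule above: a handler that needs cross-CPU
 * ordering takes its own lock, since the softirq core provides none.
 * The names my_lock, my_work_list and my_softirq_action are
 * hypothetical.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static LIST_HEAD(my_work_list);
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		spin_lock(&my_lock);
 *		... drain my_work_list; instances raised on other CPUs
 *		    serialize here, not in common code ...
 *		spin_unlock(&my_lock);
 *	}
 */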

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * a softirq and whether we just have bh disabled.
 */
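
/*
 * A worked example of the distinction (values assume the common
 * preempt_count layout, where SOFTIRQ_OFFSET == 0x100 and hence
 * SOFTIRQ_DISABLE_OFFSET == 0x200):
 *
 *	local_bh_disable();	softirq_count() == 0x200: bh is
 *				disabled, but no softirq is running
 *	__do_softirq();		adds SOFTIRQ_OFFSET, so the 0x100 bit
 *				in softirq_count() means "currently
 *				serving a softirq", which is what
 *				in_serving_softirq() tests
 *
 * in_softirq() tests the whole softirq field and is therefore true in
 * both cases.
 */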

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += cnt;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	add_preempt_count(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to ksoftirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;
	unsigned long old_flags = current->flags;

	/*
	 * Mask out PF_MEMALLOC, as the current task context is borrowed for
	 * the softirq. A softirq handler, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	vtime_account_irq_enter(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	vtime_account_irq_exit(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
		__do_softirq();
#else
		do_softirq();
#endif
	} else {
		__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
		wakeup_softirqd();
		__local_bh_enable(SOFTIRQ_OFFSET);
	}
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	vtime_account_irq_exit(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_irq_exit();
#endif
	rcu_irq_exit();
	sched_preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
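
/*
 * Typical use: the networking core calls
 * raise_softirq_irqoff(NET_TX_SOFTIRQ) with interrupts already
 * disabled to mark TX work pending on this CPU; the handler then runs
 * at the next irq_exit() or in ksoftirqd, never synchronously at the
 * raise site.
 */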

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
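
/*
 * Registration sketch: a subsystem wires its handler to a fixed vector
 * once at init time; e.g. the network core does
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 *
 * in net_dev_init(). The vectors are a fixed enum (NR_SOFTIRQS of
 * them), so there is no dynamic allocation or unregistration.
 */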

/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
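
/*
 * Usage sketch (my_dev and my_tasklet_fn are hypothetical): a driver
 * defers work from its hard-irq handler into softirq context. The
 * core guarantees a tasklet never runs concurrently with itself.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		... process dev's queue in softirq context ...
 *	}
 *
 *	tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *	tasklet_schedule(&dev->tasklet);	(from the irq handler)
 *	tasklet_kill(&dev->tasklet);		(before freeing dev)
 */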

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
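
/*
 * Usage sketch (my_callback is hypothetical): arm the embedded hrtimer
 * through the tasklet_hrtimer_start() helper from <linux/interrupt.h>;
 * on expiry the trampolines above bounce through HI_SOFTIRQ, so
 * my_callback() runs in softirq context rather than hard-irq context.
 *
 *	static struct tasklet_hrtimer ttimer;
 *
 *	tasklet_hrtimer_init(&ttimer, my_callback,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&ttimer, ktime_set(0, 10 * NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */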

/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty.  */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
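
/*
 * Usage sketch (the request structure is hypothetical): hand a
 * completion over to another CPU's BLOCK_SOFTIRQ. The call_single_data
 * must stay alive until the remote handler has run.
 *
 *	struct my_req {
 *		struct call_single_data csd;
 *		...
 *	};
 *
 *	send_remote_softirq(&req->csd, target_cpu, BLOCK_SOFTIRQ);
 *
 * The BLOCK_SOFTIRQ action on the target CPU then finds req by walking
 * its softirq_work_list[BLOCK_SOFTIRQ].
 */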

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		takeover_tasklets((unsigned long)hcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};
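
/*
 * The smpboot infrastructure (kernel/smpboot.c) creates, parks and
 * unparks one such thread per CPU across hotplug. Each ksoftirqd/%u
 * sleeps until ksoftirqd_should_run() returns true and then invokes
 * run_ksoftirqd(), so no open-coded kthread loop is needed here.
 */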

static __init int spawn_ksoftirqd(void)
{
	register_cpu_notifier(&cpu_nfb);

	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}
#endif