/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this will result in
     better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

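/*
 * Illustrative sketch of the accounting described above (this assumes the
 * generic SOFTIRQ_OFFSET/SOFTIRQ_DISABLE_OFFSET values from
 * <linux/hardirq.h>; nothing below is defined in this file):
 *
 *	local_bh_disable();	adds SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
 *	__do_softirq();		adds SOFTIRQ_OFFSET while handlers run
 *
 * so in_serving_softirq() is true only while a softirq handler is actually
 * executing, while in_softirq() is also true when bottom halves are merely
 * disabled.
 */
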
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += cnt;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == cnt)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
	add_preempt_count(cnt);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == cnt)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);

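/*
 * Typical bottom-half exclusion pattern (illustrative only, not code from
 * this file): bracket a section that must not race with softirq handlers
 * on this CPU:
 *
 *	local_bh_disable();
 *	... touch state shared with softirq context ...
 *	local_bh_enable();	-- runs any softirqs raised in the meantime
 */
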
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	int cpu;
	int max_restart = MAX_SOFTIRQ_RESTART;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			unsigned int vec_nr = h - softirq_vec;
			int prev_count = preempt_count();

			kstat_incr_softirqs_this_cpu(vec_nr);

			trace_softirq_entry(vec_nr);
			h->action(h);
			trace_softirq_exit(vec_nr);
			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %u %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", vec_nr,
				       softirq_to_name[vec_nr], h->action,
				       prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qs(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_exit();

	account_system_vtime(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_check_idle(cpu);
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads) {
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
		__do_softirq();
#else
		do_softirq();
#endif
	} else {
		__local_bh_disable((unsigned long)__builtin_return_address(0),
				SOFTIRQ_OFFSET);
		wakeup_softirqd();
		__local_bh_enable(SOFTIRQ_OFFSET);
	}
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_irq_exit();
#endif
	rcu_irq_exit();
	sched_preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
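
/*
 * Illustrative usage (not code from this file): a subsystem registers its
 * handler once at boot and later raises the softirq, typically from hard
 * irq context; the networking core, for instance, does roughly:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);
 *
 * The handler then runs on the raising CPU, either on irq exit or in
 * ksoftirqd.
 */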

/*
 * Tasklets
 */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
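
/*
 * Illustrative tasklet usage (hypothetical driver names, not part of this
 * file): a driver typically embeds a tasklet in its device structure,
 * initializes it once, and schedules it from its hard irq handler:
 *
 *	tasklet_init(&dev->rx_tasklet, my_rx_tasklet_fn, (unsigned long)dev);
 *	...
 *	tasklet_schedule(&dev->rx_tasklet);	-- from the irq handler
 *	...
 *	tasklet_kill(&dev->rx_tasklet);		-- before freeing dev
 */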

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
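
/*
 * Illustrative usage (hypothetical names, not part of this file): a driver
 * that wants its hrtimer callback to run from softirq context rather than
 * hard irq context initializes the combo once and then arms the embedded
 * timer:
 *
 *	tasklet_hrtimer_init(&dev->thr, my_timer_fn, CLOCK_MONOTONIC,
 *			     HRTIMER_MODE_REL);
 *	hrtimer_start(&dev->thr.timer, ns_to_ktime(delay_ns),
 *		      HRTIMER_MODE_REL);
 */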

/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp, 0);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
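
/*
 * Illustrative usage (hypothetical names, not part of this file): a caller
 * embeds a struct call_single_data in its per-request structure and asks
 * for its softirq handler to run on another CPU; that handler then walks
 * this_cpu's softirq_work_list[] entry for its softirq number:
 *
 *	send_remote_softirq(&req->csd, target_cpu, MY_SOFTIRQ);
 */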

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int run_ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			schedule_preempt_disabled();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			local_irq_disable();
			if (local_softirq_pending())
				__do_softirq();
			local_irq_enable();
			sched_preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_note_context_switch((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create_on_node(run_ksoftirqd,
					   hcpu,
					   cpu_to_node(hotcpu),
					   "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return notifier_from_errno(PTR_ERR(p));
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		static const struct sched_param param = {
			.sched_priority = MAX_RT_PRIO-1
		};

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err != NOTIFY_OK);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}
#endif