/*
 * Generic helpers for SMP IPI calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}
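
/*
 * The CSD_FLAG_LOCK handshake in a nutshell: the sender takes the lock
 * before filling in the csd, and ownership returns to the sender only
 * once the target cpu has run the callback and called csd_unlock().
 * A minimal sketch of the lifecycle (illustrative only, not part of
 * this file):
 *
 *	csd_lock(data);			// may spin on a previous user
 *	data->func = func;		// safe: we own the csd now
 *	data->info = info;
 *	generic_exec_single(cpu, data, 0);
 *	...				// target runs func(info), then
 *					// csd_unlock(data) releases it
 */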

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before the IPI handler
	 * locks the list to pull the entry off it; this is guaranteed by
	 * the normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;
		smp_call_func_t func;

		/*
		 * Since we walk the list without any locks, we might
		 * see an entry that was completed, removed from the
		 * list and is in the process of being reused.
		 *
		 * We must check that the cpu is in the cpumask before
		 * checking the refs, and both must be set before
		 * executing the callback on this cpu.
		 */

		if (!cpumask_test_cpu(cpu, data->cpumask))
			continue;

		smp_rmb();

		if (atomic_read(&data->refs) == 0)
			continue;

		func = data->csd.func;		/* save for later warn */
		func(data->csd.info);

		/*
		 * If the cpu mask is not still set then func enabled
		 * interrupts (BUG), and this cpu took another smp call
		 * function interrupt and executed func(info) twice
		 * on this cpu.  That nested execution decremented refs.
		 */
		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
			WARN(1, "%pf enabled interrupts and double executed\n", func);
			continue;
		}

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);

		if (refs)
			continue;

		WARN_ON(!cpumask_empty(data->cpumask));

		raw_spin_lock(&call_function.lock);
		list_del_rcu(&data->csd.list);
		raw_spin_unlock(&call_function.lock);

		csd_unlock(&data->csd);
	}

}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save them away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on the specified CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		/*
		 * Can deadlock when called with interrupts disabled.
		 * We allow CPUs that are not yet online though, as no one
		 * else can send an smp call function interrupt to this cpu
		 * and as such deadlocks can't happen.
		 */
		WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
			     && !oops_in_progress);

		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
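
/*
 * Typical usage (hypothetical caller, shown for illustration only):
 * run a fast, non-blocking callback on CPU 2 and wait for it to finish.
 * read_local_counter() and some_percpu_counter() are made-up names:
 *
 *	static void read_local_counter(void *info)
 *	{
 *		*(u64 *)info = some_percpu_counter();	// runs on CPU 2
 *	}
 *
 *	u64 val;
 *	int err = smp_call_function_single(2, read_local_counter, &val, 1);
 *	if (err)
 *		;	// CPU 2 was not online (-ENXIO)
 */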

/**
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
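
/*
 * Illustrative use (hypothetical caller): run a probe on whichever CPU
 * in a device's affinity mask is cheapest to reach, preferring this CPU
 * and then its node, per the selection preference above:
 *
 *	err = smp_call_function_any(dev_affinity_mask, probe_fn, dev, 1);
 *
 * dev_affinity_mask, probe_fn and dev are the caller's own names.
 */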

/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		local_irq_save(flags);
		data->func(data->info);
		local_irq_restore(flags);
	} else {
		/*
		 * Can deadlock when called with interrupts disabled.
		 * We allow CPUs that are not yet online though, as no one
		 * else can send an smp call function interrupt to this cpu
		 * and as such deadlocks can't happen.
		 */
		WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait
			     && irqs_disabled() && !oops_in_progress);

		csd_lock(data);
		generic_exec_single(cpu, data, wait);
	}
	put_cpu();
}
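
/*
 * The "embedding" pattern the comment above refers to, sketched for
 * illustration (struct and field names are hypothetical):
 *
 *	struct my_request {
 *		struct call_single_data	csd;	// must stay valid until run
 *		int			result;
 *	};
 *
 *	static void complete_request(void *info)
 *	{
 *		struct my_request *req = info;
 *		req->result = 0;
 *	}
 *
 *	req->csd.func = complete_request;
 *	req->csd.info = req;
 *	req->csd.flags = 0;
 *	__smp_call_function_single(target_cpu, &req->csd, 0);
 */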

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int refs, cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath: what's the first CPU they want, ignoring this one? */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	/* This BUG_ON verifies our reuse assertions and can be removed */
	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));

	/*
	 * The global call function queue list add and delete are protected
	 * by a lock, but the list is traversed without any lock, relying
	 * on the rcu list add and delete to allow safe concurrent traversal.
	 * We reuse the call function data without waiting for any grace
	 * period after some other cpu removes it from the global queue.
	 * This means a cpu might find our data block as it is being
	 * filled out.
	 *
	 * We hold off the interrupt handler on the other cpu by
	 * ordering our writes to the cpu mask vs our setting of the
	 * refs counter.  We assert only the cpu owning the data block
	 * will set a bit in cpumask, and each bit will only be cleared
	 * by the subject cpu.  Each cpu must first find its bit is
	 * set and then check that refs is set indicating the element is
	 * ready to be processed, otherwise it must skip the entry.
	 *
	 * On the previous iteration refs was set to 0 by another cpu.
	 * To avoid the use of transitivity, set the counter to 0 here
	 * so the wmb will pair with the rmb in the interrupt handler.
	 */
	atomic_set(&data->refs, 0);	/* convert 3rd to 1st party write */

	data->csd.func = func;
	data->csd.info = info;

	/* Ensure 0 refs is visible before mask.  Also orders func and info */
	smp_wmb();

	/* We rely on the "and" being processed before the store */
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	refs = cpumask_weight(data->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!refs)) {
		csd_unlock(&data->csd);
		return;
	}

	raw_spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	/*
	 * We rely on the wmb() in list_add_rcu to complete our writes
	 * to the cpumask before this write to refs, which indicates
	 * data is on the list and is ready to be processed.
	 */
	atomic_set(&data->refs, refs);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
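
/*
 * Illustrative use (hypothetical caller): flush something on every CPU
 * of a NUMA node except ourselves, without waiting. Preemption must be
 * disabled around the call, per the rules above:
 *
 *	preempt_disable();
 *	smp_call_function_many(cpumask_of_node(nid), flush_fn, NULL, false);
 *	preempt_enable();
 *
 * flush_fn and nid are the caller's own; flush_fn runs in IPI context.
 */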

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

void ipi_call_lock(void)
{
	raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	raw_spin_unlock_irq(&call_function.lock);
}
#endif /* USE_GENERIC_SMP_HELPERS */

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is the hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func)(void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
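
/*
 * Illustrative use (hypothetical caller): drain a per-cpu cache on every
 * CPU, including this one, and wait until all CPUs have finished.
 * drain_local() is a made-up name:
 *
 *	static void drain_local(void *unused)
 *	{
 *		// runs with interrupts disabled on each CPU
 *	}
 *
 *	on_each_cpu(drain_local, NULL, 1);
 */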

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/**
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
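
/*
 * Illustrative use (hypothetical caller): IPI only the CPUs whose
 * per-cpu work count is non-zero, waiting for each to drain.
 * need_drain(), drain_fn and pcp_count are made-up names:
 *
 *	static bool need_drain(int cpu, void *info)
 *	{
 *		return per_cpu(pcp_count, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(need_drain, drain_fn, NULL, true, GFP_KERNEL);
 */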