/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
static char __initdata nocb_buf[NR_CPUS * 5];
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_NOCB_CPU
	if (have_rcu_nocb_mask) {
		cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
		pr_info("\tExperimental no-CBs CPUs: %s.\n", nocb_buf);
		if (rcu_nocb_poll)
			pr_info("\tExperimental polled no-CBs CPUs.\n");
	}
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state =
	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum
				       : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the
 * RCU read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(KERN_CONT " P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
	}

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
#ifdef CONFIG_RCU_BOOST
	rnp->boost_tasks = NULL;
	/*
	 * In case the root is being boosted and the leaf was not,
	 * make sure that we boost the tasks blocking the current
	 * grace period.
	 */
	raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
	    rnp_root->boost_tasks != rnp_root->exp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);
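
/*
 * Illustrative sketch of typical call_rcu() usage (the "struct foo" type
 * and the foo_* helpers below are hypothetical, not defined in this file):
 * the callback frees the structure only after a grace period has elapsed,
 * so pre-existing readers can still safely dereference it.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	static void foo_remove(struct foo *p)
 *	{
 *		list_del_rcu(&p->list);		// unlink; readers may remain
 *		call_rcu(&p->rcu, foo_reclaim);	// reclaim after they drain
 *	}
 */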

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);
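
/*
 * Sketch of how this is reached in practice (assuming the kfree_rcu()
 * wrapper in include/linux/rcupdate.h): callers do not invoke
 * kfree_call_rcu() directly, they write something like
 *
 *	kfree_rcu(p, rcu);	// "rcu" is p's embedded struct rcu_head
 *
 * which expands to __kfree_rcu() and eventually lands here.
 */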

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
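
/*
 * Illustrative sketch of the classic updater pattern built on
 * synchronize_rcu() (the foo_* names, foo_lock, and foo_ptr are
 * hypothetical, not defined here):
 *
 *	static void foo_replace(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		spin_lock(&foo_lock);
 *		oldp = rcu_dereference_protected(foo_ptr,
 *						 lockdep_is_held(&foo_lock));
 *		rcu_assign_pointer(foo_ptr, newp);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();	// wait for pre-existing readers
 *		kfree(oldp);		// now safe to reclaim
 *	}
 */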

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock); /* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and must exclude
 * CPU hotplug operations.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	unsigned long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
	 * operation that finds an rcu_node structure with tasks in the
	 * process of being boosted will know that all tasks blocking
	 * this expedited grace period will already be in the process of
	 * being boosted.  This simplifies the process of moving tasks
	 * from leaf to root rcu_node structures.
	 */
	get_online_cpus();

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
			put_online_cpus();
			goto mb_ret; /* Others did our work for us. */
		}
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			put_online_cpus();
			wait_rcu_gp(call_rcu);
			return;
		}
	}
	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
		put_online_cpus();
		goto unlock_mb_ret; /* Others did our work for us. */
	}

	/* Force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	put_online_cpus();

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
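
/*
 * Illustrative sketch of the batching advice in the header comment above
 * (the update_item() helper and "mylist" are hypothetical): instead of
 *
 *	list_for_each_entry(p, &mylist, node) {
 *		update_item(p);
 *		synchronize_rcu_expedited();	// one expedited GP per item
 *	}
 *
 * batch the updates and pay for a single grace period:
 *
 *	list_for_each_entry(p, &mylist, node)
 *		update_item(p);
 *	synchronize_rcu();			// one GP for the whole batch
 */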

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 *
 * Note that this primitive does not necessarily wait for an RCU grace period
 * to complete.  For example, if there are no RCU callbacks queued anywhere
 * in the system, then rcu_barrier() is within its rights to return
 * immediately, without waiting for anything, much less an RCU grace period.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
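
/*
 * Illustrative sketch of the usual rcu_barrier() use case (the mymod_*
 * names are hypothetical): a module that posts call_rcu() callbacks must
 * wait for all of them to be invoked before its callback code is unloaded.
 *
 *	static void __exit mymod_exit(void)
 *	{
 *		mymod_stop_posting_callbacks();	// hypothetical helper
 *		rcu_barrier();			// drain outstanding callbacks
 *	}
 *	module_exit(mymod_exit);
 */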

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
	else
		rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}
1202
1203/*
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001204 * Priority-boosting kthread. One per leaf rcu_node and one for the
1205 * root rcu_node.
1206 */
1207static int rcu_boost_kthread(void *arg)
1208{
1209 struct rcu_node *rnp = (struct rcu_node *)arg;
1210 int spincnt = 0;
1211 int more2boost;
1212
Paul E. McKenney385680a2011-06-21 22:43:26 -07001213 trace_rcu_utilization("Start boost kthread@init");
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001214 for (;;) {
Paul E. McKenneyd71df902011-03-29 17:48:28 -07001215 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
Paul E. McKenney385680a2011-06-21 22:43:26 -07001216 trace_rcu_utilization("End boost kthread@rcu_wait");
Peter Zijlstra08bca602011-05-20 16:06:29 -07001217 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
Paul E. McKenney385680a2011-06-21 22:43:26 -07001218 trace_rcu_utilization("Start boost kthread@rcu_wait");
Paul E. McKenneyd71df902011-03-29 17:48:28 -07001219 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001220 more2boost = rcu_boost(rnp);
1221 if (more2boost)
1222 spincnt++;
1223 else
1224 spincnt = 0;
1225 if (spincnt > 10) {
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001226 rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
Paul E. McKenney385680a2011-06-21 22:43:26 -07001227 trace_rcu_utilization("End boost kthread@rcu_yield");
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001228 schedule_timeout_interruptible(2);
Paul E. McKenney385680a2011-06-21 22:43:26 -07001229 trace_rcu_utilization("Start boost kthread@rcu_yield");
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001230 spincnt = 0;
1231 }
1232 }
Paul E. McKenney1217ed12011-05-04 21:43:49 -07001233 /* NOTREACHED */
Paul E. McKenney385680a2011-06-21 22:43:26 -07001234 trace_rcu_utilization("End boost kthread@notreached");
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001235 return 0;
1236}
1237
1238/*
1239 * Check to see if it is time to start boosting RCU readers that are
1240 * blocking the current grace period, and, if so, tell the per-rcu_node
1241 * kthread to start boosting them. If there is an expedited grace
1242 * period in progress, it is always time to boost.
1243 *
Paul E. McKenneyb065a852012-08-01 15:57:54 -07001244 * The caller must hold rnp->lock, which this function releases.
1245 * The ->boost_kthread_task is immortal, so we don't need to worry
1246 * about it going away.
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001247 */
Paul E. McKenney1217ed12011-05-04 21:43:49 -07001248static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001249{
1250 struct task_struct *t;
1251
Paul E. McKenney0ea1f2e2011-02-22 13:42:43 -08001252 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1253 rnp->n_balk_exp_gp_tasks++;
Paul E. McKenney1217ed12011-05-04 21:43:49 -07001254 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001255 return;
Paul E. McKenney0ea1f2e2011-02-22 13:42:43 -08001256 }
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001257 if (rnp->exp_tasks != NULL ||
1258 (rnp->gp_tasks != NULL &&
1259 rnp->boost_tasks == NULL &&
1260 rnp->qsmask == 0 &&
1261 ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1262 if (rnp->exp_tasks == NULL)
1263 rnp->boost_tasks = rnp->gp_tasks;
Paul E. McKenney1217ed12011-05-04 21:43:49 -07001264 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001265 t = rnp->boost_kthread_task;
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001266 if (t)
1267 rcu_wake_cond(t, rnp->boost_kthread_status);
Paul E. McKenney1217ed12011-05-04 21:43:49 -07001268 } else {
Paul E. McKenney0ea1f2e2011-02-22 13:42:43 -08001269 rcu_initiate_boost_trace(rnp);
Paul E. McKenney1217ed12011-05-04 21:43:49 -07001270 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1271 }
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001272}
1273
Paul E. McKenney0f962a52011-04-14 12:13:53 -07001274/*
Paul E. McKenneya46e0892011-06-15 15:47:09 -07001275 * Wake up the per-CPU kthread to invoke RCU callbacks.
1276 */
1277static void invoke_rcu_callbacks_kthread(void)
1278{
1279 unsigned long flags;
1280
1281 local_irq_save(flags);
1282 __this_cpu_write(rcu_cpu_has_work, 1);
Shaohua Li1eb52122011-06-16 16:02:54 -07001283 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001284 current != __this_cpu_read(rcu_cpu_kthread_task)) {
1285 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
1286 __this_cpu_read(rcu_cpu_kthread_status));
1287 }
Paul E. McKenneya46e0892011-06-15 15:47:09 -07001288 local_irq_restore(flags);
1289}
1290
1291/*
Paul E. McKenneydff16722011-11-29 15:57:13 -08001292 * Is the current CPU running the RCU-callbacks kthread?
1293 * Caller must have preemption disabled.
1294 */
1295static bool rcu_is_callbacks_kthread(void)
1296{
1297 return __get_cpu_var(rcu_cpu_kthread_task) == current;
1298}
1299
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001300#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1301
1302/*
1303 * Do priority-boost accounting for the start of a new grace period.
1304 */
1305static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1306{
1307 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1308}
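/*
 * Editor's illustration (not in the original source): worked arithmetic for
 * RCU_BOOST_DELAY_JIFFIES above, assuming the common Kconfig default of
 * CONFIG_RCU_BOOST_DELAY=500 and HZ=1000:
 *
 *	RCU_BOOST_DELAY_JIFFIES == DIV_ROUND_UP(500 * 1000, 1000) == 500
 *
 * so rcu_preempt_boost_start_gp() arms boosting to begin roughly half a
 * second after the grace period starts, independent of the tick rate.
 */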
1309
1310/*
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001311 * Create an RCU-boost kthread for the specified node if one does not
1312 * already exist. We only create this kthread for preemptible RCU.
1313 * Returns zero if all is well, a negated errno otherwise.
1314 */
1315static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001316 struct rcu_node *rnp)
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001317{
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001318 int rnp_index = rnp - &rsp->node[0];
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001319 unsigned long flags;
1320 struct sched_param sp;
1321 struct task_struct *t;
1322
1323 if (&rcu_preempt_state != rsp)
1324 return 0;
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001325
1326 if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
1327 return 0;
1328
Paul E. McKenneya46e0892011-06-15 15:47:09 -07001329 rsp->boost = 1;
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001330 if (rnp->boost_kthread_task != NULL)
1331 return 0;
1332 t = kthread_create(rcu_boost_kthread, (void *)rnp,
Mike Galbraith5b61b0b2011-08-19 11:39:11 -07001333 "rcub/%d", rnp_index);
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001334 if (IS_ERR(t))
1335 return PTR_ERR(t);
1336 raw_spin_lock_irqsave(&rnp->lock, flags);
1337 rnp->boost_kthread_task = t;
1338 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Mike Galbraith5b61b0b2011-08-19 11:39:11 -07001339 sp.sched_priority = RCU_BOOST_PRIO;
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001340 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
Paul E. McKenney9a432732011-05-30 20:38:55 -07001341 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001342 return 0;
1343}
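/*
 * Editor's note (illustrative, based on the code above): when this function
 * does create a kthread, it is named "rcub/<rnp_index>" -- for example
 * "rcub/0" for the root rcu_node -- and runs SCHED_FIFO at RCU_BOOST_PRIO.
 * Kthreads are created only for the preemptible flavor (rcu_preempt_state)
 * and only for rcu_node structures that actually have CPUs.
 */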
1344
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001345static void rcu_kthread_do_work(void)
1346{
1347 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1348 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1349 rcu_preempt_do_callbacks();
1350}
1351
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001352static void rcu_cpu_kthread_setup(unsigned int cpu)
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001353{
1354 struct sched_param sp;
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001355
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001356 sp.sched_priority = RCU_KTHREAD_PRIO;
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001357 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001358}
1359
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001360static void rcu_cpu_kthread_park(unsigned int cpu)
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001361{
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001362 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1363}
1364
1365static int rcu_cpu_kthread_should_run(unsigned int cpu)
1366{
1367 return __get_cpu_var(rcu_cpu_has_work);
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001368}
1369
1370/*
1371 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
Paul E. McKenneye0f23062011-06-21 01:29:39 -07001372 * RCU softirq used in flavors and configurations of RCU that do not
1373 * support RCU priority boosting.
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001374 */
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001375static void rcu_cpu_kthread(unsigned int cpu)
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001376{
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001377 unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
1378 char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
1379 int spincnt;
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001380
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001381 for (spincnt = 0; spincnt < 10; spincnt++) {
Paul E. McKenney385680a2011-06-21 22:43:26 -07001382 trace_rcu_utilization("Start CPU kthread@rcu_wait");
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001383 local_bh_disable();
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001384 *statusp = RCU_KTHREAD_RUNNING;
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001385 this_cpu_inc(rcu_cpu_kthread_loops);
1386 local_irq_disable();
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001387 work = *workp;
1388 *workp = 0;
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001389 local_irq_enable();
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001390 if (work)
1391 rcu_kthread_do_work();
1392 local_bh_enable();
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001393 if (*workp == 0) {
1394 trace_rcu_utilization("End CPU kthread@rcu_wait");
1395 *statusp = RCU_KTHREAD_WAITING;
1396 return;
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001397 }
1398 }
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001399 *statusp = RCU_KTHREAD_YIELDING;
1400 trace_rcu_utilization("Start CPU kthread@rcu_yield");
1401 schedule_timeout_interruptible(2);
1402 trace_rcu_utilization("End CPU kthread@rcu_yield");
1403 *statusp = RCU_KTHREAD_WAITING;
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001404}
1405
1406/*
1407 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1408 * served by the rcu_node in question. The CPU hotplug lock is still
1409 * held, so the value of rnp->qsmaskinit will be stable.
1410 *
1411 * We don't include outgoingcpu in the affinity set; use -1 if there is
1412 * no outgoing CPU. If there are no CPUs left in the affinity set,
1413 * this function allows the kthread to execute on any CPU.
1414 */
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001415static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001416{
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001417 struct task_struct *t = rnp->boost_kthread_task;
1418 unsigned long mask = rnp->qsmaskinit;
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001419 cpumask_var_t cm;
1420 int cpu;
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001421
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001422 if (!t)
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001423 return;
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001424 if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001425 return;
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001426 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1427 if ((mask & 0x1) && cpu != outgoingcpu)
1428 cpumask_set_cpu(cpu, cm);
1429 if (cpumask_weight(cm) == 0) {
1430 cpumask_setall(cm);
1431 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1432 cpumask_clear_cpu(cpu, cm);
1433 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1434 }
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001435 set_cpus_allowed_ptr(t, cm);
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001436 free_cpumask_var(cm);
1437}
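/*
 * Illustrative calls (editor's sketch mirroring the comment above; the real
 * call sites are in the CPU-hotplug notifier code outside this file):
 *
 *	rcu_boost_kthread_setaffinity(rnp, cpu);   // 'cpu' is going offline
 *	rcu_boost_kthread_setaffinity(rnp, -1);    // no outgoing CPU
 *
 * In the second form every CPU still present in rnp->qsmaskinit remains
 * eligible to run the node's boost kthread.
 */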
1438
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001439static struct smp_hotplug_thread rcu_cpu_thread_spec = {
1440 .store = &rcu_cpu_kthread_task,
1441 .thread_should_run = rcu_cpu_kthread_should_run,
1442 .thread_fn = rcu_cpu_kthread,
1443 .thread_comm = "rcuc/%u",
1444 .setup = rcu_cpu_kthread_setup,
1445 .park = rcu_cpu_kthread_park,
1446};
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001447
1448/*
1449 * Spawn all kthreads -- called as soon as the scheduler is running.
1450 */
1451static int __init rcu_spawn_kthreads(void)
1452{
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001453 struct rcu_node *rnp;
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001454 int cpu;
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001455
Paul E. McKenneyb0d30412011-07-10 15:57:35 -07001456 rcu_scheduler_fully_active = 1;
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001457 for_each_possible_cpu(cpu)
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001458 per_cpu(rcu_cpu_has_work, cpu) = 0;
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001459 BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001460 rnp = rcu_get_root(rcu_state);
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001461 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001462 if (NUM_RCU_NODES > 1) {
1463 rcu_for_each_leaf_node(rcu_state, rnp)
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001464 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001465 }
1466 return 0;
1467}
1468early_initcall(rcu_spawn_kthreads);
1469
1470static void __cpuinit rcu_prepare_kthreads(int cpu)
1471{
1472 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1473 struct rcu_node *rnp = rdp->mynode;
1474
1475 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
Paul E. McKenney62ab7072012-07-16 10:42:38 +00001476 if (rcu_scheduler_fully_active)
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001477 (void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001478}
1479
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001480#else /* #ifdef CONFIG_RCU_BOOST */
1481
Paul E. McKenney1217ed12011-05-04 21:43:49 -07001482static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001483{
Paul E. McKenney1217ed12011-05-04 21:43:49 -07001484 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001485}
1486
Paul E. McKenneya46e0892011-06-15 15:47:09 -07001487static void invoke_rcu_callbacks_kthread(void)
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001488{
Paul E. McKenneya46e0892011-06-15 15:47:09 -07001489 WARN_ON_ONCE(1);
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001490}
1491
Paul E. McKenneydff16722011-11-29 15:57:13 -08001492static bool rcu_is_callbacks_kthread(void)
1493{
1494 return false;
1495}
1496
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001497static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1498{
1499}
1500
Thomas Gleixner5d01bbd2012-07-16 10:42:35 +00001501static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001502{
1503}
1504
Paul E. McKenneyb0d30412011-07-10 15:57:35 -07001505static int __init rcu_scheduler_really_started(void)
1506{
1507 rcu_scheduler_fully_active = 1;
1508 return 0;
1509}
1510early_initcall(rcu_scheduler_really_started);
1511
Paul E. McKenneyf8b7fc62011-06-16 08:26:32 -07001512static void __cpuinit rcu_prepare_kthreads(int cpu)
1513{
1514}
1515
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001516#endif /* #else #ifdef CONFIG_RCU_BOOST */
1517
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001518#if !defined(CONFIG_RCU_FAST_NO_HZ)
1519
1520/*
1521 * Check to see if any future RCU-related work will need to be done
1522 * by the current CPU, even if none need be done immediately, returning
1523 * 1 if so. This function is part of the RCU implementation; it is -not-
1524 * an exported member of the RCU API.
1525 *
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001526 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1527 * any flavor of RCU.
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001528 */
Paul E. McKenneyaa9b16302012-05-10 16:41:44 -07001529int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001530{
Paul E. McKenneyaa9b16302012-05-10 16:41:44 -07001531 *delta_jiffies = ULONG_MAX;
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001532 return rcu_cpu_has_callbacks(cpu);
1533}
1534
1535/*
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001536 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
1537 */
1538static void rcu_prepare_for_idle_init(int cpu)
1539{
1540}
1541
1542/*
1543 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1544 * after it.
1545 */
1546static void rcu_cleanup_after_idle(int cpu)
1547{
1548}
1549
1550/*
Paul E. McKenneya858af22012-01-16 13:29:10 -08001551 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001552 * is nothing.
1553 */
1554static void rcu_prepare_for_idle(int cpu)
1555{
1556}
1557
Paul E. McKenneyc57afe82012-02-28 11:02:21 -08001558/*
1559 * Don't bother keeping a running count of the number of RCU callbacks
1560 * posted because CONFIG_RCU_FAST_NO_HZ=n.
1561 */
1562static void rcu_idle_count_callbacks_posted(void)
1563{
1564}
1565
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001566#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1567
Paul E. McKenneyf23f7fa2011-11-30 15:41:14 -08001568/*
1569 * This code is invoked when a CPU goes idle, at which point we want
1570 * to have the CPU do everything required for RCU so that it can enter
1571 * the energy-efficient dyntick-idle mode. This is handled by a
1572 * state machine implemented by rcu_prepare_for_idle() below.
1573 *
1574 * The following four preprocessor symbols control this state machine:
1575 *
1576 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
1577 * to satisfy RCU. Beyond this point, it is better to incur a periodic
1578 * scheduling-clock interrupt than to loop through the state machine
1579 * at full power.
1580 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
1581 * optional if RCU does not need anything immediately from this
1582 * CPU, even if this CPU still has RCU callbacks queued. The first
1583 * times through the state machine are mandatory: we need to give
1584 * the state machine a chance to communicate a quiescent state
1585 * to the RCU core.
1586 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1587 * to sleep in dyntick-idle mode with RCU callbacks pending. This
1588 * is sized to be roughly one RCU grace period. Those energy-efficiency
1589 * benchmarkers who might otherwise be tempted to set this to a large
1590 * number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1591 * system. And if you are -that- concerned about energy efficiency,
1592 * just power the system down and be done with it!
Paul E. McKenney778d2502012-01-10 14:13:24 -08001593 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
1594 * permitted to sleep in dyntick-idle mode with only lazy RCU
1595 * callbacks pending. Setting this too high can OOM your system.
Paul E. McKenneyf23f7fa2011-11-30 15:41:14 -08001596 *
1597 * The values below work well in practice. If future workloads require
1598 * adjustment, they can be converted into kernel config parameters, though
1599 * making the state machine smarter might be a better option.
1600 */
1601#define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
1602#define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
Paul E. McKenneye84c48a2012-06-04 20:45:10 -07001603#define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */
Paul E. McKenney778d2502012-01-10 14:13:24 -08001604#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
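/*
 * Editor's illustration (arithmetic only, not in the original source):
 * with HZ=1000, RCU_IDLE_GP_DELAY is 4 jiffies (about 4 ms) and
 * RCU_IDLE_LAZY_GP_DELAY is 6 * 1000 == 6000 jiffies (6 seconds); with
 * HZ=250, the same macros give 4 jiffies (16 ms) and 1500 jiffies (still
 * 6 seconds).  Only the non-lazy delay therefore varies in wall-clock
 * terms with the tick rate.
 */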
Paul E. McKenneyf23f7fa2011-11-30 15:41:14 -08001605
Paul E. McKenney9d2ad242012-06-24 10:15:02 -07001606extern int tick_nohz_enabled;
1607
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001608/*
Paul E. McKenney486e2592012-01-06 14:11:30 -08001609 * Does the specified flavor of RCU have non-lazy callbacks pending on
1610 * the specified CPU? Both RCU flavor and CPU are specified by the
1611 * rcu_data structure.
1612 */
1613static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
1614{
1615 return rdp->qlen != rdp->qlen_lazy;
1616}
1617
1618#ifdef CONFIG_TREE_PREEMPT_RCU
1619
1620/*
1621 * Are there non-lazy RCU-preempt callbacks? (There cannot be if there
1622 * is no RCU-preempt in the kernel.)
1623 */
1624static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
1625{
1626 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
1627
1628 return __rcu_cpu_has_nonlazy_callbacks(rdp);
1629}
1630
1631#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1632
1633static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
1634{
1635 return 0;
1636}
1637
1638#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */
1639
1640/*
1641 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
1642 */
1643static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
1644{
1645 return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
1646 __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
1647 rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
1648}
1649
1650/*
Paul E. McKenneyaa9b16302012-05-10 16:41:44 -07001651 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
1652 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
1653 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
1654 * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
1655 * to enter dyntick-idle mode, we refuse to try to enter it. After all,
1656 * it is better to incur scheduling-clock interrupts than to spin
1657 * continuously for the same time duration!
1658 *
1659 * The delta_jiffies argument is used to store the time when RCU is
1660 * going to need the CPU again if it still has callbacks. The reason
1661 * for this is that rcu_prepare_for_idle() might need to post a timer,
1662 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
1663 * the wakeup time for this CPU. This means that RCU's timer can be
1664 * delayed until the wakeup time, which defeats the purpose of posting
1665 * a timer.
1666 */
1667int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1668{
1669 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1670
1671 /* Flag a new idle sojourn to the idle-entry state machine. */
1672 rdtp->idle_first_pass = 1;
1673 /* If no callbacks, RCU doesn't need the CPU. */
1674 if (!rcu_cpu_has_callbacks(cpu)) {
1675 *delta_jiffies = ULONG_MAX;
1676 return 0;
1677 }
1678 if (rdtp->dyntick_holdoff == jiffies) {
1679 /* RCU recently tried and failed, so don't try again. */
1680 *delta_jiffies = 1;
1681 return 1;
1682 }
1683 /* Set up for the possibility that RCU will post a timer. */
Paul E. McKenneye84c48a2012-06-04 20:45:10 -07001684 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
1685 *delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
1686 RCU_IDLE_GP_DELAY) - jiffies;
1687 } else {
1688 *delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
1689 *delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
1690 }
Paul E. McKenneyaa9b16302012-05-10 16:41:44 -07001691 return 0;
1692}
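/*
 * Editor's illustration (not in the original source): the round_up() above
 * aligns the non-lazy wakeup to an RCU_IDLE_GP_DELAY boundary.  For example,
 * with RCU_IDLE_GP_DELAY == 4 and jiffies == 1001:
 *
 *	round_up(1001 + 4, 4) - 1001 == 1008 - 1001 == 7
 *
 * so *delta_jiffies is 7, and CPUs going idle at about the same time tend
 * to choose the same aligned wakeup, batching their timer wakeups.
 */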
1693
1694/*
Paul E. McKenney21e52e12012-04-30 14:16:19 -07001695 * Handler for smp_call_function_single(). The only point of this
1696 * handler is to wake the CPU up, so the handler does only tracing.
1697 */
1698void rcu_idle_demigrate(void *unused)
1699{
1700 trace_rcu_prep_idle("Demigrate");
1701}
1702
1703/*
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001704 * Timer handler used to force CPU to start pushing its remaining RCU
1705 * callbacks in the case where it entered dyntick-idle mode with callbacks
1706 * pending. The handler doesn't really need to do anything because the
1707 * real work is done upon re-entry to idle, or by the next scheduling-clock
1708 * interrupt should idle not be re-entered.
Paul E. McKenney21e52e12012-04-30 14:16:19 -07001709 *
1710 * One special case: the timer gets migrated without awakening the CPU
1711 * on which the timer was scheduled. In this case, we must wake up
1712 * that CPU. We do so with smp_call_function_single().
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001713 */
Paul E. McKenney21e52e12012-04-30 14:16:19 -07001714static void rcu_idle_gp_timer_func(unsigned long cpu_in)
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001715{
Paul E. McKenney21e52e12012-04-30 14:16:19 -07001716 int cpu = (int)cpu_in;
1717
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001718 trace_rcu_prep_idle("Timer");
Paul E. McKenney21e52e12012-04-30 14:16:19 -07001719 if (cpu != smp_processor_id())
1720 smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
1721 else
1722 WARN_ON_ONCE(1); /* Getting here can hang the system... */
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001723}
1724
1725/*
1726 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
1727 */
1728static void rcu_prepare_for_idle_init(int cpu)
1729{
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001730 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1731
1732 rdtp->dyntick_holdoff = jiffies - 1;
1733 setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
1734 rdtp->idle_gp_timer_expires = jiffies - 1;
1735 rdtp->idle_first_pass = 1;
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001736}
1737
1738/*
1739 * Clean up for exit from idle. Because we are exiting from idle, there
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001740 * is no longer any point to ->idle_gp_timer, so cancel it. This will
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001741 * do nothing if this timer is not active, so just cancel it unconditionally.
1742 */
1743static void rcu_cleanup_after_idle(int cpu)
1744{
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001745 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
1746
1747 del_timer(&rdtp->idle_gp_timer);
Paul E. McKenney2fdbb312012-02-23 15:58:29 -08001748 trace_rcu_prep_idle("Cleanup after idle");
Paul E. McKenney9d2ad242012-06-24 10:15:02 -07001749 rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001750}
1751
1752/*
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001753 * Check to see if any RCU-related work can be done by the current CPU,
1754 * and if so, schedule a softirq to get it done. This function is part
1755 * of the RCU implementation; it is -not- an exported member of the RCU API.
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001756 *
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001757 * The idea is for the current CPU to clear out all work required by the
1758 * RCU core for the current grace period, so that this CPU can be permitted
1759 * to enter dyntick-idle mode. In some cases, it will need to be awakened
1760 * at the end of the grace period by whatever CPU ends the grace period.
1761 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
1762 * number of wakeups by a modest integer factor.
Paul E. McKenneya47cd882010-02-26 16:38:56 -08001763 *
1764 * Because it is not legal to invoke rcu_process_callbacks() with irqs
1765 * disabled, we do one pass of force_quiescent_state(), then do a
Paul E. McKenneya46e0892011-06-15 15:47:09 -07001766 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001767 * later. The ->dyntick_drain field controls the sequencing.
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001768 *
1769 * The caller must have disabled interrupts.
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001770 */
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001771static void rcu_prepare_for_idle(int cpu)
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001772{
Paul E. McKenneyf511fc62012-03-15 12:16:26 -07001773 struct timer_list *tp;
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001774 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
Paul E. McKenney9d2ad242012-06-24 10:15:02 -07001775 int tne;
1776
1777 /* Handle nohz enablement switches conservatively. */
1778 tne = ACCESS_ONCE(tick_nohz_enabled);
1779 if (tne != rdtp->tick_nohz_enabled_snap) {
1780 if (rcu_cpu_has_callbacks(cpu))
1781 invoke_rcu_core(); /* force nohz to see update. */
1782 rdtp->tick_nohz_enabled_snap = tne;
1783 return;
1784 }
1785 if (!tne)
1786 return;
Paul E. McKenneyf511fc62012-03-15 12:16:26 -07001787
Paul E. McKenney9a0c6fe2012-06-28 12:33:51 -07001788 /* Adaptive-tick mode, where usermode execution is idle to RCU. */
1789 if (!is_idle_task(current)) {
1790 rdtp->dyntick_holdoff = jiffies - 1;
1791 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
1792 trace_rcu_prep_idle("User dyntick with callbacks");
1793 rdtp->idle_gp_timer_expires =
1794 round_up(jiffies + RCU_IDLE_GP_DELAY,
1795 RCU_IDLE_GP_DELAY);
1796 } else if (rcu_cpu_has_callbacks(cpu)) {
1797 rdtp->idle_gp_timer_expires =
1798 round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
1799 trace_rcu_prep_idle("User dyntick with lazy callbacks");
1800 } else {
1801 return;
1802 }
1803 tp = &rdtp->idle_gp_timer;
1804 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
1805 return;
1806 }
1807
Paul E. McKenney3084f2f2011-11-22 17:07:11 -08001808 /*
Paul E. McKenneyc57afe82012-02-28 11:02:21 -08001809 * If this is an idle re-entry, for example, due to use of
1810 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
1811 * loop, then don't take any state-machine actions, unless the
1812 * momentary exit from idle queued additional non-lazy callbacks.
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001813 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
Paul E. McKenneyc57afe82012-02-28 11:02:21 -08001814 * pending.
1815 */
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001816 if (!rdtp->idle_first_pass &&
1817 (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
Paul E. McKenneyf511fc62012-03-15 12:16:26 -07001818 if (rcu_cpu_has_callbacks(cpu)) {
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001819 tp = &rdtp->idle_gp_timer;
1820 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
Paul E. McKenneyf511fc62012-03-15 12:16:26 -07001821 }
Paul E. McKenneyc57afe82012-02-28 11:02:21 -08001822 return;
1823 }
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001824 rdtp->idle_first_pass = 0;
1825 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
Paul E. McKenneyc57afe82012-02-28 11:02:21 -08001826
1827 /*
Paul E. McKenneyf535a602011-11-22 20:43:02 -08001828 * If there are no callbacks on this CPU, enter dyntick-idle mode.
1829 * Also reset state to avoid prejudicing later attempts.
Paul E. McKenney3084f2f2011-11-22 17:07:11 -08001830 */
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001831 if (!rcu_cpu_has_callbacks(cpu)) {
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001832 rdtp->dyntick_holdoff = jiffies - 1;
1833 rdtp->dyntick_drain = 0;
Paul E. McKenney433cddd2011-11-22 14:58:03 -08001834 trace_rcu_prep_idle("No callbacks");
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001835 return;
Paul E. McKenney77e38ed2010-04-25 21:04:29 -07001836 }
Paul E. McKenney3084f2f2011-11-22 17:07:11 -08001837
1838 /*
1839 * If in holdoff mode, just return. We will presumably have
1840 * refrained from disabling the scheduling-clock tick.
1841 */
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001842 if (rdtp->dyntick_holdoff == jiffies) {
Paul E. McKenney433cddd2011-11-22 14:58:03 -08001843 trace_rcu_prep_idle("In holdoff");
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001844 return;
Paul E. McKenney433cddd2011-11-22 14:58:03 -08001845 }
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001846
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001847 /* Check and update the ->dyntick_drain sequencing. */
1848 if (rdtp->dyntick_drain <= 0) {
Paul E. McKenneya47cd882010-02-26 16:38:56 -08001849 /* First time through, initialize the counter. */
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001850 rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
1851 } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
Paul E. McKenneyc3ce9102012-02-14 10:12:54 -08001852 !rcu_pending(cpu) &&
1853 !local_softirq_pending()) {
Paul E. McKenney7cb92492011-11-28 12:28:34 -08001854 /* Can we go dyntick-idle despite still having callbacks? */
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001855 rdtp->dyntick_drain = 0;
1856 rdtp->dyntick_holdoff = jiffies;
Paul E. McKenneyfd4b3522012-05-05 19:10:35 -07001857 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
1858 trace_rcu_prep_idle("Dyntick with callbacks");
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001859 rdtp->idle_gp_timer_expires =
Paul E. McKenneye84c48a2012-06-04 20:45:10 -07001860 round_up(jiffies + RCU_IDLE_GP_DELAY,
1861 RCU_IDLE_GP_DELAY);
Paul E. McKenneyfd4b3522012-05-05 19:10:35 -07001862 } else {
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001863 rdtp->idle_gp_timer_expires =
Paul E. McKenneye84c48a2012-06-04 20:45:10 -07001864 round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
Paul E. McKenneyfd4b3522012-05-05 19:10:35 -07001865 trace_rcu_prep_idle("Dyntick with lazy callbacks");
1866 }
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001867 tp = &rdtp->idle_gp_timer;
1868 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
1869 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
Paul E. McKenneyf23f7fa2011-11-30 15:41:14 -08001870 return; /* Nothing more to do immediately. */
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001871 } else if (--(rdtp->dyntick_drain) <= 0) {
Paul E. McKenneya47cd882010-02-26 16:38:56 -08001872 /* We have hit the limit, so time to give up. */
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001873 rdtp->dyntick_holdoff = jiffies;
Paul E. McKenney433cddd2011-11-22 14:58:03 -08001874 trace_rcu_prep_idle("Begin holdoff");
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001875 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
1876 return;
Paul E. McKenneya47cd882010-02-26 16:38:56 -08001877 }
1878
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001879 /*
1880 * Do one step of pushing the remaining RCU callbacks through
1881 * the RCU core state machine.
1882 */
1883#ifdef CONFIG_TREE_PREEMPT_RCU
1884 if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
1885 rcu_preempt_qs(cpu);
Paul E. McKenney4cdfc172012-06-22 17:06:26 -07001886 force_quiescent_state(&rcu_preempt_state);
Paul E. McKenneyaea1b352011-11-02 06:54:54 -07001887 }
1888#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
Paul E. McKenneya47cd882010-02-26 16:38:56 -08001889 if (per_cpu(rcu_sched_data, cpu).nxtlist) {
1890 rcu_sched_qs(cpu);
Paul E. McKenney4cdfc172012-06-22 17:06:26 -07001891 force_quiescent_state(&rcu_sched_state);
Paul E. McKenneya47cd882010-02-26 16:38:56 -08001892 }
1893 if (per_cpu(rcu_bh_data, cpu).nxtlist) {
1894 rcu_bh_qs(cpu);
Paul E. McKenney4cdfc172012-06-22 17:06:26 -07001895 force_quiescent_state(&rcu_bh_state);
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001896 }
1897
Paul E. McKenney433cddd2011-11-22 14:58:03 -08001898 /*
1899 * If RCU callbacks are still pending, RCU still needs this CPU.
1900 * So try forcing the callbacks through the grace period.
1901 */
Paul E. McKenney3ad0dec2011-11-22 21:08:13 -08001902 if (rcu_cpu_has_callbacks(cpu)) {
Paul E. McKenney433cddd2011-11-22 14:58:03 -08001903 trace_rcu_prep_idle("More callbacks");
Paul E. McKenneya46e0892011-06-15 15:47:09 -07001904 invoke_rcu_core();
Paul E. McKenneyc701d5d2012-06-28 08:08:25 -07001905 } else {
Paul E. McKenney433cddd2011-11-22 14:58:03 -08001906 trace_rcu_prep_idle("Callbacks drained");
Paul E. McKenneyc701d5d2012-06-28 08:08:25 -07001907 }
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08001908}
1909
Paul E. McKenneyc57afe82012-02-28 11:02:21 -08001910/*
Paul E. McKenney98248a02012-05-03 15:38:10 -07001911 * Keep a running count of the number of non-lazy callbacks posted
1912 * on this CPU. This running counter (which is never decremented) allows
1913 * rcu_prepare_for_idle() to detect when something out of the idle loop
1914 * posts a callback, even if an equal number of callbacks are invoked.
1915 * Of course, callbacks should only be posted from within a trace event
1916 * designed to be called from idle or from within RCU_NONIDLE().
Paul E. McKenneyc57afe82012-02-28 11:02:21 -08001917 */
1918static void rcu_idle_count_callbacks_posted(void)
1919{
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07001920 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
Paul E. McKenneyc57afe82012-02-28 11:02:21 -08001921}
1922
Paul E. McKenneyb626c1b2012-06-11 17:39:43 -07001923/*
1924 * Data for flushing lazy RCU callbacks at OOM time.
1925 */
1926static atomic_t oom_callback_count;
1927static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
1928
1929/*
1930 * RCU OOM callback -- decrement the outstanding count and deliver the
1931 * wake-up if we are the last one.
1932 */
1933static void rcu_oom_callback(struct rcu_head *rhp)
1934{
1935 if (atomic_dec_and_test(&oom_callback_count))
1936 wake_up(&oom_callback_wq);
1937}
1938
1939/*
1940 * Post an rcu_oom_notify callback on the current CPU if it has at
1941 * least one lazy callback. This will unnecessarily post callbacks
1942 * to CPUs that already have a non-lazy callback at the end of their
1943 * callback list, but this is an infrequent operation, so accept some
1944 * extra overhead to keep things simple.
1945 */
1946static void rcu_oom_notify_cpu(void *unused)
1947{
1948 struct rcu_state *rsp;
1949 struct rcu_data *rdp;
1950
1951 for_each_rcu_flavor(rsp) {
1952 rdp = __this_cpu_ptr(rsp->rda);
1953 if (rdp->qlen_lazy != 0) {
1954 atomic_inc(&oom_callback_count);
1955 rsp->call(&rdp->oom_head, rcu_oom_callback);
1956 }
1957 }
1958}
1959
1960/*
1961 * If low on memory, ensure that each CPU has a non-lazy callback.
1962 * This will wake up CPUs that have only lazy callbacks, in turn
1963 * ensuring that they free up the corresponding memory in a timely manner.
1964 * Because an uncertain amount of memory will be freed in some uncertain
1965 * timeframe, we do not claim to have freed anything.
1966 */
1967static int rcu_oom_notify(struct notifier_block *self,
1968 unsigned long notused, void *nfreed)
1969{
1970 int cpu;
1971
1972 /* Wait for callbacks from earlier instance to complete. */
1973 wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
1974
1975 /*
1976 * Prevent premature wakeup: ensure that all increments happen
1977 * before there is a chance of the counter reaching zero.
1978 */
1979 atomic_set(&oom_callback_count, 1);
1980
1981 get_online_cpus();
1982 for_each_online_cpu(cpu) {
1983 smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
1984 cond_resched();
1985 }
1986 put_online_cpus();
1987
1988 /* Unconditionally decrement: no need to wake ourselves up. */
1989 atomic_dec(&oom_callback_count);
1990
1991 return NOTIFY_OK;
1992}
1993
1994static struct notifier_block rcu_oom_nb = {
1995 .notifier_call = rcu_oom_notify
1996};
1997
1998static int __init rcu_register_oom_notifier(void)
1999{
2000 register_oom_notifier(&rcu_oom_nb);
2001 return 0;
2002}
2003early_initcall(rcu_register_oom_notifier);
2004
Paul E. McKenney8bd93a22010-02-22 17:04:59 -08002005#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
Paul E. McKenneya858af22012-01-16 13:29:10 -08002006
2007#ifdef CONFIG_RCU_CPU_STALL_INFO
2008
2009#ifdef CONFIG_RCU_FAST_NO_HZ
2010
2011static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2012{
Paul E. McKenney5955f7e2012-05-09 12:07:05 -07002013 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2014 struct timer_list *tltp = &rdtp->idle_gp_timer;
Paul E. McKenney86f343b2012-09-21 10:41:50 -07002015 char c;
Paul E. McKenneya858af22012-01-16 13:29:10 -08002016
Paul E. McKenney86f343b2012-09-21 10:41:50 -07002017 c = rdtp->dyntick_holdoff == jiffies ? 'H' : '.';
2018 if (timer_pending(tltp))
2019 sprintf(cp, "drain=%d %c timer=%lu",
2020 rdtp->dyntick_drain, c, tltp->expires - jiffies);
2021 else
2022 sprintf(cp, "drain=%d %c timer not pending",
2023 rdtp->dyntick_drain, c);
Paul E. McKenneya858af22012-01-16 13:29:10 -08002024}
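/*
 * Example output (editor's illustration derived from the sprintf() formats
 * above, values made up): a CPU in dyntick-idle holdoff with its
 * idle_gp_timer pending might contribute "drain=3 H timer=250", while a
 * CPU whose timer is not pending contributes "drain=0 . timer not pending".
 */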
2025
2026#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
2027
2028static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2029{
Carsten Emde1c17e4d2012-06-19 10:43:16 +02002030 *cp = '\0';
Paul E. McKenneya858af22012-01-16 13:29:10 -08002031}
2032
2033#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
2034
2035/* Initiate the stall-info list. */
2036static void print_cpu_stall_info_begin(void)
2037{
2038 printk(KERN_CONT "\n");
2039}
2040
2041/*
2042 * Print out diagnostic information for the specified stalled CPU.
2043 *
2044 * If the specified CPU is aware of the current RCU grace period
2045 * (flavor specified by rsp), then print the number of scheduling
2046 * clock interrupts the CPU has taken during the time that it has
2047 * been aware. Otherwise, print the number of RCU grace periods
2048 * that this CPU is ignorant of, for example, "1" if the CPU was
2049 * aware of the previous grace period.
2050 *
2051 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
2052 */
2053static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2054{
2055 char fast_no_hz[72];
2056 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2057 struct rcu_dynticks *rdtp = rdp->dynticks;
2058 char *ticks_title;
2059 unsigned long ticks_value;
2060
2061 if (rsp->gpnum == rdp->gpnum) {
2062 ticks_title = "ticks this GP";
2063 ticks_value = rdp->ticks_this_gp;
2064 } else {
2065 ticks_title = "GPs behind";
2066 ticks_value = rsp->gpnum - rdp->gpnum;
2067 }
2068 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
2069 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
2070 cpu, ticks_value, ticks_title,
2071 atomic_read(&rdtp->dynticks) & 0xfff,
2072 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
2073 fast_no_hz);
2074}
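/*
 * Example stall-info line (editor's illustration matching the printk()
 * format above, with made-up values):
 *
 *	3: (26 ticks this GP) idle=c91/1/0 drain=0 . timer not pending
 *
 * Here CPU 3 has taken 26 scheduling-clock interrupts during the current
 * grace period, the low 12 bits of its ->dynticks counter are 0xc91, and
 * the next two fields are ->dynticks_nesting and ->dynticks_nmi_nesting.
 */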
2075
2076/* Terminate the stall-info list. */
2077static void print_cpu_stall_info_end(void)
2078{
2079 printk(KERN_ERR "\t");
2080}
2081
2082/* Zero ->ticks_this_gp for all flavors of RCU. */
2083static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2084{
2085 rdp->ticks_this_gp = 0;
2086}
2087
2088/* Increment ->ticks_this_gp for all flavors of RCU. */
2089static void increment_cpu_stall_ticks(void)
2090{
Paul E. McKenney115f7a72012-08-10 13:55:03 -07002091 struct rcu_state *rsp;
2092
2093 for_each_rcu_flavor(rsp)
2094 __this_cpu_ptr(rsp->rda)->ticks_this_gp++;
Paul E. McKenneya858af22012-01-16 13:29:10 -08002095}
2096
2097#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
2098
2099static void print_cpu_stall_info_begin(void)
2100{
2101 printk(KERN_CONT " {");
2102}
2103
2104static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
2105{
2106 printk(KERN_CONT " %d", cpu);
2107}
2108
2109static void print_cpu_stall_info_end(void)
2110{
2111 printk(KERN_CONT "} ");
2112}
2113
2114static void zero_cpu_stall_ticks(struct rcu_data *rdp)
2115{
2116}
2117
2118static void increment_cpu_stall_ticks(void)
2119{
2120}
2121
2122#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002123
2124#ifdef CONFIG_RCU_NOCB_CPU
2125
2126/*
2127 * Offload callback processing from the boot-time-specified set of CPUs
2128 * specified by rcu_nocb_mask. For each CPU in the set, there is a
2129 * kthread created that pulls the callbacks from the corresponding CPU,
2130 * waits for a grace period to elapse, and invokes the callbacks.
2131 * The no-CBs CPUs do a wake_up() on their kthread when they insert
2132 * a callback into any empty list, unless the rcu_nocb_poll boot parameter
2133 * has been specified, in which case each kthread actively polls its
2134 * CPU. (Which isn't so great for energy efficiency, but which does
2135 * reduce RCU's overhead on that CPU.)
2136 *
2137 * This is intended to be used in conjunction with Frederic Weisbecker's
2138 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
2139 * running CPU-bound user-mode computations.
2140 *
2141 * Offloading of callback processing could also in theory be used as
2142 * an energy-efficiency measure because CPUs with no RCU callbacks
2143 * queued are more aggressive about entering dyntick-idle mode.
2144 */
2145
2146
2147/* Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters. */
2148static int __init rcu_nocb_setup(char *str)
2149{
2150 alloc_bootmem_cpumask_var(&rcu_nocb_mask);
2151 have_rcu_nocb_mask = true;
2152 cpulist_parse(str, rcu_nocb_mask);
2153 return 1;
2154}
2155__setup("rcu_nocbs=", rcu_nocb_setup);
2156
Paul Gortmaker1b0048a2012-12-20 13:19:22 -08002157static int __init parse_rcu_nocb_poll(char *arg)
2158{
2159 rcu_nocb_poll = 1;
2160 return 0;
2161}
2162early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
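/*
 * Example usage (editor's illustration, not part of the original file):
 * booting with
 *
 *	rcu_nocbs=1,3-7 rcu_nocb_poll
 *
 * offloads callback invocation from CPUs 1 and 3-7 to "rcuo" kthreads and
 * makes those kthreads poll instead of waiting for wakeups.  The CPU list
 * is parsed by cpulist_parse() in rcu_nocb_setup() above, so any standard
 * cpulist syntax ("2", "0-3", "1,4-5") is accepted.
 */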
2163
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002164/*
2165 * Does this CPU need a grace period due to offloaded callbacks?
2166 */
2167static int rcu_nocb_needs_gp(struct rcu_data *rdp)
2168{
2169 return rdp->nocb_needs_gp;
2170}
2171
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002172/* Is the specified CPU a no-CBs CPU? */
2173static bool is_nocb_cpu(int cpu)
2174{
2175 if (have_rcu_nocb_mask)
2176 return cpumask_test_cpu(cpu, rcu_nocb_mask);
2177 return false;
2178}
2179
2180/*
2181 * Enqueue the specified string of rcu_head structures onto the specified
2182 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
2183 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
2184 * counts are supplied by rhcount and rhcount_lazy.
2185 *
2186 * If warranted, also wake up the kthread servicing this CPU's queues.
2187 */
2188static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
2189 struct rcu_head *rhp,
2190 struct rcu_head **rhtp,
2191 int rhcount, int rhcount_lazy)
2192{
2193 int len;
2194 struct rcu_head **old_rhpp;
2195 struct task_struct *t;
2196
2197 /* Enqueue the callback on the nocb list and update counts. */
2198 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
2199 ACCESS_ONCE(*old_rhpp) = rhp;
2200 atomic_long_add(rhcount, &rdp->nocb_q_count);
2201 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
2202
2203 /* If we are not being polled and there is a kthread, awaken it ... */
2204 t = ACCESS_ONCE(rdp->nocb_kthread);
2205	if (rcu_nocb_poll || !t)
2206 return;
2207 len = atomic_long_read(&rdp->nocb_q_count);
2208 if (old_rhpp == &rdp->nocb_head) {
2209 wake_up(&rdp->nocb_wq); /* ... only if queue was empty ... */
2210 rdp->qlen_last_fqs_check = 0;
2211 } else if (len > rdp->qlen_last_fqs_check + qhimark) {
2212 wake_up_process(t); /* ... or if many callbacks queued. */
2213 rdp->qlen_last_fqs_check = LONG_MAX / 2;
2214 }
2215 return;
2216}
2217
2218/*
2219 * This is a helper for __call_rcu(), which invokes this when the normal
2220 * callback queue is inoperable. If this is not a no-CBs CPU, this
2221 * function returns failure back to __call_rcu(), which can complain
2222 * appropriately.
2223 *
2224 * Otherwise, this function queues the callback where the corresponding
2225 * "rcuo" kthread can find it.
2226 */
2227static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2228 bool lazy)
2229{
2230
2231 if (!is_nocb_cpu(rdp->cpu))
2232 return 0;
2233 __call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
2234 return 1;
2235}
2236
2237/*
2238 * Adopt orphaned callbacks on a no-CBs CPU, or return 0 if this is
2239 * not a no-CBs CPU.
2240 */
2241static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2242 struct rcu_data *rdp)
2243{
2244 long ql = rsp->qlen;
2245 long qll = rsp->qlen_lazy;
2246
2247 /* If this is not a no-CBs CPU, tell the caller to do it the old way. */
2248 if (!is_nocb_cpu(smp_processor_id()))
2249 return 0;
2250 rsp->qlen = 0;
2251 rsp->qlen_lazy = 0;
2252
2253 /* First, enqueue the donelist, if any. This preserves CB ordering. */
2254 if (rsp->orphan_donelist != NULL) {
2255 __call_rcu_nocb_enqueue(rdp, rsp->orphan_donelist,
2256 rsp->orphan_donetail, ql, qll);
2257 ql = qll = 0;
2258 rsp->orphan_donelist = NULL;
2259 rsp->orphan_donetail = &rsp->orphan_donelist;
2260 }
2261 if (rsp->orphan_nxtlist != NULL) {
2262 __call_rcu_nocb_enqueue(rdp, rsp->orphan_nxtlist,
2263 rsp->orphan_nxttail, ql, qll);
2264 ql = qll = 0;
2265 rsp->orphan_nxtlist = NULL;
2266 rsp->orphan_nxttail = &rsp->orphan_nxtlist;
2267 }
2268 return 1;
2269}
2270
2271/*
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002272 * If necessary, kick off a new grace period, and either way wait
2273 * for a subsequent grace period to complete.
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002274 */
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002275static void rcu_nocb_wait_gp(struct rcu_data *rdp)
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002276{
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002277 unsigned long c;
2278 unsigned long flags;
2279 unsigned long j;
2280 struct rcu_node *rnp = rdp->mynode;
2281
2282 raw_spin_lock_irqsave(&rnp->lock, flags);
2283 c = rnp->completed + 2;
2284 rdp->nocb_needs_gp = true;
2285 raw_spin_unlock_irqrestore(&rnp->lock, flags);
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002286
2287 /*
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002288 * Wait for the grace period. Do so interruptibly to avoid messing
2289 * up the load average.
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002290 */
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002291 for (;;) {
2292 j = jiffies;
2293 schedule_timeout_interruptible(2);
2294 raw_spin_lock_irqsave(&rnp->lock, flags);
2295 if (ULONG_CMP_GE(rnp->completed, c)) {
2296 rdp->nocb_needs_gp = false;
2297 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2298 break;
2299 }
2300 if (j == jiffies)
2301 flush_signals(current);
2302 raw_spin_unlock_irqrestore(&rnp->lock, flags);
2303 }
2304 smp_mb(); /* Ensure that CB invocation happens after GP end. */
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002305}
2306
2307/*
2308 * Per-rcu_data kthread, but only for no-CBs CPUs. Each kthread invokes
2309 * callbacks queued by the corresponding no-CBs CPU.
2310 */
2311static int rcu_nocb_kthread(void *arg)
2312{
2313 int c, cl;
2314 struct rcu_head *list;
2315 struct rcu_head *next;
2316 struct rcu_head **tail;
2317 struct rcu_data *rdp = arg;
2318
2319 /* Each pass through this loop invokes one batch of callbacks */
2320 for (;;) {
2321 /* If not polling, wait for next batch of callbacks. */
2322 if (!rcu_nocb_poll)
Paul Gortmaker353af9c2012-12-20 09:35:02 -08002323 wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002324 list = ACCESS_ONCE(rdp->nocb_head);
2325 if (!list) {
2326 schedule_timeout_interruptible(1);
Paul Gortmaker353af9c2012-12-20 09:35:02 -08002327 flush_signals(current);
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002328 continue;
2329 }
2330
2331 /*
2332 * Extract queued callbacks, update counts, and wait
2333 * for a grace period to elapse.
2334 */
2335 ACCESS_ONCE(rdp->nocb_head) = NULL;
2336 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
2337 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
2338 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
2339 ACCESS_ONCE(rdp->nocb_p_count) += c;
2340 ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002341 rcu_nocb_wait_gp(rdp);
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002342
2343 /* Each pass through the following loop invokes a callback. */
2344 trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
2345 c = cl = 0;
2346 while (list) {
2347 next = list->next;
2348 /* Wait for enqueuing to complete, if needed. */
2349 while (next == NULL && &list->next != tail) {
2350 schedule_timeout_interruptible(1);
2351 next = list->next;
2352 }
2353 debug_rcu_head_unqueue(list);
2354 local_bh_disable();
2355 if (__rcu_reclaim(rdp->rsp->name, list))
2356 cl++;
2357 c++;
2358 local_bh_enable();
2359 list = next;
2360 }
2361 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
2362 ACCESS_ONCE(rdp->nocb_p_count) -= c;
2363 ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
Paul E. McKenneyc635a4e2012-10-29 07:29:20 -07002364 rdp->n_nocbs_invoked += c;
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002365 }
2366 return 0;
2367}
2368
2369/* Initialize per-rcu_data variables for no-CBs CPUs. */
2370static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2371{
2372 rdp->nocb_tail = &rdp->nocb_head;
2373 init_waitqueue_head(&rdp->nocb_wq);
2374}
2375
2376/* Create a kthread for each RCU flavor for each no-CBs CPU. */
2377static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2378{
2379 int cpu;
2380 struct rcu_data *rdp;
2381 struct task_struct *t;
2382
2383 if (rcu_nocb_mask == NULL)
2384 return;
2385 for_each_cpu(cpu, rcu_nocb_mask) {
2386 rdp = per_cpu_ptr(rsp->rda, cpu);
2387 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
2388 BUG_ON(IS_ERR(t));
2389 ACCESS_ONCE(rdp->nocb_kthread) = t;
2390 }
2391}
2392
2393/* Prevent __call_rcu() from enqueuing callbacks on no-CBs CPUs */
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002394static bool init_nocb_callback_list(struct rcu_data *rdp)
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002395{
2396 if (rcu_nocb_mask == NULL ||
2397 !cpumask_test_cpu(rdp->cpu, rcu_nocb_mask))
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002398 return false;
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002399 rdp->nxttail[RCU_NEXT_TAIL] = NULL;
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002400 return true;
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002401}
2402
2403#else /* #ifdef CONFIG_RCU_NOCB_CPU */
2404
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002405static int rcu_nocb_needs_gp(struct rcu_data *rdp)
2406{
2407 return 0;
2408}
2409
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002410static bool is_nocb_cpu(int cpu)
2411{
2412 return false;
2413}
2414
2415static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
2416 bool lazy)
2417{
2418 return 0;
2419}
2420
2421static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
2422 struct rcu_data *rdp)
2423{
2424 return 0;
2425}
2426
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002427static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2428{
2429}
2430
2431static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
2432{
2433}
2434
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002435static bool init_nocb_callback_list(struct rcu_data *rdp)
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002436{
Paul E. McKenney34ed62462013-01-07 13:37:42 -08002437 return false;
Paul E. McKenney3fbfbf72012-08-19 21:35:53 -07002438}
2439
2440#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */