/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/smpboot.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary. If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
        printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
        printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
               CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
        printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
        printk(KERN_INFO
               "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
        printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
        printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
        printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
        printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
        printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
#endif
        if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
                printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
        if (nr_cpu_ids != NR_CPUS)
                printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state =
        RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
        return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
        force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU. Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state. There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

        rdp->passed_quiesce_gpnum = rdp->gpnum;
        barrier();
        if (rdp->passed_quiesce == 0)
                trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
        rdp->passed_quiesce = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from. If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section. Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
        struct task_struct *t = current;
        unsigned long flags;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        if (t->rcu_read_lock_nesting > 0 &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                t->rcu_blocked_node = rnp;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period. Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long
                 * as that task remains queued, the current grace period
                 * cannot end. Note that there is some uncertainty as
                 * to exactly when the current grace period started.
                 * We take a conservative approach, which can result
                 * in unnecessarily waiting on tasks that started very
                 * slightly after the current grace period began. C'est
                 * la vie!!!
                 *
                 * But first, note that the current CPU must still be
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
                        list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
                        rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
                        if (rnp->boost_tasks != NULL)
                                rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
                } else {
                        list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
                        if (rnp->qsmask & rdp->grpmask)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
                trace_rcu_preempt_task(rdp->rsp->name,
                                       t->pid,
                                       (rnp->qsmask & rdp->grpmask)
                                       ? rnp->gpnum
                                       : rnp->gpnum + 1);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else if (t->rcu_read_lock_nesting < 0 &&
                   t->rcu_read_unlock_special) {

                /*
                 * Complete exit from RCU read-side critical section on
                 * behalf of preempted instance of __rcu_read_unlock().
                 */
                rcu_read_unlock_special(t);
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally. Either way, we can now note a quiescent state
         * for this CPU. Again, if we were in an RCU read-side critical
         * section, and if that critical section was blocking the current
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
        rcu_preempt_qs(cpu);
        local_irq_restore(flags);
}

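/*
 * For orientation, a sketch of the reader-side pattern whose preemption
 * the function above handles (gp and do_something_with() are illustrative
 * stand-ins, not symbols defined in this file):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);	(task may be preempted here)
 *	rcu_read_unlock();
 *
 * If the task is preempted between rcu_read_lock() and rcu_read_unlock(),
 * rcu_preempt_note_context_switch() queues it on the leaf rcu_node's
 * ->blkd_tasks list, and rcu_read_unlock_special() dequeues it at the
 * outermost rcu_read_unlock().
 */
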
/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure. If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
        return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period. The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;
        struct rcu_node *rnp_p;

        if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;  /* Still need more quiescent states! */
        }

        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
                 * Either there is only one rcu_node in the tree,
                 * or tasks were kicked up to root rcu_node due to
                 * CPUs going offline.
                 */
                rcu_report_qs_rsp(&rcu_preempt_state, flags);
                return;
        }

        /* Report up the rest of the hierarchy. */
        mask = rnp->grpmask;
        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
        raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
                                             struct rcu_node *rnp)
{
        struct list_head *np;

        np = t->rcu_node_entry.next;
        if (np == &rnp->blkd_tasks)
                np = NULL;
        return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as the need to
 * notify the RCU core of a quiescent state or to clean up after a task
 * that blocked during its RCU read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        int empty_exp;
        int empty_exp_now;
        unsigned long flags;
        struct list_head *np;
#ifdef CONFIG_RCU_BOOST
        struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
        int special;

        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit critical section,
         * let it know that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS) {
                rcu_preempt_qs(smp_processor_id());
        }

        /* Hardware IRQ handlers cannot block. */
        if (in_irq() || in_serving_softirq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the list it blocked on. The
                 * task can migrate while we acquire the lock, but at
                 * most one time. So at most two passes through loop.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
                        raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                }
                empty = !rcu_preempt_blocked_readers_cgp(rnp);
                empty_exp = !rcu_preempted_readers_exp(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;
                trace_rcu_unlock_preempted_task("rcu_preempt",
                                                rnp->gpnum, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
                /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
                if (t->rcu_boost_mutex) {
                        rbmp = t->rcu_boost_mutex;
                        t->rcu_boost_mutex = NULL;
                }
#endif /* #ifdef CONFIG_RCU_BOOST */

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
                 * so we must take a snapshot of the expedited state.
                 */
                empty_exp_now = !rcu_preempted_readers_exp(rnp);
                if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report("preempt_rcu",
                                                         rnp->gpnum,
                                                         0, rnp->qsmask,
                                                         rnp->level,
                                                         rnp->grplo,
                                                         rnp->grphi,
                                                         !!rnp->gp_tasks);
                        rcu_report_unblock_qs_rnp(rnp, flags);
                } else {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                }

#ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
                if (rbmp)
                        rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
                if (!empty_exp && empty_exp_now)
                        rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
        } else {
                local_irq_restore(flags);
        }
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct task_struct *t;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return;
        raw_spin_lock_irqsave(&rnp->lock, flags);
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
                sched_show_task(t);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
        struct rcu_node *rnp = rcu_get_root(rsp);

        rcu_print_detail_task_stall_rnp(rnp);
        rcu_for_each_leaf_node(rsp, rnp)
                rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
        printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
        printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        struct task_struct *t;
        int ndetected = 0;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return 0;
        rcu_print_task_stall_begin(rnp);
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                printk(KERN_CONT " P%d", t->pid);
                ndetected++;
        }
        rcu_print_task_stall_end();
        return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty. It is a serious bug to complete a grace
 * period that still has RCU readers blocked! This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
        if (!list_empty(&rnp->blkd_tasks))
                rnp->gp_tasks = rnp->blkd_tasks.next;
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for the case in which all CPUs covered by the
 * specified rcu_node have gone offline. Move them up to the root
 * rcu_node. The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns nonzero if there were tasks blocking the current normal and/or
 * expedited grace period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        struct list_head *lp;
        struct list_head *lp_root;
        int retval = 0;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        struct task_struct *t;

        if (rnp == rnp_root) {
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return 0;  /* Shouldn't happen: at least one CPU online. */
        }

        /* If we are on an internal node, complain bitterly. */
        WARN_ON_ONCE(rnp != rdp->mynode);

        /*
         * Move tasks up to root rcu_node. Don't try to get fancy for
         * this corner-case operation -- just put this node's tasks
         * at the head of the root node's list, and update the root node's
         * ->gp_tasks and ->exp_tasks pointers to those of this node's,
         * if non-NULL. This might result in waiting for more tasks than
         * absolutely necessary, but this is a good performance/complexity
         * tradeoff.
         */
        if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
                retval |= RCU_OFL_TASKS_NORM_GP;
        if (rcu_preempted_readers_exp(rnp))
                retval |= RCU_OFL_TASKS_EXP_GP;
        lp = &rnp->blkd_tasks;
        lp_root = &rnp_root->blkd_tasks;
        while (!list_empty(lp)) {
                t = list_entry(lp->next, typeof(*t), rcu_node_entry);
                raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
                list_del(&t->rcu_node_entry);
                t->rcu_blocked_node = rnp_root;
                list_add(&t->rcu_node_entry, lp_root);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp_root->gp_tasks = rnp->gp_tasks;
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
                raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
        }

#ifdef CONFIG_RCU_BOOST
        /* In case root is being boosted and leaf is not. */
        raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
        if (rnp_root->boost_tasks != NULL &&
            rnp_root->boost_tasks != rnp_root->gp_tasks)
                rnp_root->boost_tasks = rnp_root->gp_tasks;
        raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

        rnp->gp_tasks = NULL;
        rnp->exp_tasks = NULL;
        return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU. When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                rcu_preempt_qs(cpu);
                return;
        }
        if (t->rcu_read_lock_nesting > 0 &&
            per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
        rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);

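/*
 * Usage sketch for call_rcu() (struct foo and foo_reclaim() are
 * hypothetical, not defined in this file):
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	list_del_rcu(&fp->list);
 *	call_rcu(&fp->rcu, foo_reclaim);
 *
 * foo_reclaim() is invoked only after all pre-existing RCU read-side
 * critical sections complete, so no reader can still hold a reference
 * to the structure when it is freed.
 */
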
/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks. Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
                    void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

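/*
 * Callers normally reach kfree_call_rcu() via the kfree_rcu() macro
 * rather than invoking it directly. Sketch (struct foo is hypothetical,
 * with an rcu_head member named "rcu"):
 *
 *	list_del_rcu(&fp->list);
 *	kfree_rcu(fp, rcu);
 *
 * This is equivalent to call_rcu() with a callback that does nothing
 * but kfree() the enclosing structure, with no need to write that
 * callback by hand.
 */
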
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed. Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting. RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_rcu() in RCU read-side critical section");
        if (!rcu_scheduler_active)
                return;
        wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

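/*
 * Classic updater sketch built on synchronize_rcu() (gp, gp_lock, newp,
 * and oldp are hypothetical):
 *
 *	spin_lock(&gp_lock);
 *	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, newp);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();	(wait for pre-existing readers of oldp)
 *	kfree(oldp);
 */
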
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
        return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period. Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return !rcu_preempted_readers_exp(rnp) &&
               ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period. This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree. (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake)
{
        unsigned long flags;
        unsigned long mask;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        if (wake)
                                wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure. If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;
        int must_wait = 0;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        if (list_empty(&rnp->blkd_tasks)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else {
                rnp->exp_tasks = rnp->blkd_tasks.next;
                rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
                must_wait = 1;
        }
        if (!must_wait)
                rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it. The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain. This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier. Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_preempt_state;
        long snap;
        int trycount = 0;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb(); /* Above access cannot bleed into critical section. */

        /*
         * Acquire lock, falling back to synchronize_rcu() if too many
         * lock-acquisition failures. Of course, if someone does the
         * expedited grace period for us, just leave.
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (trycount++ < 10) {
                        udelay(trycount * num_online_cpus());
                } else {
                        synchronize_rcu();
                        return;
                }
                if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                        goto mb_ret; /* Others did our work for us. */
        }
        if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                goto unlock_mb_ret; /* Others did our work for us. */

        /* Force all RCU readers onto ->blkd_tasks lists. */
        synchronize_sched_expedited();

        raw_spin_lock_irqsave(&rsp->onofflock, flags);

        /* Initialize ->expmask for all non-leaf rcu_node structures. */
        rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
                raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                rnp->expmask = rnp->qsmaskinit;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
        }

        /* Snapshot current state of ->blkd_tasks lists. */
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init(rsp, rnp);
        if (NUM_RCU_NODES > 1)
                sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

        /* Wait for snapshotted ->blkd_tasks lists to drain. */
        rnp = rcu_get_root(rsp);
        wait_event(sync_rcu_preempt_exp_wq,
                   sync_rcu_preempt_exp_done(rnp));

        /* Clean up and exit. */
        smp_mb(); /* ensure expedited GP seen before counter increment. */
        ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
        smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

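/*
 * Sketch of the batching advice above (all names hypothetical). Rather
 * than paying for one expedited grace period per removed element:
 *
 *	list_del_rcu(&p->node);
 *	synchronize_rcu_expedited();
 *	kfree(p);
 *
 * remove every doomed element first, wait for a single grace period,
 * and only then free them all:
 *
 *	list_for_each_entry_safe(p, q, &mylist, node)
 *		list_del_rcu(&p->node);		(also collect p for freeing)
 *	synchronize_rcu();
 *	(now kfree() each collected element)
 */
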
/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

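/*
 * Typical use sketch: a module that posts call_rcu() callbacks must wait
 * for them before unloading, lest callbacks run after the module text is
 * gone (my_exit() and the helpers are hypothetical):
 *
 *	static void __exit my_exit(void)
 *	{
 *		stop_posting_new_callbacks();
 *		rcu_barrier();		(all queued callbacks have now run)
 *		free_remaining_data_structures();
 *	}
 */
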
/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
        rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
        rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks; check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks. Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
                    void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * Because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
        rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
        if (list_empty(&rnp->blkd_tasks))
                rnp->n_balk_blkd_tasks++;
        else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
                rnp->n_balk_exp_gp_tasks++;
        else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
                rnp->n_balk_boost_tasks++;
        else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
                rnp->n_balk_notblocked++;
        else if (rnp->gp_tasks != NULL &&
                 ULONG_CMP_LT(jiffies, rnp->boost_time))
                rnp->n_balk_notyet++;
        else
                rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
        /*
         * If the thread is yielding, only wake it when this
         * is invoked from idle.
         */
        if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
                wake_up_process(t);
}

Paul E. McKenney27f4d282011-02-07 12:47:15 -08001083/*
1084 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1085 * or ->boost_tasks, advancing the pointer to the next task in the
1086 * ->blkd_tasks list.
1087 *
1088 * Note that irqs must be enabled: boosting the task can block.
1089 * Returns 1 if there are more tasks needing to be boosted.
1090 */
1091static int rcu_boost(struct rcu_node *rnp)
1092{
1093 unsigned long flags;
1094 struct rt_mutex mtx;
1095 struct task_struct *t;
1096 struct list_head *tb;
1097
1098 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1099 return 0; /* Nothing left to boost. */
1100
1101 raw_spin_lock_irqsave(&rnp->lock, flags);
1102
1103 /*
1104 * Recheck under the lock: all tasks in need of boosting
1105 * might exit their RCU read-side critical sections on their own.
1106 */
1107 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1108 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1109 return 0;
1110 }
1111
1112 /*
1113 * Preferentially boost tasks blocking expedited grace periods.
1114 * This cannot starve the normal grace periods because a second
1115 * expedited grace period must boost all blocked tasks, including
1116 * those blocking the pre-existing normal grace period.
1117 */
Paul E. McKenney0ea1f2e2011-02-22 13:42:43 -08001118 if (rnp->exp_tasks != NULL) {
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001119 tb = rnp->exp_tasks;
Paul E. McKenney0ea1f2e2011-02-22 13:42:43 -08001120 rnp->n_exp_boosts++;
1121 } else {
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001122 tb = rnp->boost_tasks;
Paul E. McKenney0ea1f2e2011-02-22 13:42:43 -08001123 rnp->n_normal_boosts++;
1124 }
1125 rnp->n_tasks_boosted++;
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001126
1127 /*
1128 * We boost task t by manufacturing an rt_mutex that appears to
1129 * be held by task t. We leave a pointer to that rt_mutex where
1130 * task t can find it, and task t will release the mutex when it
1131 * exits its outermost RCU read-side critical section. Then
1132 * simply acquiring this artificial rt_mutex will boost task
1133 * t's priority. (Thanks to tglx for suggesting this approach!)
1134 *
1135 * Note that task t must acquire rnp->lock to remove itself from
1136 * the ->blkd_tasks list, which it will do from exit() if from
1137 * nowhere else. We therefore are guaranteed that task t will
1138 * stay around at least until we drop rnp->lock. Note that
1139 * rnp->lock also resolves races between our priority boosting
1140 * and task t's exiting its outermost RCU read-side critical
1141 * section.
1142 */
1143 t = container_of(tb, struct task_struct, rcu_node_entry);
1144 rt_mutex_init_proxy_locked(&mtx, t);
1145 t->rcu_boost_mutex = &mtx;
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001146 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1147 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1148 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1149
Paul E. McKenney4f89b332011-12-09 14:43:47 -08001150 return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
1151 ACCESS_ONCE(rnp->boost_tasks) != NULL;
Paul E. McKenney27f4d282011-02-07 12:47:15 -08001152}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End boost kthread@rcu_yield");
			schedule_timeout_interruptible(2);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}
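
/*
 * The rcu_wait() used above is a simple wait-without-waitqueue loop
 * defined elsewhere in the RCU implementation; schematically it is
 * (a sketch from memory -- see its actual definition for the
 * authoritative form):
 *
 *	#define rcu_wait(cond)						\
 *	do {								\
 *		for (;;) {						\
 *			set_current_state(TASK_INTERRUPTIBLE);		\
 *			if (cond)					\
 *				break;					\
 *			schedule();					\
 *		}							\
 *		__set_current_state(TASK_RUNNING);			\
 *	} while (0)
 */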

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
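
/*
 * Worked example of the conversion above: CONFIG_RCU_BOOST_DELAY is in
 * milliseconds, so with its default value of 500 and HZ=250 we get
 * DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, i.e. boosting begins
 * half a second into a still-incomplete grace period.
 */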

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
		return 0;

	rsp->boost = 1;
	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}
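
/*
 * A note on the spawn sequence above: kthread_create() returns the new
 * task without running it, so publishing ->boost_kthread_task and
 * switching the thread to SCHED_FIFO (sched_setscheduler_nocheck()
 * skips the capability check, which would be pointless for a
 * kernel-internal caller) can all be done before the first
 * wake_up_process().  The thread then promptly parks itself in
 * rcu_wait() until there is boosting to do.
 */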

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __get_cpu_var(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization("End CPU kthread@rcu_wait");
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization("Start CPU kthread@rcu_yield");
	schedule_timeout_interruptible(2);
	trace_rcu_utilization("End CPU kthread@rcu_yield");
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * The outgoing CPU is excluded from the affinity set; callers pass -1
 * for outgoingcpu if there is no outgoing CPU.  If excluding it leaves
 * no CPUs in the affinity set, this function allows the kthread to
 * execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rnp->qsmaskinit;
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}
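
/*
 * Worked example of the mask walk above: for a leaf with grplo=0,
 * grphi=3, and qsmaskinit=0xb (CPUs 0, 1, and 3 having been online),
 * with outgoingcpu=1, the resulting affinity mask is {0, 3}.  Bit i
 * of qsmaskinit corresponds to CPU grplo+i, hence the mask >>= 1 per
 * iteration.
 */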

static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};
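
/*
 * With this descriptor registered via smpboot_register_percpu_thread(),
 * the smpboot infrastructure creates one "rcuc/N" thread per CPU,
 * invokes ->setup() on the target CPU, calls ->thread_fn() whenever
 * ->thread_should_run() returns true, and parks the thread (calling
 * ->park()) when its CPU goes offline, so no explicit CPU-hotplug
 * notifier is needed for these kthreads.
 */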

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU
 * needs any flavor of RCU.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following four preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
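
/*
 * Concretely: RCU_IDLE_GP_DELAY is 4 jiffies, so 4ms at HZ=1000 or
 * 40ms at HZ=100, while RCU_IDLE_LAZY_GP_DELAY is 6*HZ jiffies, which
 * is six seconds regardless of HZ.  rcu_needs_cpu() below additionally
 * rounds the resulting wakeup times so that timers posted by different
 * CPUs tend to expire on the same jiffy, batching the wakeups.
 */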

extern int tick_nohz_enabled;

/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}

#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return false;
}

#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}

/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 *
 * The delta_jiffies argument is used to store the time when RCU is
 * going to need the CPU again if it still has callbacks.  The reason
 * for this is that rcu_prepare_for_idle() might need to post a timer,
 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
 * the wakeup time for this CPU.  This means that RCU's timer can be
 * delayed until the wakeup time, which defeats the purpose of posting
 * a timer.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	/* Flag a new idle sojourn to the idle-entry state machine. */
	rdtp->idle_first_pass = 1;
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu)) {
		*delta_jiffies = ULONG_MAX;
		return 0;
	}
	if (rdtp->dyntick_holdoff == jiffies) {
		/* RCU recently tried and failed, so don't try again. */
		*delta_jiffies = 1;
		return 1;
	}
	/* Set up for the possibility that RCU will post a timer. */
	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
		*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
					  RCU_IDLE_GP_DELAY) - jiffies;
	} else {
		*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
		*delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
	}
	return 0;
}
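
/*
 * Worked example of the non-lazy arm above: with RCU_IDLE_GP_DELAY = 4
 * and jiffies = 1003, round_up(1003 + 4, 4) = 1008, so *delta_jiffies
 * becomes 5.  Aligning the deadline to a multiple of RCU_IDLE_GP_DELAY
 * rather than always using jiffies + 4 lets dyntick-idle CPUs share
 * wakeup instants instead of waking the system piecemeal.
 */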

/*
 * Handler for smp_call_function_single().  The only point of this
 * handler is to wake the CPU up, so the handler does only tracing.
 */
void rcu_idle_demigrate(void *unused)
{
	trace_rcu_prep_idle("Demigrate");
}

/*
 * Timer handler used to force a CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 *
 * One special case: the timer gets migrated without awakening the CPU
 * on which the timer was scheduled.  In this case, we must wake up
 * that CPU.  We do so with smp_call_function_single().
 */
static void rcu_idle_gp_timer_func(unsigned long cpu_in)
{
	int cpu = (int)cpu_in;

	trace_rcu_prep_idle("Timer");
	if (cpu != smp_processor_id())
		smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
	else
		WARN_ON_ONCE(1); /* Getting here can hang the system... */
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	rdtp->dyntick_holdoff = jiffies - 1;
	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
	rdtp->idle_gp_timer_expires = jiffies - 1;
	rdtp->idle_first_pass = 1;
}

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to ->idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	del_timer(&rdtp->idle_gp_timer);
	trace_rcu_prep_idle("Cleanup after idle");
	rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
}

/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The ->dyntick_drain field controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	struct timer_list *tp;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	int tne;

	/* Handle nohz enablement switches conservatively. */
	tne = ACCESS_ONCE(tick_nohz_enabled);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(cpu))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/*
	 * If this is an idle re-entry, for example, due to use of
	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
	 * loop, then don't take any state-machine actions, unless the
	 * momentary exit from idle queued additional non-lazy callbacks.
	 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
	 * pending.
	 */
	if (!rdtp->idle_first_pass &&
	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
		if (rcu_cpu_has_callbacks(cpu)) {
			tp = &rdtp->idle_gp_timer;
			mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		}
		return;
	}
	rdtp->idle_first_pass = 0;
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		rdtp->dyntick_holdoff = jiffies - 1;
		rdtp->dyntick_drain = 0;
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (rdtp->dyntick_holdoff == jiffies) {
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the ->dyntick_drain sequencing. */
	if (rdtp->dyntick_drain <= 0) {
		/* First time through, initialize the counter. */
		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu) &&
		   !local_softirq_pending()) {
		/* Can we go dyntick-idle despite still having callbacks? */
		rdtp->dyntick_drain = 0;
		rdtp->dyntick_holdoff = jiffies;
		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
			trace_rcu_prep_idle("Dyntick with callbacks");
			rdtp->idle_gp_timer_expires =
				round_up(jiffies + RCU_IDLE_GP_DELAY,
					 RCU_IDLE_GP_DELAY);
		} else {
			rdtp->idle_gp_timer_expires =
				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
			trace_rcu_prep_idle("Dyntick with lazy callbacks");
		}
		tp = &rdtp->idle_gp_timer;
		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
		return; /* Nothing more to do immediately. */
	} else if (--(rdtp->dyntick_drain) <= 0) {
		/* We have hit the limit, so time to give up. */
		rdtp->dyntick_holdoff = jiffies;
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		trace_rcu_prep_idle("Callbacks drained");
	}
}
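
/*
 * Summary of the ->dyntick_drain sequencing above, per idle attempt:
 * the counter starts at RCU_IDLE_FLUSHES (5) and is decremented once
 * per pass through rcu_prepare_for_idle().  Once it has dropped to
 * RCU_IDLE_OPT_FLUSHES (3) or below, a pass that finds no immediate
 * RCU work pending stops here, posts ->idle_gp_timer, and lets the CPU
 * go dyntick-idle; if the counter instead reaches zero, the CPU enters
 * holdoff for the current jiffy and keeps its scheduling-clock tick.
 */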

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}
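
/*
 * For example, idle-loop code that must post a callback would wrap the
 * call so that RCU momentarily considers the CPU non-idle, e.g.
 * (illustrative only; my_head and my_cleanup_cb are hypothetical):
 *
 *	RCU_NONIDLE(call_rcu(&my_head, my_cleanup_cb));
 */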

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	struct timer_list *tltp = &rdtp->idle_gp_timer;

	sprintf(cp, "drain=%d %c timer=%lu",
		rdtp->dyntick_drain,
		rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
		timer_pending(tltp) ? tltp->expires - jiffies : -1);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT "\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       fast_no_hz);
}
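
/*
 * A stall-info line produced above might look like the following
 * (made-up values for illustration):
 *
 *	1: (3 ticks this GP) idle=e01/1/0 drain=0 . timer=4507
 *
 * where "idle=" shows the low 12 bits of ->dynticks followed by the
 * two nesting counters, and the trailing fields come from
 * print_cpu_stall_fast_no_hz().
 */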

/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	printk(KERN_ERR "\t");
}

/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
}

/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	__get_cpu_var(rcu_sched_data).ticks_this_gp++;
	__get_cpu_var(rcu_bh_data).ticks_this_gp++;
#ifdef CONFIG_TREE_PREEMPT_RCU
	__get_cpu_var(rcu_preempt_data).ticks_this_gp++;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT " {");
}

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	printk(KERN_CONT " %d", cpu);
}

static void print_cpu_stall_info_end(void)
{
	printk(KERN_CONT "} ");
}

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
}

static void increment_cpu_stall_ticks(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */