/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */


#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO
	       "Experimental preemptable hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
	rdp->passed_quiesc_completed = rdp->gpnum - 1;
	barrier();
	rdp->passed_quiesc = 1;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	int phase;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = rcu_preempt_state.rda[cpu];
		rnp = rdp->mynode;
		spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
		spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs(cpu);
	local_irq_save(flags);
	t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
	local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	ACCESS_ONCE(current->rcu_read_lock_nesting)++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
		spin_unlock_irqrestore(&rnp->lock, flags);
		return;  /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	unsigned long flags;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		rcu_preempt_qs(smp_processor_id());
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			spin_lock(&rnp->lock);  /* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			spin_unlock(&rnp->lock);  /* irqs remain disabled. */
		}
		empty = !rcu_preempted_readers(rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
		 */
		if (empty)
			spin_unlock_irqrestore(&rnp->lock, flags);
		else
			rcu_report_unblock_qs_rnp(rnp, flags);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
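
/*
 * Illustrative reader-side usage of the primitives above (a sketch only,
 * not part of this file).  The "foo" structure, "gbl_foo" pointer, and
 * reader function are hypothetical; readers normally go through the
 * rcu_read_lock()/rcu_read_unlock() wrappers rather than calling
 * __rcu_read_lock()/__rcu_read_unlock() directly.
 */
#if 0
struct foo {
	int a;
};
static struct foo *gbl_foo;

static int foo_get_a(void)
{
	int retval;

	rcu_read_lock();			/* Enter read-side critical section. */
	retval = rcu_dereference(gbl_foo)->a;	/* Fetch RCU-protected pointer. */
	rcu_read_unlock();			/* Exit; may call rcu_read_unlock_special(). */
	return retval;
}
#endif /* illustrative sketch */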

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	struct list_head *lp;
	int phase;
	struct task_struct *t;

	if (rcu_preempted_readers(rnp)) {
		spin_lock_irqsave(&rnp->lock, flags);
		phase = rnp->gpnum & 0x1;
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
		spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempted_readers(rnp));
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for the case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	int i;
	struct list_head *lp;
	struct list_head *lp_root;
	int retval;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *tp;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}
	WARN_ON_ONCE(rnp != rdp->mynode &&
		     (!list_empty(&rnp->blocked_tasks[0]) ||
		      !list_empty(&rnp->blocked_tasks[1])));

	/*
	 * Move tasks up to root rcu_node.  Rely on the fact that the
	 * root rcu_node can be at most one ahead of the rest of the
	 * rcu_nodes in terms of ->gpnum value.  This fact allows us to
	 * move the blocked_tasks[] array directly, element by element.
	 */
	retval = rcu_preempted_readers(rnp);
	for (i = 0; i < 2; i++) {
		lp = &rnp->blocked_tasks[i];
		lp_root = &rnp_root->blocked_tasks[i];
		while (!list_empty(lp)) {
			tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
			spin_lock(&rnp_root->lock);	/* irqs already disabled */
			list_del(&tp->rcu_node_entry);
			tp->rcu_blocked_node = rnp_root;
			list_add(&tp->rcu_node_entry, lp_root);
			spin_unlock(&rnp_root->lock);	/* irqs remain disabled */
		}
	}
	return retval;
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		rcu_preempt_qs(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);

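/*
 * Illustrative use of call_rcu() (a sketch only, not part of this file).
 * The "bar" structure and the update functions are hypothetical; the point
 * is that the rcu_head is embedded in the protected structure and the
 * callback reclaims it one grace period after the removal.  The caller of
 * bar_remove() is assumed to hold the update-side lock.
 */
#if 0
struct bar {
	struct list_head list;
	int a;
	struct rcu_head rcu;
};

static void bar_reclaim(struct rcu_head *rp)
{
	struct bar *bp = container_of(rp, struct bar, rcu);

	kfree(bp);	/* Invoked after a grace period has elapsed. */
}

static void bar_remove(struct bar *bp)
{
	list_del_rcu(&bp->list);	/* Unlink from the RCU-protected list. */
	call_rcu(&bp->rcu, bar_reclaim);	/* Free after a grace period. */
}
#endif /* illustrative sketch */
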
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	if (!rcu_scheduler_active)
		return;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

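/*
 * Illustrative update-side use of synchronize_rcu() (a sketch only, not
 * part of this file), reusing the hypothetical "struct foo" and "gbl_foo"
 * from the reader-side sketch above; the spinlock and update function are
 * likewise hypothetical.  The writer publishes the new version, waits for
 * a grace period, and only then frees the old version.
 */
#if 0
static DEFINE_SPINLOCK(foo_lock);

static void foo_update_a(int new_a)
{
	struct foo *new_fp;
	struct foo *old_fp;

	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	if (new_fp == NULL)
		return;
	spin_lock(&foo_lock);
	old_fp = gbl_foo;
	*new_fp = *old_fp;
	new_fp->a = new_a;
	rcu_assign_pointer(gbl_foo, new_fp);	/* Publish the new version. */
	spin_unlock(&foo_lock);
	synchronize_rcu();	/* Wait for pre-existing readers to finish. */
	kfree(old_fp);		/* Now safe to free the old version. */
}
#endif /* illustrative sketch */
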
/*
 * Wait for an rcu-preempt grace period.  We are supposed to expedite the
 * grace period, but this is a crude, slow compatibility hack, so just
 * invoke synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

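/*
 * Illustrative use of rcu_barrier() (a sketch only, not part of this
 * file).  The module-exit function and its "bar_stop_new_updates()" helper
 * are hypothetical; the point is that a module posting call_rcu()
 * callbacks must wait for all of them to be invoked before its callback
 * functions can safely be unloaded.
 */
#if 0
static void __exit bar_exit(void)
{
	/* Prevent new call_rcu() callbacks from being posted ... */
	bar_stop_new_updates();
	/* ... then wait for all previously posted callbacks to finish. */
	rcu_barrier();
	/* Now safe to free module data structures and unload. */
}
module_exit(bar_exit);
#endif /* illustrative sketch */
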
/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptable RCU's callbacks to ->orphan_cbs_list.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
	rcu_send_cbs_to_orphanage(&rcu_preempt_state);
}

/*
 * Initialize preemptable RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptable RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

658/*
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700659 * Because preemptable RCU does not exist, it never has any work to do.
660 */
661static int rcu_preempt_pending(int cpu)
662{
663 return 0;
664}
665
666/*
667 * Because preemptable RCU does not exist, it never needs any CPU.
668 */
669static int rcu_preempt_needs_cpu(int cpu)
670{
671 return 0;
672}
673
674/*
Paul E. McKenneye74f4c42009-10-06 21:48:17 -0700675 * Because preemptable RCU does not exist, rcu_barrier() is just
676 * another name for rcu_barrier_sched().
677 */
678void rcu_barrier(void)
679{
680 rcu_barrier_sched();
681}
682EXPORT_SYMBOL_GPL(rcu_barrier);
683
684/*
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700685 * Because preemptable RCU does not exist, there is no per-CPU
686 * data to initialize.
687 */
688static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
689{
690}
691
Paul E. McKenney1eba8f82009-09-23 09:50:42 -0700692/*
Paul E. McKenneye74f4c42009-10-06 21:48:17 -0700693 * Because there is no preemptable RCU, there are no callbacks to move.
694 */
695static void rcu_preempt_send_cbs_to_orphanage(void)
696{
697}
698
699/*
Paul E. McKenney1eba8f82009-09-23 09:50:42 -0700700 * Because preemptable RCU does not exist, it need not be initialized.
701 */
702static void __init __rcu_init_preempt(void)
703{
704}
705
Paul E. McKenneyf41d9112009-08-22 13:56:52 -0700706#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */