/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
        if (rcu_dynticks_nesting) {
                RCU_TRACE(trace_rcu_dyntick("--=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
        if (!idle_cpu(smp_processor_id())) {
                WARN_ON_ONCE(1);        /* must be idle task! */
                RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
        }
        rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting = 0;
        rcu_idle_enter_common(oldval);
        local_irq_restore(flags);
}

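/*
 * A minimal sketch of the intended caller (hypothetical, not from this
 * file): an architecture's idle loop brackets its low-power wait with
 * rcu_idle_enter()/rcu_idle_exit(), where arch_cpu_sleep() stands in
 * for the arch-specific wait-for-interrupt instruction:
 *
 *      while (!need_resched()) {
 *              rcu_idle_enter();
 *              arch_cpu_sleep();
 *              rcu_idle_exit();
 *      }
 */
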
/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting--;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        rcu_idle_enter_common(oldval);
        local_irq_restore(flags);
}

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
        if (oldval) {
                RCU_TRACE(trace_rcu_dyntick("++=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
        if (!idle_cpu(smp_processor_id())) {
                WARN_ON_ONCE(1);        /* must be idle task! */
                RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
        }
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE(oldval != 0);
        rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting++;
        WARN_ON_ONCE(rcu_dynticks_nesting == 0);
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}

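/*
 * A worked example of the nesting counter (values illustrative): the
 * idle task runs with rcu_dynticks_nesting == 0, so RCU treats the CPU
 * as being in an extended quiescent state.  An interrupt's
 * rcu_irq_enter() raises the count to 1 and RCU starts watching; a
 * nested interrupt raises it to 2.  The matching rcu_irq_exit() calls
 * step back down through 1 to 0, at which point rcu_idle_enter_common()
 * records the quiescent state.  Ordinary task context instead runs
 * with the large DYNTICK_TASK_NESTING bias set by rcu_idle_exit().
 */
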
#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
        return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
        return rcu_dynticks_nesting <= 0;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Invoked with irqs disabled to avoid confusion due to interrupt
 * handlers invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
        rcu_preempt_check_callbacks();
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
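/*
 * The callback list is singly linked off ->rcucblist, with ->donetail
 * and ->curtail pointing at ->next fields within it.  Callbacks from
 * the head through the one whose ->next field is ->donetail have
 * waited out a grace period and may be invoked; any beyond that point
 * (through ->curtail) must still wait.  A sketch (layout illustrative,
 * not from the source):
 *
 *      rcucblist -> cb1 -> cb2 -> cb3 -> NULL
 *                             ^          ^
 *                         donetail    curtail
 *
 * Here ->donetail is &cb2->next and ->curtail is &cb3->next, so the
 * splice below hands cb1 and cb2 to the invocation loop and leaves
 * cb3 queued.
 */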
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        char *rn = NULL;
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
                return;
        }

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        RCU_TRACE(rn = rcp->name);
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(rn, list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
        RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
        rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

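/*
 * A usage sketch (hypothetical "gp" pointer, gp_lock, newp, and struct
 * foo; not from this file) of the remove-then-reclaim update pattern
 * that synchronize_sched() supports:
 *
 *      struct foo *old;
 *
 *      spin_lock(&gp_lock);
 *      old = gp;
 *      rcu_assign_pointer(gp, newp);
 *      spin_unlock(&gp_lock);
 *      synchronize_sched();    (wait for pre-existing readers)
 *      kfree(old);             (no reader can now hold a reference)
 */
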
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

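/*
 * A usage sketch (hypothetical struct foo and foo_reclaim(); not from
 * this file): embed a struct rcu_head in the protected structure and
 * free it from the callback once the grace period has elapsed:
 *
 *      struct foo {
 *              int data;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *rhp)
 *      {
 *              kfree(container_of(rhp, struct foo, rcu));
 *      }
 *
 *      call_rcu_sched(&p->rcu, foo_reclaim);
 */
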
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
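
/*
 * Usage follows the same pattern as the call_rcu_sched() sketch above,
 * but pairs with readers in softirq context or under rcu_read_lock_bh().
 */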