/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"
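
/*
 * Nesting depth of irq/task activity on this CPU.  Zero means the CPU
 * is in an extended quiescent state (idle).  DYNTICK_TASK_NESTING
 * (defined outside this file) is a large bias value denoting non-idle
 * task context, so that paired irq entry/exit increments and decrements
 * cannot reach zero except via an explicit rcu_idle_enter().
 */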
static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
        if (rcu_dynticks_nesting) {
                RCU_TRACE(trace_rcu_dyntick("--=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
        if (!idle_cpu(smp_processor_id())) {
                WARN_ON_ONCE(1);        /* must be idle task! */
                RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
        }
        rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting = 0;
        rcu_idle_enter_common(oldval);
        local_irq_restore(flags);
}

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting--;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        rcu_idle_enter_common(oldval);
        local_irq_restore(flags);
}

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
        if (oldval) {
                RCU_TRACE(trace_rcu_dyntick("++=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
        if (!idle_cpu(smp_processor_id())) {
                WARN_ON_ONCE(1);        /* must be idle task! */
                RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
        }
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE(oldval != 0);
        rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
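
/*
 * Illustrative pairing (a sketch, not part of the original file): an
 * architecture's idle loop is expected to bracket its low-power wait
 * with the two calls above, roughly:
 *
 *	while (!need_resched()) {
 *		rcu_idle_enter();
 *		... wait for an interrupt in a low-power state ...
 *		rcu_idle_exit();
 *	}
 */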

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting++;
        WARN_ON_ONCE(rcu_dynticks_nesting == 0);
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
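
/*
 * Illustrative pairing (a sketch, not part of the original file):
 * low-level interrupt entry/exit code calls rcu_irq_enter() before
 * invoking the handler and rcu_irq_exit() afterwards, so that RCU can
 * track an otherwise-idle CPU that is momentarily non-idle.  These
 * calls nest.
 */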

#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
        return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
        return rcu_dynticks_nesting <= 0;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  Callers must
 * have irqs disabled, to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}
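
/*
 * Illustrative layout of the callback list handled above (a sketch,
 * not part of the original file).  With cb1 already ripe and cb2/cb3
 * still waiting for a grace period:
 *
 *	rcucblist -> cb1 -> cb2 -> cb3 -> NULL
 *	               ^-- *donetail  ^-- *curtail
 *
 * ->donetail references the ->next pointer of the last callback whose
 * grace period has elapsed (or &->rcucblist when there is none), and
 * ->curtail references the ->next pointer of the last callback queued.
 * Setting ->donetail = ->curtail at a quiescent state therefore marks
 * every callback queued so far as ready to invoke.
 */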

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
        rcu_preempt_check_callbacks();
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        char *rn = NULL;
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
                return;
        }

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        RCU_TRACE(rn = rcp->name);
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(rn, list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
        RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
        rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
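
/*
 * Typical update-side usage (an illustrative sketch, not part of the
 * original file; "gp", "newp", and "old" are hypothetical names):
 *
 *	old = gp;
 *	rcu_assign_pointer(gp, newp);
 *	synchronize_sched();	(all pre-existing readers are now done)
 *	kfree(old);
 */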

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

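        /*
         * Append at the tail in constant time: the first assignment
         * below links the new callback after the current last element
         * (or makes it the list head when the list is empty), and the
         * second re-aims the tail pointer at the new callback's ->next
         * field.
         */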
        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
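
/*
 * Illustrative usage (a sketch, not part of the original file; struct
 * foo, foo_reclaim(), and fp are hypothetical): embed an rcu_head in
 * the protected structure and recover the enclosing object in the
 * callback.
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		...
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	call_rcu_sched(&fp->rcu, foo_reclaim);
 */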

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);