/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
        printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
        printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
               CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
        printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
        printk(KERN_INFO
               "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
        printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
        printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#ifndef CONFIG_RCU_CPU_STALL_DETECTOR
        printk(KERN_INFO
               "\tRCU-based detection of stalled CPUs is disabled.\n");
#endif
#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
        printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
#endif
#if NUM_RCU_LVL_4 != 0
        printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
        return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
        force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

        rdp->passed_quiesc_completed = rdp->gpnum - 1;
        barrier();
        rdp->passed_quiesc = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the appropriate entry
 * of the blocked_tasks[] array.  The task will dequeue itself when
 * it exits the outermost enclosing RCU read-side critical section.
 * Therefore, the current grace period cannot be permitted to complete
 * until the blocked_tasks[] entry indexed by the low-order bit of
 * rnp->gpnum empties.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
        struct task_struct *t = current;
        unsigned long flags;
        int phase;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        if (t->rcu_read_lock_nesting &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                rdp = rcu_preempt_state.rda[cpu];
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                t->rcu_blocked_node = rnp;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period.  Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long
                 * as that task remains queued, the current grace period
                 * cannot end.
                 *
                 * But first, note that the current CPU must still be
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
                list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.  Again, if we were in an RCU read-side critical
         * section, and if that critical section was blocking the current
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
        rcu_preempt_qs(cpu);
        local_irq_restore(flags);
}

/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        ACCESS_ONCE(current->rcu_read_lock_nesting)++;
        barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

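/*
 * Illustrative reader-side usage, not part of this file: a reader
 * brackets its accesses with rcu_read_lock()/rcu_read_unlock() and
 * fetches RCU-protected pointers via rcu_dereference().  The names
 * "gp" and "do_something_with" below are hypothetical placeholders.
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */
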
/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        int phase = rnp->gpnum & 0x1;

        return !list_empty(&rnp->blocked_tasks[phase]) ||
               !list_empty(&rnp->blocked_tasks[phase + 2]);
}

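/*
 * Layout note (added commentary, not in the original source): the four
 * ->blocked_tasks[] lists are indexed so that [0] and [1] hold tasks
 * blocking normal grace periods, selected by the low-order bit of
 * ->gpnum, while [2] and [3] hold the corresponding tasks blocking the
 * current expedited grace period, hence the "phase + 2" above and the
 * list_splice_init() calls in sync_rcu_preempt_exp_init().
 */
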
/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;
        struct rcu_node *rnp_p;

        if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;  /* Still need more quiescent states! */
        }

        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
                 * Either there is only one rcu_node in the tree,
                 * or tasks were kicked up to root rcu_node due to
                 * CPUs going offline.
                 */
                rcu_report_qs_rsp(&rcu_preempt_state, flags);
                return;
        }

        /* Report up the rest of the hierarchy. */
        mask = rnp->grpmask;
        raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
        raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        int empty_exp;
        unsigned long flags;
        struct rcu_node *rnp;
        int special;

        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit critical section,
         * let it know that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS) {
                rcu_preempt_qs(smp_processor_id());
        }

        /* Hardware IRQ handlers cannot block. */
        if (in_irq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the list it blocked on.  The
                 * task can migrate while we acquire the lock, but at
                 * most one time.  So at most two passes through the loop.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
                        raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                }
                empty = !rcu_preempted_readers(rnp);
                empty_exp = !rcu_preempted_readers_exp(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
                 */
                if (empty)
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                else
                        rcu_report_unblock_qs_rnp(rnp, flags);

                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
                if (!empty_exp && !rcu_preempted_readers_exp(rnp))
                        rcu_report_exp_rnp(&rcu_preempt_state, rnp);
        } else {
                local_irq_restore(flags);
        }
}

/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
        if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
            unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
        WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct list_head *lp;
        int phase;
        struct task_struct *t;

        if (rcu_preempted_readers(rnp)) {
                raw_spin_lock_irqsave(&rnp->lock, flags);
                phase = rnp->gpnum & 0x1;
                lp = &rnp->blocked_tasks[phase];
                list_for_each_entry(t, lp, rcu_node_entry)
                        sched_show_task(t);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
        struct rcu_node *rnp = rcu_get_root(rsp);

        rcu_print_detail_task_stall_rnp(rnp);
        rcu_for_each_leaf_node(rsp, rnp)
                rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
        struct list_head *lp;
        int phase;
        struct task_struct *t;

        if (rcu_preempted_readers(rnp)) {
                phase = rnp->gpnum & 0x1;
                lp = &rnp->blocked_tasks[phase];
                list_for_each_entry(t, lp, rcu_node_entry)
                        printk(" P%d", t->pid);
        }
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rcu_preempted_readers(rnp));
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns non-zero if there were tasks blocking the current RCU grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        int i;
        struct list_head *lp;
        struct list_head *lp_root;
        int retval = 0;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        struct task_struct *tp;

        if (rnp == rnp_root) {
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return 0;  /* Shouldn't happen: at least one CPU online. */
        }
        WARN_ON_ONCE(rnp != rdp->mynode &&
                     (!list_empty(&rnp->blocked_tasks[0]) ||
                      !list_empty(&rnp->blocked_tasks[1]) ||
                      !list_empty(&rnp->blocked_tasks[2]) ||
                      !list_empty(&rnp->blocked_tasks[3])));

        /*
         * Move tasks up to root rcu_node.  Rely on the fact that the
         * root rcu_node can be at most one ahead of the rest of the
         * rcu_nodes in terms of gpnum value.  This fact allows us to
         * move the blocked_tasks[] array directly, element by element.
         */
        if (rcu_preempted_readers(rnp))
                retval |= RCU_OFL_TASKS_NORM_GP;
        if (rcu_preempted_readers_exp(rnp))
                retval |= RCU_OFL_TASKS_EXP_GP;
        for (i = 0; i < 4; i++) {
                lp = &rnp->blocked_tasks[i];
                lp_root = &rnp_root->blocked_tasks[i];
                while (!list_empty(lp)) {
                        tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
                        raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
                        list_del(&tp->rcu_node_entry);
                        tp->rcu_blocked_node = rnp_root;
                        list_add(&tp->rcu_node_entry, lp_root);
                        raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */
                }
        }
        return retval;
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
        __rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                rcu_preempt_qs(cpu);
                return;
        }
        if (per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
        __rcu_process_callbacks(&rcu_preempt_state,
                                &__get_cpu_var(rcu_preempt_data));
}

/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);

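/*
 * Illustrative call_rcu() usage, not part of this file: embed an
 * rcu_head in the protected structure and free it from the callback
 * once a grace period has elapsed.  "struct foo" and foo_reclaim()
 * are hypothetical names used only for this sketch.
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	list_del_rcu(&p->list);
 *	call_rcu(&p->rcu, foo_reclaim);
 */
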
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        if (!rcu_scheduler_active)
                return;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

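/*
 * Illustrative updater-side usage, not part of this file: unlink an
 * element under a lock, wait for pre-existing readers, then free it.
 * "mylock" and "p" are hypothetical names used only for this sketch.
 *
 *	spin_lock(&mylock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&mylock);
 *	synchronize_rcu();	(all pre-existing readers are now done)
 *	kfree(p);
 */
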
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
        return !list_empty(&rnp->blocked_tasks[2]) ||
               !list_empty(&rnp->blocked_tasks[3]);
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return !rcu_preempted_readers_exp(rnp) &&
               ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;
        unsigned long mask;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp))
                        break;
                if (rnp->parent == NULL) {
                        wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
        int must_wait;

        raw_spin_lock(&rnp->lock); /* irqs already disabled */
        list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
        list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
        must_wait = rcu_preempted_readers_exp(rnp);
        raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
        if (!must_wait)
                rcu_report_exp_rnp(rsp, rnp);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blocked_tasks[] lists, move all entries from the first set of
 * ->blocked_tasks[] lists to the second set, and finally wait for this
 * second set to drain.
 */
void synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_preempt_state;
        long snap;
        int trycount = 0;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb(); /* Above access cannot bleed into critical section. */

        /*
         * Acquire lock, falling back to synchronize_rcu() if too many
         * lock-acquisition failures.  Of course, if someone does the
         * expedited grace period for us, just leave.
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (trycount++ < 10)
                        udelay(trycount * num_online_cpus());
                else {
                        synchronize_rcu();
                        return;
                }
                if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                        goto mb_ret; /* Others did our work for us. */
        }
        if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                goto unlock_mb_ret; /* Others did our work for us. */

        /* force all RCU readers onto blocked_tasks[]. */
        synchronize_sched_expedited();

        raw_spin_lock_irqsave(&rsp->onofflock, flags);

        /* Initialize ->expmask for all non-leaf rcu_node structures. */
        rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
                raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                rnp->expmask = rnp->qsmaskinit;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
        }

        /* Snapshot current state of ->blocked_tasks[] lists. */
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init(rsp, rnp);
        if (NUM_RCU_NODES > 1)
                sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

        /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
        rnp = rcu_get_root(rsp);
        wait_event(sync_rcu_preempt_exp_wq,
                   sync_rcu_preempt_exp_done(rnp));

        /* Clean up and exit. */
        smp_mb(); /* ensure expedited GP seen before counter increment. */
        ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
        smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

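/*
 * Added commentary, not in the original source: synchronize_rcu_expedited()
 * trades extra CPU overhead (the synchronize_sched_expedited() call and the
 * mutex polling above) for much lower grace-period latency, so it is best
 * reserved for latency-sensitive slow paths rather than used as a drop-in
 * replacement for synchronize_rcu().
 */
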
/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
        return __rcu_pending(&rcu_preempt_state,
                             &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

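/*
 * Added commentary, not in the original source: rcu_barrier() waits for
 * all previously queued call_rcu() callbacks to be invoked, which is not
 * the same thing as waiting for a grace period the way synchronize_rcu()
 * does.  A typical use is draining callbacks before module unload, for
 * example (foo_reclaim() is the hypothetical callback sketched earlier):
 *
 *	call_rcu(&p->rcu, foo_reclaim);
 *	...
 *	rcu_barrier();		(now safe to unload the callback's module)
 */
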
/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
        rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptable RCU's callbacks to ->orphan_cbs_list.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
        rcu_send_cbs_to_orphanage(&rcu_preempt_state);
}

/*
 * Initialize preemptable RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
        RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0)
                return;
        t->rcu_read_lock_nesting = 1;
        rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
        rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because there is no preemptable RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptable RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
{
        return;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
        return 0;
}

/*
 * Because preemptable RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
        rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptable RCU, there are no callbacks to move.
 */
static void rcu_preempt_send_cbs_to_orphanage(void)
{
}

/*
 * Because preemptable RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

|  | 1004 | #if !defined(CONFIG_RCU_FAST_NO_HZ) | 
|  | 1005 |  | 
|  | 1006 | /* | 
|  | 1007 | * Check to see if any future RCU-related work will need to be done | 
|  | 1008 | * by the current CPU, even if none need be done immediately, returning | 
|  | 1009 | * 1 if so.  This function is part of the RCU implementation; it is -not- | 
|  | 1010 | * an exported member of the RCU API. | 
|  | 1011 | * | 
|  | 1012 | * Because CONFIG_RCU_FAST_NO_HZ is not set, just check whether this CPU | 
|  | 1013 | * needs any flavor of RCU.  Do not chew up lots of CPU cycles with preemption | 
|  | 1014 | * disabled in a most-likely vain attempt to cause RCU not to need this CPU. | 
|  | 1015 | */ | 
|  | 1016 | int rcu_needs_cpu(int cpu) | 
|  | 1017 | { | 
|  | 1018 | return rcu_needs_cpu_quick_check(cpu); | 
|  | 1019 | } | 
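|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative sketch, not part of this file: the dyntick-idle (nohz) | 
|  |  | * entry path consults rcu_needs_cpu() before stopping the scheduling-clock | 
|  |  | * tick, and keeps the tick running while RCU still has work for this CPU. | 
|  |  | * cpu_may_stop_tick() is a hypothetical, simplified stand-in for that | 
|  |  | * decision; the real nohz code checks other conditions as well. | 
|  |  | */ | 
|  |  | static int cpu_may_stop_tick(int cpu) | 
|  |  | { | 
|  |  | if (rcu_needs_cpu(cpu)) | 
|  |  | return 0; /* RCU still needs this CPU, so keep the tick running. */ | 
|  |  | return 1; | 
|  |  | } | 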
|  | 1020 |  | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1021 | /* | 
|  | 1022 | * Check to see if we need to continue a callback-flush operation to | 
|  | 1023 | * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle | 
|  | 1024 | * entry is not configured, so we never need to. | 
|  | 1025 | */ | 
|  | 1026 | static void rcu_needs_cpu_flush(void) | 
|  | 1027 | { | 
|  | 1028 | } | 
|  | 1029 |  | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1030 | #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */ | 
|  | 1031 |  | 
|  | 1032 | #define RCU_NEEDS_CPU_FLUSHES 5 | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1033 | static DEFINE_PER_CPU(int, rcu_dyntick_drain); | 
| Paul E. McKenney | 71da813 | 2010-02-26 16:38:58 -0800 | [diff] [blame] | 1034 | static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff); | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1035 |  | 
|  | 1036 | /* | 
|  | 1037 | * Check to see if any future RCU-related work will need to be done | 
|  | 1038 | * by the current CPU, even if none need be done immediately, returning | 
|  | 1039 | * 1 if so.  This function is part of the RCU implementation; it is -not- | 
|  | 1040 | * an exported member of the RCU API. | 
|  | 1041 | * | 
|  | 1042 | * Because we are not supporting preemptible RCU, attempt to accelerate | 
|  | 1043 | * any current grace periods so that RCU no longer needs this CPU, but | 
|  | 1044 | * only if all other CPUs are already in dynticks-idle mode.  This will | 
|  | 1045 | * allow the CPU cores to be powered down immediately, as opposed to after | 
|  | 1046 | * waiting many milliseconds for grace periods to elapse. | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1047 | * | 
|  | 1048 | * Because it is not legal to invoke rcu_process_callbacks() with irqs | 
|  | 1049 | * disabled, we do one pass of force_quiescent_state(), then do a | 
|  | 1050 | * raise_softirq() to cause rcu_process_callbacks() to be invoked later. | 
|  | 1051 | * The per-cpu rcu_dyntick_drain variable controls the sequencing. | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1052 | */ | 
|  | 1053 | int rcu_needs_cpu(int cpu) | 
|  | 1054 | { | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1055 | int c = 0; | 
| Paul E. McKenney | 77e38ed | 2010-04-25 21:04:29 -0700 | [diff] [blame] | 1056 | int snap; | 
|  | 1057 | int snap_nmi; | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1058 | int thatcpu; | 
|  | 1059 |  | 
| Paul E. McKenney | 622ea68 | 2010-02-27 14:53:07 -0800 | [diff] [blame] | 1060 | /* Check for being in the holdoff period. */ | 
|  | 1061 | if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) | 
|  | 1062 | return rcu_needs_cpu_quick_check(cpu); | 
|  | 1063 |  | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1064 | /* Don't bother unless we are the last non-dyntick-idle CPU. */ | 
| Paul E. McKenney | 77e38ed | 2010-04-25 21:04:29 -0700 | [diff] [blame] | 1065 | for_each_online_cpu(thatcpu) { | 
|  | 1066 | if (thatcpu == cpu) | 
|  | 1067 | continue; | 
| Paul E. McKenney | d822ed1 | 2010-05-08 19:58:22 -0700 | [diff] [blame] | 1068 | snap = per_cpu(rcu_dynticks, thatcpu).dynticks; | 
|  | 1069 | snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi; | 
| Paul E. McKenney | 77e38ed | 2010-04-25 21:04:29 -0700 | [diff] [blame] | 1070 | smp_mb(); /* Order sampling of snap with end of grace period. */ | 
|  | 1071 | if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) { | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1072 | per_cpu(rcu_dyntick_drain, cpu) = 0; | 
| Paul E. McKenney | 71da813 | 2010-02-26 16:38:58 -0800 | [diff] [blame] | 1073 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1074 | return rcu_needs_cpu_quick_check(cpu); | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1075 | } | 
| Paul E. McKenney | 77e38ed | 2010-04-25 21:04:29 -0700 | [diff] [blame] | 1076 | } | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1077 |  | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1078 | /* Check and update the rcu_dyntick_drain sequencing. */ | 
|  | 1079 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 
|  | 1080 | /* First time through, initialize the counter. */ | 
|  | 1081 | per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES; | 
|  | 1082 | } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { | 
|  | 1083 | /* We have hit the limit, so time to give up. */ | 
| Paul E. McKenney | 71da813 | 2010-02-26 16:38:58 -0800 | [diff] [blame] | 1084 | per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1085 | return rcu_needs_cpu_quick_check(cpu); | 
|  | 1086 | } | 
|  | 1087 |  | 
|  | 1088 | /* Do one step pushing remaining RCU callbacks through. */ | 
|  | 1089 | if (per_cpu(rcu_sched_data, cpu).nxtlist) { | 
|  | 1090 | rcu_sched_qs(cpu); | 
|  | 1091 | force_quiescent_state(&rcu_sched_state, 0); | 
|  | 1092 | c = c || per_cpu(rcu_sched_data, cpu).nxtlist; | 
|  | 1093 | } | 
|  | 1094 | if (per_cpu(rcu_bh_data, cpu).nxtlist) { | 
|  | 1095 | rcu_bh_qs(cpu); | 
|  | 1096 | force_quiescent_state(&rcu_bh_state, 0); | 
|  | 1097 | c = c || per_cpu(rcu_bh_data, cpu).nxtlist; | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1098 | } | 
|  | 1099 |  | 
|  | 1100 | /* If RCU callbacks are still pending, RCU still needs this CPU. */ | 
| Paul E. McKenney | 622ea68 | 2010-02-27 14:53:07 -0800 | [diff] [blame] | 1101 | if (c) | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1102 | raise_softirq(RCU_SOFTIRQ); | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1103 | return c; | 
|  | 1104 | } | 
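|  |  |  | 
|  |  | /* | 
|  |  | * Illustrative summary, not part of this file, of the sequencing above: | 
|  |  | * the first call initializes rcu_dyntick_drain to RCU_NEEDS_CPU_FLUSHES; | 
|  |  | * each call then does one rcu_*_qs()/force_quiescent_state() pass for | 
|  |  | * every flavor that still has queued callbacks, raising RCU_SOFTIRQ if | 
|  |  | * callbacks remain; once the counter reaches zero, rcu_dyntick_holdoff | 
|  |  | * is set to the current jiffy and rcu_needs_cpu_quick_check() is used | 
|  |  | * for the rest of that jiffy, so this CPU does not spin on the decision. | 
|  |  | */ | 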
|  | 1105 |  | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1106 | /* | 
|  | 1107 | * Check to see if we need to continue a callback-flush operation to | 
|  | 1108 | * allow the last CPU to enter dyntick-idle mode. | 
|  | 1109 | */ | 
|  | 1110 | static void rcu_needs_cpu_flush(void) | 
|  | 1111 | { | 
|  | 1112 | int cpu = smp_processor_id(); | 
| Paul E. McKenney | 71da813 | 2010-02-26 16:38:58 -0800 | [diff] [blame] | 1113 | unsigned long flags; | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1114 |  | 
|  | 1115 | if (per_cpu(rcu_dyntick_drain, cpu) <= 0) | 
|  | 1116 | return; | 
| Paul E. McKenney | 71da813 | 2010-02-26 16:38:58 -0800 | [diff] [blame] | 1117 | local_irq_save(flags); | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1118 | (void)rcu_needs_cpu(cpu); | 
| Paul E. McKenney | 71da813 | 2010-02-26 16:38:58 -0800 | [diff] [blame] | 1119 | local_irq_restore(flags); | 
| Paul E. McKenney | a47cd88 | 2010-02-26 16:38:56 -0800 | [diff] [blame] | 1120 | } | 
|  | 1121 |  | 
| Paul E. McKenney | 8bd93a2 | 2010-02-22 17:04:59 -0800 | [diff] [blame] | 1122 | #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ |