/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};
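
/*
 * Illustrative example (struct foo is hypothetical, not part of this
 * header): an RCU-protected structure typically embeds an rcu_head so
 * that it can be reclaimed after a grace period:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 */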

/* Internal to kernel, but needed by rcupreempt.h. */
extern int rcu_scheduler_active;

#if defined(CONFIG_CLASSIC_RCU)
#include <linux/rcuclassic.h>
#elif defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_PREEMPT_RCU)
#include <linux/rcupreempt.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock() __rcu_read_lock()
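
/*
 * Illustrative example ("gp" and do_something_with() are hypothetical):
 * a typical RCU reader looks like this:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */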

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */
#define rcu_read_unlock() __rcu_read_unlock()

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs. Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh() __rcu_read_lock_bh()

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
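
/*
 * Illustrative example ("gp" and handle_pkt() are hypothetical): a
 * process-context reader protecting data updated via call_rcu_bh():
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		handle_pkt(p);
 *	rcu_read_unlock_bh();
 */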

/**
 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
 *
 * Should be used with either
 * - synchronize_sched()
 * or
 * - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
 */
#define rcu_read_lock_sched() preempt_disable()
#define rcu_read_lock_sched_notrace() preempt_disable_notrace()

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
#define rcu_read_unlock_sched() preempt_enable()
#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()

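/*
 * Illustrative updater sketch pairing with rcu_read_lock_sched()
 * readers ("gp", "old", and "new" are hypothetical).  The
 * synchronize_sched() call waits until all preempt-disabled readers
 * have finished, after which the old structure may be freed:
 *
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	synchronize_sched();
 *	kfree(old);
 */
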
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)     ({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})
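
/*
 * Illustrative publish sketch (struct foo, foo_init(), and "gp" are
 * hypothetical): the structure is fully initialized before the pointer
 * to it is published, so readers picking up the new "gp" via
 * rcu_dereference() are guaranteed to see the initialized contents:
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (p != NULL) {
 *		foo_init(p);
 *		rcu_assign_pointer(gp, p);
 *	}
 */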

/* Infrastructure to implement the synchronize_() primitives. */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);
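
/*
 * Illustrative sketch of how an RCU implementation can build a
 * synchronize_() primitive from this infrastructure (the actual
 * implementations live in the RCU flavor source files): queue a
 * callback that completes a completion, then wait for it:
 *
 *	struct rcu_synchronize rcu;
 *
 *	init_completion(&rcu.completion);
 *	call_rcu(&rcu.head, wakeme_after_rcu);
 *	wait_for_completion(&rcu.completion);
 */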

/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() __synchronize_sched()

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
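
/*
 * Illustrative deferred-free sketch (struct foo with an embedded
 * rcu_head, remove_foo(), and foo_reclaim() are hypothetical): unlink
 * the structure so no new readers can find it, then let call_rcu()
 * free it once all pre-existing readers are done:
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *		kfree(fp);
 *	}
 *
 *	remove_foo(p);
 *	call_rcu(&p->rcu, foo_reclaim);
 */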

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context,
 *  OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 *  These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));

/* Exported common interfaces */
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
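
/*
 * Note (illustrative; foo_exit() is hypothetical): the rcu_barrier*()
 * primitives wait until all callbacks already queued by the
 * corresponding call_rcu*() variant have been invoked, so code that
 * posts callbacks, such as an unloadable module, typically stops
 * posting new callbacks and then calls the matching barrier before
 * its callback functions can go away:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		rcu_barrier();
 *	}
 */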

/* Internal to kernel */
extern void rcu_init(void);
extern void rcu_scheduler_starting(void);
extern int rcu_needs_cpu(int cpu);

#endif /* __LINUX_RCUPDATE_H */