/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/*
 * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */
#define MAX_RCU_LVLS 4
#if CONFIG_RCU_FANOUT > 16
#define RCU_FANOUT_LEAF       16
#else /* #if CONFIG_RCU_FANOUT > 16 */
#define RCU_FANOUT_LEAF       (CONFIG_RCU_FANOUT)
#endif /* #else #if CONFIG_RCU_FANOUT > 16 */
#define RCU_FANOUT_1	      (RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	      (RCU_FANOUT_1 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_3	      (RCU_FANOUT_2 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_4	      (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
#  define NUM_RCU_LVLS	      1
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      (NR_CPUS)
#  define NUM_RCU_LVL_2	      0
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_2
#  define NUM_RCU_LVLS	      2
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_2	      (NR_CPUS)
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_3
#  define NUM_RCU_LVLS	      3
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_3	      (NR_CPUS)
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_4
#  define NUM_RCU_LVLS	      4
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_4	      (NR_CPUS)
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
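
/*
 * Worked example (illustrative only, not used by the code): with
 * CONFIG_RCU_FANOUT=16 and NR_CPUS=4096, RCU_FANOUT_1=16,
 * RCU_FANOUT_2=256, and RCU_FANOUT_3=4096, so the NR_CPUS <=
 * RCU_FANOUT_3 branch applies.  This yields NUM_RCU_LVLS=3,
 * NUM_RCU_LVL_1=DIV_ROUND_UP(4096, 256)=16, and
 * NUM_RCU_LVL_2=DIV_ROUND_UP(4096, 16)=256, so that
 * NUM_RCU_NODES = (1 + 16 + 256 + 4096) - 4096 = 273 rcu_node
 * structures suffice for a 4096-CPU system.
 */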

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting; /* Track irq/process nesting level. */
				    /* Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
	atomic_t dynticks;	    /* Even value for idle, else odd. */
};
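
/*
 * Idle-detection sketch (an illustrative assumption, not a helper that
 * this file defines): because ->dynticks is incremented on each
 * transition between dyntick-idle and non-idle states, an even value
 * means that the CPU is currently idle from RCU's viewpoint:
 *
 *	static inline bool example_dynticks_idle(struct rcu_dynticks *rdtp)
 *	{
 *		return !(atomic_read(&rdtp->dynticks) & 0x1);
 *	}
 */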

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
				/*  rcu_state fields as well as following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's ->completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long expmask;	/* Groups that have ->blkd_tasks */
				/*  elements that need to drain to allow the */
				/*  current expedited grace period to */
				/*  complete (only for TREE_PREEMPT_RCU). */
	atomic_t wakemask;	/* CPUs whose kthread needs to be awakened. */
				/*  Since this has meaning only for leaf */
				/*  rcu_node structures, 32 bits suffices. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask & expmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct task_struct *node_kthread_task;
				/* kthread that takes care of this rcu_node */
				/*  structure, for example, awakening the */
				/*  per-CPU kthreads as needed. */
	unsigned int node_kthread_status;
				/* State of node_kthread_task for tracing. */
} ____cacheline_internodealigned_in_smp;

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
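
/*
 * Usage sketch (illustrative only): count how many CPUs are still
 * holding up the current grace period by scanning the leaves, which
 * is where qsmask bits correspond to individual rcu_data structures:
 *
 *	struct rcu_node *rnp;
 *	int nholdouts = 0;
 *
 *	rcu_for_each_leaf_node(rsp, rnp)
 *		nholdouts += hweight_long(rnp->qsmask);
 *
 * A real scan would of course need to hold each rcu_node's ->lock.
 */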

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	passed_quiesce_gpnum;
					/* gpnum at time of quiescent state. */
	bool		passed_quiesce;	/* User-mode/idle loop etc. */
	bool		qs_pending;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		preemptible;	/* Preemptible RCU? */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	long		qlen;		/* # of queued callbacks */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long   n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
	unsigned long   n_cbs_adopted;  /* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long resched_ipi;	/* Sent a resched IPI. */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_qs_pending;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_need_fqs;
	unsigned long n_rp_need_nothing;

	int cpu;
	struct rcu_state *rsp;
};
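
/*
 * Invariant sketch (illustrative, following directly from the
 * partitioning described above; not a helper this file defines):
 * when the callback list is empty, ->nxtlist is NULL and every
 * ->nxttail element points at ->nxtlist itself:
 *
 *	static inline bool example_cblist_empty(struct rcu_data *rdp)
 *	{
 *		return rdp->nxtlist == NULL &&
 *		       rdp->nxttail[RCU_DONE_TAIL] == &rdp->nxtlist &&
 *		       rdp->nxttail[RCU_NEXT_TAIL] == &rdp->nxtlist;
 *	}
 */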

/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK

#define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	       0
#endif

#define RCU_SECONDS_TILL_STALL_CHECK   (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \
					RCU_STALL_DELAY_DELTA)
						/* for rsp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30)
						/* for rsp->jiffies_stall */
#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
						/*  to take at least one */
						/*  scheduling clock irq */
						/*  before ratting on them. */
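
/*
 * Worked example (illustrative, assuming CONFIG_RCU_CPU_STALL_TIMEOUT=60,
 * HZ=1000, and CONFIG_PROVE_RCU=y): the first stall check fires
 * 60 * 1000 + 5 * 1000 = 65000 jiffies (65 seconds) after the grace
 * period starts, with rechecks every 3 * 65000 + 30 = 195030 jiffies
 * thereafter.
 */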

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
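
/*
 * Usage sketch (illustrative only, borrowing the rcu_cpu_has_work
 * per-CPU flag declared later in this file): a kthread can block
 * until another context sets the flag and calls wake_up_process():
 *
 *	rcu_wait(per_cpu(rcu_cpu_has_work, cpu) != 0);
 *
 * The condition is re-evaluated after every wakeup, so spurious
 * wakeups are harmless.
 */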

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
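/*
 * Layout sketch (illustrative, continuing the 4096-CPU example above):
 * with NUM_RCU_NODES=273, ->level[0] points to node[0] (the root),
 * ->level[1] to node[1] (the first of 16 second-level nodes), and
 * ->level[2] to node[17] (the first of 256 leaves).  The root is thus
 * always &rsp->node[0].
 */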
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	fqs_state ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8	fqs_active;			/* force_quiescent_state() */
						/*  is running. */
	u8	fqs_need_gp;			/* A CPU was prevented from */
						/*  starting a new grace */
						/*  period because */
						/*  force_quiescent_state() */
						/*  was running. */
	u8	boost;				/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t onofflock;		/* exclude on/offline and */
						/*  starting new GP. */
	raw_spinlock_t fqslock;			/* Only one task forcing */
						/*  quiescent states. */
	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	char *name;				/* Name of structure. */
};

/* Return values for rcu_preempt_offline_tasks(). */

#define RCU_OFL_TASKS_NORM_GP	0x1		/* Tasks blocking normal */
						/*  GP were moved to root. */
#define RCU_OFL_TASKS_EXP_GP	0x2		/* Tasks blocking expedited */
						/*  GP were moved to root. */

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;
DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_TREE_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
				      unsigned long flags);
static void rcu_stop_cpu_kthread(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_stall_reset(void);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp);
static void rcu_preempt_offline_cpu(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_needs_cpu(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_send_cbs_to_online(void);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm);
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index);
static void invoke_rcu_node_kthread(struct rcu_node *rnp);
static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
static void __cpuinit rcu_prepare_kthreads(int cpu);
static void rcu_prepare_for_idle_init(int cpu);
static void rcu_cleanup_after_idle(int cpu);
static void rcu_prepare_for_idle(int cpu);

#endif /* #ifndef RCU_TREE_NONCORE */