/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */
#define MAX_RCU_LVLS 4
#define RCU_FANOUT_1	      (CONFIG_RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	      (RCU_FANOUT_1 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_3	      (RCU_FANOUT_2 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_4	      (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
#  define NUM_RCU_LVLS	      1
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      (NR_CPUS)
#  define NUM_RCU_LVL_2	      0
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_2
#  define NUM_RCU_LVLS	      2
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_2	      (NR_CPUS)
#  define NUM_RCU_LVL_3	      0
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_3
#  define NUM_RCU_LVLS	      3
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_3	      (NR_CPUS)
#  define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_4
#  define NUM_RCU_LVLS	      4
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
#  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
#  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
#  define NUM_RCU_LVL_4	      (NR_CPUS)
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
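
/*
 * Worked example (illustrative values only; the Kconfig defaults on a
 * given system may differ): with NR_CPUS = 4096, CONFIG_RCU_FANOUT_LEAF
 * = 16, and CONFIG_RCU_FANOUT = 64, RCU_FANOUT_1 = 16, RCU_FANOUT_2 =
 * 1024, and RCU_FANOUT_3 = 65536, so the second #elif above selects a
 * three-level tree:
 *
 *	NUM_RCU_LVL_0 = 1				(the root)
 *	NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 1024) = 4
 *	NUM_RCU_LVL_2 = DIV_ROUND_UP(4096, 16) = 256	(the leaves)
 *	NUM_RCU_LVL_3 = 4096				(the CPUs themselves)
 *
 * RCU_SUM = 1 + 4 + 256 + 4096 = 4357, and because the CPUs are not
 * themselves rcu_node structures, NUM_RCU_NODES = 4357 - 4096 = 261.
 */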

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting; /* Track irq/process nesting level. */
				    /* Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
	atomic_t dynticks;	    /* Even value for idle, else odd. */
#ifdef CONFIG_RCU_FAST_NO_HZ
	int dyntick_drain;	    /* Prepare-for-idle state variable. */
	unsigned long dyntick_holdoff;
				    /* No retries for the jiffy of failure. */
	struct timer_list idle_gp_timer;
				    /* Wake up CPU sleeping with callbacks. */
	unsigned long idle_gp_timer_expires;
				    /* When to wake up CPU (for repost). */
	bool idle_first_pass;	    /* First pass of attempt to go idle? */
	unsigned long nonlazy_posted;
				    /* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
				    /* idle-period nonlazy_posted snapshot. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
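
/*
 * Illustrative sketch (not part of this header's API): the even/odd
 * encoding of ->dynticks above means that idleness can be sampled
 * remotely with a single atomic read, for example:
 *
 *	int snap = atomic_add_return(0, &rdtp->dynticks);
 *	bool idle = !(snap & 0x1);	(even counter means dyntick-idle)
 *
 * A later re-read that differs from "snap" proves that the CPU passed
 * through the idle loop, and hence through a quiescent state.
 */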

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
				/*  rcu_state fields as well as following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long expmask;	/* Groups that have ->blkd_tasks */
				/*  elements that need to drain to allow the */
				/*  current expedited grace period to */
				/*  complete (only for TREE_PREEMPT_RCU). */
	atomic_t wakemask;	/* CPUs whose kthread needs to be awakened. */
				/*  Since this has meaning only for leaf */
				/*  rcu_node structures, 32 bits suffices. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask & expmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct task_struct *node_kthread_task;
				/* kthread that takes care of this rcu_node */
				/*  structure, for example, awakening the */
				/*  per-CPU kthreads as needed. */
	unsigned int node_kthread_status;
				/* State of node_kthread_task for tracing. */
} ____cacheline_internodealigned_in_smp;

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
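
/*
 * Usage sketch (hypothetical helper, not declared elsewhere in this
 * file): the traversal macros above expand to plain for-loops over the
 * dense ->node[] array, so a debug scan of per-leaf quiescent-state
 * masks could be written as:
 *
 *	static void rcu_dump_leaf_qsmasks(struct rcu_state *rsp)
 *	{
 *		struct rcu_node *rnp;
 *
 *		rcu_for_each_leaf_node(rsp, rnp)
 *			pr_info("CPUs %d-%d: qsmask %#lx\n",
 *				rnp->grplo, rnp->grphi, rnp->qsmask);
 *	}
 *
 * Real callers must of course hold the relevant rcu_node ->lock before
 * trusting ->qsmask.
 */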
|  | 229 |  | 
| Paul E. McKenney | 9f77da9 | 2009-08-22 13:56:45 -0700 | [diff] [blame] | 230 | /* Index values for nxttail array in struct rcu_data. */ | 
|  | 231 | #define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */ | 
|  | 232 | #define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */ | 
|  | 233 | #define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */ | 
|  | 234 | #define RCU_NEXT_TAIL		3 | 
|  | 235 | #define RCU_NEXT_SIZE		4 | 
|  | 236 |  | 
|  | 237 | /* Per-CPU data for read-copy update. */ | 
|  | 238 | struct rcu_data { | 
|  | 239 | /* 1) quiescent-state and grace-period handling : */ | 
| Paul E. McKenney | 20133cf | 2010-02-22 17:05:01 -0800 | [diff] [blame] | 240 | unsigned long	completed;	/* Track rsp->completed gp number */ | 
| Paul E. McKenney | 9f77da9 | 2009-08-22 13:56:45 -0700 | [diff] [blame] | 241 | /*  in order to detect GP end. */ | 
| Paul E. McKenney | 20133cf | 2010-02-22 17:05:01 -0800 | [diff] [blame] | 242 | unsigned long	gpnum;		/* Highest gp number that this CPU */ | 
| Paul E. McKenney | 9f77da9 | 2009-08-22 13:56:45 -0700 | [diff] [blame] | 243 | /*  is aware of having started. */ | 
| Paul E. McKenney | e4cc1f2 | 2011-06-27 00:17:43 -0700 | [diff] [blame] | 244 | unsigned long	passed_quiesce_gpnum; | 
|  | 245 | /* gpnum at time of quiescent state. */ | 
|  | 246 | bool		passed_quiesce;	/* User-mode/idle loop etc. */ | 
	bool		qs_pending;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		preemptible;	/* Preemptible RCU? */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries whose batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries whose batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long   n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
	unsigned long   n_cbs_adopted;  /* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_qs_pending;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_need_fqs;
	unsigned long n_rp_need_nothing;

	int cpu;
	struct rcu_state *rsp;
};
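
/*
 * Illustrative sketch of the ->nxtlist/->nxttail invariants described
 * above (hypothetical helpers, not used elsewhere in this file): an
 * empty callback list has every tail pointer aimed at ->nxtlist itself,
 * so emptiness checks reduce to simple pointer comparisons:
 *
 *	static bool rcu_cbs_empty(struct rcu_data *rdp)
 *	{
 *		return rdp->nxtlist == NULL;
 *	}
 *
 *	static bool rcu_done_segment_empty(struct rcu_data *rdp)
 *	{
 *		return rdp->nxttail[RCU_DONE_TAIL] == &rdp->nxtlist;
 *	}
 */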

/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK

#define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	       0
#endif
#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
						/*  to take at least one */
						/*  scheduling clock irq */
						/*  before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
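
/*
 * Usage sketch for rcu_wait() (hypothetical condition, shown only to
 * illustrate the macro): a kthread can sleep until some flag is set,
 * with the condition re-evaluated after every wakeup:
 *
 *	rcu_wait(some_flag != 0);
 *
 * The macro sets TASK_INTERRUPTIBLE before testing "cond", so that a
 * wakeup arriving between the test and the call to schedule() is not
 * lost.
 */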

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
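/*
 * Continuing the worked example from the top of this file (4096 CPUs,
 * three levels, illustrative values only): ->level[0] points at the
 * root in ->node[0], ->level[1] at ->node[1] (the four second-level
 * nodes occupy ->node[1..4]), and ->level[2] at ->node[5], the first
 * of the 256 leaves in ->node[5..260].
 */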
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
	struct rcu_data __percpu *rda;		/* Pointer to per-CPU rcu_data. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	fqs_state ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8	fqs_active;			/* force_quiescent_state() */
						/*  is running. */
	u8	fqs_need_gp;			/* A CPU was prevented from */
						/*  starting a new grace */
						/*  period because */
						/*  force_quiescent_state() */
						/*  was running. */
	u8	boost;				/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t onofflock;		/* exclude on/offline and */
						/*  starting new GP. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	struct task_struct *rcu_barrier_in_progress;
						/* Task doing rcu_barrier(), */
						/*  or NULL if no barrier. */
	raw_spinlock_t fqslock;			/* Only one task forcing */
						/*  quiescent states. */
	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	char *name;				/* Name of structure. */
};

/* Return values for rcu_preempt_offline_tasks(). */

#define RCU_OFL_TASKS_NORM_GP	0x1		/* Tasks blocking normal */
						/*  GP were moved to root. */
#define RCU_OFL_TASKS_EXP_GP	0x2		/* Tasks blocking expedited */
						/*  GP were moved to root. */

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;
DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_TREE_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
				      unsigned long flags);
static void rcu_stop_cpu_kthread(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_stall_reset(void);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_cleanup_dead_cpu(int cpu);
static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_cpu_has_callbacks(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_cleanup_dying_cpu(void);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm);
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index);
static void invoke_rcu_node_kthread(struct rcu_node *rnp);
static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
static void __cpuinit rcu_prepare_kthreads(int cpu);
static void rcu_prepare_for_idle_init(int cpu);
static void rcu_cleanup_after_idle(int cpu);
static void rcu_prepare_for_idle(int cpu);
static void rcu_idle_count_callbacks_posted(void);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);

#endif /* #ifndef RCU_TREE_NONCORE */