| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _LINUX_CPUSET_H | 
 | 2 | #define _LINUX_CPUSET_H | 
 | 3 | /* | 
 | 4 |  *  cpuset interface | 
 | 5 |  * | 
 | 6 |  *  Copyright (C) 2003 BULL SA | 
| Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 7 |  *  Copyright (C) 2004-2006 Silicon Graphics, Inc. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8 |  * | 
 | 9 |  */ | 
 | 10 |  | 
 | 11 | #include <linux/sched.h> | 
 | 12 | #include <linux/cpumask.h> | 
 | 13 | #include <linux/nodemask.h> | 
| Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 14 | #include <linux/cgroup.h> | 
| David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 15 | #include <linux/mm.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 16 |  | 
 | 17 | #ifdef CONFIG_CPUSETS | 
 | 18 |  | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 19 | extern int number_of_cpusets;	/* How many cpusets are defined in system? */ | 
 | 20 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | extern int cpuset_init(void); | 
 | 22 | extern void cpuset_init_smp(void); | 
| Tejun Heo | 3a101d0 | 2010-06-08 21:40:36 +0200 | [diff] [blame] | 23 | extern void cpuset_update_active_cpus(void); | 
| Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 24 | extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); | 
| Oleg Nesterov | 9084bb8 | 2010-03-15 10:10:27 +0100 | [diff] [blame] | 25 | extern int cpuset_cpus_allowed_fallback(struct task_struct *p); | 
| Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 26 | extern nodemask_t cpuset_mems_allowed(struct task_struct *p); | 
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 27 | #define cpuset_current_mems_allowed (current->mems_allowed) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 28 | void cpuset_init_current_mems_allowed(void); | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 29 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 30 |  | 
| David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 31 | extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); | 
 | 32 | extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); | 
| Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 33 |  | 
| David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 34 | static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 35 | { | 
| Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 36 | 	return number_of_cpusets <= 1 || | 
| David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 37 | 		__cpuset_node_allowed_softwall(node, gfp_mask); | 
| Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 38 | } | 
 | 39 |  | 
| David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 40 | static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) | 
| Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 41 | { | 
 | 42 | 	return number_of_cpusets <= 1 || | 
| David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 43 | 		__cpuset_node_allowed_hardwall(node, gfp_mask); | 
 | 44 | } | 
 | 45 |  | 
 | 46 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | 
 | 47 | { | 
 | 48 | 	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); | 
 | 49 | } | 
 | 50 |  | 
 | 51 | static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | 
 | 52 | { | 
 | 53 | 	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 54 | } | 
 | 55 |  | 
| David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 56 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | 
 | 57 | 					  const struct task_struct *tsk2); | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 58 |  | 
/*
 * Call __cpuset_memory_pressure_bump() only when memory pressure
 * accounting has been enabled; the common (disabled) case is just a
 * flag test with no function call.
 */
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
 | 64 | extern int cpuset_memory_pressure_enabled; | 
 | 65 | extern void __cpuset_memory_pressure_bump(void); | 
 | 66 |  | 
| Arjan van de Ven | 5404732 | 2007-02-12 00:55:28 -0800 | [diff] [blame] | 67 | extern const struct file_operations proc_cpuset_operations; | 
| Eric W. Biederman | df5f831 | 2008-02-08 04:18:33 -0800 | [diff] [blame] | 68 | struct seq_file; | 
 | 69 | extern void cpuset_task_status_allowed(struct seq_file *m, | 
 | 70 | 					struct task_struct *task); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 71 |  | 
| Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 72 | extern int cpuset_mem_spread_node(void); | 
| Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 73 | extern int cpuset_slab_spread_node(void); | 
| Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 74 |  | 
/*
 * Non-zero if PF_SPREAD_PAGE is set in current->flags, i.e. page cache
 * allocations for this task should be spread (see cpuset_mem_spread_node()).
 */
static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}
 | 79 |  | 
/*
 * Non-zero if PF_SPREAD_SLAB is set in current->flags, i.e. slab
 * allocations for this task should be spread (see cpuset_slab_spread_node()).
 */
static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}
 | 84 |  | 
| Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 85 | extern int current_cpuset_is_being_rebound(void); | 
 | 86 |  | 
| Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 87 | extern void rebuild_sched_domains(void); | 
 | 88 |  | 
| David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 89 | extern void cpuset_print_task_mems_allowed(struct task_struct *p); | 
 | 90 |  | 
/*
 * Reading current's mems_allowed or mempolicy in the fast path must be
 * protected by a get_mems_allowed()/put_mems_allowed() pair.
 */
static inline void get_mems_allowed(void)
{
	current->mems_allowed_change_disable++;

	/*
	 * Ensure that reading mems_allowed and mempolicy happens after the
	 * update of ->mems_allowed_change_disable.
	 *
	 * The write-side task sees that ->mems_allowed_change_disable is
	 * non-zero, knows the read-side task may be reading mems_allowed
	 * or mempolicy, and so clears old bits lazily rather than
	 * immediately.
	 */
	smp_mb();
}
 | 109 |  | 
/* End a get_mems_allowed() critical section. */
static inline void put_mems_allowed(void)
{
	/*
	 * Ensure that reading mems_allowed and mempolicy is complete
	 * before reducing mems_allowed_change_disable.
	 *
	 * While the count is still raised the write-side task knows the
	 * read-side task may be reading mems_allowed or mempolicy, and
	 * must not clear old bits in the nodemask.
	 */
	smp_mb();
	--ACCESS_ONCE(current->mems_allowed_change_disable);
}
 | 123 |  | 
/*
 * Replace current's mems_allowed; the store is done under task_lock()
 * to exclude concurrent updaters of current->mems_allowed.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	current->mems_allowed = nodemask;
	task_unlock(current);
}
 | 130 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 131 | #else /* !CONFIG_CPUSETS */ | 
 | 132 |  | 
 | 133 | static inline int cpuset_init(void) { return 0; } | 
 | 134 | static inline void cpuset_init_smp(void) {} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 135 |  | 
/*
 * Without cpusets, a change in the set of active CPUs simply rebuilds
 * the single default sched domain.
 */
static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}
 | 140 |  | 
/* Without cpusets a task may run on any possible CPU. */
static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}
 | 146 |  | 
/*
 * Fallback CPU selection without cpusets: widen the task's affinity to
 * all possible CPUs and return any currently active one.
 */
static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	do_set_cpus_allowed(p, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}
 | 152 |  | 
/* Without cpusets every task may allocate from every possible node. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
 | 157 |  | 
| Christoph Lameter | 0e1e7c7 | 2007-10-16 01:25:38 -0700 | [diff] [blame] | 158 | #define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY]) | 
/* Without cpusets current's mems_allowed needs no initialization. */
static inline void cpuset_init_current_mems_allowed(void) {}
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 160 |  | 
/* Without cpusets any nodemask is acceptable. */
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}
 | 165 |  | 
/* Without cpusets allocation on any node is always allowed (softwall). */
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}
 | 170 |  | 
/* Without cpusets allocation on any node is always allowed (hardwall). */
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}
 | 175 |  | 
/* Without cpusets allocation in any zone is always allowed (softwall). */
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}
 | 180 |  | 
/* Without cpusets allocation in any zone is always allowed (hardwall). */
static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}
 | 185 |  | 
/* Without cpusets all tasks share one nodemask, so they always intersect. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}
 | 191 |  | 
/* Memory pressure accounting is a cpuset feature; no-op without cpusets. */
static inline void cpuset_memory_pressure_bump(void) {}
 | 193 |  | 
/* Without cpusets there is no cpuset status to emit for the task. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}
 | 198 |  | 
/* Without cpusets page spreading is disabled; always return node 0. */
static inline int cpuset_mem_spread_node(void) { return 0; }
 | 203 |  | 
/* Without cpusets slab spreading is disabled; always return node 0. */
static inline int cpuset_slab_spread_node(void) { return 0; }
 | 208 |  | 
/* Page cache spreading is a cpuset feature; never active without cpusets. */
static inline int cpuset_do_page_mem_spread(void) { return 0; }
 | 213 |  | 
 | 214 | static inline int cpuset_do_slab_mem_spread(void) | 
 | 215 | { | 
 | 216 | 	return 0; | 
 | 217 | } | 
 | 218 |  | 
/* Rebinding is a cpuset operation; never in progress without cpusets. */
static inline int current_cpuset_is_being_rebound(void) { return 0; }
 | 223 |  | 
/* Without cpusets there is only the single default sched domain to rebuild. */
static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}
 | 228 |  | 
/* Nothing to print: mems_allowed is not restricted without cpusets. */
static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}
 | 232 |  | 
/* No per-task mems_allowed to update without cpusets. */
static inline void set_mems_allowed(nodemask_t nodemask)
{
}
 | 236 |  | 
/* mems_allowed never changes without cpusets; no read-side protection needed. */
static inline void get_mems_allowed(void)
{
}
 | 240 |  | 
/* Counterpart of the no-op get_mems_allowed(); nothing to release. */
static inline void put_mems_allowed(void)
{
}
 | 244 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 245 | #endif /* !CONFIG_CPUSETS */ | 
 | 246 |  | 
 | 247 | #endif /* _LINUX_CPUSET_H */ |