| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef _LINUX_CPUSET_H | 
 | 2 | #define _LINUX_CPUSET_H | 
 | 3 | /* | 
 | 4 |  *  cpuset interface | 
 | 5 |  * | 
 | 6 |  *  Copyright (C) 2003 BULL SA | 
| Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 7 |  *  Copyright (C) 2004-2006 Silicon Graphics, Inc. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8 |  * | 
 | 9 |  */ | 
 | 10 |  | 
 | 11 | #include <linux/sched.h> | 
 | 12 | #include <linux/cpumask.h> | 
 | 13 | #include <linux/nodemask.h> | 
| Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 14 | #include <linux/cgroup.h> | 
| David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 15 | #include <linux/mm.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 16 |  | 
 | 17 | #ifdef CONFIG_CPUSETS | 
 | 18 |  | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 19 | extern int number_of_cpusets;	/* How many cpusets are defined in system? */ | 
 | 20 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 21 | extern int cpuset_init(void); | 
 | 22 | extern void cpuset_init_smp(void); | 
| Srivatsa S. Bhat | 7ddf96b | 2012-05-24 19:46:55 +0530 | [diff] [blame] | 23 | extern void cpuset_update_active_cpus(bool cpu_online); | 
| Li Zefan | 6af866a | 2009-01-07 18:08:45 -0800 | [diff] [blame] | 24 | extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); | 
| Peter Zijlstra | 2baab4e | 2012-03-20 15:57:01 +0100 | [diff] [blame] | 25 | extern void cpuset_cpus_allowed_fallback(struct task_struct *p); | 
| Paul Jackson | 909d75a | 2006-01-08 01:01:55 -0800 | [diff] [blame] | 26 | extern nodemask_t cpuset_mems_allowed(struct task_struct *p); | 
| Paul Jackson | 9276b1bc | 2006-12-06 20:31:48 -0800 | [diff] [blame] | 27 | #define cpuset_current_mems_allowed (current->mems_allowed) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 28 | void cpuset_init_current_mems_allowed(void); | 
| Mel Gorman | 19770b3 | 2008-04-28 02:12:18 -0700 | [diff] [blame] | 29 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); | 
| Paul Jackson | 202f72d | 2006-01-08 01:01:57 -0800 | [diff] [blame] | 30 |  | 
| David Rientjes | a1bc5a4 | 2009-04-02 16:57:54 -0700 | [diff] [blame] | 31 | extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); | 
 | 32 | extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); | 
| Paul Jackson | 02a0e53 | 2006-12-13 00:34:25 -0800 | [diff] [blame] | 33 |  | 
/*
 * May memory for @gfp_mask be allocated on @node under the (more
 * permissive) softwall policy?
 *
 * Fast path: when at most one cpuset exists in the system there can be
 * no restriction, so skip the out-of-line check entirely.
 */
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}
 | 39 |  | 
/*
 * May memory for @gfp_mask be allocated on @node under the (stricter)
 * hardwall policy?  Same single-cpuset fast path as the softwall
 * variant; see __cpuset_node_allowed_hardwall() for the slow path.
 */
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}
 | 45 |  | 
/* Zone-based convenience wrapper: softwall check on the zone's node. */
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}
 | 50 |  | 
/* Zone-based convenience wrapper: hardwall check on the zone's node. */
static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
 | 55 |  | 
| David Rientjes | bbe373f | 2007-10-16 23:25:58 -0700 | [diff] [blame] | 56 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | 
 | 57 | 					  const struct task_struct *tsk2); | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 58 |  | 
/*
 * Record a memory-pressure event for the current task's cpuset, but
 * only when the (normally off) accounting flag is enabled — the common
 * path costs just one global test.
 */
#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
 | 64 | extern int cpuset_memory_pressure_enabled; | 
 | 65 | extern void __cpuset_memory_pressure_bump(void); | 
 | 66 |  | 
| Arjan van de Ven | 5404732 | 2007-02-12 00:55:28 -0800 | [diff] [blame] | 67 | extern const struct file_operations proc_cpuset_operations; | 
| Eric W. Biederman | df5f831 | 2008-02-08 04:18:33 -0800 | [diff] [blame] | 68 | struct seq_file; | 
 | 69 | extern void cpuset_task_status_allowed(struct seq_file *m, | 
 | 70 | 					struct task_struct *task); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 71 |  | 
| Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 72 | extern int cpuset_mem_spread_node(void); | 
| Jack Steiner | 6adef3e | 2010-05-26 14:42:49 -0700 | [diff] [blame] | 73 | extern int cpuset_slab_spread_node(void); | 
| Paul Jackson | 825a46a | 2006-03-24 03:16:03 -0800 | [diff] [blame] | 74 |  | 
/*
 * Nonzero when current has PF_SPREAD_PAGE set, i.e. page cache
 * allocations should be spread (see cpuset_mem_spread_node()).
 */
static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}
 | 79 |  | 
/*
 * Nonzero when current has PF_SPREAD_SLAB set, i.e. slab allocations
 * should be spread (see cpuset_slab_spread_node()).
 */
static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}
 | 84 |  | 
| Paul Menage | 8793d85 | 2007-10-18 23:39:39 -0700 | [diff] [blame] | 85 | extern int current_cpuset_is_being_rebound(void); | 
 | 86 |  | 
| Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 87 | extern void rebuild_sched_domains(void); | 
 | 88 |  | 
| David Rientjes | 75aa199 | 2009-01-06 14:39:01 -0800 | [diff] [blame] | 89 | extern void cpuset_print_task_mems_allowed(struct task_struct *p); | 
 | 90 |  | 
| Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 91 | /* | 
| Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 92 |  * get_mems_allowed is required when making decisions involving mems_allowed | 
 | 93 |  * such as during page allocation. mems_allowed can be updated in parallel | 
 | 94 |  * and depending on the new value an operation can fail potentially causing | 
 | 95 |  * process failure. A retry loop with get_mems_allowed and put_mems_allowed | 
 | 96 |  * prevents these artificial failures. | 
| Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 97 |  */ | 
| Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 98 | static inline unsigned int get_mems_allowed(void) | 
| Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 99 | { | 
| Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 100 | 	return read_seqcount_begin(¤t->mems_allowed_seq); | 
| Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 101 | } | 
 | 102 |  | 
| Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 103 | /* | 
 | 104 |  * If this returns false, the operation that took place after get_mems_allowed | 
 | 105 |  * may have failed. It is up to the caller to retry the operation if | 
 | 106 |  * appropriate. | 
 | 107 |  */ | 
 | 108 | static inline bool put_mems_allowed(unsigned int seq) | 
| Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 109 | { | 
| Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 110 | 	return !read_seqcount_retry(¤t->mems_allowed_seq, seq); | 
| Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 111 | } | 
 | 112 |  | 
| Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 113 | static inline void set_mems_allowed(nodemask_t nodemask) | 
 | 114 | { | 
| Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 115 | 	task_lock(current); | 
| Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 116 | 	write_seqcount_begin(¤t->mems_allowed_seq); | 
| Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 117 | 	current->mems_allowed = nodemask; | 
| Mel Gorman | cc9a6c8 | 2012-03-21 16:34:11 -0700 | [diff] [blame] | 118 | 	write_seqcount_end(¤t->mems_allowed_seq); | 
| Miao Xie | c0ff745 | 2010-05-24 14:32:08 -0700 | [diff] [blame] | 119 | 	task_unlock(current); | 
| Miao Xie | 58568d2 | 2009-06-16 15:31:49 -0700 | [diff] [blame] | 120 | } | 
 | 121 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 122 | #else /* !CONFIG_CPUSETS */ | 
 | 123 |  | 
 | 124 | static inline int cpuset_init(void) { return 0; } | 
 | 125 | static inline void cpuset_init_smp(void) {} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 126 |  | 
/*
 * CPU hotplug notification: without cpusets just rebuild the single
 * default sched domain spanning all active CPUs.
 */
static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}
 | 131 |  | 
/* No cpuset restrictions: a task may run on any possible CPU. */
static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}
 | 137 |  | 
/* No cpuset to fall back to when CONFIG_CPUSETS is off; do nothing. */
static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}
 | 141 |  | 
/* No cpuset restrictions: a task may allocate on any possible node. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
 | 146 |  | 
/* Without cpusets, "allowed" is simply every node that has memory. */
#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 149 |  | 
/* Any nodemask is acceptable when there are no cpuset constraints. */
static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}
 | 154 |  | 
/* Allocation is always allowed on every node without cpusets. */
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}
 | 159 |  | 
/* Hardwall variant: likewise always allowed without cpusets. */
static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}
 | 164 |  | 
/* Zone-based softwall check: always allowed without cpusets. */
static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}
 | 169 |  | 
/* Zone-based hardwall check: always allowed without cpusets. */
static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}
 | 174 |  | 
/* With one implicit all-inclusive cpuset, any two tasks' mems overlap. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}
 | 180 |  | 
| Paul Jackson | 3e0d98b | 2006-01-08 01:01:49 -0800 | [diff] [blame] | 181 | static inline void cpuset_memory_pressure_bump(void) {} | 
 | 182 |  | 
/* /proc status output of cpuset state: nothing to report; no-op. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}
 | 187 |  | 
/* No spread policy without cpusets; the caller gets node 0. */
static inline int cpuset_mem_spread_node(void)
{
	return 0;
}
 | 192 |  | 
/* Slab counterpart of cpuset_mem_spread_node(); always node 0 here. */
static inline int cpuset_slab_spread_node(void)
{
	return 0;
}
 | 197 |  | 
/* Page-cache spreading is never enabled when cpusets are compiled out. */
static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}
 | 202 |  | 
 | 203 | static inline int cpuset_do_slab_mem_spread(void) | 
 | 204 | { | 
 | 205 | 	return 0; | 
 | 206 | } | 
 | 207 |  | 
/* No cpusets means no rebinding can ever be in progress. */
static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}
 | 212 |  | 
/* Rebuild the single default sched domain covering all CPUs. */
static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}
 | 217 |  | 
/* OOM diagnostics helper: no per-cpuset mems_allowed to print; no-op. */
static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}
 | 221 |  | 
/* mems_allowed is not tracked without cpusets; silently ignore. */
static inline void set_mems_allowed(nodemask_t nodemask)
{
}
 | 225 |  | 
/*
 * Dummy mems_allowed sequence snapshot for the !CONFIG_CPUSETS case.
 * The token is meaningless; put_mems_allowed() ignores it.
 */
static inline unsigned int get_mems_allowed(void)
{
	return 0;
}
 | 230 |  | 
/*
 * End of a mems_allowed read section: with cpusets disabled the
 * snapshot can never be stale, so the caller never needs to retry.
 */
static inline bool put_mems_allowed(unsigned int seq)
{
	return true;
}
 | 235 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 236 | #endif /* !CONFIG_CPUSETS */ | 
 | 237 |  | 
 | 238 | #endif /* _LINUX_CPUSET_H */ |