#ifdef CONFIG_SCHED_AUTOGROUP

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;

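/*
 * Wire up the statically allocated default autogroup: it wraps the
 * root task group and is installed as the init task's autogroup, so
 * every task has a valid ->signal->autogroup from boot onwards.
 */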
static void __init autogroup_init(struct task_struct *init_task)
{
	autogroup_default.tg = &root_task_group;
	root_task_group.autogroup = &autogroup_default;
	kref_init(&autogroup_default.kref);
	init_rwsem(&autogroup_default.lock);
	init_task->signal->autogroup = &autogroup_default;
}

static inline void autogroup_free(struct task_group *tg)
{
	kfree(tg->autogroup);
}

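/*
 * kref release callback: undo the rt_se/rt_rq aliasing set up in
 * autogroup_create() so sched_destroy_group() does not free the root
 * task group's RT structures, then tear down the backing task group.
 */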
static inline void autogroup_destroy(struct kref *kref)
{
	struct autogroup *ag = container_of(kref, struct autogroup, kref);

#ifdef CONFIG_RT_GROUP_SCHED
	/* We've redirected RT tasks to the root task group... */
	ag->tg->rt_se = NULL;
	ag->tg->rt_rq = NULL;
#endif
	sched_destroy_group(ag->tg);
}

static inline void autogroup_kref_put(struct autogroup *ag)
{
	kref_put(&ag->kref, autogroup_destroy);
}

static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
	kref_get(&ag->kref);
	return ag;
}

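/*
 * Take a reference on @p's autogroup under siglock.  If the sighand is
 * already gone (the task is exiting), hand back a reference to the
 * default autogroup instead, so callers always get something valid.
 */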
static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
	struct autogroup *ag;
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return autogroup_kref_get(&autogroup_default);

	ag = autogroup_kref_get(p->signal->autogroup);
	unlock_task_sighand(p, &flags);

	return ag;
}

#ifdef CONFIG_RT_GROUP_SCHED
static void free_rt_sched_group(struct task_group *tg);
#endif

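/*
 * Create a new autogroup backed by a task group parented to the root
 * task group.  On allocation or group-creation failure, fall back to a
 * reference on the default autogroup, so the return value is never
 * NULL or an error pointer.
 */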
static inline struct autogroup *autogroup_create(void)
{
	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
	struct task_group *tg;

	if (!ag)
		goto out_fail;

	tg = sched_create_group(&root_task_group);

	if (IS_ERR(tg))
		goto out_free;

	kref_init(&ag->kref);
	init_rwsem(&ag->lock);
	ag->id = atomic_inc_return(&autogroup_seq_nr);
	ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Autogroup RT tasks are redirected to the root task group
	 * so we don't have to move tasks around upon policy change,
	 * or flail around trying to allocate bandwidth on the fly.
	 * A bandwidth exception in __sched_setscheduler() allows
	 * the policy change to proceed.  Thereafter, task_group()
	 * returns &root_task_group, so zero bandwidth is required.
	 */
	free_rt_sched_group(tg);
	tg->rt_se = root_task_group.rt_se;
	tg->rt_rq = root_task_group.rt_rq;
#endif
	tg->autogroup = ag;

	return ag;

out_free:
	kfree(ag);
out_fail:
	if (printk_ratelimit()) {
		printk(KERN_WARNING "autogroup_create: %s failure.\n",
			ag ? "sched_create_group()" : "kmalloc()");
	}

	return autogroup_kref_get(&autogroup_default);
}

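/*
 * A task is only placed into its autogroup if it would otherwise run
 * in the root task group, is scheduled by the fair class, and is not
 * exiting (an exiting task may already be off the ->thread_group list
 * that autogroup_move_group() walks).
 */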
static inline bool
task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
	if (tg != &root_task_group)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	/*
	 * We can only assume the task group can't go away on us if
	 * autogroup_move_group() can see us on ->thread_group list.
	 */
	if (p->flags & PF_EXITING)
		return false;

	return true;
}

static inline bool task_group_is_autogroup(struct task_group *tg)
{
	return tg != &root_task_group && tg->autogroup;
}

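/*
 * Map @tg to @p's autogroup's task group when autogrouping is enabled
 * and the task qualifies; otherwise leave @tg untouched.  This is what
 * redirects a qualifying task from the root task group into its
 * autogroup.
 */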
static inline struct task_group *
autogroup_task_group(struct task_struct *p, struct task_group *tg)
{
	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);

	if (enabled && task_wants_autogroup(p, tg))
		return p->signal->autogroup->tg;

	return tg;
}

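/*
 * Switch @p's whole thread group over to @ag: publish the new
 * autogroup pointer under siglock, requeue every thread via
 * sched_move_task(), then drop the reference on the old autogroup.
 */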
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));

	prev = p->signal->autogroup;
	if (prev == ag) {
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);

	t = p;
	do {
		sched_move_task(t);
	} while_each_thread(p, t);

	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}

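/*
 * Public interface: give @p a freshly created autogroup and move its
 * thread group into it.  Used when a task starts a new session
 * (e.g. via setsid()).
 */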
/* Allocates GFP_KERNEL, cannot be called under any spinlock */
void sched_autogroup_create_attach(struct task_struct *p)
{
	struct autogroup *ag = autogroup_create();

	autogroup_move_group(p, ag);
	/* drop extra reference added by autogroup_create() */
	autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);

/* Cannot be called under siglock.  Currently has no users */
void sched_autogroup_detach(struct task_struct *p)
{
	autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);

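/*
 * Fork/exit hooks for the signal struct: a new thread group inherits a
 * reference to the forking task's autogroup, and that reference is
 * dropped again when the signal struct is released.
 */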
void sched_autogroup_fork(struct signal_struct *sig)
{
	sig->autogroup = autogroup_task_get(current);
}

void sched_autogroup_exit(struct signal_struct *sig)
{
	autogroup_kref_put(sig->autogroup);
}

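/*
 * "noautogroup" on the kernel command line clears
 * sysctl_sched_autogroup_enabled, disabling automatic task grouping
 * at boot.
 */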
static int __init setup_autogroup(char *str)
{
	sysctl_sched_autogroup_enabled = 0;

	return 1;
}

__setup("noautogroup", setup_autogroup);

#ifdef CONFIG_PROC_FS

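/*
 * Write side of /proc/<pid>/autogroup: validate the requested nice
 * value and the caller's privileges, rate-limit unprivileged updates
 * to roughly one per 100ms (this path takes global locks), then apply
 * the nice value to the whole group via sched_group_set_shares().
 */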
int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice)
{
	static unsigned long next = INITIAL_JIFFIES;
	struct autogroup *ag;
	int err;

	if (*nice < -20 || *nice > 19)
		return -EINVAL;

	err = security_task_setnice(current, *nice);
	if (err)
		return err;

	if (*nice < 0 && !can_nice(current, *nice))
		return -EPERM;

	/* this is a heavy operation taking global locks.. */
	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
		return -EAGAIN;

	next = HZ / 10 + jiffies;
	ag = autogroup_task_get(p);

	down_write(&ag->lock);
	err = sched_group_set_shares(ag->tg, prio_to_weight[*nice + 20]);
	if (!err)
		ag->nice = *nice;
	up_write(&ag->lock);

	autogroup_kref_put(ag);

	return err;
}

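/*
 * Read side of /proc/<pid>/autogroup: report the group id and its
 * current nice value.
 */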
void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
	struct autogroup *ag = autogroup_task_get(p);

	down_read(&ag->lock);
	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
	up_read(&ag->lock);

	autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SCHED_DEBUG
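/*
 * Format "/autogroup-<id>" into @buf for scheduler debug output.
 * Returns 0 (leaving @buf untouched) when autogrouping is disabled or
 * @tg is not an autogroup.
 */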
static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);

	if (!enabled || !tg->autogroup)
		return 0;

	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}
#endif /* CONFIG_SCHED_DEBUG */

#endif /* CONFIG_SCHED_AUTOGROUP */