/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

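/*
 * Quantum used by SCHED_RR tasks, in ticks. Defaults to RR_TIMESLICE and is
 * tunable at run time (the sched_rr_timeslice_ms sysctl handler lives outside
 * this file). SCHED_FIFO tasks are not time-sliced at all.
 */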
int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

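/*
 * Bandwidth pool for RT tasks outside of group scheduling; its period and
 * runtime are presumably filled in from the global sched_rt_period_us /
 * sched_rt_runtime_us knobs during scheduler initialization (not shown here).
 */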
struct rt_bandwidth def_rt_bandwidth;

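/*
 * Replenishment timer: fires once per rt_period, forwards itself past any
 * missed periods and lets do_sched_rt_period_timer() refill and unthrottle
 * the runqueues. It keeps rearming until every covered rt_rq reports idle.
 */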
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

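/*
 * Set up an rt_rq: an empty priority array with the sentinel bit set for the
 * bitmap search, no accumulated rt_time, not throttled, and (on SMP) no
 * migratable or overloaded state yet.
 */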
void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
        struct rt_prio_array *array;
        int i;

        array = &rt_rq->active;
        for (i = 0; i < MAX_RT_PRIO; i++) {
                INIT_LIST_HEAD(array->queue + i);
                __clear_bit(i, array->bitmap);
        }
        /* delimiter for bitsearch: */
        __set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->highest_prio.next = MAX_RT_PRIO;
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
        plist_head_init(&rt_rq->pushable_tasks);
#endif

        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
        raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        return rt_se->rt_rq;
}

void free_rt_sched_group(struct task_group *tg)
{
        int i;

        if (tg->rt_se)
                destroy_rt_bandwidth(&tg->rt_bandwidth);

        for_each_possible_cpu(i) {
                if (tg->rt_rq)
                        kfree(tg->rt_rq[i]);
                if (tg->rt_se)
                        kfree(tg->rt_se[i]);
        }

        kfree(tg->rt_rq);
        kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
                struct sched_rt_entity *rt_se, int cpu,
                struct sched_rt_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        rt_rq->highest_prio.curr = MAX_RT_PRIO;
        rt_rq->rt_nr_boosted = 0;
        rt_rq->rq = rq;
        rt_rq->tg = tg;

        tg->rt_rq[cpu] = rt_rq;
        tg->rt_se[cpu] = rt_se;

        if (!rt_se)
                return;

        if (!parent)
                rt_se->rt_rq = &rq->rt;
        else
                rt_se->rt_rq = parent->my_q;

        rt_se->my_q = rt_rq;
        rt_se->parent = parent;
        INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct rt_rq *rt_rq;
        struct sched_rt_entity *rt_se;
        int i;

        tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_rq)
                goto err;
        tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->rt_se)
                goto err;

        init_rt_bandwidth(&tg->rt_bandwidth,
                        ktime_to_ns(def_rt_bandwidth.rt_period), 0);

        for_each_possible_cpu(i) {
                rt_rq = kzalloc_node(sizeof(struct rt_rq),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_rq)
                        goto err;

                rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
                                     GFP_KERNEL, cpu_to_node(i));
                if (!rt_se)
                        goto err_free_rq;

                init_rt_rq(rt_rq, cpu_rq(i));
                rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
                init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
        }

        return 1;

err_free_rq:
        kfree(rt_rq);
err:
        return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
        return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we set
         * the overload count. That is checked to determine
         * if we should look at the mask. It would be a shame
         * if we looked at the mask, but the mask was not
         * updated yet.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

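/*
 * An rq counts as RT-overloaded when it has more than one runnable RT task
 * and at least one of them can migrate. The root domain's rto_mask and
 * rto_count advertise such rqs to the push/pull logic on other CPUs.
 */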
static void update_rt_migration(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
                if (!rt_rq->overloaded) {
                        rt_set_overload(rq_of_rt_rq(rt_rq));
                        rt_rq->overloaded = 1;
                }
        } else if (rt_rq->overloaded) {
                rt_clear_overload(rq_of_rt_rq(rt_rq));
                rt_rq->overloaded = 0;
        }
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total++;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory++;

        update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        struct task_struct *p;

        if (!rt_entity_is_task(rt_se))
                return;

        p = rt_task_of(rt_se);
        rt_rq = &rq_of_rt_rq(rt_rq)->rt;

        rt_rq->rt_nr_total--;
        if (p->nr_cpus_allowed > 1)
                rt_rq->rt_nr_migratory--;

        update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
        return !plist_head_empty(&rq->rt.pushable_tasks);
}

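/*
 * Tasks that could be pushed to another CPU live on a per-rq plist, sorted
 * by priority; rq->rt.highest_prio.next caches the best priority among them
 * and is kept up to date by the enqueue/dequeue helpers below.
 */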
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
        plist_node_init(&p->pushable_tasks, p->prio);
        plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the highest prio pushable task */
        if (p->prio < rq->rt.highest_prio.next)
                rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
        plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

        /* Update the new highest prio pushable task */
        if (has_pushable_tasks(rq)) {
                p = plist_first_entry(&rq->rt.pushable_tasks,
                                      struct task_struct, pushable_tasks);
                rq->rt.highest_prio.next = p->prio;
        } else
                rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
        return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        if (!rt_rq->tg)
                return RUNTIME_INF;

        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
        do {
                tg = list_entry_rcu(tg->list.next,
                        typeof(struct task_group), list);
        } while (&tg->list != &task_groups && task_group_is_autogroup(tg));

        if (&tg->list == &task_groups)
                tg = NULL;

        return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)                                 \
        for (iter = container_of(&task_groups, typeof(*iter), list);   \
                (iter = next_task_group(iter)) &&                       \
                (rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

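/*
 * (Un)throttle helpers for a group's rt_rq: enqueue puts the group's
 * sched_rt_entity for this CPU back on its parent runqueue (and preempts the
 * running task if the group now holds a higher-priority task); dequeue takes
 * it off again.
 */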
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
        struct sched_rt_entity *rt_se;

        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_rq->rt_nr_running) {
                if (rt_se && !on_rt_rq(rt_se))
                        enqueue_rt_entity(rt_se, false);
                if (rt_rq->highest_prio.curr < curr->prio)
                        resched_task(curr);
        }
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
        struct sched_rt_entity *rt_se;
        int cpu = cpu_of(rq_of_rt_rq(rt_rq));

        rt_se = rt_rq->tg->rt_se[cpu];

        if (rt_se && on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = group_rt_rq(rt_se);
        struct task_struct *p;

        if (rt_rq)
                return !!rt_rq->rt_nr_boosted;

        p = rt_task_of(rt_se);
        return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
        return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
        return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
        for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
        for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
        return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
        if (rt_rq->rt_nr_running)
                resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
        return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
        return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
        return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
        return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
        struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
        int i, weight, more = 0;
        u64 rt_period;

        weight = cpumask_weight(rd->span);

        raw_spin_lock(&rt_b->rt_runtime_lock);
        rt_period = ktime_to_ns(rt_b->rt_period);
        for_each_cpu(i, rd->span) {
                struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                s64 diff;

                if (iter == rt_rq)
                        continue;

                raw_spin_lock(&iter->rt_runtime_lock);
                /*
                 * Either all rqs have inf runtime and there's nothing to steal
                 * or __disable_runtime() below sets a specific rq to inf to
                 * indicate it's been disabled and disallow stealing.
                 */
                if (iter->rt_runtime == RUNTIME_INF)
                        goto next;

                /*
                 * From runqueues with spare time, take 1/n part of their
                 * spare time, but no more than our period.
                 */
                diff = iter->rt_runtime - iter->rt_time;
                if (diff > 0) {
                        diff = div_u64((u64)diff, weight);
                        if (rt_rq->rt_runtime + diff > rt_period)
                                diff = rt_period - rt_rq->rt_runtime;
                        iter->rt_runtime -= diff;
                        rt_rq->rt_runtime += diff;
                        more = 1;
                        if (rt_rq->rt_runtime == rt_period) {
                                raw_spin_unlock(&iter->rt_runtime_lock);
                                break;
                        }
                }
next:
                raw_spin_unlock(&iter->rt_runtime_lock);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);

        return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
        struct root_domain *rd = rq->rd;
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
                s64 want;
                int i;

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * Either we're all inf and nobody needs to borrow, or we're
                 * already disabled and thus have nothing to do, or we have
                 * exactly the right amount of runtime to take out.
                 */
                if (rt_rq->rt_runtime == RUNTIME_INF ||
                                rt_rq->rt_runtime == rt_b->rt_runtime)
                        goto balanced;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);

                /*
                 * Calculate the difference between what we started out with
                 * and what we currently have; that's the amount of runtime
                 * we lent out and now have to reclaim.
                 */
                want = rt_b->rt_runtime - rt_rq->rt_runtime;

                /*
                 * Greedy reclaim, take back as much as we can.
                 */
                for_each_cpu(i, rd->span) {
                        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
                        s64 diff;

                        /*
                         * Can't reclaim from ourselves or disabled runqueues.
                         */
                        if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
                                continue;

                        raw_spin_lock(&iter->rt_runtime_lock);
                        if (want > 0) {
                                diff = min_t(s64, iter->rt_runtime, want);
                                iter->rt_runtime -= diff;
                                want -= diff;
                        } else {
                                iter->rt_runtime -= want;
                                want -= want;
                        }
                        raw_spin_unlock(&iter->rt_runtime_lock);

                        if (!want)
                                break;
                }

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We cannot be left wanting - that would mean some runtime
                 * leaked out of the system.
                 */
                BUG_ON(want);
balanced:
                /*
                 * Disable all the borrow logic by pretending we have inf
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static void __enable_runtime(struct rq *rq)
{
        rt_rq_iter_t iter;
        struct rt_rq *rt_rq;

        if (unlikely(!scheduler_running))
                return;

        /*
         * Reset each runqueue's bandwidth settings
         */
        for_each_rt_rq(rt_rq, iter, rq) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                raw_spin_lock(&rt_b->rt_runtime_lock);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_b->rt_runtime;
                rt_rq->rt_time = 0;
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }
}

static int balance_runtime(struct rt_rq *rt_rq)
{
        int more = 0;

        if (!sched_feat(RT_RUNTIME_SHARE))
                return more;

        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
                raw_spin_lock(&rt_rq->rt_runtime_lock);
        }

        return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
        return 0;
}
#endif /* CONFIG_SMP */

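/*
 * Runs from the rt_period timer: for every rt_rq in the period mask, pay back
 * up to 'overrun' periods worth of runtime, unthrottle runqueues that dropped
 * below their budget again, and report whether everything is idle so the
 * timer can stop rearming.
 */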
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
        int i, idle = 1, throttled = 0;
        const struct cpumask *span;

        span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * FIXME: isolated CPUs should really leave the root task group,
         * whether they are isolcpus or were isolated via cpusets, lest
         * the timer run on a CPU which does not service all runqueues,
         * potentially leaving other CPUs indefinitely throttled. If
         * isolation is really required, the user will turn the throttle
         * off to kill the perturbations it causes anyway. Meanwhile,
         * this maintains functionality for boot and/or troubleshooting.
         */
        if (rt_b == &root_task_group.rt_bandwidth)
                span = cpu_online_mask;
#endif
        for_each_cpu(i, span) {
                int enqueue = 0;
                struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
                struct rq *rq = rq_of_rt_rq(rt_rq);

                raw_spin_lock(&rq->lock);
                if (rt_rq->rt_time) {
                        u64 runtime;

                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        if (rt_rq->rt_throttled)
                                balance_runtime(rt_rq);
                        runtime = rt_rq->rt_runtime;
                        rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
                        if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
                                rt_rq->rt_throttled = 0;
                                enqueue = 1;

                                /*
                                 * Force a clock update if the CPU was idle,
                                 * lest wakeup -> unthrottle time accumulate.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
                                        rq->skip_clock_update = -1;
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                } else if (rt_rq->rt_nr_running) {
                        idle = 0;
                        if (!rt_rq_throttled(rt_rq))
                                enqueue = 1;
                }
                if (rt_rq->rt_throttled)
                        throttled = 1;

                if (enqueue)
                        sched_rt_rq_enqueue(rt_rq);
                raw_spin_unlock(&rq->lock);
        }

        if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
                return 1;

        return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
        struct rt_rq *rt_rq = group_rt_rq(rt_se);

        if (rt_rq)
                return rt_rq->highest_prio.curr;
#endif

        return rt_task_of(rt_se)->prio;
}

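/*
 * Decide whether this rt_rq has consumed more than its allotted runtime and
 * must be throttled. Called with rt_rq->rt_runtime_lock held from
 * update_curr_rt(); may first try to borrow runtime from other CPUs.
 */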
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
        u64 runtime = sched_rt_runtime(rt_rq);

        if (rt_rq->rt_throttled)
                return rt_rq_throttled(rt_rq);

        if (runtime >= sched_rt_period(rt_rq))
                return 0;

        balance_runtime(rt_rq);
        runtime = sched_rt_runtime(rt_rq);
        if (runtime == RUNTIME_INF)
                return 0;

        if (rt_rq->rt_time > runtime) {
                struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

                /*
                 * Don't actually throttle groups that have no runtime assigned
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
                        static bool once = false;

                        rt_rq->rt_throttled = 1;

                        if (!once) {
                                once = true;
                                printk_sched("sched: RT throttling activated\n");
                        }
                } else {
                        /*
                         * In case we did anyway, make it go away,
                         * replenishment is a joke, since it will replenish us
                         * with exactly 0 ns.
                         */
                        rt_rq->rt_time = 0;
                }

                if (rt_rq_throttled(rt_rq)) {
                        sched_rt_rq_dequeue(rt_rq);
                        return 1;
                }
        }

        return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_rt_entity *rt_se = &curr->rt;
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        u64 delta_exec;

        if (curr->sched_class != &rt_sched_class)
                return;

        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq_clock_task(rq);
        cpuacct_charge(curr, delta_exec);

        sched_rt_avg_update(rq, delta_exec);

        if (!rt_bandwidth_enabled())
                return;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);

                if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        if (sched_rt_runtime_exceeded(rt_rq))
                                resched_task(curr);
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                }
        }
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && prio < prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
        struct rq *rq = rq_of_rt_rq(rt_rq);

        if (rq->online && rt_rq->highest_prio.curr != prev_prio)
                cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (prio < prev_prio)
                rt_rq->highest_prio.curr = prio;

        inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
        int prev_prio = rt_rq->highest_prio.curr;

        if (rt_rq->rt_nr_running) {

                WARN_ON(prio < prev_prio);

                /*
                 * This may have been our highest task, and therefore
                 * we may have some recomputation to do
                 */
                if (prio == prev_prio) {
                        struct rt_prio_array *array = &rt_rq->active;

                        rt_rq->highest_prio.curr =
                                sched_find_first_bit(array->bitmap);
                }

        } else
                rt_rq->highest_prio.curr = MAX_RT_PRIO;

        dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        int prio = rt_se_prio(rt_se);

        WARN_ON(!rt_prio(prio));
        rt_rq->rt_nr_running++;

        inc_rt_prio(rt_rq, prio);
        inc_rt_migration(rt_se, rt_rq);
        inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;

        dec_rt_prio(rt_rq, rt_se_prio(rt_se));
        dec_rt_migration(rt_se, rt_rq);
        dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        /*
         * Don't enqueue the group if it's throttled, or when empty.
         * The latter is a consequence of the former when a child group
         * gets throttled and the current group doesn't have any other
         * active members.
         */
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;

        if (head)
                list_add(&rt_se->run_list, queue);
        else
                list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
        struct sched_rt_entity *back = NULL;

        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
        }
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        dequeue_rt_stack(rt_se);

        for_each_sched_rt_entity(rt_se) {
                struct rt_rq *rt_rq = group_rt_rq(rt_se);

                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se, false);
        }
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (flags & ENQUEUE_WAKEUP)
                rt_se->timeout = 0;

        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);

        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);

        inc_nr_running(rq);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);

        dequeue_pushable_task(rq, p);

        dec_nr_running(rq);
}

/*
 * Put task to the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
        if (on_rt_rq(rt_se)) {
                struct rt_prio_array *array = &rt_rq->active;
                struct list_head *queue = array->queue + rt_se_prio(rt_se);

                if (head)
                        list_move(&rt_se->run_list, queue);
                else
                        list_move_tail(&rt_se->run_list, queue);
        }
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se, head);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;
        int cpu;

        cpu = task_cpu(p);

        if (p->nr_cpus_allowed == 1)
                goto out;

        /* For anything but wake ups, just return the task_cpu */
        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = ACCESS_ONCE(rq->curr); /* unlocked access */

        /*
         * If the current task on @p's runqueue is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues. If the woken
         * task is a higher priority, then it will stay on this CPU
         * and the lower prio task should be moved to another CPU.
         * Even though this will probably make the lower prio task
         * lose its cache, we do not want to bounce a higher task
         * around just because it gave up its CPU, perhaps for a
         * lock?
         *
         * For equal prio tasks, we just let the scheduler sort it out.
         *
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away
         *
         * This test is optimistic, if we get it wrong the load-balancer
         * will have to sort it out.
         */
        if (curr && unlikely(rt_task(curr)) &&
            (curr->nr_cpus_allowed < 2 ||
             curr->prio <= p->prio) &&
            (p->nr_cpus_allowed > 1)) {
                int target = find_lowest_rq(p);

                if (target != -1)
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

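/*
 * Equal-priority wakeup on SMP: only worth rescheduling when the waking task
 * cannot go anywhere else but current can, so the post-schedule push logic
 * gets a chance to move current out of the way.
 */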
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
        if (rq->curr->nr_cpus_allowed == 1)
                return;

        if (p->nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, NULL))
                return;

        if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                return;

        /*
         * There appear to be other CPUs that can accept
         * current and none to run 'p', so let's reschedule
         * to try and push current away:
         */
        requeue_task_rt(rq, p, 1);
        resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * If:
         *
         * - the newly woken task is of equal priority to the current task
         * - the newly woken task is non-migratable while current is migratable
         * - current will be preempted on the next reschedule
         *
         * we should check to see if current can readily move to a different
         * cpu. If so, we will reschedule to allow the push logic to try
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
        if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
                check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

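/*
 * Pick the next RT task: walk from the root rt_rq down through the group
 * hierarchy, taking the highest-priority queued entity at each level until a
 * task-level entity is reached.
 */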
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (!rt_rq->rt_nr_running)
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq_clock_task(rq);

        return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct task_struct *p = _pick_next_task_rt(rq);

        /* The running task is never eligible for pushing */
        if (p)
                dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
        /*
         * We detect this state here so that we can avoid taking the RQ
         * lock again later if there is no need to push
         */
        rq->post_schedule = has_pushable_tasks(rq);
#endif

        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * The previous task needs to be made eligible for pushing
         * if it is still active
         */
        if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
                enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

Steven Rostedtf65eda42008-01-25 21:08:07 +01001359static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1360{
1361 if (!task_running(rq, p) &&
Kirill Tkhai60334ca2013-01-31 18:56:17 +04001362 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Steven Rostedtf65eda42008-01-25 21:08:07 +01001363 return 1;
1364 return 0;
1365}
1366
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001367/*
	1368 * Return the highest-priority pushable task on this rq that may run on
	1369 * the given cpu, or NULL if there is none.
1370 */
1371static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001372{
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001373 struct plist_head *head = &rq->rt.pushable_tasks;
1374 struct task_struct *p;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001375
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001376 if (!has_pushable_tasks(rq))
1377 return NULL;
Peter Zijlstra3d074672010-03-10 17:07:24 +01001378
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001379 plist_for_each_entry(p, head, pushable_tasks) {
1380 if (pick_rt_task(rq, p, cpu))
1381 return p;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001382 }
1383
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001384 return NULL;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001385}
1386
Rusty Russell0e3900e2008-11-25 02:35:13 +10301387static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001388
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001389static int find_lowest_rq(struct task_struct *task)
1390{
1391 struct sched_domain *sd;
Rusty Russell96f874e2008-11-25 02:35:14 +10301392 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001393 int this_cpu = smp_processor_id();
1394 int cpu = task_cpu(task);
1395
Steven Rostedt0da938c2011-06-14 18:36:25 -04001396 /* Make sure the mask is initialized first */
1397 if (unlikely(!lowest_mask))
1398 return -1;
1399
Peter Zijlstra29baa742012-04-23 12:11:21 +02001400 if (task->nr_cpus_allowed == 1)
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001401 return -1; /* No other targets possible */
1402
1403 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
Gregory Haskins06f90db2008-01-25 21:08:13 +01001404 return -1; /* No targets found */
1405
1406 /*
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001407 * At this point we have built a mask of cpus representing the
1408 * lowest priority tasks in the system. Now we want to elect
1409 * the best one based on our affinity and topology.
1410 *
1411 * We prioritize the last cpu that the task executed on since
1412 * it is most likely cache-hot in that location.
1413 */
Rusty Russell96f874e2008-11-25 02:35:14 +10301414 if (cpumask_test_cpu(cpu, lowest_mask))
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001415 return cpu;
1416
1417 /*
1418 * Otherwise, we consult the sched_domains span maps to figure
1419 * out which cpu is logically closest to our hot cache data.
1420 */
Rusty Russelle2c88062009-11-03 14:53:15 +10301421 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1422 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001423
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001424 rcu_read_lock();
Rusty Russelle2c88062009-11-03 14:53:15 +10301425 for_each_domain(cpu, sd) {
1426 if (sd->flags & SD_WAKE_AFFINE) {
1427 int best_cpu;
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001428
Rusty Russelle2c88062009-11-03 14:53:15 +10301429 /*
1430 * "this_cpu" is cheaper to preempt than a
1431 * remote processor.
1432 */
1433 if (this_cpu != -1 &&
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001434 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1435 rcu_read_unlock();
Rusty Russelle2c88062009-11-03 14:53:15 +10301436 return this_cpu;
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001437 }
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001438
Rusty Russelle2c88062009-11-03 14:53:15 +10301439 best_cpu = cpumask_first_and(lowest_mask,
1440 sched_domain_span(sd));
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001441 if (best_cpu < nr_cpu_ids) {
1442 rcu_read_unlock();
Rusty Russelle2c88062009-11-03 14:53:15 +10301443 return best_cpu;
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001444 }
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001445 }
1446 }
Xiaotian Fengcd4ae6a2011-04-22 18:53:54 +08001447 rcu_read_unlock();
Gregory Haskins6e1254d2008-01-25 21:08:11 +01001448
1449 /*
1450 * And finally, if there were no matches within the domains
1451 * just give the caller *something* to work with from the compatible
1452 * locations.
1453 */
Rusty Russelle2c88062009-11-03 14:53:15 +10301454 if (this_cpu != -1)
1455 return this_cpu;
1456
1457 cpu = cpumask_any(lowest_mask);
1458 if (cpu < nr_cpu_ids)
1459 return cpu;
1460 return -1;
Gregory Haskins07b40322008-01-25 21:08:10 +01001461}
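
/*
 * Illustrative sketch (not built here): stripped of the sched_domain walk,
 * the selection order above is "the task's last CPU, then this_cpu, then
 * any CPU left in the mask".  A heavily simplified userspace analogue over
 * a plain bitmask, with pick_cpu() as a hypothetical stand-in:
 */
#if 0
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int pick_cpu(unsigned int lowest_mask, int task_cpu, int this_cpu)
{
	if (lowest_mask & (1u << task_cpu))
		return task_cpu;		/* most likely cache-hot */
	if (this_cpu >= 0 && (lowest_mask & (1u << this_cpu)))
		return this_cpu;		/* cheaper than a remote CPU */
	return ffs(lowest_mask) - 1;		/* -1 if the mask is empty */
}

int main(void)
{
	/* CPUs 1 and 3 currently run the lowest-priority tasks */
	unsigned int lowest_mask = (1u << 1) | (1u << 3);

	printf("target cpu: %d\n", pick_cpu(lowest_mask, 3, 0));
	return 0;
}
#endif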
1462
Steven Rostedte8fa1362008-01-25 21:08:05 +01001463/* Will lock the rq it finds */
Ingo Molnar4df64c02008-01-25 21:08:15 +01001464static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001465{
1466 struct rq *lowest_rq = NULL;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001467 int tries;
Ingo Molnar4df64c02008-01-25 21:08:15 +01001468 int cpu;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001469
1470 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
Gregory Haskins07b40322008-01-25 21:08:10 +01001471 cpu = find_lowest_rq(task);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001472
Gregory Haskins2de0b462008-01-25 21:08:10 +01001473 if ((cpu == -1) || (cpu == rq->cpu))
Steven Rostedte8fa1362008-01-25 21:08:05 +01001474 break;
1475
Gregory Haskins07b40322008-01-25 21:08:10 +01001476 lowest_rq = cpu_rq(cpu);
1477
Steven Rostedte8fa1362008-01-25 21:08:05 +01001478 /* if the prio of this runqueue changed, try again */
Gregory Haskins07b40322008-01-25 21:08:10 +01001479 if (double_lock_balance(rq, lowest_rq)) {
Steven Rostedte8fa1362008-01-25 21:08:05 +01001480 /*
1481 * We had to unlock the run queue. In
	1482			 * the meantime, the task could have
	1483			 * already migrated or had its affinity changed.
1484 * Also make sure that it wasn't scheduled on its rq.
1485 */
Gregory Haskins07b40322008-01-25 21:08:10 +01001486 if (unlikely(task_rq(task) != rq ||
Rusty Russell96f874e2008-11-25 02:35:14 +10301487 !cpumask_test_cpu(lowest_rq->cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02001488 tsk_cpus_allowed(task)) ||
Gregory Haskins07b40322008-01-25 21:08:10 +01001489 task_running(rq, task) ||
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001490 !task->on_rq)) {
Ingo Molnar4df64c02008-01-25 21:08:15 +01001491
Peter Zijlstra7f1b4392012-05-17 21:19:46 +02001492 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001493 lowest_rq = NULL;
1494 break;
1495 }
1496 }
1497
1498 /* If this rq is still suitable use it. */
Gregory Haskinse864c492008-12-29 09:39:49 -05001499 if (lowest_rq->rt.highest_prio.curr > task->prio)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001500 break;
1501
1502 /* try again */
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001503 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001504 lowest_rq = NULL;
1505 }
1506
1507 return lowest_rq;
1508}
1509
Gregory Haskins917b6272008-12-29 09:39:53 -05001510static struct task_struct *pick_next_pushable_task(struct rq *rq)
1511{
1512 struct task_struct *p;
1513
1514 if (!has_pushable_tasks(rq))
1515 return NULL;
1516
1517 p = plist_first_entry(&rq->rt.pushable_tasks,
1518 struct task_struct, pushable_tasks);
1519
1520 BUG_ON(rq->cpu != task_cpu(p));
1521 BUG_ON(task_current(rq, p));
Peter Zijlstra29baa742012-04-23 12:11:21 +02001522 BUG_ON(p->nr_cpus_allowed <= 1);
Gregory Haskins917b6272008-12-29 09:39:53 -05001523
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001524 BUG_ON(!p->on_rq);
Gregory Haskins917b6272008-12-29 09:39:53 -05001525 BUG_ON(!rt_task(p));
1526
1527 return p;
1528}
1529
Steven Rostedte8fa1362008-01-25 21:08:05 +01001530/*
	1531 * If the current CPU has more than one RT task, see if the non-
	1532 * running task can migrate over to a CPU that is running a task
1533 * of lesser priority.
1534 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001535static int push_rt_task(struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001536{
1537 struct task_struct *next_task;
1538 struct rq *lowest_rq;
Hillf Danton311e8002011-06-16 21:55:20 -04001539 int ret = 0;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001540
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +01001541 if (!rq->rt.overloaded)
1542 return 0;
1543
Gregory Haskins917b6272008-12-29 09:39:53 -05001544 next_task = pick_next_pushable_task(rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001545 if (!next_task)
1546 return 0;
1547
Peter Zijlstra49246272010-10-17 21:46:10 +02001548retry:
Gregory Haskins697f0a42008-01-25 21:08:09 +01001549 if (unlikely(next_task == rq->curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001550 WARN_ON(1);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001551 return 0;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001552 }
Steven Rostedte8fa1362008-01-25 21:08:05 +01001553
1554 /*
	1555	 * It's possible that next_task slipped in with a
	1556	 * higher priority than current. If that's the case,
	1557	 * just reschedule current.
1558 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001559 if (unlikely(next_task->prio < rq->curr->prio)) {
1560 resched_task(rq->curr);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001561 return 0;
1562 }
1563
Gregory Haskins697f0a42008-01-25 21:08:09 +01001564 /* We might release rq lock */
Steven Rostedte8fa1362008-01-25 21:08:05 +01001565 get_task_struct(next_task);
1566
1567 /* find_lock_lowest_rq locks the rq if found */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001568 lowest_rq = find_lock_lowest_rq(next_task, rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001569 if (!lowest_rq) {
1570 struct task_struct *task;
1571 /*
Hillf Danton311e8002011-06-16 21:55:20 -04001572 * find_lock_lowest_rq releases rq->lock
Gregory Haskins15635132008-12-29 09:39:53 -05001573 * so it is possible that next_task has migrated.
1574 *
1575 * We need to make sure that the task is still on the same
1576 * run-queue and is also still the next task eligible for
1577 * pushing.
Steven Rostedte8fa1362008-01-25 21:08:05 +01001578 */
Gregory Haskins917b6272008-12-29 09:39:53 -05001579 task = pick_next_pushable_task(rq);
Gregory Haskins15635132008-12-29 09:39:53 -05001580 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1581 /*
Hillf Danton311e8002011-06-16 21:55:20 -04001582 * The task hasn't migrated, and is still the next
1583 * eligible task, but we failed to find a run-queue
1584 * to push it to. Do not retry in this case, since
1585 * other cpus will pull from us when ready.
Gregory Haskins15635132008-12-29 09:39:53 -05001586 */
Gregory Haskins15635132008-12-29 09:39:53 -05001587 goto out;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001588 }
Gregory Haskins917b6272008-12-29 09:39:53 -05001589
Gregory Haskins15635132008-12-29 09:39:53 -05001590 if (!task)
1591 /* No more tasks, just exit */
1592 goto out;
1593
Gregory Haskins917b6272008-12-29 09:39:53 -05001594 /*
Gregory Haskins15635132008-12-29 09:39:53 -05001595 * Something has shifted, try again.
Gregory Haskins917b6272008-12-29 09:39:53 -05001596 */
Gregory Haskins15635132008-12-29 09:39:53 -05001597 put_task_struct(next_task);
1598 next_task = task;
1599 goto retry;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001600 }
1601
Gregory Haskins697f0a42008-01-25 21:08:09 +01001602 deactivate_task(rq, next_task, 0);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001603 set_task_cpu(next_task, lowest_rq->cpu);
1604 activate_task(lowest_rq, next_task, 0);
Hillf Danton311e8002011-06-16 21:55:20 -04001605 ret = 1;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001606
1607 resched_task(lowest_rq->curr);
1608
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001609 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001610
Steven Rostedte8fa1362008-01-25 21:08:05 +01001611out:
1612 put_task_struct(next_task);
1613
Hillf Danton311e8002011-06-16 21:55:20 -04001614 return ret;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001615}
1616
Steven Rostedte8fa1362008-01-25 21:08:05 +01001617static void push_rt_tasks(struct rq *rq)
1618{
	1619	/* push_rt_task() will return true if it moved an RT task */
1620 while (push_rt_task(rq))
1621 ;
1622}
1623
Steven Rostedtf65eda42008-01-25 21:08:07 +01001624static int pull_rt_task(struct rq *this_rq)
1625{
Ingo Molnar80bf3172008-01-25 21:08:17 +01001626 int this_cpu = this_rq->cpu, ret = 0, cpu;
Gregory Haskinsa8728942008-12-29 09:39:49 -05001627 struct task_struct *p;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001628 struct rq *src_rq;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001629
Gregory Haskins637f5082008-01-25 21:08:18 +01001630 if (likely(!rt_overloaded(this_rq)))
Steven Rostedtf65eda42008-01-25 21:08:07 +01001631 return 0;
1632
Rusty Russellc6c49272008-11-25 02:35:05 +10301633 for_each_cpu(cpu, this_rq->rd->rto_mask) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001634 if (this_cpu == cpu)
1635 continue;
1636
1637 src_rq = cpu_rq(cpu);
Gregory Haskins74ab8e42008-12-29 09:39:50 -05001638
1639 /*
1640 * Don't bother taking the src_rq->lock if the next highest
1641 * task is known to be lower-priority than our current task.
1642 * This may look racy, but if this value is about to go
1643 * logically higher, the src_rq will push this task away.
	1644		 * And if it's going logically lower, we do not care.
1645 */
1646 if (src_rq->rt.highest_prio.next >=
1647 this_rq->rt.highest_prio.curr)
1648 continue;
1649
Steven Rostedtf65eda42008-01-25 21:08:07 +01001650 /*
1651 * We can potentially drop this_rq's lock in
1652 * double_lock_balance, and another CPU could
Gregory Haskinsa8728942008-12-29 09:39:49 -05001653 * alter this_rq
Steven Rostedtf65eda42008-01-25 21:08:07 +01001654 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001655 double_lock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001656
1657 /*
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001658		 * We can only pull a task that is pushable
	1659		 * on its rq, and no others.
Steven Rostedtf65eda42008-01-25 21:08:07 +01001660 */
Kirill Tkhaie23ee742013-06-07 15:37:43 -04001661 p = pick_highest_pushable_task(src_rq, this_cpu);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001662
1663 /*
1664 * Do we have an RT task that preempts
1665 * the to-be-scheduled task?
1666 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001667 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001668 WARN_ON(p == src_rq->curr);
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001669 WARN_ON(!p->on_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001670
1671 /*
1672 * There's a chance that p is higher in priority
1673 * than what's currently running on its cpu.
	1674			 * This is just because p is waking up and hasn't
	1675			 * had a chance to schedule yet. We only pull
	1676			 * p if it is lower in priority than the
Gregory Haskinsa8728942008-12-29 09:39:49 -05001677			 * current task on the run queue.
Steven Rostedtf65eda42008-01-25 21:08:07 +01001678 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001679 if (p->prio < src_rq->curr->prio)
Mike Galbraith614ee1f2008-01-25 21:08:30 +01001680 goto skip;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001681
1682 ret = 1;
1683
1684 deactivate_task(src_rq, p, 0);
1685 set_task_cpu(p, this_cpu);
1686 activate_task(this_rq, p, 0);
1687 /*
1688 * We continue with the search, just in
1689 * case there's an even higher prio task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001690 * in another runqueue. (low likelihood
Steven Rostedtf65eda42008-01-25 21:08:07 +01001691 * but possible)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001692 */
Steven Rostedtf65eda42008-01-25 21:08:07 +01001693 }
Peter Zijlstra49246272010-10-17 21:46:10 +02001694skip:
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001695 double_unlock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001696 }
1697
1698 return ret;
1699}
1700
Steven Rostedt9a897c52008-01-25 21:08:22 +01001701static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001702{
1703 /* Try to pull RT tasks here if we lower this rq's prio */
Yong Zhang33c3d6c2010-02-09 14:43:59 -05001704 if (rq->rt.highest_prio.curr > prev->prio)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001705 pull_rt_task(rq);
1706}
1707
Steven Rostedt9a897c52008-01-25 21:08:22 +01001708static void post_schedule_rt(struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001709{
Gregory Haskins967fc042008-12-29 09:39:52 -05001710 push_rt_tasks(rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001711}
1712
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001713/*
1714 * If we are not running and we are not going to reschedule soon, we should
1715 * try to push tasks away now
1716 */
Peter Zijlstraefbbd052009-12-16 18:04:40 +01001717static void task_woken_rt(struct rq *rq, struct task_struct *p)
Steven Rostedt4642daf2008-01-25 21:08:07 +01001718{
Steven Rostedt9a897c52008-01-25 21:08:22 +01001719 if (!task_running(rq, p) &&
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001720 !test_tsk_need_resched(rq->curr) &&
Gregory Haskins917b6272008-12-29 09:39:53 -05001721 has_pushable_tasks(rq) &&
Peter Zijlstra29baa742012-04-23 12:11:21 +02001722 p->nr_cpus_allowed > 1 &&
Steven Rostedt43fa5462010-09-20 22:40:03 -04001723 rt_task(rq->curr) &&
Peter Zijlstra29baa742012-04-23 12:11:21 +02001724 (rq->curr->nr_cpus_allowed < 2 ||
Shawn Bohrer3be209a2011-09-12 09:28:04 -05001725 rq->curr->prio <= p->prio))
Steven Rostedt4642daf2008-01-25 21:08:07 +01001726 push_rt_tasks(rq);
1727}
1728
Mike Traviscd8ba7c2008-03-26 14:23:49 -07001729static void set_cpus_allowed_rt(struct task_struct *p,
Rusty Russell96f874e2008-11-25 02:35:14 +10301730 const struct cpumask *new_mask)
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001731{
Kirill Tkhai8d3d5ad2012-04-11 09:06:04 +04001732 struct rq *rq;
1733 int weight;
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001734
1735 BUG_ON(!rt_task(p));
1736
Kirill Tkhai8d3d5ad2012-04-11 09:06:04 +04001737 if (!p->on_rq)
1738 return;
1739
1740 weight = cpumask_weight(new_mask);
1741
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001742 /*
Kirill Tkhai8d3d5ad2012-04-11 09:06:04 +04001743	 * Only update the migratory accounting if the process actually
	1744	 * changes whether or not it can migrate.
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001745 */
Peter Zijlstra29baa742012-04-23 12:11:21 +02001746 if ((p->nr_cpus_allowed > 1) == (weight > 1))
Kirill Tkhai8d3d5ad2012-04-11 09:06:04 +04001747 return;
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001748
Kirill Tkhai8d3d5ad2012-04-11 09:06:04 +04001749 rq = task_rq(p);
Gregory Haskins917b6272008-12-29 09:39:53 -05001750
Kirill Tkhai8d3d5ad2012-04-11 09:06:04 +04001751 /*
	1752	 * Either the process used to be able to migrate, or it can now migrate.
1753 */
1754 if (weight <= 1) {
1755 if (!task_current(rq, p))
1756 dequeue_pushable_task(rq, p);
1757 BUG_ON(!rq->rt.rt_nr_migratory);
1758 rq->rt.rt_nr_migratory--;
1759 } else {
1760 if (!task_current(rq, p))
1761 enqueue_pushable_task(rq, p);
1762 rq->rt.rt_nr_migratory++;
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001763 }
Kirill Tkhai8d3d5ad2012-04-11 09:06:04 +04001764
1765 update_rt_migration(&rq->rt);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001766}
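
/*
 * Illustrative userspace sketch (not built here): the weight <= 1 path
 * above is what an RT task hits when it is pinned to a single CPU, after
 * which it can no longer be pushed or pulled.  A minimal pinning call,
 * assuming the calling task is already an RT task, could look like this:
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);	/* allow CPU 0 only: affinity weight becomes 1 */

	if (sched_setaffinity(getpid(), sizeof(set), &set) != 0)
		perror("sched_setaffinity");
	return 0;
}
#endif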
Ingo Molnardeeeccd2008-01-25 21:08:15 +01001767
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001768/* Assumes rq->lock is held */
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001769static void rq_online_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001770{
1771 if (rq->rt.overloaded)
1772 rt_set_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001773
Peter Zijlstra7def2be2008-06-05 14:49:58 +02001774 __enable_runtime(rq);
1775
Gregory Haskinse864c492008-12-29 09:39:49 -05001776 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001777}
1778
1779/* Assumes rq->lock is held */
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001780static void rq_offline_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001781{
1782 if (rq->rt.overloaded)
1783 rt_clear_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001784
Peter Zijlstra7def2be2008-06-05 14:49:58 +02001785 __disable_runtime(rq);
1786
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001787 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001788}
Steven Rostedtcb469842008-01-25 21:08:22 +01001789
1790/*
	1791 * When switching away from the rt queue, we bring ourselves into a
	1792 * position where we might want to pull RT tasks from other runqueues.
1793 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01001794static void switched_from_rt(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01001795{
1796 /*
1797 * If there are other RT tasks then we will reschedule
1798 * and the scheduling of the other RT tasks will handle
1799 * the balancing. But if we are the last RT task
1800 * we may need to handle the pulling of RT tasks
1801 * now.
1802 */
Kirill Tkhai1158ddb2012-11-23 00:02:15 +04001803 if (!p->on_rq || rq->rt.rt_nr_running)
1804 return;
1805
1806 if (pull_rt_task(rq))
1807 resched_task(rq->curr);
Steven Rostedtcb469842008-01-25 21:08:22 +01001808}
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10301809
Peter Zijlstra029632f2011-10-25 10:00:11 +02001810void init_sched_rt_class(void)
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10301811{
1812 unsigned int i;
1813
Peter Zijlstra029632f2011-10-25 10:00:11 +02001814 for_each_possible_cpu(i) {
Yinghai Lueaa958402009-06-06 14:51:36 -07001815 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
Mike Travis6ca09df2008-12-31 18:08:45 -08001816 GFP_KERNEL, cpu_to_node(i));
Peter Zijlstra029632f2011-10-25 10:00:11 +02001817 }
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10301818}
Steven Rostedte8fa1362008-01-25 21:08:05 +01001819#endif /* CONFIG_SMP */
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001820
Steven Rostedtcb469842008-01-25 21:08:22 +01001821/*
1822 * When switching a task to RT, we may overload the runqueue
1823 * with RT tasks. In this case we try to push them off to
1824 * other runqueues.
1825 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01001826static void switched_to_rt(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01001827{
1828 int check_resched = 1;
1829
1830 /*
1831 * If we are already running, then there's nothing
1832 * that needs to be done. But if we are not running
1833 * we may need to preempt the current running task.
1834 * If that current running task is also an RT task
1835 * then see if we can move to another run queue.
1836 */
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001837 if (p->on_rq && rq->curr != p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01001838#ifdef CONFIG_SMP
1839 if (rq->rt.overloaded && push_rt_task(rq) &&
1840 /* Don't resched if we changed runqueues */
1841 rq != task_rq(p))
1842 check_resched = 0;
1843#endif /* CONFIG_SMP */
1844 if (check_resched && p->prio < rq->curr->prio)
1845 resched_task(rq->curr);
1846 }
1847}
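
/*
 * Illustrative userspace sketch (not built here): switched_to_rt() is
 * reached when a task moves into the RT class, for example via
 * sched_setscheduler().  This normally needs CAP_SYS_NICE or a suitable
 * RLIMIT_RTPRIO; the priority value below is only an example:
 */
#if 0
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 10 };

	/* move the calling task from SCHED_OTHER into SCHED_FIFO */
	if (sched_setscheduler(getpid(), SCHED_FIFO, &param) != 0)
		perror("sched_setscheduler");
	return 0;
}
#endif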
1848
1849/*
1850 * Priority of the task has changed. This may cause
1851 * us to initiate a push or pull.
1852 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01001853static void
1854prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01001855{
Peter Zijlstrafd2f4412011-04-05 17:23:44 +02001856 if (!p->on_rq)
Peter Zijlstrada7a7352011-01-17 17:03:27 +01001857 return;
1858
1859 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01001860#ifdef CONFIG_SMP
1861 /*
1862 * If our priority decreases while running, we
1863 * may need to pull tasks to this runqueue.
1864 */
1865 if (oldprio < p->prio)
1866 pull_rt_task(rq);
1867 /*
1868 * If there's a higher priority task waiting to run
Steven Rostedt6fa46fa2008-03-05 10:00:12 -05001869 * then reschedule. Note, the above pull_rt_task
1870 * can release the rq lock and p could migrate.
1871 * Only reschedule if p is still on the same runqueue.
Steven Rostedtcb469842008-01-25 21:08:22 +01001872 */
Gregory Haskinse864c492008-12-29 09:39:49 -05001873 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01001874 resched_task(p);
1875#else
1876 /* For UP simply resched on drop of prio */
1877 if (oldprio < p->prio)
1878 resched_task(p);
1879#endif /* CONFIG_SMP */
1880 } else {
1881 /*
	1882		 * This task is not running, but if its priority is
	1883		 * higher than that of the currently running task,
	1884		 * then reschedule.
1885 */
1886 if (p->prio < rq->curr->prio)
1887 resched_task(rq->curr);
1888 }
1889}
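
/*
 * Illustrative userspace sketch (not built here): prio_changed_rt() runs
 * when an RT task's priority changes, for instance through sched_setparam()
 * (priority inheritance is another path).  Raising an already-RT task to
 * priority 20 could look like this; the value is only an example:
 */
#if 0
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 20 };

	if (sched_setparam(getpid(), &param) != 0)
		perror("sched_setparam");
	return 0;
}
#endif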
1890
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001891static void watchdog(struct rq *rq, struct task_struct *p)
1892{
1893 unsigned long soft, hard;
1894
Jiri Slaby78d7d402010-03-05 13:42:54 -08001895 /* max may change after cur was read, this will be fixed next tick */
1896 soft = task_rlimit(p, RLIMIT_RTTIME);
1897 hard = task_rlimit_max(p, RLIMIT_RTTIME);
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001898
1899 if (soft != RLIM_INFINITY) {
1900 unsigned long next;
1901
Ying Xue57d2aa02012-07-17 15:03:43 +08001902 if (p->rt.watchdog_stamp != jiffies) {
1903 p->rt.timeout++;
1904 p->rt.watchdog_stamp = jiffies;
1905 }
1906
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001907 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
Peter Zijlstra5a52dd52008-01-25 21:08:32 +01001908 if (p->rt.timeout > next)
Frank Mayharf06febc2008-09-12 09:54:39 -07001909 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001910 }
1911}
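
/*
 * Illustrative userspace sketch (not built here): the watchdog above
 * enforces RLIMIT_RTTIME, the per-task budget (in microseconds) of CPU
 * time an RT task may burn without blocking; exceeding the soft limit
 * results in SIGXCPU.  Setting a 5 ms soft limit could look like this:
 */
#if 0
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rlim = {
		.rlim_cur = 5000,		/* soft limit: 5000 us */
		.rlim_max = RLIM_INFINITY,	/* hard limit: unlimited */
	};

	if (setrlimit(RLIMIT_RTTIME, &rlim) != 0)
		perror("setrlimit");
	return 0;
}
#endif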
Steven Rostedtcb469842008-01-25 21:08:22 +01001912
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001913static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001914{
Colin Cross454c7992012-05-16 21:34:23 -07001915 struct sched_rt_entity *rt_se = &p->rt;
1916
Peter Zijlstra67e2be02007-12-20 15:01:17 +01001917 update_curr_rt(rq);
1918
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001919 watchdog(rq, p);
1920
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001921 /*
1922 * RR tasks need a special form of timeslice management.
1923 * FIFO tasks have no timeslices.
1924 */
1925 if (p->policy != SCHED_RR)
1926 return;
1927
Peter Zijlstrafa717062008-01-25 21:08:27 +01001928 if (--p->rt.time_slice)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001929 return;
1930
Clark Williamsce0dbbb2013-02-07 09:47:04 -06001931 p->rt.time_slice = sched_rr_timeslice;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001932
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02001933 /*
Colin Cross454c7992012-05-16 21:34:23 -07001934	 * Requeue to the end of the queue unless we (and all of our ancestors)
	1935	 * are the only element on the queue
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02001936 */
Colin Cross454c7992012-05-16 21:34:23 -07001937 for_each_sched_rt_entity(rt_se) {
1938 if (rt_se->run_list.prev != rt_se->run_list.next) {
1939 requeue_task_rt(rq, p, 0);
1940 set_tsk_need_resched(p);
1941 return;
1942 }
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02001943 }
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001944}
1945
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001946static void set_curr_task_rt(struct rq *rq)
1947{
1948 struct task_struct *p = rq->curr;
1949
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001950 p->se.exec_start = rq_clock_task(rq);
Gregory Haskins917b6272008-12-29 09:39:53 -05001951
1952 /* The running task is never eligible for pushing */
1953 dequeue_pushable_task(rq, p);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001954}
1955
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07001956static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00001957{
1958 /*
1959 * Time slice is 0 for SCHED_FIFO tasks
1960 */
1961 if (task->policy == SCHED_RR)
Clark Williamsce0dbbb2013-02-07 09:47:04 -06001962 return sched_rr_timeslice;
Peter Williams0d721ce2009-09-21 01:31:53 +00001963 else
1964 return 0;
1965}
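
/*
 * Illustrative userspace sketch (not built here): the value returned above
 * is what sched_rr_get_interval() reports.  Querying the RR timeslice of
 * the calling task (pid 0), assuming it already runs under SCHED_RR:
 */
#if 0
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)
		printf("RR timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, (long)ts.tv_nsec);
	else
		perror("sched_rr_get_interval");
	return 0;
}
#endif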
1966
Peter Zijlstra029632f2011-10-25 10:00:11 +02001967const struct sched_class rt_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02001968 .next = &fair_sched_class,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001969 .enqueue_task = enqueue_task_rt,
1970 .dequeue_task = dequeue_task_rt,
1971 .yield_task = yield_task_rt,
1972
1973 .check_preempt_curr = check_preempt_curr_rt,
1974
1975 .pick_next_task = pick_next_task_rt,
1976 .put_prev_task = put_prev_task_rt,
1977
Peter Williams681f3e62007-10-24 18:23:51 +02001978#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08001979 .select_task_rq = select_task_rq_rt,
1980
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001981 .set_cpus_allowed = set_cpus_allowed_rt,
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001982 .rq_online = rq_online_rt,
1983 .rq_offline = rq_offline_rt,
Steven Rostedt9a897c52008-01-25 21:08:22 +01001984 .pre_schedule = pre_schedule_rt,
1985 .post_schedule = post_schedule_rt,
Peter Zijlstraefbbd052009-12-16 18:04:40 +01001986 .task_woken = task_woken_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01001987 .switched_from = switched_from_rt,
Peter Williams681f3e62007-10-24 18:23:51 +02001988#endif
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001989
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001990 .set_curr_task = set_curr_task_rt,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001991 .task_tick = task_tick_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01001992
Peter Williams0d721ce2009-09-21 01:31:53 +00001993 .get_rr_interval = get_rr_interval_rt,
1994
Steven Rostedtcb469842008-01-25 21:08:22 +01001995 .prio_changed = prio_changed_rt,
1996 .switched_to = switched_to_rt,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001997};
Peter Zijlstraada18de2008-06-19 14:22:24 +02001998
1999#ifdef CONFIG_SCHED_DEBUG
2000extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2001
Peter Zijlstra029632f2011-10-25 10:00:11 +02002002void print_rt_stats(struct seq_file *m, int cpu)
Peter Zijlstraada18de2008-06-19 14:22:24 +02002003{
Cheng Xuec514c42011-05-14 14:20:02 +08002004 rt_rq_iter_t iter;
Peter Zijlstraada18de2008-06-19 14:22:24 +02002005 struct rt_rq *rt_rq;
2006
2007 rcu_read_lock();
Cheng Xuec514c42011-05-14 14:20:02 +08002008 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
Peter Zijlstraada18de2008-06-19 14:22:24 +02002009 print_rt_rq(m, cpu, rt_rq);
2010 rcu_read_unlock();
2011}
Dhaval Giani55e12e52008-06-24 23:39:43 +05302012#endif /* CONFIG_SCHED_DEBUG */