/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_RT_GROUP_SCHED

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

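/*
 * Update this runqueue's overload state: an rq is considered overloaded
 * when it has more than one runnable RT task and at least one of them
 * can migrate, so that other CPUs may pull from it.
 */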
static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

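/*
 * Migration accounting is only done for real tasks (group entities are
 * skipped) and always against the root rt_rq of the cpu, regardless of
 * which group rt_rq the entity sits on.
 */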
static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

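/*
 * Maintain the priority-ordered plist of pushable tasks. The node is
 * deleted and re-initialized on every enqueue because the task's prio
 * may have changed since it was last queued.
 */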
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

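/*
 * (Re)enqueue a group's rt entity into its parent and preempt the
 * currently running task if the group now holds a higher-priority task.
 */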
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
		    rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

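/*
 * Called with rt_rq->rt_runtime_lock held; drop it around the actual
 * borrowing so do_balance_runtime() can take the bandwidth lock and the
 * per-rq iterator locks in the usual order.
 */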
static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

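/*
 * Run from the bandwidth period timer: recharge each cpu's runtime and
 * unthrottle (and requeue) runqueues that have paid back their overrun.
 * Returns 1 when all runqueues are idle so the caller can let the timer
 * lapse. With the usual defaults this refresh happens once per second,
 * handing out 950ms of rt runtime.
 */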
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running)
			idle = 0;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

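/*
 * Check whether this rt_rq has used up its runtime for the current
 * period, borrowing from neighbours first. Returns nonzero when the
 * queue is (or just became) throttled and had to be dequeued.
 */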
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

#if defined CONFIG_SMP

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);

static inline int next_prio(struct rq *rq)
{
	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);

	if (next && rt_prio(next->prio))
		return next->prio;
	else
		return MAX_RT_PRIO;
}

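/*
 * Keep the two-deep priority cache (highest_prio.curr/.next) and the
 * cpupri map in sync when an entity of priority @prio is added to or
 * removed from the runqueue.
 */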
static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (prio < prev_prio) {

		/*
		 * If the new task is higher in priority than anything on the
		 * run-queue, we know that the previous high becomes our
		 * next-highest.
		 */
		rt_rq->highest_prio.next = prev_prio;

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

	} else if (prio == rt_rq->highest_prio.curr)
		/*
		 * If the next task is equal in priority to the highest on
		 * the run-queue, then we implicitly know that the next highest
		 * task cannot be any lower than current
		 */
		rt_rq->highest_prio.next = prio;
	else if (prio < rt_rq->highest_prio.next)
		/*
		 * Otherwise, we need to recompute next-highest
		 */
		rt_rq->highest_prio.next = next_prio(rq);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
		rt_rq->highest_prio.next = next_prio(rq);

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, head);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
	struct rq *rq = task_rq(p);

	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if the woken
	 * RT task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we waste all the cache for the lower
	 * RT task in hopes of saving some of an RT task
	 * that is just being woken and probably will have
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}

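/*
 * An equal-priority task just woke up: if current could run on another
 * cpu while 'p' cannot readily be pushed elsewhere, requeue 'p' at the
 * head and reschedule so the push logic can move current away.
 */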
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !need_resched())
		check_preempt_equal_prio(rq, p);
#endif
}

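/*
 * Pick the highest-priority entity on this rt_rq: the first set bit in
 * the priority bitmap indexes the sub-queue whose head we take.
 */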
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
#endif

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

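/*
 * A task is a candidate for migration if it isn't currently running,
 * may run on @cpu (or @cpu is negative), and is allowed on more than
 * one cpu.
 */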
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
 next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

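/*
 * Find a cpu to push @task to: cpupri builds the mask of cpus running
 * only lower-priority work, then affinity and topology pick the
 * cheapest one: the task's last cpu, then this cpu, then a
 * domain-local cpu, and finally any cpu in the mask.
 */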
static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
				return this_cpu;

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				raw_spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

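/*
 * Peek at the highest-priority task on the pushable list, asserting
 * the invariants the list is supposed to maintain (on this rq, not
 * running, migratable, queued, RT).
 */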
Gregory Haskins917b6272008-12-29 09:39:53 -05001271static struct task_struct *pick_next_pushable_task(struct rq *rq)
1272{
1273 struct task_struct *p;
1274
1275 if (!has_pushable_tasks(rq))
1276 return NULL;
1277
1278 p = plist_first_entry(&rq->rt.pushable_tasks,
1279 struct task_struct, pushable_tasks);
1280
1281 BUG_ON(rq->cpu != task_cpu(p));
1282 BUG_ON(task_current(rq, p));
1283 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1284
1285 BUG_ON(!p->se.on_rq);
1286 BUG_ON(!rt_task(p));
1287
1288 return p;
1289}
1290
Steven Rostedte8fa1362008-01-25 21:08:05 +01001291/*
1292 * If the current CPU has more than one RT task, see if the non
1293 * running task can migrate over to a CPU that is running a task
1294 * of lesser priority.
1295 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001296static int push_rt_task(struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001297{
1298 struct task_struct *next_task;
1299 struct rq *lowest_rq;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001300
Gregory Haskinsa22d7fc2008-01-25 21:08:12 +01001301 if (!rq->rt.overloaded)
1302 return 0;
1303
Gregory Haskins917b6272008-12-29 09:39:53 -05001304 next_task = pick_next_pushable_task(rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001305 if (!next_task)
1306 return 0;
1307
1308 retry:
Gregory Haskins697f0a42008-01-25 21:08:09 +01001309 if (unlikely(next_task == rq->curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001310 WARN_ON(1);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001311 return 0;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001312 }
Steven Rostedte8fa1362008-01-25 21:08:05 +01001313
1314 /*
1315	 * It's possible that the next_task slipped in with a
1316	 * higher priority than current. If that's the case,
1317	 * just reschedule current.
1318 */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001319 if (unlikely(next_task->prio < rq->curr->prio)) {
1320 resched_task(rq->curr);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001321 return 0;
1322 }
1323
Gregory Haskins697f0a42008-01-25 21:08:09 +01001324 /* We might release rq lock */
Steven Rostedte8fa1362008-01-25 21:08:05 +01001325 get_task_struct(next_task);
1326
1327 /* find_lock_lowest_rq locks the rq if found */
Gregory Haskins697f0a42008-01-25 21:08:09 +01001328 lowest_rq = find_lock_lowest_rq(next_task, rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001329 if (!lowest_rq) {
1330 struct task_struct *task;
1331 /*
Gregory Haskins697f0a42008-01-25 21:08:09 +01001332		 * find_lock_lowest_rq releases rq->lock
Gregory Haskins15635132008-12-29 09:39:53 -05001333 * so it is possible that next_task has migrated.
1334 *
1335 * We need to make sure that the task is still on the same
1336 * run-queue and is also still the next task eligible for
1337 * pushing.
Steven Rostedte8fa1362008-01-25 21:08:05 +01001338 */
Gregory Haskins917b6272008-12-29 09:39:53 -05001339 task = pick_next_pushable_task(rq);
Gregory Haskins15635132008-12-29 09:39:53 -05001340 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1341 /*
1342			 * If we get here, the task hasn't moved at all, but
1343 * it has failed to push. We will not try again,
1344 * since the other cpus will pull from us when they
1345 * are ready.
1346 */
1347 dequeue_pushable_task(rq, next_task);
1348 goto out;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001349 }
Gregory Haskins917b6272008-12-29 09:39:53 -05001350
Gregory Haskins15635132008-12-29 09:39:53 -05001351 if (!task)
1352 /* No more tasks, just exit */
1353 goto out;
1354
Gregory Haskins917b6272008-12-29 09:39:53 -05001355 /*
Gregory Haskins15635132008-12-29 09:39:53 -05001356 * Something has shifted, try again.
Gregory Haskins917b6272008-12-29 09:39:53 -05001357 */
Gregory Haskins15635132008-12-29 09:39:53 -05001358 put_task_struct(next_task);
1359 next_task = task;
1360 goto retry;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001361 }
1362
Gregory Haskins697f0a42008-01-25 21:08:09 +01001363 deactivate_task(rq, next_task, 0);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001364 set_task_cpu(next_task, lowest_rq->cpu);
1365 activate_task(lowest_rq, next_task, 0);
1366
1367 resched_task(lowest_rq->curr);
1368
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001369 double_unlock_balance(rq, lowest_rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001370
Steven Rostedte8fa1362008-01-25 21:08:05 +01001371out:
1372 put_task_struct(next_task);
1373
Gregory Haskins917b6272008-12-29 09:39:53 -05001374 return 1;
Steven Rostedte8fa1362008-01-25 21:08:05 +01001375}
1376
Steven Rostedte8fa1362008-01-25 21:08:05 +01001377static void push_rt_tasks(struct rq *rq)
1378{
1379 /* push_rt_task will return true if it moved an RT */
1380 while (push_rt_task(rq))
1381 ;
1382}
1383
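/*
 * Pull RT tasks toward this_rq: walk the root domain's overload mask
 * (rto_mask) and, for each other overloaded CPU, look for a queued RT
 * task with a higher priority than anything on this_rq; if one is
 * found, migrate it here.  Returns 1 if at least one task was pulled.
 */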
Steven Rostedtf65eda42008-01-25 21:08:07 +01001384static int pull_rt_task(struct rq *this_rq)
1385{
Ingo Molnar80bf3172008-01-25 21:08:17 +01001386 int this_cpu = this_rq->cpu, ret = 0, cpu;
Gregory Haskinsa8728942008-12-29 09:39:49 -05001387 struct task_struct *p;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001388 struct rq *src_rq;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001389
Gregory Haskins637f5082008-01-25 21:08:18 +01001390 if (likely(!rt_overloaded(this_rq)))
Steven Rostedtf65eda42008-01-25 21:08:07 +01001391 return 0;
1392
Rusty Russellc6c49272008-11-25 02:35:05 +10301393 for_each_cpu(cpu, this_rq->rd->rto_mask) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001394 if (this_cpu == cpu)
1395 continue;
1396
1397 src_rq = cpu_rq(cpu);
Gregory Haskins74ab8e42008-12-29 09:39:50 -05001398
1399 /*
1400 * Don't bother taking the src_rq->lock if the next highest
1401 * task is known to be lower-priority than our current task.
1402 * This may look racy, but if this value is about to go
1403 * logically higher, the src_rq will push this task away.
1404		 * And if it's going logically lower, we do not care.
1405 */
1406 if (src_rq->rt.highest_prio.next >=
1407 this_rq->rt.highest_prio.curr)
1408 continue;
1409
Steven Rostedtf65eda42008-01-25 21:08:07 +01001410 /*
1411 * We can potentially drop this_rq's lock in
1412 * double_lock_balance, and another CPU could
Gregory Haskinsa8728942008-12-29 09:39:49 -05001413 * alter this_rq
Steven Rostedtf65eda42008-01-25 21:08:07 +01001414 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001415 double_lock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001416
1417 /*
1418 * Are there still pullable RT tasks?
1419 */
Mike Galbraith614ee1f2008-01-25 21:08:30 +01001420 if (src_rq->rt.rt_nr_running <= 1)
1421 goto skip;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001422
Steven Rostedtf65eda42008-01-25 21:08:07 +01001423 p = pick_next_highest_task_rt(src_rq, this_cpu);
1424
1425 /*
1426 * Do we have an RT task that preempts
1427 * the to-be-scheduled task?
1428 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001429 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
Steven Rostedtf65eda42008-01-25 21:08:07 +01001430 WARN_ON(p == src_rq->curr);
1431 WARN_ON(!p->se.on_rq);
1432
1433 /*
1434 * There's a chance that p is higher in priority
1435 * than what's currently running on its cpu.
1436			 * This can happen if p is just waking up and hasn't
1437 * had a chance to schedule. We only pull
1438 * p if it is lower in priority than the
Gregory Haskinsa8728942008-12-29 09:39:49 -05001439 * current task on the run queue
Steven Rostedtf65eda42008-01-25 21:08:07 +01001440 */
Gregory Haskinsa8728942008-12-29 09:39:49 -05001441 if (p->prio < src_rq->curr->prio)
Mike Galbraith614ee1f2008-01-25 21:08:30 +01001442 goto skip;
Steven Rostedtf65eda42008-01-25 21:08:07 +01001443
1444 ret = 1;
1445
1446 deactivate_task(src_rq, p, 0);
1447 set_task_cpu(p, this_cpu);
1448 activate_task(this_rq, p, 0);
1449 /*
1450 * We continue with the search, just in
1451 * case there's an even higher prio task
1452			 * in another runqueue. (low likelihood
1453 * but possible)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001454 */
Steven Rostedtf65eda42008-01-25 21:08:07 +01001455 }
Mike Galbraith614ee1f2008-01-25 21:08:30 +01001456 skip:
Peter Zijlstra1b12bbc2008-08-11 09:30:22 +02001457 double_unlock_balance(this_rq, src_rq);
Steven Rostedtf65eda42008-01-25 21:08:07 +01001458 }
1459
1460 return ret;
1461}
1462
Steven Rostedt9a897c52008-01-25 21:08:22 +01001463static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001464{
1465 /* Try to pull RT tasks here if we lower this rq's prio */
Gregory Haskinse864c492008-12-29 09:39:49 -05001466 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
Steven Rostedtf65eda42008-01-25 21:08:07 +01001467 pull_rt_task(rq);
1468}
1469
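/*
 * Runs after a context switch with the rq lock held: now that a new
 * task is on the CPU, try to push away any queued RT tasks that would
 * run sooner elsewhere.
 */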
Steven Rostedt9a897c52008-01-25 21:08:22 +01001470static void post_schedule_rt(struct rq *rq)
Steven Rostedte8fa1362008-01-25 21:08:05 +01001471{
Gregory Haskins967fc042008-12-29 09:39:52 -05001472 push_rt_tasks(rq);
Steven Rostedte8fa1362008-01-25 21:08:05 +01001473}
1474
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001475/*
1476 * If we are not running and we are not going to reschedule soon, we should
1477 * try to push tasks away now
1478 */
Peter Zijlstraefbbd052009-12-16 18:04:40 +01001479static void task_woken_rt(struct rq *rq, struct task_struct *p)
Steven Rostedt4642daf2008-01-25 21:08:07 +01001480{
Steven Rostedt9a897c52008-01-25 21:08:22 +01001481 if (!task_running(rq, p) &&
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001482 !test_tsk_need_resched(rq->curr) &&
Gregory Haskins917b6272008-12-29 09:39:53 -05001483 has_pushable_tasks(rq) &&
Gregory Haskins777c2f32008-12-29 09:39:50 -05001484 p->rt.nr_cpus_allowed > 1)
Steven Rostedt4642daf2008-01-25 21:08:07 +01001485 push_rt_tasks(rq);
1486}
1487
Mike Traviscd8ba7c2008-03-26 14:23:49 -07001488static void set_cpus_allowed_rt(struct task_struct *p,
Rusty Russell96f874e2008-11-25 02:35:14 +10301489 const struct cpumask *new_mask)
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001490{
Rusty Russell96f874e2008-11-25 02:35:14 +10301491 int weight = cpumask_weight(new_mask);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001492
1493 BUG_ON(!rt_task(p));
1494
1495 /*
1496 * Update the migration status of the RQ if we have an RT task
1497	 * which is queued AND whose allowed-CPU count (weight) is changing.
1498 */
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001499 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001500 struct rq *rq = task_rq(p);
1501
Gregory Haskins917b6272008-12-29 09:39:53 -05001502 if (!task_current(rq, p)) {
1503 /*
1504 * Make sure we dequeue this task from the pushable list
1505 * before going further. It will either remain off of
1506 * the list because we are no longer pushable, or it
1507 * will be requeued.
1508 */
1509 if (p->rt.nr_cpus_allowed > 1)
1510 dequeue_pushable_task(rq, p);
1511
1512 /*
1513 * Requeue if our weight is changing and still > 1
1514 */
1515 if (weight > 1)
1516 enqueue_pushable_task(rq, p);
1517
1518 }
1519
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001520 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001521 rq->rt.rt_nr_migratory++;
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001522 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001523 BUG_ON(!rq->rt.rt_nr_migratory);
1524 rq->rt.rt_nr_migratory--;
1525 }
1526
Gregory Haskins398a1532009-01-14 09:10:04 -05001527 update_rt_migration(&rq->rt);
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001528 }
1529
Rusty Russell96f874e2008-11-25 02:35:14 +10301530 cpumask_copy(&p->cpus_allowed, new_mask);
Peter Zijlstra6f505b12008-01-25 21:08:30 +01001531 p->rt.nr_cpus_allowed = weight;
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001532}
Ingo Molnardeeeccd2008-01-25 21:08:15 +01001533
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001534/* Assumes rq->lock is held */
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001535static void rq_online_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001536{
1537 if (rq->rt.overloaded)
1538 rt_set_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001539
Peter Zijlstra7def2be2008-06-05 14:49:58 +02001540 __enable_runtime(rq);
1541
Gregory Haskinse864c492008-12-29 09:39:49 -05001542 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001543}
1544
1545/* Assumes rq->lock is held */
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001546static void rq_offline_rt(struct rq *rq)
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001547{
1548 if (rq->rt.overloaded)
1549 rt_clear_overload(rq);
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001550
Peter Zijlstra7def2be2008-06-05 14:49:58 +02001551 __disable_runtime(rq);
1552
Gregory Haskins6e0534f2008-05-12 21:21:01 +02001553 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
Ingo Molnarbdd7c812008-01-25 21:08:18 +01001554}
Steven Rostedtcb469842008-01-25 21:08:22 +01001555
1556/*
1557 * When a task switches away from the RT class, we bring ourselves to a
1558 * position where we might want to pull RT tasks from other runqueues.
1559 */
1560static void switched_from_rt(struct rq *rq, struct task_struct *p,
1561 int running)
1562{
1563 /*
1564 * If there are other RT tasks then we will reschedule
1565 * and the scheduling of the other RT tasks will handle
1566 * the balancing. But if we are the last RT task
1567 * we may need to handle the pulling of RT tasks
1568 * now.
1569 */
1570 if (!rq->rt.rt_nr_running)
1571 pull_rt_task(rq);
1572}
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10301573
1574static inline void init_sched_rt_class(void)
1575{
1576 unsigned int i;
1577
1578 for_each_possible_cpu(i)
Yinghai Lueaa95842009-06-06 14:51:36 -07001579 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
Mike Travis6ca09df2008-12-31 18:08:45 -08001580 GFP_KERNEL, cpu_to_node(i));
Rusty Russell3d8cbdf2008-11-25 09:58:41 +10301581}
Steven Rostedte8fa1362008-01-25 21:08:05 +01001582#endif /* CONFIG_SMP */
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001583
Steven Rostedtcb469842008-01-25 21:08:22 +01001584/*
1585 * When switching a task to RT, we may overload the runqueue
1586 * with RT tasks. In this case we try to push them off to
1587 * other runqueues.
1588 */
1589static void switched_to_rt(struct rq *rq, struct task_struct *p,
1590 int running)
1591{
1592 int check_resched = 1;
1593
1594 /*
1595 * If we are already running, then there's nothing
1596 * that needs to be done. But if we are not running
1597 * we may need to preempt the current running task.
1598 * If that current running task is also an RT task
1599 * then see if we can move to another run queue.
1600 */
1601 if (!running) {
1602#ifdef CONFIG_SMP
1603 if (rq->rt.overloaded && push_rt_task(rq) &&
1604 /* Don't resched if we changed runqueues */
1605 rq != task_rq(p))
1606 check_resched = 0;
1607#endif /* CONFIG_SMP */
1608 if (check_resched && p->prio < rq->curr->prio)
1609 resched_task(rq->curr);
1610 }
1611}
1612
1613/*
1614 * Priority of the task has changed. This may cause
1615 * us to initiate a push or pull.
1616 */
1617static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1618 int oldprio, int running)
1619{
1620 if (running) {
1621#ifdef CONFIG_SMP
1622 /*
1623 * If our priority decreases while running, we
1624 * may need to pull tasks to this runqueue.
1625 */
1626 if (oldprio < p->prio)
1627 pull_rt_task(rq);
1628 /*
1629 * If there's a higher priority task waiting to run
Steven Rostedt6fa46fa2008-03-05 10:00:12 -05001630 * then reschedule. Note, the above pull_rt_task
1631 * can release the rq lock and p could migrate.
1632 * Only reschedule if p is still on the same runqueue.
Steven Rostedtcb469842008-01-25 21:08:22 +01001633 */
Gregory Haskinse864c492008-12-29 09:39:49 -05001634 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01001635 resched_task(p);
1636#else
1637 /* For UP simply resched on drop of prio */
1638 if (oldprio < p->prio)
1639 resched_task(p);
1640#endif /* CONFIG_SMP */
1641 } else {
1642 /*
1643		 * This task is not running, but if its priority
1644		 * is higher than that of the currently running task,
1645		 * then reschedule.
1646 */
1647 if (p->prio < rq->curr->prio)
1648 resched_task(rq->curr);
1649 }
1650}
1651
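/*
 * RLIMIT_RTTIME watchdog: p->rt.timeout counts the scheduler ticks this
 * task has spent on the CPU.  Once it exceeds the soft limit (converted
 * from microseconds to ticks), the task's CPU-time expiry is armed so
 * that the posix-cpu-timer code can notice the overrun and signal it.
 */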
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001652static void watchdog(struct rq *rq, struct task_struct *p)
1653{
1654 unsigned long soft, hard;
1655
1656 if (!p->signal)
1657 return;
1658
1659 soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1660 hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1661
1662 if (soft != RLIM_INFINITY) {
1663 unsigned long next;
1664
1665 p->rt.timeout++;
1666 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
Peter Zijlstra5a52dd52008-01-25 21:08:32 +01001667 if (p->rt.timeout > next)
Frank Mayharf06febc2008-09-12 09:54:39 -07001668 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001669 }
1670}
Steven Rostedtcb469842008-01-25 21:08:22 +01001671
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001672static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001673{
Peter Zijlstra67e2be02007-12-20 15:01:17 +01001674 update_curr_rt(rq);
1675
Peter Zijlstra78f2c7d2008-01-25 21:08:27 +01001676 watchdog(rq, p);
1677
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001678 /*
1679 * RR tasks need a special form of timeslice management.
1680 * FIFO tasks have no timeslices.
1681 */
1682 if (p->policy != SCHED_RR)
1683 return;
1684
Peter Zijlstrafa717062008-01-25 21:08:27 +01001685 if (--p->rt.time_slice)
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001686 return;
1687
Peter Zijlstrafa717062008-01-25 21:08:27 +01001688 p->rt.time_slice = DEF_TIMESLICE;
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001689
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02001690 /*
1691	 * Requeue to the end of the queue if we are not the only element
1692 * on the queue:
1693 */
Peter Zijlstrafa717062008-01-25 21:08:27 +01001694 if (p->rt.run_list.prev != p->rt.run_list.next) {
Dmitry Adamushko7ebefa82008-07-01 23:32:15 +02001695 requeue_task_rt(rq, p, 0);
Dmitry Adamushko98fbc792007-08-24 20:39:10 +02001696 set_tsk_need_resched(p);
1697 }
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001698}
1699
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001700static void set_curr_task_rt(struct rq *rq)
1701{
1702 struct task_struct *p = rq->curr;
1703
1704 p->se.exec_start = rq->clock;
Gregory Haskins917b6272008-12-29 09:39:53 -05001705
1706 /* The running task is never eligible for pushing */
1707 dequeue_pushable_task(rq, p);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001708}
1709
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07001710static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00001711{
1712 /*
1713 * Time slice is 0 for SCHED_FIFO tasks
1714 */
1715 if (task->policy == SCHED_RR)
1716 return DEF_TIMESLICE;
1717 else
1718 return 0;
1719}
1720
Harvey Harrison2abdad02008-04-25 10:53:13 -07001721static const struct sched_class rt_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02001722 .next = &fair_sched_class,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001723 .enqueue_task = enqueue_task_rt,
1724 .dequeue_task = dequeue_task_rt,
1725 .yield_task = yield_task_rt,
1726
1727 .check_preempt_curr = check_preempt_curr_rt,
1728
1729 .pick_next_task = pick_next_task_rt,
1730 .put_prev_task = put_prev_task_rt,
1731
Peter Williams681f3e62007-10-24 18:23:51 +02001732#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08001733 .select_task_rq = select_task_rq_rt,
1734
Gregory Haskins73fe6aa2008-01-25 21:08:07 +01001735 .set_cpus_allowed = set_cpus_allowed_rt,
Gregory Haskins1f11eb62008-06-04 15:04:05 -04001736 .rq_online = rq_online_rt,
1737 .rq_offline = rq_offline_rt,
Steven Rostedt9a897c52008-01-25 21:08:22 +01001738 .pre_schedule = pre_schedule_rt,
1739 .post_schedule = post_schedule_rt,
Peter Zijlstraefbbd052009-12-16 18:04:40 +01001740 .task_woken = task_woken_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01001741 .switched_from = switched_from_rt,
Peter Williams681f3e62007-10-24 18:23:51 +02001742#endif
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001743
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001744 .set_curr_task = set_curr_task_rt,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001745 .task_tick = task_tick_rt,
Steven Rostedtcb469842008-01-25 21:08:22 +01001746
Peter Williams0d721ce2009-09-21 01:31:53 +00001747 .get_rr_interval = get_rr_interval_rt,
1748
Steven Rostedtcb469842008-01-25 21:08:22 +01001749 .prio_changed = prio_changed_rt,
1750 .switched_to = switched_to_rt,
Ingo Molnarbb44e5d2007-07-09 18:51:58 +02001751};
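
/*
 * For reference, a minimal userspace sketch (not part of this file) of
 * how a task ends up in this class: selecting SCHED_FIFO or SCHED_RR
 * via sched_setscheduler() routes it through rt_sched_class above.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *	if (sched_setscheduler(0, SCHED_FIFO, &param))
 *		perror("sched_setscheduler");
 *
 * Passing 0 targets the calling thread; the priority of 50 is an
 * arbitrary choice, valid RT priorities being 1-99.
 */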
Peter Zijlstraada18de2008-06-19 14:22:24 +02001752
1753#ifdef CONFIG_SCHED_DEBUG
1754extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1755
1756static void print_rt_stats(struct seq_file *m, int cpu)
1757{
1758 struct rt_rq *rt_rq;
1759
1760 rcu_read_lock();
1761 for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1762 print_rt_rq(m, cpu, rt_rq);
1763 rcu_read_unlock();
1764}
Dhaval Giani55e12e52008-06-24 23:39:43 +05301765#endif /* CONFIG_SCHED_DEBUG */
Rusty Russell0e3900e2008-11-25 02:35:13 +10301766