/*
 * stop-task scheduling class.
 *
 * The stop task is the highest-priority task in the system; it preempts
 * everything and is preempted by nothing.
 *
 * See kernel/stop_machine.c
 */

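/*
 * How rq->stop gets populated (a sketch, assuming the stop_machine setup
 * path of this era of the tree): kernel/stop_machine.c creates the
 * per-CPU stopper thread and hands it to the scheduler via
 * sched_set_stop_task(), which stores it in rq->stop. This file only
 * supplies the class callbacks for that task.
 */
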
#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct rq *rq, struct task_struct *p,
		    int sd_flag, int flags)
{
	return task_cpu(p); /* stop tasks never migrate */
}
#endif /* CONFIG_SMP */

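/*
 * Returning task_cpu(p) above is safe because the stopper thread is
 * bound to its CPU when it is created (via kthread_bind() in
 * kernel/stop_machine.c, assuming the usual creation path), so there is
 * never another runqueue to choose.
 */
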
static void
check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* we're never preempted */
}

static struct task_struct *pick_next_task_stop(struct rq *rq)
{
	struct task_struct *stop = rq->stop;

	/* only hand back the stop task while it is actually runnable */
	if (stop && stop->se.on_rq)
		return stop;

	return NULL;
}

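/*
 * The on_rq check above matters because the stopper thread sleeps
 * whenever it has no work: only after stop_machine code queues work and
 * wakes it does se.on_rq become true, so most invocations of
 * pick_next_task_stop() return NULL. (A sketch of the flow; see the
 * stopper wait loop in kernel/stop_machine.c.)
 */
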
/*
 * The stop task needs no per-class runqueue bookkeeping, so enqueue and
 * dequeue are deliberate no-ops; pick_next_task_stop() keys off rq->stop
 * and se.on_rq instead.
 */
static void
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
}

static void
dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
}

static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield; it's pointless. */
}

static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
{
}

static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_stop(struct rq *rq)
{
}

static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it's impossible to change to this class */
}

static void
prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG(); /* how?! what priority? */
}

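/*
 * Why the two BUG()s above are expected to be unreachable (an assumption
 * based on sched_set_stop_task() in kernel/sched.c): tasks enter this
 * class by direct assignment of p->sched_class rather than through the
 * normal setscheduler path, so switched_to/prio_changed should never be
 * invoked for a stop task; stop work is also not supposed to rely on PI
 * boosting, the other path by which prio_changed could be reached.
 */
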
static unsigned int
get_rr_interval_stop(struct rq *rq, struct task_struct *task)
{
	return 0; /* stop tasks are never round-robin scheduled */
}

/*
 * Simple, special scheduling class for the per-CPU stop tasks:
 */
static const struct sched_class stop_sched_class = {
	.next			= &rt_sched_class,

	.enqueue_task		= enqueue_task_stop,
	.dequeue_task		= dequeue_task_stop,
	.yield_task		= yield_task_stop,

	.check_preempt_curr	= check_preempt_curr_stop,

	.pick_next_task		= pick_next_task_stop,
	.put_prev_task		= put_prev_task_stop,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_stop,
#endif

	.set_curr_task		= set_curr_task_stop,
	.task_tick		= task_tick_stop,

	.get_rr_interval	= get_rr_interval_stop,

	.prio_changed		= prio_changed_stop,
	.switched_to		= switched_to_stop,
};
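
/*
 * Class ordering (a sketch of how this era of the core scheduler walks
 * classes): the .next pointers chain stop -> rt -> fair -> idle, and
 * kernel/sched.c is assumed to define sched_class_highest as
 * &stop_sched_class, so the pick loop consults this class first and the
 * stop task always wins.
 */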