/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
}

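/*
 * Enqueue a task into its priority queue: append it to the tail of the
 * list for p->prio and mark that priority as populated in the bitmap.
 */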
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
}

/*
 * Put the task at the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}

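/*
 * Yielding for an RT task is simply a requeue to the tail of its own
 * priority queue, so the task only gives way to runnable tasks of the
 * same priority.
 */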
static void
yield_task_rt(struct rq *rq, struct task_struct *p)
{
        requeue_task_rt(rq, p);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
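/*
 * Note that a lower ->prio value means higher priority, so the woken
 * task preempts curr only when it is strictly higher priority.
 */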
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

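/*
 * Pick the next RT task in O(1): sched_find_first_bit() returns the
 * lowest set bit, i.e. the highest populated priority, and the task at
 * the head of that queue runs next. Stamp exec_start so update_curr_rt()
 * can account its runtime later.
 */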
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}

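/*
 * A task is being switched out: fold its runtime into the statistics and
 * clear exec_start, since it no longer has a running time span in progress.
 */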
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
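/*
 * The walk starts at the highest populated priority and visits each
 * queue from its tail towards its head; the cursor (index, list head,
 * next element) is parked in rq->rt between calls.
 */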
static struct task_struct *load_balance_start_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        head = array->queue + idx;
        curr = head->prev;

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_idx = idx;
        rq->rt.rt_load_balance_head = head;
        rq->rt.rt_load_balance_curr = curr;

        return p;
}

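/*
 * Continue from the saved cursor; once we wrap back around to the list
 * head, advance to the next populated priority (or stop if none is left).
 */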
static struct task_struct *load_balance_next_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = rq->rt.rt_load_balance_idx;
        head = rq->rt.rt_load_balance_head;
        curr = rq->rt.rt_load_balance_curr;

        /*
         * If we arrived back at the head again then
         * iterate to the next queue (if any):
         */
        if (unlikely(head == curr)) {
                int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

                if (next_idx >= MAX_RT_PRIO)
                        return NULL;

                idx = next_idx;
                head = array->queue + idx;
                curr = head->prev;

                rq->rt.rt_load_balance_idx = idx;
                rq->rt.rt_load_balance_head = head;
        }

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_curr = curr;

        return p;
}

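/*
 * Pull RT tasks from 'busiest' onto this runqueue by handing the
 * iterators above to the generic balance_tasks() helper; the return
 * value is the amount of weighted load that was actually moved.
 */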
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                        unsigned long max_nr_move, unsigned long max_load_move,
                        struct sched_domain *sd, enum cpu_idle_type idle,
                        int *all_pinned, int *this_best_prio)
{
        int nr_moved;
        struct rq_iterator rt_rq_iterator;
        unsigned long load_moved;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        /* pass 'busiest' rq argument into
         * load_balance_[start|next]_rt iterators
         */
        rt_rq_iterator.arg = busiest;

        nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
                        max_load_move, sd, idle, all_pinned, &load_moved,
                        this_best_prio, &rt_rq_iterator);

        return load_moved;
}

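/*
 * Per-tick bookkeeping: burn one tick off a SCHED_RR task's timeslice
 * and, once it is used up, refill it via static_prio_timeslice() and
 * round-robin the task behind its priority peers.
 */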
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = static_prio_timeslice(p->static_prio);

        /*
         * Requeue to the end of the queue if we are not the only element
         * on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

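/*
 * The method table through which the scheduler core drives SCHED_FIFO
 * and SCHED_RR tasks:
 */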
static struct sched_class rt_sched_class __read_mostly = {
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

        .load_balance           = load_balance_rt,

        .task_tick              = task_tick_rt,
};