/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 *  handled in sched_fair.c)
 */

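/*
 * The idle task is pinned to its CPU, so task placement always
 * returns the task's current CPU.
 */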
#ifdef CONFIG_SMP
static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_task(rq->idle);
}

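/*
 * The idle task is picked when nothing else is runnable.  The only
 * bookkeeping needed is the schedstat counter and the load-average
 * sampling, since the CPU may now be idle for a long time.
 */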
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
	schedstat_inc(rq, sched_goidle);
	/* adjust the active tasks as we might go into a long sleep */
	calc_load_account_active(rq);
	return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
{
	spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	spin_lock_irq(&rq->lock);
}

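/*
 * The idle task keeps no per-class runtime state, so there is nothing
 * to fold back when it is switched out.
 */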
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

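/*
 * The idle task is never on a runqueue and can never be pulled to
 * another CPU, so the load-balancing callbacks report no work moved.
 */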
#ifdef CONFIG_SMP
static unsigned long
load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	return 0;
}

static int
move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	return 0;
}
#endif

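/*
 * The idle task has no timeslice to account, so the scheduler tick
 * is a no-op for it.
 */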
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

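/*
 * Nothing to set up when the idle task becomes the current task.
 */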
static void set_curr_task_idle(struct rq *rq)
{
}

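/*
 * p has just been switched to the idle class.  If it is the running
 * task, force a reschedule; otherwise run the normal preemption check.
 */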
static void switched_to_idle(struct rq *rq, struct task_struct *p,
			     int running)
{
	/* Can this actually happen?? */
	if (running)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

static void prio_changed_idle(struct rq *rq, struct task_struct *p,
			      int oldprio, int running)
{
	/* This can happen for hot-plugged CPUs */

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (running) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

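/*
 * The idle class has no round-robin timeslice, so report an interval of 0.
 */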
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
static const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,

	.load_balance		= load_balance_idle,
	.move_one_task		= move_one_task_idle,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,

	/* no .task_new for idle tasks */
};