#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
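
/*
 * Usage sketch (illustrative; these call sites live elsewhere in the
 * scheduler, not in this header).  The macros expand to plain, unlocked
 * read-modify-write updates, so callers are expected to hold the
 * runqueue lock, e.g.
 *
 *	schedstat_inc(rq, yld_count);                      (sys_sched_yield)
 *	schedstat_add(sd, lb_imbalance[idle], imbalance);  (load_balance)
 *
 * With CONFIG_SCHEDSTATS=n, the stubs in the #else branch below make
 * such calls compile to nothing.
 */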
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}
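
/*
 * Illustration (hypothetical numbers): a task queued at rq->clock = 1000
 * and dequeued at rq->clock = 1600 contributes delta = 600 to both its
 * own run_delay and the runqueue's aggregate run_delay.  Since
 * sched_info_reset_dequeued() clears last_queued, a later re-queue
 * starts a fresh interval, so waits are never double-counted.
 */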

/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}
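
/*
 * Call-site sketch (illustrative): the scheduler core invokes this as
 * the task goes onto the runqueue, roughly
 *
 *	static void enqueue_task(struct rq *rq, struct task_struct *p, ...)
 *	{
 *		update_rq_clock(rq);
 *		sched_info_queued(p);
 *		p->sched_class->enqueue_task(rq, p, ...);
 *	}
 *
 * so last_queued is stamped with a freshly updated rq->clock.
 */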

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}
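
/*
 * Note (clarifying the comment above): a task that was merely preempted
 * is still TASK_RUNNING, so sched_info_queued() restarts its wait-time
 * clock right away; a task that blocked (e.g. TASK_INTERRUPTIBLE) is
 * not re-stamped until it is woken and enqueued again.
 */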

/*
 * Called when tasks are switched, typically involuntarily due to an
 * expiring time slice.  (This may also be called when switching to or
 * from the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}

static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
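
/*
 * Call-site sketch (illustrative): the context-switch path hands both
 * tasks to this bookkeeping, roughly via
 *
 *	prepare_task_switch(rq, prev, next)
 *		-> sched_info_switch(prev, next);
 *
 * with the runqueue lock held and only when prev != next, so the
 * depart/arrive pair above always describes a real CPU handover.
 */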
#else
#define sched_info_queued(t)			do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(t)			do { } while (0)
#define sched_info_switch(t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.utime += cputime;
	raw_spin_unlock(&cputimer->lock);
}
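
/*
 * Caller sketch (illustrative): the tick-time accounting path feeds
 * this helper, roughly
 *
 *	void account_user_time(struct task_struct *p, cputime_t cputime, ...)
 *	{
 *		...
 *		account_group_user_time(p, cputime);
 *	}
 *
 * The unlocked cputimer->running check keeps the common case (no POSIX
 * CPU timers armed for the group) down to a single load, skipping the
 * lock when nobody is consuming the totals.
 */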

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.stime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock(&cputimer->lock);
}
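
/*
 * Caller sketch (illustrative): the fair class charges execution time
 * here on every accounting pass in update_curr(), roughly
 *
 *	__update_curr(cfs_rq, curr, delta_exec);
 *	...
 *	account_group_exec_runtime(curtask, delta_exec);
 *
 * Unlike utime/stime above, @ns is in nanoseconds straight from the
 * scheduler clock rather than a cputime_t tick count, which is why the
 * parameter is an unsigned long long.
 */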