#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15
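
/*
 * For orientation, a sketch of what a version-15 /proc/schedstat dump
 * looks like, reconstructed from the seq_printf() formats below (the
 * numbers are made up for illustration):
 *
 *	version 15
 *	timestamp 4294892985
 *	cpu0 156 0 8317 4031 2593 1923 7537159318 2143639218 8100
 *	domain0 00000003 123 121 1 769 2 0 0 123 <...28 more fields...>
 *
 * Each cpu<N> line carries the nine per-runqueue counters printed in
 * show_schedstat(); each domain<N> line carries the cpu mask followed by
 * eight load-balance counters per idle type plus twelve further counters
 * (active balance, sched_balance_exec/fork, and try_to_wake_up stats).
 */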

static int show_schedstat(struct seq_file *seq, void *v)
{
        int cpu;
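        /* cpumask_scnprintf() needs 8 hex chars plus a ',' per 32-bit word */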
        int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
        char *mask_str = kmalloc(mask_len, GFP_KERNEL);

        if (mask_str == NULL)
                return -ENOMEM;

        seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
        seq_printf(seq, "timestamp %lu\n", jiffies);
        for_each_online_cpu(cpu) {
                struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
                struct sched_domain *sd;
                int dcount = 0;
#endif

                /* runqueue-specific stats */
                seq_printf(seq,
                    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
                    cpu, rq->yld_count,
                    rq->sched_switch, rq->sched_count, rq->sched_goidle,
                    rq->ttwu_count, rq->ttwu_local,
                    rq->rq_cpu_time,
                    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

                seq_printf(seq, "\n");

#ifdef CONFIG_SMP
                /* domain-specific stats */
                preempt_disable();
                for_each_domain(cpu, sd) {
                        enum cpu_idle_type itype;

                        cpumask_scnprintf(mask_str, mask_len,
                                          sched_domain_span(sd));
                        seq_printf(seq, "domain%d %s", dcount++, mask_str);
                        for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
                                        itype++) {
                                seq_printf(seq, " %u %u %u %u %u %u %u %u",
                                    sd->lb_count[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
                                    sd->lb_imbalance[itype],
                                    sd->lb_gained[itype],
                                    sd->lb_hot_gained[itype],
                                    sd->lb_nobusyq[itype],
                                    sd->lb_nobusyg[itype]);
                        }
                        seq_printf(seq,
                                   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
                            sd->alb_count, sd->alb_failed, sd->alb_pushed,
                            sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
                            sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
                            sd->ttwu_wake_remote, sd->ttwu_move_affine,
                            sd->ttwu_move_balance);
                }
                preempt_enable();
#endif
        }
        kfree(mask_str);
        return 0;
}

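/*
 * A note on the buffer sizing below (an added observation, not from the
 * original comments): single_open() starts with a single-page seq_file
 * buffer and, on overflow, frees it, doubles the size, and calls the
 * show method again.  Handing seq_file a buffer sized up front for the
 * number of online cpus keeps the common case down to one pass over
 * show_schedstat().
 */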
static int schedstat_open(struct inode *inode, struct file *file)
{
        unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
        char *buf = kmalloc(size, GFP_KERNEL);
        struct seq_file *m;
        int res;

        if (!buf)
                return -ENOMEM;
        res = single_open(file, show_schedstat, NULL);
        if (!res) {
                m = file->private_data;
                m->buf = buf;
                m->size = size;
        } else
                kfree(buf);
        return res;
}

static const struct file_operations proc_schedstat_operations = {
        .open    = schedstat_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init proc_schedstat_init(void)
{
        proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
        return 0;
}
module_init(proc_schedstat_init);

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}

# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
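
/*
 * Illustrative call patterns for the macros above (the real call sites
 * live in sched.c and sched_fair.c; shown here only as a usage sketch):
 *
 *	schedstat_inc(rq, yld_count);
 *	schedstat_add(sd, lb_imbalance[idle], imbalance);
 *	schedstat_set(se->wait_max, max(se->wait_max, delta));
 */
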
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * Called when a process is dequeued from the active array and given
 * the cpu.  We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue.  (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * Though we are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
        unsigned long long now = task_rq(t)->clock, delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(task_rq(t), delta);
}
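
/*
 * An added worked example of the skew argument above (illustrative
 * numbers): suppose a task is enqueued on cpu0 at rq0->clock == 1000 and
 * later pulled to cpu1 whose clock reads 50 higher.  Measuring the whole
 * wait against cpu1's clock would inflate it by 50.  Because load_balance()
 * dequeues from the source runqueue and re-enqueues on the destination,
 * each partial delta is taken against a single cpu's clock, so the skew
 * never enters the accumulated run_delay.
 */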

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
        unsigned long long now = task_rq(t)->clock, delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * Called when a process is queued into either the active or expired
 * array.  The time is noted and later used to determine how long the
 * task had to wait to reach the cpu.  Since the expired queue will
 * become the active queue after the active queue is empty, without
 * dequeuing and requeuing any tasks, we are interested in queuing to
 * either.  It is unusual but not impossible for tasks to be dequeued
 * and immediately requeued in the same or another array: this can happen
 * in sched_yield(), set_user_nice(), and even load_balance() as it moves
 * tasks from runqueue to runqueue.
 *
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
        unsigned long long delta = task_rq(t)->clock -
                                        t->sched_info.last_arrival;

        rq_sched_info_depart(task_rq(t), delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(t);
}

/*
 * Called when tasks are switched, typically involuntarily due to an
 * expiring time slice.  (This may also be called when switching to or
 * from the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        struct rq *rq = task_rq(prev);

        /*
         * prev now departs the cpu.  It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(prev);

        if (next != rq->idle)
                sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(prev, next);
}
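
/*
 * For orientation (an added note): sched_info_switch() is the hook used
 * by the core scheduler; it is called from prepare_task_switch() in
 * sched.c during context_switch(), always with prev != next, which is
 * what makes the depart/arrive pairing in __sched_info_switch() valid.
 */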
#else
#define sched_info_queued(t)			do { } while (0)
#define sched_info_reset_dequeued(t)		do { } while (0)
#define sched_info_dequeued(t)			do { } while (0)
#define sched_info_switch(t, next)		do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */
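
/*
 * (An added pointer for orientation: these helpers are expected to be
 * invoked from the tick-time accounting paths, e.g. account_user_time()
 * and account_system_time() in kernel/sched.c, and from the code that
 * accounts sum_exec_runtime, such as update_curr() in sched_fair.c.)
 */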

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
{
        struct thread_group_cputimer *cputimer;

        /* tsk == current, ensure it is safe to use ->signal */
        if (unlikely(tsk->exit_state))
                return;

        cputimer = &tsk->signal->cputimer;

        if (!cputimer->running)
                return;

        spin_lock(&cputimer->lock);
        cputimer->cputime.utime =
                cputime_add(cputimer->cputime.utime, cputime);
        spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
{
        struct thread_group_cputimer *cputimer;

        /* tsk == current, ensure it is safe to use ->signal */
        if (unlikely(tsk->exit_state))
                return;

        cputimer = &tsk->signal->cputimer;

        if (!cputimer->running)
                return;

        spin_lock(&cputimer->lock);
        cputimer->cputime.stime =
                cputime_add(cputimer->cputime.stime, cputime);
        spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer;
        struct signal_struct *sig;

        sig = tsk->signal;
        /* see __exit_signal()->task_rq_unlock_wait() */
        barrier();
        if (unlikely(!sig))
                return;

        cputimer = &sig->cputimer;

        if (!cputimer->running)
                return;

        spin_lock(&cputimer->lock);
        cputimer->cputime.sum_exec_runtime += ns;
        spin_unlock(&cputimer->lock);
}