#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len,
					  sched_domain_span(sd));
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		preempt_enable();
#endif
	}
	kfree(mask_str);
	return 0;
}
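
/*
 * For reference, a read of /proc/schedstat produced by the function above
 * looks roughly like this (the numbers are purely illustrative, and the
 * domain lines appear only on CONFIG_SMP kernels):
 *
 *	version 15
 *	timestamp 4294891924
 *	cpu0 1 0 5731 1337 4221 3125 46893951 21892897 5731
 *	domain0 003 8 8 0 0 0 0 0 8 ...
 *
 * Per the version-15 format strings: each cpu line carries yld_count,
 * sched_switch, sched_count, sched_goidle, ttwu_count, ttwu_local,
 * rq_cpu_time, and the run_delay/pcount pair from rq_sched_info; each
 * domain line starts with the domain's cpumask, then one block of eight
 * lb_* counters per idle type, then the twelve alb/sbe/sbf/ttwu counters.
 */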

static int schedstat_open(struct inode *inode, struct file *file)
{
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}
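
/*
 * Note: left to itself, seq_read() starts with a single page of buffer and
 * retries with a doubled one whenever show_schedstat() overflows it.
 * Pre-sizing m->buf from num_online_cpus() above appears intended to let
 * large machines dump the whole report in one pass instead of going
 * through that grow-and-retry cycle.
 */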

static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
module_init(proc_schedstat_init);

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
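
/*
 * A sketch of typical usage from scheduler code (the actual call sites
 * live in sched.c and the per-class files):
 *
 *	schedstat_inc(rq, yld_count);			// count a sched_yield()
 *	schedstat_add(sd, lb_gained[idle], pulled);	// tasks load-balanced away
 *	schedstat_set(se->statistics.wait_max, delta);	// record a new maximum
 *
 * All three compile away to nothing when CONFIG_SCHEDSTATS is off (see the
 * #else branch below), so call sites need no #ifdefs of their own.
 */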
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}
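
/*
 * last_queued == 0 doubles as the "not currently queued" flag, which is
 * why sched_info_queued() further down only stamps the field when it is
 * zero, and why the helpers below reset it to zero once its delta has
 * been consumed.
 */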

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu.  We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus: taking the delta on each cpu annuls the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
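
/*
 * Lifecycle sketch for one task T, following the helpers above:
 *
 *	wakeup:		sched_info_queued(T)	last_queued = rq->clock
 *	switched in:	sched_info_arrive(T)	run_delay += now - last_queued,
 *						last_arrival = now, pcount++
 *	switched out:	sched_info_depart(T)	rq_cpu_time += now - last_arrival;
 *						if still TASK_RUNNING, re-queued
 *
 * So run_delay accumulates time spent runnable-but-not-running, while
 * pcount counts how many times the task actually got the cpu.
 */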
#else
#define sched_info_queued(t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.utime =
		cputime_add(cputimer->cputime.utime, cputime);
	spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.stime =
		cputime_add(cputimer->cputime.stime, cputime);
	spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	spin_unlock(&cputimer->lock);
}
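
/*
 * Usage sketch for the three helpers above (the real call sites live in
 * the cputime accounting paths, e.g. account_user_time() in sched.c and
 * update_curr() in sched_fair.c):
 *
 *	account_user_time(p, cputime, cputime_scaled)
 *		-> p->utime = cputime_add(p->utime, cputime);
 *		-> account_group_user_time(p, cputime);
 *
 * The cputimer->running test in each helper lets the common case (no
 * POSIX CPU timers armed on the thread group) bail out without ever
 * taking the lock.
 */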