sched: Introduce task_times() to replace task_{u,s}time() pair
The functions task_utime() and task_stime() are almost always called as a
pair. However, task_stime() is implemented by calling task_utime()
internally, so such paired calls run task_utime() twice. That means the
heavy divisions (div_u64 + do_div) are performed twice to obtain utime and
stime, even though both values could be computed with a single set of
divisions.
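As a minimal sketch of the pre-patch call pattern (the caller and the
"task" pointer are illustrative, not part of this patch):

	cputime_t utime, stime;

	utime = task_utime(task);	/* first set of divisions */
	stime = task_stime(task);	/* calls task_utime() again:
					 * second set of divisions */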
This patch introduces task_times(*tsk, *utime, *stime), which retrieves
both utime and stime at once with a single, optimized set of divisions.
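With the new helper, the same caller obtains both values in one call
(sketch, assuming the same hypothetical task pointer; as the implementation
below shows, either output pointer may be NULL when only one value is
needed):

	cputime_t utime, stime;

	task_times(task, &utime, &stime);	/* one set of divisions */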
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Spencer Candland <spencer@bluehost.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Americo Wang <xiyou.wangcong@gmail.com>
LKML-Reference: <4B0E16AE.906@jp.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 315ba40..475a6f2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5191,6 +5191,14 @@
{
return p->stime;
}
+
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
+{
+ if (ut)
+ *ut = task_utime(p);
+ if (st)
+ *st = task_stime(p);
+}
#else
#ifndef nsecs_to_cputime
@@ -5198,41 +5206,48 @@
msecs_to_cputime(div_u64((__nsecs), NSEC_PER_MSEC))
#endif
-cputime_t task_utime(struct task_struct *p)
+void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
- cputime_t utime = p->utime, total = utime + p->stime;
- u64 temp;
+ cputime_t rtime, utime = p->utime, total = utime + p->stime;
/*
* Use CFS's precise accounting:
*/
- temp = (u64)nsecs_to_cputime(p->se.sum_exec_runtime);
+ rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
if (total) {
- temp *= utime;
- do_div(temp, total);
- }
- utime = (cputime_t)temp;
+ u64 temp;
+ temp = (u64)(rtime * utime);
+ do_div(temp, total);
+ utime = (cputime_t)temp;
+ } else
+ utime = rtime;
+
+ /*
+ * Compare with previous values, to keep monotonicity:
+ */
p->prev_utime = max(p->prev_utime, utime);
- return p->prev_utime;
+ p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
+
+ if (ut)
+ *ut = p->prev_utime;
+ if (st)
+ *st = p->prev_stime;
+}
+
+cputime_t task_utime(struct task_struct *p)
+{
+ cputime_t utime;
+ task_times(p, &utime, NULL);
+ return utime;
}
cputime_t task_stime(struct task_struct *p)
{
cputime_t stime;
-
- /*
- * Use CFS's precise accounting. (we subtract utime from
- * the total, to make sure the total observed by userspace
- * grows monotonically - apps rely on that):
- */
- stime = nsecs_to_cputime(p->se.sum_exec_runtime) - task_utime(p);
-
- if (stime >= 0)
- p->prev_stime = max(p->prev_stime, stime);
-
- return p->prev_stime;
+ task_times(p, NULL, &stime);
+ return stime;
}
#endif
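For reference, a stand-alone userspace sketch of the arithmetic the new
task_times() performs (values and helpers are assumed for illustration;
this is not kernel code): rtime is split between utime and stime in the
p->utime : p->stime ratio, and prev_utime/prev_stime are clamped so that
neither value ever decreases between readings.

	#include <stdio.h>

	int main(void)
	{
		/* assumed sample values, in ticks */
		unsigned long long utime = 6, stime = 2;	/* sampled tick counts */
		unsigned long long rtime = 10;			/* precise CFS runtime */
		unsigned long long total = utime + stime;
		unsigned long long prev_utime = 0, prev_stime = 0;

		/* scale utime by the precise runtime, as in task_times() */
		unsigned long long ut = total ? rtime * utime / total : rtime;

		if (ut > prev_utime)			/* max(prev_utime, ut) */
			prev_utime = ut;
		if (rtime - prev_utime > prev_stime)	/* max(prev_stime, rtime - prev_utime) */
			prev_stime = rtime - prev_utime;

		/* prints: utime=7 stime=3 */
		printf("utime=%llu stime=%llu\n", prev_utime, prev_stime);
		return 0;
	}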